Removed global using namespace from Plugin API (#3451)

Ilya Lavrenov 2020-12-03 17:52:55 +03:00 committed by GitHub
parent f2c2636bb5
commit 2d75d8aff2
19 changed files with 82 additions and 86 deletions
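
The change is mechanical but repetitive: every plugin source that previously relied on a file-wide using-directive now spells out the InferenceEngine namespace at each use site. A minimal hedged sketch of the pattern (FooPlugin and its body are illustrative, not taken from this commit):

#include <string>
#include <ie_parameter.hpp>

// Before this commit, a plugin source could start with
//     using namespace InferenceEngine;
// and refer to Parameter, ExecutorManager, etc. unqualified.
// After it, the namespace is written out at each use site.
struct FooPlugin {
    InferenceEngine::Parameter GetConfig(const std::string& name) const {
        return InferenceEngine::Parameter{name};  // illustrative body only
    }
};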

View File

@ -25,7 +25,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const
try {
CompileNetwork(function);
InitExecutor(); // creates a thread-based executor used for async requests
} catch (const InferenceEngineException&) {
} catch (const InferenceEngine::details::InferenceEngineException&) {
throw;
} catch (const std::exception & e) {
THROW_IE_EXCEPTION << "Standard exception from compilation library: " << e.what();
@ -83,9 +83,9 @@ void TemplatePlugin::ExecutableNetwork::InitExecutor() {
// it is better to avoid thread recreation, as some OS memory allocators cannot manage such usage cases
// and memory consumption can be larger than expected.
// So Inference Engine provides an executors cache.
_taskExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
_taskExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
// NOTE: callback Executor is not configured. So callback will be called in the thread of the last stage of inference request pipeline
// _callbackExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"});
// _callbackExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"});
}
// ! [executable_network:init_executor]
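
The comments in the hunk above describe why executors come from a shared cache: recreating threads per network wastes memory on some OS allocators, so a plugin borrows a named streams executor and releases it on shutdown. A minimal hedged sketch of that acquire/release pattern with the now fully qualified names (the executor name "MyStreamsExecutor" and the wrapper function are illustrative; header paths follow the Inference Engine plugin API):

#include <threading/ie_executor_manager.hpp>
#include <threading/ie_istreams_executor.hpp>

// Hedged sketch, not part of the commit: borrow a cached CPU streams executor by name
// and drop it again so the application's thread count does not keep growing.
void useCachedExecutor() {
    InferenceEngine::IStreamsExecutor::Config config{"MyStreamsExecutor"};
    auto executor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(config);
    // ... schedule asynchronous pipeline stages on `executor` ...
    InferenceEngine::ExecutorManager::getInstance()->clear("MyStreamsExecutor");
}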
@ -98,8 +98,8 @@ InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::Cr
// ! [executable_network:create_infer_request_impl]
// ! [executable_network:create_infer_request]
IInferRequest::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
IInferRequest::Ptr asyncRequest;
InferenceEngine::IInferRequest::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
InferenceEngine::IInferRequest::Ptr asyncRequest;
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
auto asyncThreadSafeImpl = std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor, _plugin->_waitExecutor, _callbackExecutor);
@ -111,7 +111,7 @@ IInferRequest::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
// ! [executable_network:create_infer_request]
// ! [executable_network:get_config]
Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const {
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const {
return _cfg.Get(name);
}
// ! [executable_network:get_config]
@ -130,7 +130,7 @@ InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const st
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) };
auto streamExecutorConfigKeys = IStreamsExecutor::Config{}.SupportedKeys();
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
configKeys.emplace_back(configKey);
}

View File

@ -33,15 +33,15 @@ Plugin::Plugin() {
_backend = ngraph::runtime::Backend::create("INTERPRETER");
// create default stream executor with a given name
_waitExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"});
_waitExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"});
}
// ! [plugin:ctor]
// ! [plugin:dtor]
Plugin::~Plugin() {
// Plugin should remove executors from the executor cache to avoid thread count growth in the whole application
ExecutorManager::getInstance()->clear("TemplateStreamsExecutor");
ExecutorManager::getInstance()->clear("TemplateWaitExecutor");
InferenceEngine::ExecutorManager::getInstance()->clear("TemplateStreamsExecutor");
InferenceEngine::ExecutorManager::getInstance()->clear("TemplateWaitExecutor");
// NOTE: Uncomment this if Inference Engine Executor cache is used to create callback executor
// ExecutorManager::getInstance()->clear("TemplateCallbackExecutor");
}
@ -91,8 +91,8 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const
for (auto networkOutput : networkOutputs) {
auto output_precision = networkOutput.second->getPrecision();
if (output_precision != Precision::FP32 &&
output_precision != Precision::FP16) {
if (output_precision != InferenceEngine::Precision::FP32 &&
output_precision != InferenceEngine::Precision::FP16) {
THROW_IE_EXCEPTION << "Template device supports only FP16 and FP32 output precision.";
}
}
@ -135,8 +135,8 @@ InferenceEngine::ExecutableNetwork Plugin::ImportNetworkImpl(std::istream& model
// ! [plugin:import_network_impl]
// ! [plugin:query_network]
QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork &network, const ConfigMap& config) const {
QueryNetworkResult res;
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork &network, const ConfigMap& config) const {
InferenceEngine::QueryNetworkResult res;
Configuration cfg{config, _cfg, false};
auto function = network.getFunction();
@ -163,7 +163,7 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork &network, const ConfigM
for (auto&& fusedLayerName : ngraph::getFusedNamesVector(node)) {
// Filter just nodes from original operation set
// TODO: fill with actual decision rules based on whether kernel is supported by backend
if (contains(originalOps, fusedLayerName)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (opset.contains_type_insensitive(fusedLayerName)) {
supported.emplace(fusedLayerName);
} else {
@ -175,7 +175,7 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork &network, const ConfigM
// 4. The result set should contain just nodes from the supported set
for (auto&& layerName : supported) {
if (!contains(unsupported, layerName)) {
if (!InferenceEngine::details::contains(unsupported, layerName)) {
res.supportedLayersMap.emplace(layerName, GetName());
}
}
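
The two loops above implement simple bookkeeping: keep fused layer names that belong to the original operation set, then report only those that never landed in the unsupported set. A stand-alone hedged sketch of that final filtering step, using std::set membership in place of the InferenceEngine::details::contains helper (the function and device name are illustrative):

#include <map>
#include <set>
#include <string>

// Hedged sketch of the QueryNetwork result filtering: a layer is reported for this
// device only if it was marked supported and never marked unsupported.
std::map<std::string, std::string> buildSupportedLayersMap(const std::set<std::string>& supported,
                                                           const std::set<std::string>& unsupported,
                                                           const std::string& deviceName) {
    std::map<std::string, std::string> supportedLayersMap;
    for (const auto& layerName : supported) {
        if (unsupported.count(layerName) == 0) {  // stands in for !InferenceEngine::details::contains(...)
            supportedLayersMap.emplace(layerName, deviceName);
        }
    }
    return supportedLayersMap;
}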
@ -219,7 +219,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = IStreamsExecutor::Config{}.SupportedKeys();
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
configKeys.emplace_back(configKey);
@ -248,6 +248,6 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
// ! [plugin:get_metric]
// ! [plugin:create_plugin_engine]
static const Version version = {{2, 1}, CI_BUILD_NUMBER, "templatePlugin"};
static const InferenceEngine::Version version = {{2, 1}, CI_BUILD_NUMBER, "templatePlugin"};
IE_DEFINE_PLUGIN_CREATE_FUNCTION(Plugin, version)
// ! [plugin:create_plugin_engine]

View File

@ -10,8 +10,6 @@
#include "backend.hpp"
#include "backend.hpp"
//! [plugin:header]
namespace TemplatePlugin {

View File

@ -452,8 +452,8 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
std::vector<std::shared_ptr<ngraph::Node>> concats;
std::vector<std::shared_ptr<ngraph::Node>> nextLayerDependent;
for (CNNNetworkIterator itLayer{clonedNetwork.get()};
itLayer != CNNNetworkIterator();
for (InferenceEngine::details::CNNNetworkIterator itLayer{clonedNetwork.get()};
itLayer != InferenceEngine::details::CNNNetworkIterator();
itLayer++) {
auto layerIsSupported = [&] {
auto node = (*itLayer)->getNode();
@ -490,7 +490,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
continue;
}
for (auto&& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
if (contains(originalOps, fusedLayerName)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (layerIsSupported) {
supported.emplace(fusedLayerName);
} else {
@ -501,7 +501,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
}
for (auto&& layerName : supported) {
if (contains(unsupported, layerName)) {
if (InferenceEngine::details::contains(unsupported, layerName)) {
supported.erase(layerName);
}
}
@ -512,10 +512,10 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
const auto outputs = split->outputs();
for (const auto& output : outputs) {
const auto& name = output.get_node()->get_friendly_name();
if (!contains(supported, name) &&
!contains(depLayerNames, name) &&
!contains(concatNames, name) &&
!contains(splitNames, name)) {
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(depLayerNames, name) &&
!InferenceEngine::details::contains(concatNames, name) &&
!InferenceEngine::details::contains(splitNames, name)) {
is_supported = false;
break;
}
@ -530,9 +530,9 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
const auto inputs = concat->inputs();
for (const auto& input : inputs) {
const auto& name = input.get_node()->get_friendly_name();
if (!contains(supported, name) &&
!contains(depLayerNames, name) &&
!contains(concatNames, name)) {
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(depLayerNames, name) &&
!InferenceEngine::details::contains(concatNames, name)) {
is_supported = false;
break;
}
@ -548,7 +548,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
const auto inputs = cnl->inputs();
for (const auto& input : inputs) {
const auto& name = input.get_node()->get_friendly_name();
if (!contains(supported, name)) {
if (!InferenceEngine::details::contains(supported, name)) {
is_supported = false;
break;
}
@ -556,7 +556,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
const auto outputs = cnl->outputs();
for (const auto& output : outputs) {
const auto& name = output.get_node()->get_friendly_name();
if (!contains(supported, name)) {
if (!InferenceEngine::details::contains(supported, name)) {
is_supported = false;
break;
}
@ -567,7 +567,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
}
for (auto&& node : function->get_ops()) {
if (contains(supported, node->get_friendly_name())) {
if (InferenceEngine::details::contains(supported, node->get_friendly_name())) {
for (auto&& inputNodeOutput : node->input_values()) {
if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
supported.emplace(inputNodeOutput.get_node()->get_friendly_name());

View File

@ -55,8 +55,8 @@ public:
return make_executable_network(std::make_shared<GNAExecutableNetwork>(modelFileName, plg));
}
ExecutableNetwork ImportNetwork(std::istream& networkModel,
const std::map<std::string, std::string>& config) override {
InferenceEngine::ExecutableNetwork ImportNetwork(std::istream& networkModel,
const std::map<std::string, std::string>& config) override {
Config updated_config(defaultConfig);
updated_config.UpdateFromMap(config);
auto plg = std::make_shared<GNAPlugin>(updated_config.key_config_map);

View File

@ -37,9 +37,9 @@ HeteroInferRequest::HeteroInferRequest(InferenceEngine::InputsDataMap networkInp
std::tie(itBlob, emplaced) = _blobs.emplace(intermediateBlobName, Blob::Ptr{});
if (emplaced) {
itBlob->second = r->GetBlob(blobName);
if (contains(networkInputs, blobName)) {
if (InferenceEngine::details::contains(networkInputs, blobName)) {
_inputs[blobName] = itBlob->second;
} else if (contains(networkOutputs, blobName)) {
} else if (InferenceEngine::details::contains(networkOutputs, blobName)) {
_outputs[blobName] = itBlob->second;
}
} else {

View File

@ -65,7 +65,7 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const
return std::make_shared<HeteroExecutableNetwork>(network, mergeConfigs(_config, config), this);
}
ExecutableNetwork Engine::ImportNetworkImpl(std::istream& heteroModel, const Configs& config) {
InferenceEngine::ExecutableNetwork Engine::ImportNetworkImpl(std::istream& heteroModel, const Configs& config) {
if (GetCore() == nullptr) {
THROW_IE_EXCEPTION << "Please, work with HETERO device via InferencEngine::Core object";
}

View File

@ -37,7 +37,7 @@ public:
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string,
InferenceEngine::Parameter> & options) const override;
ExecutableNetwork ImportNetworkImpl(std::istream& heteroModel, const Configs& config) override;
InferenceEngine::ExecutableNetwork ImportNetworkImpl(std::istream& heteroModel, const Configs& config) override;
DeviceMetaInformationMap GetDevicePlugins(const std::string& targetFallback,
const Configs & localConfig) const;

View File

@ -145,14 +145,14 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
if (cfg.exclusiveAsyncRequests) {
// special case when all InferRequests are muxed into a single queue
_taskExecutor = ExecutorManager::getInstance()->getExecutor("CPU");
_taskExecutor = InferenceEngine::ExecutorManager::getInstance()->getExecutor("CPU");
} else {
auto streamsExecutorConfig = InferenceEngine::IStreamsExecutor::Config::MakeDefaultMultiThreaded(_cfg.streamExecutorConfig);
streamsExecutorConfig._name = "CPUStreamsExecutor";
_taskExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
_taskExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
}
if (0 != cfg.streamExecutorConfig._streams) {
_callbackExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(
_callbackExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(
IStreamsExecutor::Config{"CPUCallbackExecutor", 1, 0, IStreamsExecutor::ThreadBindingType::NONE});
} else {
_callbackExecutor = _taskExecutor;

View File

@ -259,7 +259,9 @@ static void Transformation(ICNNNetwork::Ptr& clonedNetwork, const Config& conf)
// WA: after conversion to CNNNetwork user precision can redefine input/output precisions
// so we need to apply additional precision conversion but only for inputs and outputs
for (auto & precision : convert_precision_list) {
NetPass::ConvertIOPrecision(*clonedNetwork, convertPrecision(precision.first), convertPrecision(precision.second));
NetPass::ConvertIOPrecision(*clonedNetwork,
InferenceEngine::details::convertPrecision(precision.first),
InferenceEngine::details::convertPrecision(precision.second));
}
}
@ -450,7 +452,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
return true;
} ();
for (auto&& fusedLayerName : ngraph::getFusedNamesVector((*itLayer)->getNode())) {
if (contains(originalOps, fusedLayerName)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (layerIsSupported) {
supported.emplace(fusedLayerName);
} else {
@ -461,7 +463,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
}
for (auto&& node : function->get_ops()) {
if (!contains(unsupported, node->get_friendly_name())) {
if (!InferenceEngine::details::contains(unsupported, node->get_friendly_name())) {
for (auto&& inputNodeOutput : node->input_values()) {
if (ngraph::op::is_constant(inputNodeOutput.get_node())) {
supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
@ -478,7 +480,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
}
for (auto&& layerName : supported) {
if (!contains(unsupported, layerName)) {
if (!InferenceEngine::details::contains(unsupported, layerName)) {
res.supportedLayersMap.emplace(layerName, GetName());
}
}

View File

@ -99,8 +99,8 @@ std::vector<DeviceInformation> MultiDeviceInferencePlugin::ParseMetaDevices(cons
return metaDevices;
}
Parameter MultiDeviceInferencePlugin::GetConfig(const std::string& name,
const std::map<std::string, Parameter> & options) const {
InferenceEngine::Parameter MultiDeviceInferencePlugin::GetConfig(const std::string& name,
const std::map<std::string, InferenceEngine::Parameter> & options) const {
if (name == MULTI_CONFIG_KEY(DEVICE_PRIORITIES)) {
auto it = _config.find(MULTI_CONFIG_KEY(DEVICE_PRIORITIES));
if (it == _config.end()) {
@ -219,7 +219,7 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork&
}
supportedLayers = supportedLayers.empty()
? deviceSupportedLayers : (deviceSupportedLayers.empty()
? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
? supportedLayers : InferenceEngine::details::Intersection(supportedLayers, deviceSupportedLayers));
}
for (auto&& supportedLayer : supportedLayers) {
queryResult.supportedLayersMap[supportedLayer] = GetName();

View File

@ -24,7 +24,7 @@ public:
const std::map<std::string, std::string>& config) override;
void SetConfig(const std::map<std::string, std::string>& config) override;
Parameter GetConfig(const std::string& name, const std::map<std::string, Parameter> & options) const override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) const override;
InferenceEngine::Parameter GetMetric(const std::string& name,

View File

@ -21,10 +21,6 @@
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "cpp_interfaces/plugin_itt.hpp"
using namespace InferenceEngine;
using namespace InferenceEngine::details;
namespace InferenceEngine {
namespace {

View File

@ -280,16 +280,16 @@ public:
* @brief Defines the exported `CreatePluginEngine` function which is used to create a plugin instance
* @ingroup ie_dev_api_plugin_api
*/
#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...) \
INFERENCE_PLUGIN_API(InferenceEngine::StatusCode) CreatePluginEngine( \
InferenceEngine::IInferencePlugin *&plugin, \
InferenceEngine::ResponseDesc *resp) noexcept { \
try { \
plugin = new PluginType(__VA_ARGS__); \
plugin->SetVersion(version); \
return OK; \
} \
catch (std::exception &ex) { \
return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what(); \
} \
#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...) \
INFERENCE_PLUGIN_API(InferenceEngine::StatusCode) CreatePluginEngine( \
InferenceEngine::IInferencePlugin *&plugin, \
InferenceEngine::ResponseDesc *resp) noexcept { \
try { \
plugin = new PluginType(__VA_ARGS__); \
plugin->SetVersion(version); \
return InferenceEngine::OK; \
} \
catch (std::exception &ex) { \
return InferenceEngine::DescriptionBuffer(InferenceEngine::GENERAL_ERROR, resp) << ex.what(); \
} \
}
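
With the using-directives gone, the macro body itself must qualify OK and GENERAL_ERROR, which is what the updated definition above does. A plugin still invokes the macro the same way; a hedged sketch mirroring the Template plugin hunk earlier in this commit (the "samplePlugin" name is illustrative):

// In the plugin's source file: the version descriptor is now fully qualified too.
static const InferenceEngine::Version version = {{2, 1}, CI_BUILD_NUMBER, "samplePlugin"};
IE_DEFINE_PLUGIN_CREATE_FUNCTION(Plugin, version)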

View File

@ -93,7 +93,7 @@ static Set Intersection(const Set& lhs, const Set& rhs) {
const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs;
const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs;
for (auto&& val : minSizeSet) {
if (contains(maxSizeSet, val)) {
if (InferenceEngine::details::contains(maxSizeSet, val)) {
result.insert(val);
}
}
@ -112,7 +112,7 @@ static bool Intersects(const Set& lhs, const Set& rhs) {
const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs;
const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs;
for (auto&& val : minSizeSet) {
if (contains(maxSizeSet, val)) {
if (InferenceEngine::details::contains(maxSizeSet, val)) {
return true;
}
}
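
Intersection and Intersects only change here by spelling out the contains helper's namespace; they compute the overlap of two sets by iterating the smaller one. A hedged usage sketch (layer names are illustrative, and it assumes the static Intersection template above is in scope):

#include <set>
#include <string>

void narrowSupportedLayers() {
    std::set<std::string> cpuLayers{"conv1", "relu1", "pool1"};
    std::set<std::string> gpuLayers{"conv1", "pool1"};
    // Keeps only the names present in both sets: {"conv1", "pool1"}
    auto common = Intersection(cpuLayers, gpuLayers);
    (void)common;  // silence unused-variable warnings in this sketch
}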

View File

@ -100,7 +100,7 @@ QueryNetworkResult Engine::QueryNetwork(
ngraph::NodeVector splits;
ngraph::NodeVector concats;
const auto isLayerSupported = [this, &splitNames, &concatNames, &concats, &splits](CNNNetworkIterator& layer) -> bool {
const auto isLayerSupported = [this, &splitNames, &concatNames, &concats, &splits](InferenceEngine::details::CNNNetworkIterator& layer) -> bool {
auto node = (*layer)->getNode();
if (std::dynamic_pointer_cast<const ::ngraph::opset3::Split>(node) != nullptr) {
splitNames.emplace(node->get_friendly_name());
@ -117,8 +117,8 @@ QueryNetworkResult Engine::QueryNetwork(
}
};
for (CNNNetworkIterator itLayer{convertedNetwork.get()};
itLayer != CNNNetworkIterator();
for (InferenceEngine::details::CNNNetworkIterator itLayer{convertedNetwork.get()};
itLayer != InferenceEngine::details::CNNNetworkIterator();
itLayer++) {
const auto fusedNode = (*itLayer)->getNode();
if (fusedNode == nullptr) {
@ -126,7 +126,7 @@ QueryNetworkResult Engine::QueryNetwork(
}
for (auto& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
if (contains(originalOps, fusedLayerName)) {
if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
if (isLayerSupported(itLayer)) {
supported.emplace(fusedLayerName);
} else {
@ -137,7 +137,7 @@ QueryNetworkResult Engine::QueryNetwork(
}
for (const auto& layerName : supported) {
if (contains(unsupported, layerName)) {
if (InferenceEngine::details::contains(unsupported, layerName)) {
supported.erase(layerName);
}
}
@ -149,13 +149,13 @@ QueryNetworkResult Engine::QueryNetwork(
const auto inputs = split->inputs();
for (const auto& input : inputs) {
const auto& parentName = input.get_source_output().get_node()->get_friendly_name();
if (contains(supported, parentName) &&
contains(splitNames, parentName)) {
if (InferenceEngine::details::contains(supported, parentName) &&
InferenceEngine::details::contains(splitNames, parentName)) {
markParentSplitAsUnsupported(input.get_source_output().get_node_shared_ptr());
}
}
const auto& name = split->get_friendly_name();
if (contains(supported, name)) {
if (InferenceEngine::details::contains(supported, name)) {
supported.erase(name);
}
};
@ -167,9 +167,9 @@ QueryNetworkResult Engine::QueryNetwork(
for (const auto& output : outputs) {
for (const auto& consumer : output.get_target_inputs()) {
const auto& name = consumer.get_node()->get_friendly_name();
if (!contains(supported, name) &&
!contains(concatNames, name) &&
!contains(splitNames, name)) {
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(concatNames, name) &&
!InferenceEngine::details::contains(splitNames, name)) {
is_supported = false;
break;
}
@ -189,8 +189,8 @@ QueryNetworkResult Engine::QueryNetwork(
const auto inputs = concat->inputs();
for (const auto& input : inputs) {
const auto& name = input.get_source_output().get_node()->get_friendly_name();
if (!contains(supported, name) &&
!contains(concatNames, name)) {
if (!InferenceEngine::details::contains(supported, name) &&
!InferenceEngine::details::contains(concatNames, name)) {
is_supported = false;
break;
}
@ -201,7 +201,7 @@ QueryNetworkResult Engine::QueryNetwork(
}
for (const auto& node : function->get_ops()) {
if (contains(supported, node->get_friendly_name())) {
if (InferenceEngine::details::contains(supported, node->get_friendly_name())) {
for (const auto& inputNodeOutput : node->input_values()) {
if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
supported.emplace(inputNodeOutput.get_node()->get_friendly_name());

View File

@ -32,7 +32,7 @@ MockPlugin::LoadNetwork(const CNNNetwork &network,
}
}
ExecutableNetworkInternal::Ptr
InferenceEngine::ExecutableNetworkInternal::Ptr
MockPlugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) {
return {};

View File

@ -20,7 +20,7 @@ public:
InferenceEngine::ExecutableNetwork
LoadNetwork(const InferenceEngine::CNNNetwork &network,
const std::map<std::string, std::string> &config) override;
ExecutableNetworkInternal::Ptr
InferenceEngine::ExecutableNetworkInternal::Ptr
LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) override;

View File

@ -100,7 +100,7 @@ TEST(UtilTests, cloneLayers) {
namespace {
IE::CNNLayerPtr getLayer(const IE::details::CNNNetworkImplPtr n,
const char* name) {
if (contains(n->allLayers(), name)) {
if (InferenceEngine::details::contains(n->allLayers(), name)) {
return n->allLayers().find(name)->second;
}
return nullptr;