diff --git a/docs/template_plugin/src/template_executable_network.cpp b/docs/template_plugin/src/template_executable_network.cpp
index 19a1d7b4404..d88e4284460 100644
--- a/docs/template_plugin/src/template_executable_network.cpp
+++ b/docs/template_plugin/src/template_executable_network.cpp
@@ -25,7 +25,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
-    _taskExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
+    _taskExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
     // NOTE: callback Executor is not configured. So callback will be called in the thread of the last stage of inference request pipeline
-    // _callbackExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"});
+    // _callbackExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateCallbackExecutor"});
 }
 // ! [executable_network:init_executor]
@@ -98,8 +98,8 @@ InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(
 // ! [executable_network:create_infer_request_impl]
 
 // ! [executable_network:create_infer_request]
-IInferRequest::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
-    IInferRequest::Ptr asyncRequest;
+InferenceEngine::IInferRequest::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
+    InferenceEngine::IInferRequest::Ptr asyncRequest;
     auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
     auto asyncThreadSafeImpl = std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
                                                                            _taskExecutor, _plugin->_waitExecutor, _callbackExecutor);
@@ -111,7 +111,7 @@ IInferRequest::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
 // ! [executable_network:create_infer_request]
 
 // ! [executable_network:get_config]
-Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const {
+InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const {
     return _cfg.Get(name);
 }
 // ! [executable_network:get_config]
@@ -130,7 +130,7 @@ InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name) const {
         CONFIG_KEY(DEVICE_ID),
         CONFIG_KEY(PERF_COUNT),
         TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) };
-        auto streamExecutorConfigKeys = IStreamsExecutor::Config{}.SupportedKeys();
+        auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
         for (auto&& configKey : streamExecutorConfigKeys) {
             configKeys.emplace_back(configKey);
         }
diff --git a/docs/template_plugin/src/template_plugin.cpp b/docs/template_plugin/src/template_plugin.cpp
index c66b22c4615..64da1235e6a 100644
--- a/docs/template_plugin/src/template_plugin.cpp
+++ b/docs/template_plugin/src/template_plugin.cpp
@@ -33,15 +33,15 @@ Plugin::Plugin() {
     _backend = ngraph::runtime::Backend::create("INTERPRETER");
 
     // create default stream executor with a given name
-    _waitExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"});
+    _waitExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"});
 }
 // ! [plugin:ctor]
 
 // ! [plugin:dtor]
 Plugin::~Plugin() {
     // Plugin should remove executors from executor cache to avoid threads number growth in the whole application
-    ExecutorManager::getInstance()->clear("TemplateStreamsExecutor");
-    ExecutorManager::getInstance()->clear("TemplateWaitExecutor");
+    InferenceEngine::ExecutorManager::getInstance()->clear("TemplateStreamsExecutor");
+    InferenceEngine::ExecutorManager::getInstance()->clear("TemplateWaitExecutor");
     // NOTE: Uncomment this if Inference Engine Executor cache is used to create callback executor
     // ExecutorManager::getInstance()->clear("TemplateCallbackExecutor");
 }
@@ -91,8 +91,8 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const
     for (auto networkOutput : networkOutputs) {
         auto output_precision = networkOutput.second->getPrecision();
 
-        if (output_precision != Precision::FP32 &&
-            output_precision != Precision::FP16) {
+        if (output_precision != InferenceEngine::Precision::FP32 &&
+            output_precision != InferenceEngine::Precision::FP16) {
             THROW_IE_EXCEPTION << "Template device supports only FP16 and FP32 output precision.";
         }
     }
@@ -135,8 +135,8 @@ InferenceEngine::ExecutableNetwork Plugin::ImportNetworkImpl(std::istream& model,
 // ! [plugin:import_network_impl]
 
 // ! [plugin:query_network]
-QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork &network, const ConfigMap& config) const {
-    QueryNetworkResult res;
+InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork &network, const ConfigMap& config) const {
+    InferenceEngine::QueryNetworkResult res;
     Configuration cfg{config, _cfg, false};
     auto function = network.getFunction();
@@ -163,7 +163,7 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork &network, const ConfigMap& config) const {
         for (auto&& fusedLayerName : ngraph::getFusedNamesVector(node)) {
             // Filter just nodes from original operation set
             // TODO: fill with actual decision rules based on whether kernel is supported by backend
-            if (contains(originalOps, fusedLayerName)) {
+            if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
                 if (opset.contains_type_insensitive(fusedLayerName)) {
                     supported.emplace(fusedLayerName);
                 } else {
@@ -175,7 +175,7 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork &network, const ConfigMap& config) const {
 
     // 4. The result set should contains just nodes from supported set
     for (auto&& layerName : supported) {
-        if (!contains(unsupported, layerName)) {
+        if (!InferenceEngine::details::contains(unsupported, layerName)) {
             res.supportedLayersMap.emplace(layerName, GetName());
         }
     }
@@ -219,7 +219,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const {
         CONFIG_KEY(DEVICE_ID),
         CONFIG_KEY(PERF_COUNT),
         TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
-        auto streamExecutorConfigKeys = IStreamsExecutor::Config{}.SupportedKeys();
+        auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
         for (auto&& configKey : streamExecutorConfigKeys) {
             if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
                 configKeys.emplace_back(configKey);
             }
@@ -248,6 +248,6 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const {
 // ! [plugin:get_metric]
 
 // ! [plugin:create_plugin_engine]
-static const Version version = {{2, 1}, CI_BUILD_NUMBER, "templatePlugin"};
+static const InferenceEngine::Version version = {{2, 1}, CI_BUILD_NUMBER, "templatePlugin"};
 IE_DEFINE_PLUGIN_CREATE_FUNCTION(Plugin, version)
 // ! [plugin:create_plugin_engine]
diff --git a/docs/template_plugin/src/template_plugin.hpp b/docs/template_plugin/src/template_plugin.hpp
index 9ec278cd8c6..fe099ff734b 100644
--- a/docs/template_plugin/src/template_plugin.hpp
+++ b/docs/template_plugin/src/template_plugin.hpp
@@ -10,8 +10,6 @@
 
 #include "backend.hpp"
 
-#include "backend.hpp"
-
 //! [plugin:header]
 namespace TemplatePlugin {
diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.cpp b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
index bcfa794c704..d772214e031 100644
--- a/inference-engine/src/cldnn_engine/cldnn_engine.cpp
+++ b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
@@ -452,8 +452,8 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
     std::vector<std::shared_ptr<ngraph::Node>> concats;
     std::vector<std::shared_ptr<ngraph::Node>> nextLayerDependent;
 
-    for (CNNNetworkIterator itLayer{clonedNetwork.get()};
-         itLayer != CNNNetworkIterator();
+    for (InferenceEngine::details::CNNNetworkIterator itLayer{clonedNetwork.get()};
+         itLayer != InferenceEngine::details::CNNNetworkIterator();
          itLayer++) {
         auto layerIsSupported = [&] {
             auto node = (*itLayer)->getNode();
@@ -490,7 +490,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
             continue;
         }
         for (auto&& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
-            if (contains(originalOps, fusedLayerName)) {
+            if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
                 if (layerIsSupported) {
                     supported.emplace(fusedLayerName);
                 } else {
@@ -501,7 +501,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
     }
 
     for (auto&& layerName : supported) {
-        if (contains(unsupported, layerName)) {
+        if (InferenceEngine::details::contains(unsupported, layerName)) {
            supported.erase(layerName);
        }
    }
@@ -512,10 +512,10 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
         const auto outputs = split->outputs();
         for (const auto& output : outputs) {
             const auto& name = output.get_node()->get_friendly_name();
-            if (!contains(supported, name) &&
-                !contains(depLayerNames, name) &&
-                !contains(concatNames, name) &&
-                !contains(splitNames, name)) {
+            if (!InferenceEngine::details::contains(supported, name) &&
+                !InferenceEngine::details::contains(depLayerNames, name) &&
+                !InferenceEngine::details::contains(concatNames, name) &&
+                !InferenceEngine::details::contains(splitNames, name)) {
                 is_supported = false;
                 break;
             }
@@ -530,9 +530,9 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
         const auto inputs = concat->inputs();
         for (const auto& input : inputs) {
             const auto& name = input.get_node()->get_friendly_name();
-            if (!contains(supported, name) &&
-                !contains(depLayerNames, name) &&
-                !contains(concatNames, name)) {
+            if (!InferenceEngine::details::contains(supported, name) &&
+                !InferenceEngine::details::contains(depLayerNames, name) &&
+                !InferenceEngine::details::contains(concatNames, name)) {
                 is_supported = false;
                 break;
             }
@@ -548,7 +548,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
         const auto inputs = cnl->inputs();
         for (const auto& input : inputs) {
             const auto& name = input.get_node()->get_friendly_name();
-            if (!contains(supported, name)) {
+            if (!InferenceEngine::details::contains(supported, name)) {
                 is_supported = false;
                 break;
             }
@@ -556,7 +556,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
         const auto outputs = cnl->outputs();
         for (const auto& output : outputs) {
             const auto& name = output.get_node()->get_friendly_name();
-            if (!contains(supported, name)) {
+            if (!InferenceEngine::details::contains(supported, name)) {
                 is_supported = false;
                 break;
             }
@@ -567,7 +567,7 @@ QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network,
     }
 
     for (auto&& node : function->get_ops()) {
-        if (contains(supported, node->get_friendly_name())) {
+        if (InferenceEngine::details::contains(supported, node->get_friendly_name())) {
             for (auto&& inputNodeOutput : node->input_values()) {
                 if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
                     supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
diff --git a/inference-engine/src/gna_plugin/gna_plugin_internal.hpp b/inference-engine/src/gna_plugin/gna_plugin_internal.hpp
index 815934b12c0..0b3e80e921e 100644
--- a/inference-engine/src/gna_plugin/gna_plugin_internal.hpp
+++ b/inference-engine/src/gna_plugin/gna_plugin_internal.hpp
@@ -55,8 +55,8 @@ public:
         return make_executable_network(std::make_shared<GNAExecutableNetwork>(modelFileName, plg));
     }
 
-    ExecutableNetwork ImportNetwork(std::istream& networkModel,
-                                    const std::map<std::string, std::string>& config) override {
+    InferenceEngine::ExecutableNetwork ImportNetwork(std::istream& networkModel,
+                                                     const std::map<std::string, std::string>& config) override {
         Config updated_config(defaultConfig);
         updated_config.UpdateFromMap(config);
         auto plg = std::make_shared<GNAPlugin>(updated_config.key_config_map);
diff --git a/inference-engine/src/hetero_plugin/hetero_infer_request.cpp b/inference-engine/src/hetero_plugin/hetero_infer_request.cpp
index b4b60690816..61d963cbab7 100644
--- a/inference-engine/src/hetero_plugin/hetero_infer_request.cpp
+++ b/inference-engine/src/hetero_plugin/hetero_infer_request.cpp
@@ -37,9 +37,9 @@ HeteroInferRequest::HeteroInferRequest(InferenceEngine::InputsDataMap networkInputs,
         std::tie(itBlob, emplaced) = _blobs.emplace(intermediateBlobName, Blob::Ptr{});
         if (emplaced) {
             itBlob->second = r->GetBlob(blobName);
-            if (contains(networkInputs, blobName)) {
+            if (InferenceEngine::details::contains(networkInputs, blobName)) {
                 _inputs[blobName] = itBlob->second;
-            } else if (contains(networkOutputs, blobName)) {
+            } else if (InferenceEngine::details::contains(networkOutputs, blobName)) {
                 _outputs[blobName] = itBlob->second;
             }
         } else {
diff --git a/inference-engine/src/hetero_plugin/hetero_plugin.cpp b/inference-engine/src/hetero_plugin/hetero_plugin.cpp
index 9c7af172eb3..779da851eb0 100644
--- a/inference-engine/src/hetero_plugin/hetero_plugin.cpp
+++ b/inference-engine/src/hetero_plugin/hetero_plugin.cpp
@@ -65,7 +65,7 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const
     return std::make_shared<HeteroExecutableNetwork>(network, mergeConfigs(_config, config), this);
 }
 
-ExecutableNetwork Engine::ImportNetworkImpl(std::istream& heteroModel, const Configs& config) {
+InferenceEngine::ExecutableNetwork Engine::ImportNetworkImpl(std::istream& heteroModel, const Configs& config) {
     if (GetCore() == nullptr) {
         THROW_IE_EXCEPTION << "Please, work with HETERO device via InferencEngine::Core object";
     }
diff --git a/inference-engine/src/hetero_plugin/hetero_plugin.hpp b/inference-engine/src/hetero_plugin/hetero_plugin.hpp
index c44b0e7e953..ee04693fcdc 100644
--- a/inference-engine/src/hetero_plugin/hetero_plugin.hpp
+++ b/inference-engine/src/hetero_plugin/hetero_plugin.hpp
@@ -37,7 +37,7 @@ public:
     InferenceEngine::Parameter GetConfig(const std::string& name,
                                          const std::map<std::string, InferenceEngine::Parameter> & options) const override;
 
-    ExecutableNetwork ImportNetworkImpl(std::istream& heteroModel, const Configs& config) override;
+    InferenceEngine::ExecutableNetwork ImportNetworkImpl(std::istream& heteroModel, const Configs& config) override;
 
     DeviceMetaInformationMap GetDevicePlugins(const std::string& targetFallback,
                                               const Configs & localConfig) const;
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
index 4bcd2d9e4ef..39cb372c7e0 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
@@ -145,14 +145,14 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network,
     if (cfg.exclusiveAsyncRequests) {
         // special case when all InferRequests are muxed into a single queue
-        _taskExecutor = ExecutorManager::getInstance()->getExecutor("CPU");
+        _taskExecutor = InferenceEngine::ExecutorManager::getInstance()->getExecutor("CPU");
     } else {
         auto streamsExecutorConfig = InferenceEngine::IStreamsExecutor::Config::MakeDefaultMultiThreaded(_cfg.streamExecutorConfig);
         streamsExecutorConfig._name = "CPUStreamsExecutor";
-        _taskExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
+        _taskExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(streamsExecutorConfig);
     }
     if (0 != cfg.streamExecutorConfig._streams) {
-        _callbackExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(
+        _callbackExecutor = InferenceEngine::ExecutorManager::getInstance()->getIdleCPUStreamsExecutor(
             IStreamsExecutor::Config{"CPUCallbackExecutor", 1, 0, IStreamsExecutor::ThreadBindingType::NONE});
     } else {
         _callbackExecutor = _taskExecutor;
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index eed640b0148..d5d8d8316fd 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -259,7 +259,9 @@ static void Transformation(ICNNNetwork::Ptr& clonedNetwork, const Config& conf) {
     // WA: after conversion to CNNNetwork user precision can redefine input/output precisions
     // so we need to apply additional precision conversion but only for inputs and outputs
     for (auto & precision : convert_precision_list) {
-        NetPass::ConvertIOPrecision(*clonedNetwork, convertPrecision(precision.first), convertPrecision(precision.second));
+        NetPass::ConvertIOPrecision(*clonedNetwork,
+                                    InferenceEngine::details::convertPrecision(precision.first),
+                                    InferenceEngine::details::convertPrecision(precision.second));
     }
 }
@@ -450,7 +452,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::map<std::string, std::string>& config) const {
                 return true;
             } ();
             for (auto&& fusedLayerName : ngraph::getFusedNamesVector((*itLayer)->getNode())) {
-                if (contains(originalOps, fusedLayerName)) {
+                if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
                     if (layerIsSupported) {
                         supported.emplace(fusedLayerName);
                     } else {
@@ -461,7 +463,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::map<std::string, std::string>& config) const {
         for (auto&& node : function->get_ops()) {
-            if (!contains(unsupported, node->get_friendly_name())) {
+            if (!InferenceEngine::details::contains(unsupported, node->get_friendly_name())) {
                 for (auto&& inputNodeOutput : node->input_values()) {
                     if (ngraph::op::is_constant(inputNodeOutput.get_node())) {
                         supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
@@ -478,7 +480,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::map<std::string, std::string>& config) const {
         }
 
         for (auto&& layerName : supported) {
-            if (!contains(unsupported, layerName)) {
+            if (!InferenceEngine::details::contains(unsupported, layerName)) {
                 res.supportedLayersMap.emplace(layerName, GetName());
             }
         }
diff --git a/inference-engine/src/multi_device/multi_device_plugin.cpp b/inference-engine/src/multi_device/multi_device_plugin.cpp
index 8d1217fedc7..85172e37723 100644
--- a/inference-engine/src/multi_device/multi_device_plugin.cpp
+++ b/inference-engine/src/multi_device/multi_device_plugin.cpp
@@ -99,8 +99,8 @@ std::vector<DeviceInformation> MultiDeviceInferencePlugin::ParseMetaDevices(const std::string& priorities,
     return metaDevices;
 }
 
-Parameter MultiDeviceInferencePlugin::GetConfig(const std::string& name,
-        const std::map<std::string, Parameter> & options) const {
+InferenceEngine::Parameter MultiDeviceInferencePlugin::GetConfig(const std::string& name,
+                                                                 const std::map<std::string, Parameter> & options) const {
     if (name == MULTI_CONFIG_KEY(DEVICE_PRIORITIES)) {
         auto it = _config.find(MULTI_CONFIG_KEY(DEVICE_PRIORITIES));
         if (it == _config.end()) {
@@ -219,7 +219,7 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork&
         }
         supportedLayers = supportedLayers.empty()
                         ? deviceSupportedLayers : (deviceSupportedLayers.empty()
-                        ? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
+                        ? supportedLayers : InferenceEngine::details::Intersection(supportedLayers, deviceSupportedLayers));
     }
     for (auto&& supportedLayer : supportedLayers) {
         queryResult.supportedLayersMap[supportedLayer] = GetName();
diff --git a/inference-engine/src/multi_device/multi_device_plugin.hpp b/inference-engine/src/multi_device/multi_device_plugin.hpp
index 09124822ce8..0e2d9a43711 100644
--- a/inference-engine/src/multi_device/multi_device_plugin.hpp
+++ b/inference-engine/src/multi_device/multi_device_plugin.hpp
@@ -24,7 +24,7 @@ public:
                                                                 const std::map<std::string, std::string>& config) override;
 
     void SetConfig(const std::map<std::string, std::string>& config) override;
-    Parameter GetConfig(const std::string& name, const std::map<std::string, Parameter> & options) const override;
+    InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, Parameter> & options) const override;
     InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
                                                      const std::map<std::string, std::string>& config) const override;
     InferenceEngine::Parameter GetMetric(const std::string& name,
diff --git a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp
index 5b297574168..2f56b4827b4 100644
--- a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp
+++ b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp
@@ -21,10 +21,6 @@
 #include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
 #include "cpp_interfaces/plugin_itt.hpp"
 
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-
 namespace InferenceEngine {
 
 namespace {
diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
index 935ac60dff7..aa448544a35 100644
--- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
+++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
@@ -280,16 +280,16 @@ public:
  * @brief Defines the exported `CreatePluginEngine` function which is used to create a plugin instance
  * @ingroup ie_dev_api_plugin_api
  */
-#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...)                                  \
-    INFERENCE_PLUGIN_API(InferenceEngine::StatusCode) CreatePluginEngine(                           \
-        InferenceEngine::IInferencePlugin *&plugin,                                                 \
-        InferenceEngine::ResponseDesc *resp) noexcept {                                             \
-        try {                                                                                       \
-            plugin = new PluginType(__VA_ARGS__);                                                   \
-            plugin->SetVersion(version);                                                            \
-            return OK;                                                                              \
-        }                                                                                           \
-        catch (std::exception &ex) {                                                                \
-            return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();            \
-        }                                                                                           \
+#define IE_DEFINE_PLUGIN_CREATE_FUNCTION(PluginType, version, ...)                                  \
+    INFERENCE_PLUGIN_API(InferenceEngine::StatusCode) CreatePluginEngine(                           \
+        InferenceEngine::IInferencePlugin *&plugin,                                                 \
+        InferenceEngine::ResponseDesc *resp) noexcept {                                             \
+        try {                                                                                       \
+            plugin = new PluginType(__VA_ARGS__);                                                   \
+            plugin->SetVersion(version);                                                            \
+            return InferenceEngine::OK;                                                             \
+        }                                                                                           \
+        catch (std::exception &ex) {                                                                \
+            return InferenceEngine::DescriptionBuffer(InferenceEngine::GENERAL_ERROR, resp) << ex.what(); \
+        }                                                                                           \
     }
diff --git a/inference-engine/src/plugin_api/ie_algorithm.hpp b/inference-engine/src/plugin_api/ie_algorithm.hpp
index 319198c40a9..16a577b2d7a 100644
--- a/inference-engine/src/plugin_api/ie_algorithm.hpp
+++ b/inference-engine/src/plugin_api/ie_algorithm.hpp
@@ -93,7 +93,7 @@ static Set Intersection(const Set& lhs, const Set& rhs) {
     const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs;
     const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs;
     for (auto&& val : minSizeSet) {
-        if (contains(maxSizeSet, val)) {
+        if (InferenceEngine::details::contains(maxSizeSet, val)) {
             result.insert(val);
         }
     }
@@ -112,7 +112,7 @@ static bool Intersects(const Set& lhs, const Set& rhs) {
     const auto& minSizeSet = (lhs.size() < rhs.size()) ? lhs : rhs;
     const auto& maxSizeSet = (lhs.size() >= rhs.size()) ? lhs : rhs;
     for (auto&& val : minSizeSet) {
-        if (contains(maxSizeSet, val)) {
+        if (InferenceEngine::details::contains(maxSizeSet, val)) {
             return true;
         }
     }
diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
index d1d2d7a3348..21c230f5e58 100644
--- a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
+++ b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
@@ -100,7 +100,7 @@ QueryNetworkResult Engine::QueryNetwork(
     ngraph::NodeVector splits;
     ngraph::NodeVector concats;
 
-    const auto isLayerSupported = [this, &splitNames, &concatNames, &concats, &splits](CNNNetworkIterator& layer) -> bool {
+    const auto isLayerSupported = [this, &splitNames, &concatNames, &concats, &splits](InferenceEngine::details::CNNNetworkIterator& layer) -> bool {
         auto node = (*layer)->getNode();
         if (std::dynamic_pointer_cast<ngraph::opset3::Split>(node) != nullptr) {
             splitNames.emplace(node->get_friendly_name());
@@ -117,8 +117,8 @@ QueryNetworkResult Engine::QueryNetwork(
         }
     };
 
-    for (CNNNetworkIterator itLayer{convertedNetwork.get()};
-         itLayer != CNNNetworkIterator();
+    for (InferenceEngine::details::CNNNetworkIterator itLayer{convertedNetwork.get()};
+         itLayer != InferenceEngine::details::CNNNetworkIterator();
          itLayer++) {
         const auto fusedNode = (*itLayer)->getNode();
         if (fusedNode == nullptr) {
@@ -126,7 +126,7 @@ QueryNetworkResult Engine::QueryNetwork(
         }
 
         for (auto& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
-            if (contains(originalOps, fusedLayerName)) {
+            if (InferenceEngine::details::contains(originalOps, fusedLayerName)) {
                 if (isLayerSupported(itLayer)) {
                     supported.emplace(fusedLayerName);
                 } else {
@@ -137,7 +137,7 @@ QueryNetworkResult Engine::QueryNetwork(
     }
 
     for (const auto& layerName : supported) {
-        if (contains(unsupported, layerName)) {
+        if (InferenceEngine::details::contains(unsupported, layerName)) {
             supported.erase(layerName);
         }
     }
@@ -149,13 +149,13 @@ QueryNetworkResult Engine::QueryNetwork(
         const auto inputs = split->inputs();
         for (const auto& input : inputs) {
             const auto& parentName = input.get_source_output().get_node()->get_friendly_name();
-            if (contains(supported, parentName) &&
-                contains(splitNames, parentName)) {
+            if (InferenceEngine::details::contains(supported, parentName) &&
+                InferenceEngine::details::contains(splitNames, parentName)) {
                 markParentSplitAsUnsupported(input.get_source_output().get_node_shared_ptr());
             }
         }
         const auto& name = split->get_friendly_name();
-        if (contains(supported, name)) {
+        if (InferenceEngine::details::contains(supported, name)) {
             supported.erase(name);
         }
     };
@@ -167,9 +167,9 @@ QueryNetworkResult Engine::QueryNetwork(
         for (const auto& output : outputs) {
             for (const auto& consumer : output.get_target_inputs()) {
                 const auto& name = consumer.get_node()->get_friendly_name();
-                if (!contains(supported, name) &&
-                    !contains(concatNames, name) &&
-                    !contains(splitNames, name)) {
+                if (!InferenceEngine::details::contains(supported, name) &&
+                    !InferenceEngine::details::contains(concatNames, name) &&
+                    !InferenceEngine::details::contains(splitNames, name)) {
                     is_supported = false;
                     break;
                 }
@@ -189,8 +189,8 @@ QueryNetworkResult Engine::QueryNetwork(
         const auto inputs = concat->inputs();
         for (const auto& input : inputs) {
             const auto& name = input.get_source_output().get_node()->get_friendly_name();
-            if (!contains(supported, name) &&
-                !contains(concatNames, name)) {
+            if (!InferenceEngine::details::contains(supported, name) &&
+                !InferenceEngine::details::contains(concatNames, name)) {
                 is_supported = false;
                 break;
             }
@@ -201,7 +201,7 @@ QueryNetworkResult Engine::QueryNetwork(
     }
 
     for (const auto& node : function->get_ops()) {
-        if (contains(supported, node->get_friendly_name())) {
+        if (InferenceEngine::details::contains(supported, node->get_friendly_name())) {
             for (const auto& inputNodeOutput : node->input_values()) {
                 if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
                     supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp
index f770b2c6e9e..c76c188e225 100644
--- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp
+++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp
@@ -32,7 +32,7 @@ MockPlugin::LoadNetwork(const CNNNetwork &network,
     }
 }
 
-ExecutableNetworkInternal::Ptr
+InferenceEngine::ExecutableNetworkInternal::Ptr
 MockPlugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
                                const std::map<std::string, std::string>& config) {
     return {};
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp
index f500dfc1ce7..1015f6a5a54 100644
--- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp
+++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp
@@ -20,7 +20,7 @@ public:
     InferenceEngine::ExecutableNetwork
     LoadNetwork(const InferenceEngine::CNNNetwork &network,
                 const std::map<std::string, std::string> &config) override;
 
-    ExecutableNetworkInternal::Ptr
+    InferenceEngine::ExecutableNetworkInternal::Ptr
     LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
                        const std::map<std::string, std::string>& config) override;
diff --git a/inference-engine/tests_deprecated/unit/inference_engine_tests/util_test.cpp b/inference-engine/tests_deprecated/unit/inference_engine_tests/util_test.cpp
index f45e41b3eab..9774473cd50 100644
--- a/inference-engine/tests_deprecated/unit/inference_engine_tests/util_test.cpp
+++ b/inference-engine/tests_deprecated/unit/inference_engine_tests/util_test.cpp
@@ -100,7 +100,7 @@ TEST(UtilTests, cloneLayers) {
 namespace {
 IE::CNNLayerPtr getLayer(const IE::details::CNNNetworkImplPtr n,
                          const char* name) {
-    if (contains(n->allLayers(), name)) {
+    if (InferenceEngine::details::contains(n->allLayers(), name)) {
         return n->allLayers().find(name)->second;
     }
     return nullptr;
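
The pattern applied throughout this patch: the shared plugin headers no longer inject `using namespace InferenceEngine;` (or `InferenceEngine::details`) into every translation unit, so plugin code now spells each Inference Engine symbol with its full namespace. A minimal, self-contained sketch of the rule, using hypothetical names and a simplified stand-in namespace (illustrative only, not part of the patch):

```cpp
#include <iostream>
#include <map>
#include <string>

namespace engine {              // stand-in for InferenceEngine (hypothetical)
using Parameter = std::string;  // simplified stand-in for InferenceEngine::Parameter
}

// No `using namespace engine;` anywhere: every engine type is written with its
// full namespace, which is the same rule this patch enforces for InferenceEngine.
engine::Parameter GetConfig(const std::string& name,
                            const std::map<std::string, engine::Parameter>& options) {
    auto it = options.find(name);
    return it != options.end() ? it->second : engine::Parameter{};
}

int main() {
    const std::map<std::string, engine::Parameter> options{{"DEVICE_ID", "0"}};
    std::cout << GetConfig("DEVICE_ID", options) << "\n";  // prints "0"
}
```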