Compare commits

...

5 Commits

Author SHA1 Message Date
Alexey Suhov
2fe9b15230 change repo name to openvino in readme files 2020-06-03 00:08:25 +03:00
Alexey Suhov
9221f41b01 fix permissions for shell scripts 2020-06-02 22:32:00 +03:00
Alexey Suhov
85de6ee857 Publishing 2020.3 content 2020-06-02 21:59:45 +03:00
Moshe David
acad2e01e5 w (#394)
Co-authored-by: modav <modav@microsoft.com>
2020-05-26 00:28:09 +03:00
Ian Hunter
94dd082199 Fix link to Linux Guide (#494) 2020-05-14 13:52:13 +03:00
393 changed files with 11270 additions and 1327 deletions

View File

@@ -78,8 +78,7 @@ function(build_ngraph)
if (NOT ANDROID)
ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE TRUE)
# ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE)
set(NGRAPH_ONNX_IMPORT_ENABLE TRUE CACHE BOOL "" FORCE)
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE)
else()
ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE)
ngraph_set(NGRAPH_TEST_UTIL_ENABLE FALSE)
@@ -118,4 +117,49 @@ build_ngraph()
add_subdirectory(inference-engine)
add_subdirectory(docs)
# cpack
# install setupvars
ie_cpack_add_component(setupvars REQUIRED)
if(UNIX)
install(PROGRAMS scripts/setupvars/setupvars.sh
DESTINATION bin
COMPONENT setupvars)
elseif(WIN32)
install(PROGRAMS scripts/setupvars/setupvars.bat
DESTINATION bin
COMPONENT setupvars)
endif()
# install install_dependencies
if(UNIX)
ie_cpack_add_component(install_dependencies REQUIRED)
install(DIRECTORY scripts/install_dependencies/
DESTINATION install_dependencies
COMPONENT install_dependencies)
endif()
# install files for demo
ie_cpack_add_component(demo_scripts REQUIRED DEPENDS core)
if(UNIX)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.bat EXCLUDE)
elseif(WIN32)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.sh EXCLUDE)
endif()
ie_cpack(${IE_CPACK_COMPONENTS_ALL})

View File

@@ -1,5 +1,5 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2020.1-green.svg)](https://github.com/opencv/dldt/releases/tag/2020.1)
[![Stable release](https://img.shields.io/badge/version-2020.3-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2020.3.0)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
This toolkit allows developers to deploy pre-trained deep learning models
@@ -36,7 +36,7 @@ with us doing the following steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide relevant unit tests and one or more samples
* Submit a pull request at https://github.com/opencv/dldt/pulls
* Submit a pull request at https://github.com/openvinotoolkit/openvino/pulls
We will review your contribution and, if any additional fixes or modifications
are necessary, may give some feedback to guide you. Your pull request will be
@@ -46,7 +46,7 @@ merged into GitHub* repositories if accepted.
Please report questions, issues and suggestions using:
* The `openvino` [tag on StackOverflow]\*
* [GitHub* Issues](https://github.com/opencv/dldt/issues)
* [GitHub* Issues](https://github.com/openvinotoolkit/openvino/issues)
* [Forum](https://software.intel.com/en-us/forums/computer-vision)
---

View File

@@ -28,7 +28,6 @@
- [Add Inference Engine to Your Project](#add-inference-engine-to-your-project)
- [(Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2](#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2)
- [For Linux, Raspbian Stretch* OS](#for-linux-raspbian-stretch-os)
- [For Windows](#for-windows-1)
- [Next Steps](#next-steps)
- [Additional Resources](#additional-resources)
@@ -60,12 +59,12 @@ The software was validated on:
- [CMake]\* 3.11 or higher
- GCC\* 4.8 or higher to build the Inference Engine
- Python 2.7 or higher for Inference Engine Python API wrapper
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441].
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352].
### Build Steps
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -78,7 +77,7 @@ The software was validated on:
```
3. By default, the build enables the Inference Engine GPU plugin to infer models
on your Intel® Processor Graphics. This requires you to
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352]
before running the build. If you don't want to use the GPU plugin, use the
`-DENABLE_CLDNN=OFF` CMake build option and skip the installation of the
Intel® Graphics Compute Runtime for OpenCL™ Driver.
@@ -172,10 +171,10 @@ Native compilation of the Inference Engine is the most straightforward solution.
sudo apt-get install -y git cmake libusb-1.0-0-dev
```
2. Go to the cloned `dldt` repository:
2. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
3. Initialize submodules:
@@ -262,15 +261,15 @@ with the following content:
5. Run Docker\* container with mounted source code folder from host:
```bash
docker run -it -v /absolute/path/to/dldt:/dldt ie_cross_armhf /bin/bash
docker run -it -v /absolute/path/to/openvino:/openvino ie_cross_armhf /bin/bash
```
6. While in the container:
1. Go to the cloned `dldt` repository:
1. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
2. Create a build folder:
@@ -291,8 +290,8 @@ with the following content:
```
7. Press **Ctrl+D** to exit from Docker. You can find the resulting binaries
in the `dldt/bin/armv7l/` directory and the OpenCV*
installation in the `dldt/inference-engine/temp`.
in the `openvino/bin/armv7l/` directory and the OpenCV*
installation in the `openvino/inference-engine/temp`.
>**NOTE**: Native applications that link to cross-compiled Inference Engine
library require an extra compilation flag `-march=armv7-a`.
@@ -381,8 +380,8 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
6. Before running the samples, add paths to the TBB and OpenCV binaries used for
the build to the `%PATH%` environment variable. By default, TBB binaries are
downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<dldt_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
downloaded by the CMake-based script to the `<openvino_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<openvino_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
folder.
### Additional Build Options
@@ -437,7 +436,7 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by openvino cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
@@ -461,7 +460,7 @@ The software was validated on:
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -545,7 +544,7 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
2. Clone submodules
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
@@ -610,7 +609,7 @@ before running the Inference Engine build:
For CMake projects, set the `InferenceEngine_DIR` environment variable:
```sh
export InferenceEngine_DIR=/path/to/dldt/build/
export InferenceEngine_DIR=/path/to/openvino/build/
```
Then you can find Inference Engine by `find_package`:
@@ -660,26 +659,12 @@ sudo ldconfig
rm 97-myriad-usbboot.rules
```
### For Windows
For Intel® Movidius™ Neural Compute Stick and Intel® Neural Compute Stick 2,
install the Movidius™ VSC driver:
1. Go to the `<DLDT_ROOT_DIR>/inference-engine/thirdparty/movidius/MovidiusDriver`
directory, where the `DLDT_ROOT_DIR` is the directory to which the DLDT
repository was cloned.
2. Right click on the `Movidius_VSC_Device.inf` file and choose **Install** from
the pop-up menu.
You have installed the driver for your Intel® Movidius™ Neural Compute Stick
or Intel® Neural Compute Stick 2.
## Next Steps
Congratulations, you have built the Inference Engine. To get started with the
OpenVINO™, proceed to the Get Started guides:
* [Get Started with Deep Learning Deployment Toolkit on Linux*](../get-started-linux.md)
* [Get Started with Deep Learning Deployment Toolkit on Linux*](get-started-linux.md)
## Notice
@@ -706,7 +691,7 @@ This target collects all dependencies, prepares the nGraph package and copies it
[Intel® Distribution of OpenVINO™]:https://software.intel.com/en-us/openvino-toolkit
[CMake]:https://cmake.org/download/
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]:https://github.com/intel/compute-runtime/releases/tag/19.41.14441
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352]:https://github.com/intel/compute-runtime/releases/tag/20.13.16352
[MKL-DNN repository]:https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_lnx_2019.0.5.20190502.tgz
[MKL-DNN repository for Windows]:(https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_win_2019.0.5.20190502.zip)
[OpenBLAS]:https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download

View File

@@ -36,9 +36,13 @@ function(ie_cpack_set_library_dir)
endif()
if(WIN32)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${CMAKE_BUILD_TYPE}/${ARCH} PARENT_SCOPE)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
else()
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
endif()
endfunction()
@@ -59,8 +63,10 @@ macro(ie_cpack)
set(CPACK_GENERATOR "TGZ")
if(WIN32)
set(CPACK_PACKAGE_NAME inference-engine_${CMAKE_BUILD_TYPE})
string(REPLACE "\\" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
else()
set(CPACK_PACKAGE_NAME inference-engine)
string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
endif()
set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF)
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
@@ -194,7 +200,7 @@ else()
endif()
if(APPLE)
set(CMAKE_MACOSX_RPATH 1)
set(CMAKE_MACOSX_RPATH ON)
endif(APPLE)
# Use solution folders

View File

@@ -138,6 +138,14 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
endfunction(RESOLVE_DEPENDENCY)
function (resolve_model_dependency network archive network_model_path)
RESOLVE_DEPENDENCY(${network_model_path}
ARCHIVE "models_archives/${archive}"
TARGET_PATH "${MODELS_PATH}/${network}")
string (REPLACE ${MODELS_PATH} "" relative_path ${${network_model_path}})
set(${network_model_path} ".${relative_path}" PARENT_SCOPE)
endfunction()
function(reset_deps_cache)
#
# Reset the dependencies cache if it was set by dependency solver

View File

@@ -154,7 +154,7 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
if(DEFINED ENV{IE_PATH_TO_DEPS})
set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
else()
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.2/inference_engine/${RELATIVE_URL}")
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.3/inference_engine/${RELATIVE_URL}")
endif()
#no message on recursive calls

View File

@@ -223,12 +223,13 @@ if(WIN32)
# 161 unrecognized pragma
# 177 variable was declared but never referenced
# 556 not matched type of assigned function pointer
# 1744: field of class type without a DLL interface used in a class with a DLL interface
# 2586 decorated name length exceeded, name was truncated
# 2651: attribute does not apply to any entity
# 3180 unrecognized OpenMP pragma
# 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
# 15335 was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override
ie_add_compiler_flags(/Qdiag-disable:161,177,556,2586,2651,3180,11075,15335)
ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,2586,2651,3180,11075,15335)
endif()
# Debug information flags

47
docs/CMakeLists.txt Normal file
View File

@@ -0,0 +1,47 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

if(NOT ENABLE_DOCKER)
    add_subdirectory(examples)

    # Detect nGraph
    find_package(ngraph QUIET)
    if(NOT ngraph_FOUND)
        # Fall back to the in-tree build directory when no installed package is found.
        set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
    endif()

    # Detect InferenceEngine
    find_package(InferenceEngine QUIET)
    if(NOT InferenceEngine_FOUND)
        set(InferenceEngine_DIR ${CMAKE_BINARY_DIR})
    endif()

    add_subdirectory(template_extension)
endif()

# OpenVINO docs

set(OPENVINO_DOCS_PATH "" CACHE PATH "Path to openvino-documentation local repository")
set(args "")

if(OPENVINO_DOCS_PATH)
    # NOTE(review): this builds one whitespace-joined string (with a leading
    # space) that is passed to build_docs.sh as a single argument — confirm the
    # script tokenizes "ovinodoc_path:..." as intended.
    set(args "${args} ovinodoc_path:${OPENVINO_DOCS_PATH}")
endif()

# Listed as SOURCES below only so the files show up in IDE project views.
file(GLOB_RECURSE docs_files "${OpenVINO_MAIN_SOURCE_DIR}/docs")
file(GLOB_RECURSE include_files "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/include")
file(GLOB_RECURSE ovino_files "${OPENVINO_DOCS_PATH}")

add_custom_target(ie_docs
                  COMMAND ./build_docs.sh ${args}
                  WORKING_DIRECTORY "${OpenVINO_MAIN_SOURCE_DIR}/docs/build_documentation"
                  COMMENT "Generating OpenVINO documentation"
                  SOURCES ${docs_files} ${include_files} ${ovino_files}
                  VERBATIM)

# Convenience target that opens the generated docs in the default browser (Linux only).
find_program(browser NAMES xdg-open)
if(browser)
    add_custom_target(ie_docs_open
                      # NOTE(review): path is "doc/html" while sources live under "docs" —
                      # confirm this matches the generator's output directory.
                      COMMAND ${browser} "${OpenVINO_MAIN_SOURCE_DIR}/doc/html/index.html"
                      DEPENDS ie_docs
                      COMMENT "Open OpenVINO documentation"
                      VERBATIM)
endif()

View File

@@ -0,0 +1,14 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

# Builds the documentation code examples as a static library so the snippets
# referenced from the docs are compile-checked by the regular build.
set(TARGET_NAME ie_docs_examples)

file(GLOB SOURCES *.cpp)

# Fix: use ${TARGET_NAME} consistently — the original defined the variable and
# then repeated the literal name in add_library().
add_library(${TARGET_NAME} STATIC ${SOURCES})

target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api)

#add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
#add_clang_format_target(clang_format_${TARGET_NAME} FOR_TARGETS ${TARGET_NAME})

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <threading/ie_itask_executor.hpp>
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include <memory>

using namespace InferenceEngine;

// Documentation example: a synchronous inference request whose work is split
// into five named stages so that the asynchronous wrapper below can schedule
// each stage on a dedicated executor.
class AcceleratorSyncRequest : public InferRequestInternal {
public:
    using Ptr = std::shared_ptr<AcceleratorSyncRequest>;

    void Preprocess();
    void WriteToDevice();
    void RunOnDevice();
    void ReadFromDevice();
    void PostProcess();
};

// ! [async_infer_request:define_pipeline]
// Inherits from AsyncInferRequestThreadSafeDefault
class AcceleratorAsyncInferRequest : public AsyncInferRequestThreadSafeDefault {
    // Store the pointer to the synchronous request and five executors
    // NOTE(review): everything below is private (no access specifier after the
    // class head) — confirm this snippet is only meant to illustrate the
    // pipeline definition, not direct instantiation.
    AcceleratorAsyncInferRequest(const AcceleratorSyncRequest::Ptr& syncRequest,
                                 const ITaskExecutor::Ptr& preprocessExecutor,
                                 const ITaskExecutor::Ptr& writeToDeviceExecutor,
                                 const ITaskExecutor::Ptr& runOnDeviceExecutor,
                                 const ITaskExecutor::Ptr& readFromDeviceExecutor,
                                 const ITaskExecutor::Ptr& postProcessExecutor) :
        AsyncInferRequestThreadSafeDefault(syncRequest, nullptr, nullptr),
        _accSyncRequest{syncRequest},
        _preprocessExecutor{preprocessExecutor},
        _writeToDeviceExecutor{writeToDeviceExecutor},
        _runOnDeviceExecutor{runOnDeviceExecutor},
        _readFromDeviceExecutor{readFromDeviceExecutor},
        _postProcessExecutor{postProcessExecutor}
    {
        // Five pipeline stages of synchronous infer request are run by different executors
        _pipeline = {
            { _preprocessExecutor , [this] {
                _accSyncRequest->Preprocess();
            }},
            { _writeToDeviceExecutor , [this] {
                _accSyncRequest->WriteToDevice();
            }},
            { _runOnDeviceExecutor , [this] {
                _accSyncRequest->RunOnDevice();
            }},
            { _readFromDeviceExecutor , [this] {
                _accSyncRequest->ReadFromDevice();
            }},
            { _postProcessExecutor , [this] {
                _accSyncRequest->PostProcess();
            }},
        };
    }

    // As all stages use the _accSyncRequest member, we should wait for all stage
    // tasks to finish before the destructor destroys this member.
    ~AcceleratorAsyncInferRequest() {
        StopAndWait();
    }

    AcceleratorSyncRequest::Ptr _accSyncRequest;
    ITaskExecutor::Ptr _preprocessExecutor, _writeToDeviceExecutor, _runOnDeviceExecutor, _readFromDeviceExecutor, _postProcessExecutor;
};
// ! [async_infer_request:define_pipeline]

View File

@@ -0,0 +1,53 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <threading/ie_cpu_streams_executor.hpp>
#include <memory>
#include <future>
#include <iostream>

// Documentation example: running a task on an ITaskExecutor and waiting for
// its completion (or exception) through a std::promise/std::future pair.
void example1() {
    // ! [itask_executor:define_pipeline]
    // std::promise is a move-only object, so to satisfy the copyable-callable
    // constraint we wrap it in std::shared_ptr
    auto promise = std::make_shared<std::promise<void>>();
    // When the promise is created we can get a std::future to wait for the result
    auto future = promise->get_future();
    // Rather simple task
    InferenceEngine::Task task = [] {std::cout << "Some Output" << std::endl; };
    // Create an executor
    InferenceEngine::ITaskExecutor::Ptr taskExecutor = std::make_shared<InferenceEngine::CPUStreamsExecutor>();
    if (taskExecutor == nullptr) {
        // ProcessError(e);
        return;
    }
    // We capture the task and the promise. When the task is executed in the task executor context
    // we manually call the std::promise::set_value() method
    taskExecutor->run([task, promise] {
        std::exception_ptr currentException;
        try {
            task();
        } catch(...) {
            // If an exception is thrown, store a pointer to the current exception
            currentException = std::current_exception();
        }

        if (nullptr == currentException) {
            promise->set_value();  // <-- If there were no problems, just call std::promise::set_value()
        } else {
            promise->set_exception(currentException);    // <-- If there is an exception, forward it to the std::future object
        }
    });

    // To wait for task completion we call the std::future::wait method
    future.wait();  // The current thread will be blocked here until the
                    // std::promise::set_value() or std::promise::set_exception() method is called.

    // If the future stores an exception, it will be rethrown by the std::future::get method
    try {
        future.get();
    } catch(std::exception& /*e*/) {
        // ProcessError(e);
    }
    // ! [itask_executor:define_pipeline]
}

View File

@@ -0,0 +1,18 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

# [cmake:extension]
# Builds the template extension as a shared library loadable by the Inference
# Engine core (IExtension entry point, custom nGraph opset + CPU kernel).
set(TARGET_NAME "template_extension")

find_package(ngraph REQUIRED)
find_package(InferenceEngine REQUIRED)

file(GLOB_RECURSE SRC *.cpp)

add_library(${TARGET_NAME} SHARED ${SRC})

# Export (rather than import) the CreateExtension symbol from this library.
target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_API)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES}
                                             ${NGRAPH_LIBRARIES})
# [cmake:extension]

View File

@@ -0,0 +1,124 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <details/ie_exception.hpp>
#include <ie_layouts.h>
using namespace TemplateExtension;
//! [cpu_implementation:ctor]
// Validates the nGraph node this kernel will execute and caches its "add"
// attribute and shapes. On validation failure the message is stored in
// `error` and reported later from getSupportedConfigurations()/init().
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
    try {
        auto castedNode = std::dynamic_pointer_cast<Operation>(node);
        if (!castedNode)
            THROW_IE_EXCEPTION << "Cannot create implementation for unknown operation!";
        if (castedNode->inputs().size() != 1 || castedNode->outputs().size() != 1)
            THROW_IE_EXCEPTION << "Cannot create implementation for operation with incorrect number of inputs or outputs!";
        if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic())
            THROW_IE_EXCEPTION << "Cannot create implementation for op with dynamic shapes!";
        if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4)
            THROW_IE_EXCEPTION << "Operation supports only 4d tensors for input and output.";
        if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
            THROW_IE_EXCEPTION << "Operation supports only FP32 tensors.";
        add = castedNode->getAddAttr();
        // Fix: cache the validated shapes. getSupportedConfigurations() reads
        // the inShape/outShape members, which were never assigned before.
        inShape = castedNode->get_input_shape(0);
        outShape = castedNode->get_output_shape(0);
    } catch (InferenceEngine::details::InferenceEngineException& ex) {
        // Defer the error: the IE extension API reports failures via
        // StatusCode + ResponseDesc rather than exceptions.
        error = ex.what();
    }
}
//! [cpu_implementation:ctor]
//! [cpu_implementation:getSupportedConfigurations]
// Reports the tensor configurations this kernel can execute: a planar NCHW
// layout and a blocked nChw8c layout, both FP32. If the constructor recorded
// a validation error, it is reported here instead.
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
                                                                         InferenceEngine::ResponseDesc *resp) noexcept {
    // Fix: take both shapes by const reference — SizeVector is a std::vector,
    // and the first parameter was previously copied on every call.
    auto createConfig = [](const InferenceEngine::SizeVector& inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
        InferenceEngine::LayerConfig config;
        config.dynBatchSupport = false;
        InferenceEngine::DataConfig inData;
        InferenceEngine::DataConfig outData;
        InferenceEngine::SizeVector order = {0, 1, 2, 3};
        // Allow any offset before data
        size_t offset((std::numeric_limits<size_t>::max)());
        if (planar) {
            inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
            config.inConfs.push_back(inData);
            outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
            config.outConfs.push_back(outData);
        } else {
            // Add blocked (nChw8c) format
            auto div_up = [](const int a, const int b) -> int {
                if (!b)
                    return 0;
                return (a + b - 1) / b;
            };

            order.push_back(1);  // channel dimension is blocked by 8
            InferenceEngine::SizeVector inBlkDims = inShape;
            inBlkDims[1] = div_up(inBlkDims[1], 8);
            inBlkDims.push_back(8);
            InferenceEngine::SizeVector outBlkDims = outShape;
            outBlkDims[1] = div_up(outBlkDims[1], 8);
            outBlkDims.push_back(8);
            inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
            config.inConfs.push_back(inData);
            outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
            config.outConfs.push_back(outData);
        }
        return config;
    };
    if (!error.empty()) {
        // Report the constructor-time validation failure.
        if (resp) {
            strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
            resp->msg[sizeof(resp->msg)-1] = 0;
        }
        return InferenceEngine::GENERAL_ERROR;
    }
    // Add planar format
    conf.emplace_back(createConfig(inShape, outShape, true));
    // Add blocked format nChw8c
    conf.emplace_back(createConfig(inShape, outShape, false));
    return InferenceEngine::OK;
}
//! [cpu_implementation:getSupportedConfigurations]
//! [cpu_implementation:init]
// Validates the LayerConfig selected by the plugin before execution.
// Returns GENERAL_ERROR with a message in `resp` on failure, OK otherwise.
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
    try {
        if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
            THROW_IE_EXCEPTION << "Operation cannot be initialized with incorrect number of inputs/outputs!";
        }

        if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
            THROW_IE_EXCEPTION << "Operation can be initialized only with 4d input/output tensors!";
        }

        if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
            config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32)  {
            THROW_IE_EXCEPTION << "Operation supports only FP32 precisions!";
        }
    } catch (InferenceEngine::details::InferenceEngineException& ex) {
        // Fix: report the message of the exception just caught. The original
        // copied the member `error` (set only by the constructor, often empty
        // here), silently dropping the real failure reason.
        error = ex.what();
        if (resp) {
            strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
            resp->msg[sizeof(resp->msg)-1] = 0;
        }
        return InferenceEngine::GENERAL_ERROR;
    }
    return InferenceEngine::OK;
}
//! [cpu_implementation:init]
//! [cpu_implementation:execute]
// Reference CPU kernel: out[i] = in[i] + add, element-wise over the whole blob.
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
                                                      std::vector<InferenceEngine::Blob::Ptr> &outputs,
                                                      InferenceEngine::ResponseDesc *resp) noexcept {
    const auto &inBlob = inputs[0];
    const auto &outBlob = outputs[0];
    // Blob buffers may carry padding before the data; skip it explicitly.
    const float *src = inBlob->cbuffer().as<const float *>() + inBlob->getTensorDesc().getBlockingDesc().getOffsetPadding();
    float *dst = outBlob->buffer().as<float *>() + outBlob->getTensorDesc().getBlockingDesc().getOffsetPadding();

    const size_t count = inBlob->size();
    for (size_t idx = 0; idx < count; ++idx) {
        dst[idx] = src[idx] + add;
    }
    return InferenceEngine::OK;
}
//! [cpu_implementation:execute]

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>

namespace TemplateExtension {

//! [cpu_implementation:header]
// CPU reference implementation of the custom "Operation" node: element-wise
// addition of a scalar attribute over a 4D FP32 tensor.
class OpImplementation : public InferenceEngine::ILayerExecImpl {
public:
    // Validates the node; on failure stores the message in `error` for later reporting.
    explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
    // Reports supported tensor configurations (planar and blocked layouts).
    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
                                                           InferenceEngine::ResponseDesc *resp) noexcept override;
    // Validates the config chosen by the plugin.
    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
                                     InferenceEngine::ResponseDesc *resp) noexcept override;
    // Runs the kernel: outputs[0][i] = inputs[0][i] + add.
    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
                                        std::vector<InferenceEngine::Blob::Ptr> &outputs,
                                        InferenceEngine::ResponseDesc *resp) noexcept override;
private:
    int64_t add;             // scalar added to every element (node's "add" attribute)
    ngraph::Shape inShape;   // NOTE(review): read by getSupportedConfigurations() but not
    ngraph::Shape outShape;  // assigned anywhere in the visible .cpp — confirm the ctor sets them
    std::string error;       // non-empty iff constructor-time validation failed
};
//! [cpu_implementation:header]

}  // namespace TemplateExtension

View File

@@ -0,0 +1,73 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "extension.hpp"
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <ngraph/factory.hpp>
#include <ngraph/opsets/opset.hpp>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
using namespace TemplateExtension;
//! [extension:GetVersion]
// Fills `versionInfo` with a pointer to a static descriptor of this extension.
void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept {
    static InferenceEngine::Version ExtensionDescription = {
        {1, 0},             // extension API version
        "1.0",
        "template_ext"      // extension description message
    };

    versionInfo = &ExtensionDescription;
}
//! [extension:GetVersion]
//! [extension:getOpSets]
std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
std::map<std::string, ngraph::OpSet> opsets;
ngraph::OpSet opset;
opset.insert<Operation>();
opsets["custom_opset"] = opset;
return opsets;
}
//! [extension:getOpSets]
//! [extension:getImplTypes]
// A node is executable by this extension only if it is our custom Operation;
// in that case a single "CPU" implementation is offered.
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) {
    const bool isCustomOp = std::dynamic_pointer_cast<Operation>(node) != nullptr;
    return isCustomOp ? std::vector<std::string>{"CPU"} : std::vector<std::string>{};
}
//! [extension:getImplTypes]
//! [extension:getImplementation]
// Returns the CPU kernel for our custom Operation; nullptr for anything else.
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) {
    if (implType != "CPU" || !std::dynamic_pointer_cast<Operation>(node)) {
        return nullptr;
    }
    return std::make_shared<OpImplementation>(node);
}
//! [extension:getImplementation]
//! [extension:CreateExtension]
// Exported function
// Factory entry point loaded by the Inference Engine core (dlopen/LoadLibrary).
INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
                                                                                      InferenceEngine::ResponseDesc *resp) noexcept {
    try {
        ext = new Extension();
        return OK;
    } catch (std::exception &ex) {
        if (resp) {
            std::string err = ((std::string) "Couldn't create extension: ") + ex.what();
            // Fix: std::string::copy() does NOT append a null terminator, so
            // the original could hand back an unterminated msg buffer.
            // Terminate at the number of characters actually copied.
            const std::size_t copied = err.copy(resp->msg, 255);
            resp->msg[copied] = '\0';
        }
        return InferenceEngine::GENERAL_ERROR;
    }
}
//! [extension:CreateExtension]

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ie_iextension.h>
#include <ie_api.h>
#include <ngraph/ngraph.hpp>
#include <memory>
#include <vector>
#include <string>
#include <map>

//! [extension:header]
namespace TemplateExtension {

// Inference Engine extension entry point: exposes the custom opset and the
// CPU implementation of the custom operation to the core/plugin.
class Extension : public InferenceEngine::IExtension {
public:
    Extension() = default;
    // Fills in a pointer to a static version descriptor for this extension.
    void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
    // Nothing to unload: the extension holds no global state.
    void Unload() noexcept override {}
    // The extension is allocated with `new` in CreateExtension(), so Release() deletes it.
    void Release() noexcept override { delete this; }
    // Opset(s) contributed by this extension (keyed by opset name).
    std::map<std::string, ngraph::OpSet> getOpSets() override;
    // Implementation types available for a node ("CPU" for our Operation).
    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
    // Factory for the kernel matching (node, implType).
    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override;
};

}  // namespace TemplateExtension
//! [extension:header]

View File

@@ -0,0 +1,38 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "op.hpp"

using namespace TemplateExtension;

// Out-of-class definition required for the static constexpr member (pre-C++17 ODR rules).
constexpr ngraph::NodeTypeInfo Operation::type_info;

//! [op:ctor]
// Stores the single input and the scalar attribute, then runs shape/type inference.
Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) {
    constructor_validate_and_infer_types();
}
//! [op:ctor]

//! [op:validate]
void Operation::validate_and_infer_types() {
    // Operation doesn't change shapes and element type
    set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}
//! [op:validate]

//! [op:copy]
// Clones the node for graph transformations; the "add" attribute is carried over.
std::shared_ptr<ngraph::Node> Operation::copy_with_new_args(const ngraph::NodeVector &new_args) const {
    // The operation has exactly one input, so a clone needs exactly one argument.
    if (new_args.size() != 1) {
        throw ngraph::ngraph_error("Incorrect number of new arguments");
    }

    return std::make_shared<Operation>(new_args.at(0), add);
}
//! [op:copy]

//! [op:visit_attributes]
// Exposes the "add" scalar so it is (de)serialized with the graph.
bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) {
    visitor.on_attribute("add", add);
    return true;
}
//! [op:visit_attributes]

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ngraph/ngraph.hpp>

//! [op:header]
namespace TemplateExtension {

// Custom nGraph operation: element-wise addition of a scalar "add" attribute.
// Shape and element type are propagated unchanged from the single input.
class Operation : public ngraph::op::Op {
public:
    static constexpr ngraph::NodeTypeInfo type_info{"Template", 0};
    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }

    Operation() = default;
    Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add);
    void validate_and_infer_types() override;
    std::shared_ptr<ngraph::Node> copy_with_new_args(const ngraph::NodeVector& new_args) const override;
    bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
    // Accessor used by the CPU kernel to read the attribute value.
    int64_t getAddAttr() { return add; }

private:
    int64_t add;  // scalar added to every element at execution time
};
//! [op:header]

}  // namespace TemplateExtension

View File

@@ -0,0 +1,31 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

# [cmake:main]
if (APPLE)
    # due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
    cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
    cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
endif()

project(InferenceEngineTemplatePlugin)

set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DIR})

# The plugin is built standalone against an exported IE developer package
# (see -DInferenceEngineDeveloperPackage_DIR in the README build steps).
find_package(InferenceEngineDeveloperPackage REQUIRED)

add_subdirectory(src)

if(ENABLE_TESTS)
    include(CTest)
    enable_testing()
endif()
# [cmake:main]

# install

# ATTENTION: uncomment to install component
# ie_cpack(template)

View File

@@ -0,0 +1,18 @@
# template-plugin
Template Plugin for the Inference Engine, which demonstrates the basics of how an Inference Engine plugin can be built and implemented on top of the Inference Engine Developer Package and Plugin API.
## How to build
```bash
$ cd $DLDT_HOME
$ mkdir $DLDT_HOME/build
$ cd $DLDT_HOME/build
$ cmake -DENABLE_TESTS=ON -DENABLE_BEH_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON ..
$ make -j8
$ cd $TEMPLATE_PLUGIN_HOME
$ mkdir $TEMPLATE_PLUGIN_HOME/build
$ cd $TEMPLATE_PLUGIN_HOME/build
$ cmake -DInferenceEngineDeveloperPackage_DIR=$DLDT_HOME/build ..
$ make -j8
```

View File

@@ -0,0 +1,59 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief A header that defines advanced related properties for the Template plugin.
 * These properties should be used in SetConfig() and LoadNetwork() methods of plugins
 *
 * NOTE(review): the original text said "DLIA plugins" and "@file dlia_config.hpp" —
 * copy-paste leftovers from another plugin; confirm the actual file name.
 *
 * @file template_config.hpp
 */
#pragma once

#include <string>
#include "ie_plugin_config.hpp"

namespace InferenceEngine {

namespace TemplateMetrics {

/**
 * @def TEMPLATE_METRIC_VALUE(name)
 * @brief Shortcut for defining Template metric values
 */
#define TEMPLATE_METRIC_VALUE(name) InferenceEngine::TemplateMetrics::name
#define DECLARE_TEMPLATE_METRIC_VALUE(name) static constexpr auto name = #name

// ! [public_header:metrics]
/**
 * @brief Defines whether current Template device instance supports hardware blocks for fast convolution computations.
 */
DECLARE_TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION);
// ! [public_header:metrics]

}  // namespace TemplateMetrics

namespace TemplateConfigParams {

/**
 * @def TEMPLATE_CONFIG_KEY(name)
 * @brief Shortcut for defining Template device configuration keys
 */
#define TEMPLATE_CONFIG_KEY(name) InferenceEngine::TemplateConfigParams::_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_KEY(name) DECLARE_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(TEMPLATE_##name)

/**
 * @brief The key to define the type of transformations for TEMPLATE inputs and outputs.
 * TEMPLATE use custom data layout for input and output blobs. IE TEMPLATE Plugin provides custom
 * optimized version of transformation functions that do not use OpenMP and are much faster
 * than native TEMPLATE functions. Values: "NO" - optimized plugin transformations
 * are used, "YES" - native TEMPLATE transformations are used.
 * NOTE(review): the "NO"/"YES" semantics above read as inverted — confirm against the plugin code.
 */
DECLARE_TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY);

}  // namespace TemplateConfigParams
}  // namespace InferenceEngine

View File

@@ -0,0 +1,43 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

# [cmake:plugin]
set(TARGET_NAME "templatePlugin")

if(ENABLE_LTO)
    ie_enable_lto()
endif()

# Collect plugin sources; GLOB is acceptable here because the sample has a flat layout.
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)

# adds a shared library with plugin
ie_add_plugin(NAME ${TARGET_NAME}
              DEVICE_NAME "TEMPLATE"
              SOURCES ${SOURCES} ${HEADERS}
              SKIP_INSTALL # ATTENTION: uncomment to install component
              VERSION_DEFINES_FOR template_plugin.cpp)

# Plugin needs its own sources plus the public headers under include/.
target_include_directories(${TARGET_NAME} PRIVATE
    "${CMAKE_CURRENT_SOURCE_DIR}"
    "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include")

target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine IE::inference_engine_transformations ${NGRAPH_LIBRARIES} ${INTEL_ITT_LIBS})

# ATTENTION: uncomment to register a plugin in the plugins.xml file
# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
#                     POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]

# ATTENTION: uncomment to install component
# install
# set(component_name template)
# ie_cpack_add_component(${component_name} REQUIRED)
# install(TARGETS ${TARGET_NAME}
#         RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH}
#         ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH}
#         LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
#         COMPONENT ${component_name})

View File

@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <ie_profiling.hpp>
#include "template_async_infer_request.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [async_infer_request:ctor]
// Builds the asynchronous pipeline on top of a synchronous request.
// Stages run in order, each on its dedicated executor:
//   1. preprocessing + pipeline start (CPU executor)
//   2. wait for the device pipeline   (wait executor)
//   3. postprocessing                 (CPU executor)
TemplateAsyncInferRequest::TemplateAsyncInferRequest(
    const TemplateInferRequest::Ptr& inferRequest,
    const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
    const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
    const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
    AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
    _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
    // _pipeline is the list of (executor, task) pairs executed by the base class.
    _pipeline = {
        {cpuTaskExecutor, [this] {
            IE_PROFILING_AUTO_SCOPE(PreprocessingAndStartPipeline)
            _inferRequest->inferPreprocess();
            _inferRequest->startPipeline();
        }},
        {_waitExecutor, [this] {
            IE_PROFILING_AUTO_SCOPE(WaitPipeline)
            _inferRequest->waitPipeline();
        }},
        {cpuTaskExecutor, [this] {
            IE_PROFILING_AUTO_SCOPE(Postprocessing)
            _inferRequest->inferPostprocess();
        }}
    };
}
// ! [async_infer_request:ctor]

// ! [async_infer_request:dtor]
// Stop the pipeline and wait for in-flight stages before members are destroyed.
TemplateAsyncInferRequest::~TemplateAsyncInferRequest() {
    InferenceEngine::AsyncInferRequestThreadSafeDefault::StopAndWait();
}
// ! [async_infer_request:dtor]

View File

@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include "template_infer_request.hpp"
namespace TemplatePlugin {
// ! [async_infer_request:header]
/**
 * @brief Wraps a synchronous TemplateInferRequest into a three-stage asynchronous pipeline.
 */
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
    /**
     * @param inferRequest     Synchronous request that performs the actual work
     * @param cpuTaskExecutor  Executor running preprocessing/postprocessing stages
     *                         (renamed from `taskExecutor` to match the definition in the .cpp)
     * @param waitExecutor     Executor that waits for the device pipeline to finish
     * @param callbackExecutor Executor that runs user completion callbacks
     */
    TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
                              const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
                              const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
                              const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);

    ~TemplateAsyncInferRequest() override;

private:
    TemplateInferRequest::Ptr _inferRequest;       // underlying synchronous request
    InferenceEngine::ITaskExecutor::Ptr _waitExecutor;  // executor for the wait stage
};
// ! [async_infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,45 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <vector>
#include <algorithm>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <file_utils.h>
#include <cpp_interfaces/exception2status.hpp>
#include "template_config.hpp"
using namespace TemplatePlugin;
Configuration::Configuration() {}

// Builds a configuration by overlaying `config` entries on top of `defaultCfg`.
// Unknown keys either throw (throwOnUnsupported == true) or are silently ignored.
Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
    *this = defaultCfg;
    for (const auto& entry : config) {
        const std::string& key = entry.first;
        const std::string& value = entry.second;
        if (key == CONFIG_KEY(DEVICE_ID)) {
            deviceId = std::stoi(value);
        } else if (key == CONFIG_KEY(PERF_COUNT)) {
            perfCount = (value == CONFIG_VALUE(YES));
        } else if (throwOnUnsupported) {
            THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << key;
        }
    }
}
// Returns the current value of a supported configuration key; throws on unknown keys.
InferenceEngine::Parameter Configuration::Get(const std::string& name) const {
    if (CONFIG_KEY(DEVICE_ID) == name) {
        return {std::to_string(deviceId)};
    }
    if (CONFIG_KEY(PERF_COUNT) == name) {
        return {perfCount};
    }
    THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << name;
}

View File

@@ -0,0 +1,40 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <string>
#include <map>
#include <unordered_map>
#include <ie_parameter.hpp>
namespace TemplatePlugin {
// Convenience alias for per-tensor maps keyed by input/output name.
template<typename T>
using IOMap = std::unordered_map<std::string, T>;

// ! [configuration:header]
using ConfigMap = std::map<std::string, std::string>;

/**
 * @brief Holds plugin/network configuration parameters parsed from a ConfigMap.
 */
struct Configuration {
    Configuration();
    Configuration(const Configuration&) = default;
    Configuration(Configuration&&) = default;
    Configuration& operator=(const Configuration&) = default;
    Configuration& operator=(Configuration&&) = default;

    // Overlays `config` on top of `defaultCfg`; unknown keys throw unless throwOnUnsupported is false.
    explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true);

    // Returns the value of a supported key; throws on unknown keys.
    InferenceEngine::Parameter Get(const std::string& name) const;

    // Plugin configuration parameters
    int deviceId = 0;        // KEY_DEVICE_ID: index of the Template device instance to use
    bool perfCount = true;   // KEY_PERF_COUNT: whether performance counters are collected
};
// ! [configuration:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,167 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <atomic>
#include <set>
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include <ie_metric_helpers.hpp>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <network_serializer.h>
#include <threading/ie_executor_manager.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ngraph/specialize_function.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <transformations/convert_divide.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
// Compiles an IR v10 (ngraph-based) network for the Template device.
// Throws if the network has no ngraph::Function (i.e. older IR versions).
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
                                                     const Configuration& cfg) :
    _name(network.getName()),
    _cfg(cfg),
    _waitExecutor(InferenceEngine::ExecutorManager::getInstance()->getExecutor("Template")) {
    // TODO: if your plugin supports device ID (more that single instance of device can be on host machine)
    // you should select proper device based on KEY_DEVICE_ID or automatic behavior
    // In this case, _waitExecutor should also be created per device.
    try {
        if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
            CompileGraph(ngraphFunction);
        } else {
            THROW_IE_EXCEPTION << "TEMPLATE plugin can compile only IR v10 networks";
        }
    }
    catch (const InferenceEngineException&) {
        // Use `throw;` to rethrow the original exception object unchanged;
        // `throw e;` would copy and slice any derived exception type.
        throw;
    }
    catch (const std::exception & e) {
        THROW_IE_EXCEPTION << "Standard exception from compilation library: " << e.what();
    }
    catch (...) {
        THROW_IE_EXCEPTION << "Generic exception is thrown";
    }
}
// ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream]
// Restores a previously exported network from a stream (see ExportImpl).
// Currently a stub: only the configuration is stored, no graph is read.
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
                                                     const Configuration& cfg) :
    _cfg(cfg) {
    // TODO: since Import network is not a mandatory functionality, this ctor can just be removed
}
// ! [executable_network:ctor_import_stream]
// ! [executable_network:compile_graph]
// Turns the ngraph::Function into a device-executable graph:
// deep-copy -> transformation passes -> (stub) mapping to device operations.
void TemplatePlugin::ExecutableNetwork::CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction) {
    // TODO: perform actual graph compilation taking `_cfg` into account
    // 1. Copy ngraph::Function first to apply some transformations later in
    //    ExecutableNetwork::CompileGraph, which modify original ngraph::Function
    const bool shareConsts = false, constFolding = false;
    std::vector<::ngraph::element::Type> new_types;
    std::vector<::ngraph::PartialShape> new_shapes;
    // Keep the original parameter types/shapes: specialize_function is used purely as a deep copy here.
    for (const auto &parameter : ngraphFunction->get_parameters()) {
        new_shapes.emplace_back(parameter->get_partial_shape());
        new_types.emplace_back(parameter->get_element_type());
    }
    auto copyFunction = ngraph::specialize_function(std::const_pointer_cast<ngraph::Function>(ngraphFunction),
        new_types, new_shapes, std::vector<void *>(new_types.size(), nullptr), constFolding, shareConsts);
    // 2. Perform common and device-specific transformations
    ngraph::pass::Manager passManager;
    // Example: register standard ngraph transformation from ngraph::ngraph
    passManager.register_pass<ngraph::pass::ConstantFolding>();
    // Example: register inference engine optimization transformation for IE::inference_engine_transformations
    passManager.register_pass<ngraph::pass::ConvertDivide>();
    // Register any other transformations
    // ..
    // After `run_passes`, we have the transformed function, where operations match device operations,
    // and we can create device hardware-dependent graph
    passManager.run_passes(copyFunction);
    // 3. Iterate over operations and create hardware-specific ngraph
    for (const auto& op : copyFunction->get_ordered_ops()) {
        // TODO: map ngraph `op` to device operation
    }
    // 4. Perform any other steps like allocation and filling device buffers, and so on
}
// ! [executable_network:compile_graph]
// ! [executable_network:create_infer_request_impl]
// Factory for the synchronous request implementation; called by CreateInferRequest below.
InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                                                                                                     InferenceEngine::OutputsDataMap networkOutputs) {
    return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
}
// ! [executable_network:create_infer_request_impl]

// ! [executable_network:create_infer_request]
// Wraps a synchronous request into the async pipeline and exposes it via IInferRequest.
void TemplatePlugin::ExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) {
    auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
    auto asyncThreadSafeImpl = std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
                                                                           _taskExecutor, _waitExecutor, _callbackExecutor);
    // Custom deleter calls Release() because IInferRequest instances are ref-counted, not deleted directly.
    asyncRequest.reset(new InferenceEngine::InferRequestBase<TemplateAsyncInferRequest>(asyncThreadSafeImpl),
                       [](InferenceEngine::IInferRequest *p) { p->Release(); });
    asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
}
// ! [executable_network:create_infer_request]
// ! [executable_network:get_config]
// Returns the value of a supported config key from the stored configuration.
void TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const {
    // TODO: return more supported values for config keys
    const bool isSupportedKey = (name == CONFIG_KEY(DEVICE_ID)) ||
                                (name == CONFIG_KEY(PERF_COUNT));
    if (!isSupportedKey) {
        THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
    }
    result = _cfg.Get(name);
}
// ! [executable_network:get_config]
// ! [executable_network:get_metric]
// Reports metrics of the compiled network (name, supported metrics/keys, optimal request count).
void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
    // TODO: return more supported values for metrics
    if (METRIC_KEY(SUPPORTED_METRICS) == name) {
        result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector<std::string>{
            METRIC_KEY(NETWORK_NAME),
            METRIC_KEY(SUPPORTED_METRICS),
            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
            METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
    } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
        result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{
            CONFIG_KEY(DEVICE_ID),
            CONFIG_KEY(PERF_COUNT)});
    } else if (METRIC_KEY(NETWORK_NAME) == name) {
        result = IE_SET_METRIC(NETWORK_NAME, _name);
    } else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
        // TODO: fill with actual number
        unsigned int value = 1;
        result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
    } else {
        THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
    }
}
// ! [executable_network:get_metric]
// ! [executable_network:export_impl]
// Serializes the compiled graph to a stream; counterpart of the import-stream constructor.
// Currently a stub.
void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& dlaModel) {
    // TODO: Code which exports graph from std::ostream
}
// ! [executable_network:export_impl]

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include <tuple>
#include <memory>
#include <string>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <ie_common.h>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <cnn_network_impl.hpp>
#include <threading/ie_itask_executor.hpp>
#include <ngraph/function.hpp>
#include "template_config.hpp"
#include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"
namespace TemplatePlugin {
class Engine;
/**
 * @class ExecutableNetwork
 * @brief Interface of executable network
 */
// ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
    // Compiles an ngraph-based (IR v10) network for the Template device.
    ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
                      const Configuration& cfg);

    // Restores a previously exported network from a stream (stub).
    ExecutableNetwork(std::istream & model,
                      const Configuration& cfg);

    ~ExecutableNetwork() override = default;

    // Methods from a base class ExecutableNetworkThreadSafeDefault
    void ExportImpl(std::ostream& model) override;
    InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                                                                      InferenceEngine::OutputsDataMap networkOutputs) override;
    void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
    void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
    void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;

    std::atomic<std::size_t> _requestId = {0};  // counter used to name/number infer requests
    std::string _name;                          // network name reported via NETWORK_NAME metric
    Configuration _cfg;                         // configuration the network was compiled with

private:
    // Compiles the function into a device-specific representation (see .cpp for the steps).
    void CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction);

    std::shared_ptr<Engine> _plugin;                     // owning plugin (NOTE(review): never assigned in visible code)
    InferenceEngine::ITaskExecutor::Ptr _waitExecutor;   // executor for the async wait stage
};
// ! [executable_network:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,224 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>
#include <ie_blob.h>
#include <ie_plugin.hpp>
#include <description_buffer.hpp>
#include <debug.h>
#include <ie_layouts.h>
#include <threading/ie_executor_manager.hpp>
#include <blob_transform.hpp>
#include <ie_parallel.hpp>
#include <ie_memcpy.h>
#include <precision_utils.h>
#include <template/template_config.hpp>
#include "template_infer_request.hpp"
#include "template_executable_network.hpp"
#include "template_plugin.hpp"
using namespace TemplatePlugin;
using Time = std::chrono::high_resolution_clock;
using ns = std::chrono::nanoseconds;
using fsec = std::chrono::duration<float>;
// ! [infer_request:ctor]
// Creates a synchronous request: assigns a unique ID, prepares profiling task
// labels, and allocates all host/device buffers.
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
                                           const InferenceEngine::OutputsDataMap& networkOutputs,
                                           const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
    InferRequestInternal(networkInputs, networkOutputs),
    _executableNetwork(executableNetwork) {
    // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
    auto requestID = std::to_string(_executableNetwork->_requestId);
    _executableNetwork->_requestId++;
    std::string name = _executableNetwork->_name + "_Req" + requestID;
    // Label order must match the Preprocess/Postprocess/StartPipeline/WaitPipeline enum.
    // (Fixed "Pipline" -> "Pipeline" typo in the last two labels.)
    _profilingTask = { {
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Preprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Postprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_StartPipeline") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_WaitPipeline") },
    } };
    allocateDeviceBuffers();
    allocateInputBlobs();
    allocateOutputBlobs();
}
// ! [infer_request:ctor]
// ! [infer_request:dtor]
TemplateInferRequest::~TemplateInferRequest() {
    // NOTE(review): decrementing the shared counter means request IDs can be
    // reused by later requests; the counter doubles as an "active requests"
    // count here — confirm this is intended.
    _executableNetwork->_requestId--;
}
// ! [infer_request:dtor]
// Reserved for device-side buffer allocation; no-op for the template (host-only) device.
void TemplateInferRequest::allocateDeviceBuffers() {
    // TODO: allocate device buffers if Template device is a remote one
}
// Allocates one blob per network input in the user-facing layout, plus a
// separate NCHW working blob when the input layout is NHWC (the device works
// in NCHW; inferPreprocess() copies/converts between them).
// Throws for precisions other than FP32, FP16, I16, U8.
void TemplateInferRequest::allocateInputBlobs() {
    for (auto &networkInput : _networkInputs) {
        SizeVector dims = networkInput.second->getTensorDesc().getDims();
        Precision precision = networkInput.second->getTensorDesc().getPrecision();
        Layout input_layout = networkInput.second->getInputData()->getLayout();
        Blob::Ptr inputBlob;
        Blob::Ptr inputBlobNCHW;
        switch (precision) {
        case Precision::FP32 :
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::FP16 :
        case Precision::I16 :
            // FP16 shares storage element size with I16, so both use int16_t blobs.
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::U8 :
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, Layout::NCHW });
            }
            break;
        default:
            // Fixed: `precision` was accidentally streamed twice in the message.
            THROW_IE_EXCEPTION << "Unsupported network precision: "
                << precision << "! Supported precisions are: FP32, FP16, I16, U8";
        }
        // allocate the input blob
        inputBlob->allocate();
        _inputs[networkInput.first] = inputBlob;
        // allocate the NCHW working copy only when it is a distinct blob
        if (inputBlobNCHW != inputBlob) {
            inputBlobNCHW->allocate();
        }
        _inputsNCHW[networkInput.first] = inputBlobNCHW;
    }
}
// Allocates one blob per network output (FP32/FP16 only), plus a separate
// NCHW working blob when the output layout is NHWC.
// Throws if the network has no inputs or outputs.
void TemplateInferRequest::allocateOutputBlobs() {
    // Validate up front (was previously checked after the loop, where an empty
    // output map silently skipped allocation before throwing).
    if (_networkOutputs.empty() || _networkInputs.empty()) {
        THROW_IE_EXCEPTION << "Internal error: no information about network's output/input";
    }
    for (auto &networkOutput : _networkOutputs) {
        SizeVector dims = networkOutput.second->getTensorDesc().getDims();
        Precision precision = networkOutput.second->getPrecision();
        Blob::Ptr outputBlob;
        Blob::Ptr outputBlobNCHW;
        switch (precision) {
        case Precision::FP32 :
            outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, networkOutput.second->getLayout() });
            if (networkOutput.second->getLayout() == Layout::NHWC) {
                outputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::FP16 :
            outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, networkOutput.second->getLayout() });
            if (networkOutput.second->getLayout() == Layout::NHWC) {
                outputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
            }
            break;
        default:
            THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Unsupported output precision: "
                << precision << "! Supported precisions are: FP32, FP16";
        }
        // allocate the output blob
        outputBlob->allocate();
        _outputs[networkOutput.first] = outputBlob;
        // allocate the NCHW working copy only when it is a distinct blob
        if (outputBlobNCHW != outputBlob) {
            outputBlobNCHW->allocate();
        }
        _outputsNCHW[networkOutput.first] = outputBlobNCHW;
    }
}
// ! [infer_request:infer_impl]
// Synchronous inference: runs the same four stages the async pipeline uses, in order.
void TemplateInferRequest::InferImpl() {
    // TODO: fill with actual list of pipeline stages, which are executed syncronously for sync infer requests
    inferPreprocess();
    startPipeline();
    waitPipeline();
    inferPostprocess();
}
// ! [infer_request:infer_impl]
// ! [infer_request:infer_preprocess]
// Runs IE-side preprocessing and brings every input into the NCHW working
// blobs allocated in allocateInputBlobs(). Records elapsed time for perf counters.
void TemplateInferRequest::inferPreprocess() {
    auto prev = Time::now();
    // execute input pre-processing.
    InferRequestInternal::execDataPreprocessing(_inputs);
    for (auto &input : InferRequestInternal::_inputs) {
        auto& src = input.second;
        auto& dst = _inputsNCHW[input.first];
        if (src != dst) {
            if (src->getTensorDesc().getPrecision() == dst->getTensorDesc().getPrecision()
                && src->getTensorDesc().getDims() == dst->getTensorDesc().getDims()
                && src->getTensorDesc().getLayout() == dst->getTensorDesc().getLayout()) {
                // Descriptors already match — reuse the user's blob directly instead of copying.
                _inputsNCHW[input.first] = input.second;
            } else { // Convert Layout to NCHW
                InferenceEngine::blob_copy(src, dst);
            }
        }
    }
    // TODO: Preprocessing on inputs if needed: work _inputsNCHW
    _inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:infer_preprocess]
// Kicks off the (stub) device pipeline asynchronously; completion is observed in waitPipeline().
void TemplateInferRequest::startPipeline() {
    IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[StartPipeline])
    // TODO: Start pipeline and fill _inputTransferTime, _executeTime, _outputTransferTime
}
// Blocks until the device pipeline started in startPipeline() completes.
// NOTE: the previous revision measured the wait here and stored it into
// _inputPreprocessTime, clobbering the preprocessing measurement reported by
// GetPerformanceCounts(). Wait time has no dedicated counter, so none is updated.
void TemplateInferRequest::waitPipeline() {
    IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[WaitPipeline])
    // TODO: Wait pipeline using driver API or other synchronization methods
}
// Converts results back to user layouts (stub) and records elapsed time for perf counters.
void TemplateInferRequest::inferPostprocess() {
    IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[Postprocess])
    auto prev = Time::now();
    // TODO: perform post-processing and convert to NHWC layout
    _outputPostProcessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:get_performance_counts]
// Reports each pipeline stage as a separate pseudo-layer entry, with times
// converted from nanoseconds (as stored) to microseconds (as reported).
void TemplateInferRequest::GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap) const {
    auto stageInfo = [](double cpuNs, double realNs) {
        InferenceEngineProfileInfo info;
        info.execution_index = 0;
        info.status = InferenceEngineProfileInfo::EXECUTED;
        info.realTime_uSec = static_cast<long long>(realNs / 1000);
        info.cpu_uSec = static_cast<long long>(cpuNs / 1000);
        return info;
    };
    // Host-side stages report CPU time equal to wall time; device stages report CPU time 0.
    perfMap["1. input preprocessing"]           = stageInfo(_inputPreprocessTime, _inputPreprocessTime);
    perfMap["2. input transfer to a device"]    = stageInfo(0.0, _inputTransferTime);
    perfMap["3. execution time"]                = stageInfo(0.0, _executeTime);
    perfMap["4. output transfer from a device"] = stageInfo(0.0, _outputTransferTime);
    perfMap["5. output postprocessing"]         = stageInfo(_outputPostProcessTime, _outputPostProcessTime);
}
// ! [infer_request:get_performance_counts]

View File

@@ -0,0 +1,74 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <array>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <ie_common.h>
#include <ie_profiling.hpp>
#include <cpp_interfaces/impl/ie_infer_request_internal.hpp>
#include <cpp_interfaces/impl/ie_executable_network_internal.hpp>
#include <threading/ie_itask_executor.hpp>

#include "template_config.hpp"
namespace TemplatePlugin {
class ExecutableNetwork;
// ! [infer_request:header]
/**
 * @brief Synchronous infer request for the Template device.
 * The four pipeline stages (preprocess / start / wait / postprocess) can be run
 * back-to-back (InferImpl) or assigned to executors by TemplateAsyncInferRequest.
 */
class TemplateInferRequest : public InferenceEngine::InferRequestInternal {
public:
    typedef std::shared_ptr<TemplateInferRequest> Ptr;

    TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
                         const InferenceEngine::OutputsDataMap& networkOutputs,
                         const std::shared_ptr<ExecutableNetwork>& executableNetwork);
    ~TemplateInferRequest() override;

    void InferImpl() override;
    void GetPerformanceCounts(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& perfMap) const override;

    // pipeline methods-stages which are used in async infer request implementation and assigned to particular executor
    void inferPreprocess();
    void startPipeline();
    void waitPipeline();
    void inferPostprocess();

    // Owning network; keeps the network (and its request counter) alive for the request's lifetime.
    std::shared_ptr<ExecutableNetwork> _executableNetwork;

private:
    void allocateDeviceBuffers();
    void allocateInputBlobs();
    void allocateOutputBlobs();

    // Indices into _profilingTask; order must match the label order built in the ctor.
    enum {
        Preprocess,
        Postprocess,
        StartPipeline,
        WaitPipeline,
        numOfStages
    };

    std::array<InferenceEngine::ProfilingTask, numOfStages> _profilingTask;

    // Device-layout (NCHW) working copies of user input/output blobs.
    InferenceEngine::BlobMap _inputsNCHW;
    InferenceEngine::BlobMap _outputsNCHW;

    // for performance counts (stored in nanoseconds; reported in microseconds)
    double _inputPreprocessTime = 0.0;
    double _inputTransferTime = 0.0;
    double _executeTime = 0.0;
    double _outputTransferTime = 0.0;
    double _outputPostProcessTime = 0.0;
};
// ! [infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,194 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <memory>
#include <vector>
#include <sstream>
#include <regex>
#include <string>
#include <map>
#include <ie_metric_helpers.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ie_plugin_config.hpp>
#include <ie_util_internal.hpp>
#include <inference_engine.hpp>
#include <file_utils.h>
#include <cpp_interfaces/base/ie_plugin_base.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include <graph_tools.hpp>
#include <ie_input_info.hpp>
#include <ie_layouts.h>
#include <hetero/hetero_plugin_config.hpp>
#include <template/template_config.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_infer_request.hpp"
using namespace TemplatePlugin;
// ! [plugin:ctor]
// Sets the device name under which the plugin is registered in the Core.
Plugin::Plugin() {
    // TODO: fill with actual device name
    _pluginName = "TEMPLATE";
}
// ! [plugin:ctor]
// ! [plugin:load_exe_network_impl]
// Validates input/output precisions, clones the network, and compiles it into
// an ExecutableNetwork. Throws on unsupported precisions.
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::ICore * core,
                                                                           const InferenceEngine::ICNNNetwork & network,
                                                                           const ConfigMap &config) {
    // Merge the per-call config on top of the plugin-wide defaults.
    auto cfg = Configuration{ config, _cfg };
    InferenceEngine::InputsDataMap networkInputs;
    InferenceEngine::OutputsDataMap networkOutputs;
    network.getInputsInfo(networkInputs);
    network.getOutputsInfo(networkOutputs);
    // TODO: check with precisions supported by Template device
    // Outputs: only floating-point precisions are accepted.
    for (auto networkOutput : networkOutputs) {
        auto output_precision = networkOutput.second->getPrecision();
        if (output_precision != Precision::FP32 &&
            output_precision != Precision::FP16) {
            THROW_IE_EXCEPTION << "Template device supports only FP16 and FP32 output precision.";
        }
    }
    // Inputs: FP32/FP16/I16/U8 are accepted.
    for (auto networkInput : networkInputs) {
        auto input_precision = networkInput.second->getTensorDesc().getPrecision();
        if (input_precision != InferenceEngine::Precision::FP32 &&
            input_precision != InferenceEngine::Precision::FP16 &&
            input_precision != InferenceEngine::Precision::I16 &&
            input_precision != InferenceEngine::Precision::U8) {
            THROW_IE_EXCEPTION << "Input image format " << input_precision << " is not supported yet.\n"
                << "Supported formats are: FP32, FP16, I16 and U8.";
        }
    }
    // Clone so the caller's network is not mutated by the const-folding pass below.
    auto clonedNetwork = cloneNet(network);
    ConstTransformer transformator(clonedNetwork.get());
    transformator.fullTrim();
    return std::make_shared<ExecutableNetwork>(*clonedNetwork, cfg);
}
// ! [plugin:load_exe_network_impl]
// ! [plugin:import_network_impl]
// Restores a previously exported network from a stream and wraps it into the
// public ExecutableNetwork interface. Currently the graph itself is not read (stub).
InferenceEngine::ExecutableNetwork Plugin::ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) {
    // TODO: Import network from stream is not mandatory functionality;
    // Can just throw an exception and remove the code below
    Configuration exportedCfg;
    // some code below which reads exportedCfg from `model` stream
    // ..
    auto cfg = Configuration(config, exportedCfg);
    IExecutableNetwork::Ptr executableNetwork;
    auto exec_network_impl = std::make_shared<ExecutableNetwork>(model, cfg);
    // Custom deleter calls Release() because IExecutableNetwork instances are ref-counted.
    executableNetwork.reset(new ExecutableNetworkBase<ExecutableNetworkInternal>(exec_network_impl),
        [](InferenceEngine::details::IRelease *p) {p->Release(); });
    return InferenceEngine::ExecutableNetwork{ executableNetwork };
}
// ! [plugin:import_network_impl]
// ! [plugin:query_network]
void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, QueryNetworkResult &res) const {
Configuration cfg{config, _cfg, false};
res.rc = StatusCode::OK;
if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
auto ops = ngraphFunction->get_ordered_ops();
for (auto&& op : ops) {
// TODO: investigate if an op is actually supported by Template device
bool supported = true;
if (supported) {
res.supportedLayersMap.insert({ op->get_friendly_name(), GetName() });
}
}
} else {
THROW_IE_EXCEPTION << "TEMPLATE plugin can query only IR v10 networks";
}
}
// ! [plugin:query_network]
// ! [plugin:add_extension]
// Extension registration entry point; the template device accepts none (no-op).
void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
    // TODO: add extensions if plugin supports extensions
}
// ! [plugin:add_extension]
// ! [plugin:set_config]
// Overlays new key/value pairs on top of the current plugin-wide configuration.
void Plugin::SetConfig(const ConfigMap &config) {
    _cfg = Configuration{config, _cfg};
}
// ! [plugin:set_config]

// ! [plugin:get_config]
// Returns a single configuration value; throws on unknown keys (see Configuration::Get).
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const {
    return _cfg.Get(name);
}
// ! [plugin:get_config]
// ! [plugin:get_metric]
// Reports device-level metrics (available devices, capabilities, request ranges).
// Throws for unknown metric names.
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const {
    if (METRIC_KEY(SUPPORTED_METRICS) == name) {
        std::vector<std::string> supportedMetrics = {
            METRIC_KEY(AVAILABLE_DEVICES),
            METRIC_KEY(SUPPORTED_METRICS),
            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
            METRIC_KEY(FULL_DEVICE_NAME),
            METRIC_KEY(OPTIMIZATION_CAPABILITIES),
            METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
        IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
    } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
        std::vector<std::string> configKeys = {
            CONFIG_KEY(DEVICE_ID),
            CONFIG_KEY(PERF_COUNT) };
        IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
    } else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
        // TODO: fill list of available devices
        std::vector<std::string> availableDevices = { "" };
        IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
    } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
        // Renamed local from `name` — it shadowed the function parameter.
        std::string deviceFullName = "Template Device Full Name";
        IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, deviceFullName);
    } else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
        // TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
        std::vector<std::string> capabilities = { METRIC_VALUE(FP32), TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION) };
        IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
    } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
        // TODO: fill with actual values
        using uint = unsigned int;
        IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
    } else {
        THROW_IE_EXCEPTION << "Unsupported device metric: " << name;
    }
}
// ! [plugin:get_metric]
IE_SUPPRESS_DEPRECATED_START
// ! [plugin:create_plugin_engine]
// Plugin entry point exported to the Inference Engine core.
// Creates the Plugin instance and wraps it for the (deprecated)
// IInferencePlugin interface via make_ie_compatible_plugin, attaching the
// plugin version descriptor {2, 1, CI_BUILD_NUMBER, "templatePlugin"}.
// Returns OK on success; on failure writes the exception text into `resp`
// and returns GENERAL_ERROR. Must not throw (declared noexcept).
INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, ResponseDesc *resp) noexcept {
    try {
        plugin = make_ie_compatible_plugin({2, 1, CI_BUILD_NUMBER, "templatePlugin"},
                                           std::make_shared<Plugin>());
        return OK;
    }
    catch (std::exception &ex) {
        return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
    }
}
// ! [plugin:create_plugin_engine]
IE_SUPPRESS_DEPRECATED_END

View File

@@ -0,0 +1,48 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <inference_engine.hpp>
#include <description_buffer.hpp>
#include <cpp_interfaces/impl/ie_plugin_internal.hpp>
#include <ie_error.hpp>
#include <memory>
#include <string>
#include <map>
#include <unordered_map>
#include <vector>
#include "template_executable_network.hpp"
#include "template_config.hpp"
//! [plugin:header]
namespace TemplatePlugin {

// Template plugin front-end: implements the Inference Engine plugin
// interface (InferencePluginInternal). Owns the plugin-wide Configuration
// and creates ExecutableNetwork instances from CNN networks.
class Plugin : public InferenceEngine::InferencePluginInternal {
public:
    using Ptr = std::shared_ptr<Plugin>;

    Plugin();
    ~Plugin() override = default;

    // Applies user configuration (merged into the internal Configuration)
    void SetConfig(const std::map<std::string, std::string> &config) override;
    // Reports which layers of `network` this plugin can execute
    void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
                      const std::map<std::string, std::string>& config,
                      InferenceEngine::QueryNetworkResult &res) const override;
    // Compiles `network` into an executable network for this device
    InferenceEngine::ExecutableNetworkInternal::Ptr
    LoadExeNetworkImpl(const InferenceEngine::ICore * core, const InferenceEngine::ICNNNetwork &network,
                       const std::map<std::string, std::string> &config) override;
    void AddExtension(InferenceEngine::IExtensionPtr extension) override;
    // Getters for configuration values and device metrics
    InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
    InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
    // Restores a previously exported executable network from a stream
    InferenceEngine::ExecutableNetwork ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;

private:
    Configuration _cfg;  // current plugin-wide configuration
};

}  // namespace TemplatePlugin
//! [plugin:header]

View File

@@ -1,7 +1,7 @@
# Get Started with OpenVINO™ Deep Learning Deployment Toolkit (DLDT) on Linux*
This guide provides you with the information that will help you to start using
the DLDT on Linux\*. With this guide, you will learn how to:
the OpenVINO™ toolkit on Linux\*. With this guide, you will learn how to:
1. [Configure the Model Optimizer](#configure-the-model-optimizer)
2. [Prepare a model for sample inference](#prepare-a-model-for-sample-inference)
@@ -10,13 +10,13 @@ the DLDT on Linux\*. With this guide, you will learn how to:
3. [Run the Image Classification Sample Application with the model](#run-the-image-classification-sample-application)
## Prerequisites
1. This guide assumes that you have already cloned the `dldt` repo and
1. This guide assumes that you have already cloned the `openvino` repo and
successfully built the Inference Engine and Samples using the
[build instructions](inference-engine/README.md).
2. The original structure of the repository directories remains unchanged.
> **NOTE**: Below, the directory to which the `dldt` repository is cloned is
referred to as `<DLDT_DIR>`.
> **NOTE**: Below, the directory to which the `openvino` repository is cloned is
referred to as `<OPENVINO_DIR>`.
## Configure the Model Optimizer
@@ -53,7 +53,7 @@ If you see error messages, check for any missing dependencies.
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script to configure the Model Optimizer for Caffe,
TensorFlow, MXNet, Kaldi\*, and ONNX:
@@ -68,7 +68,7 @@ Configure individual frameworks separately **ONLY** if you did not select
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script for your model framework. You can run more than one script:
@@ -162,20 +162,20 @@ as `<models_dir>` below) with the Model Downloader:
**For CPU (FP32):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
```
**For GPU and MYRIAD (FP16):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
```
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `<ir_dir>` directory.
3. Copy the `squeezenet1.1.labels` file from the `<DLDT_DIR>/inference-engine/samples/sample_data/`
3. Copy the `squeezenet1.1.labels` file from the `<OPENVINO_DIR>/scripts/demo/`
folder to the model IR directory. This file contains the classes that ImageNet
uses so that the inference results show text instead of classification numbers:
```sh
cp <DLDT_DIR>/inference-engine/samples/sample_data/squeezenet1.1.labels <ir_dir>
cp <OPENVINO_DIR>/scripts/demo/squeezenet1.1.labels <ir_dir>
```
Now you are ready to run the Image Classification Sample Application.
@@ -184,28 +184,28 @@ Now you are ready to run the Image Classification Sample Application.
The Inference Engine sample applications are automatically compiled when you
built the Inference Engine using the [build instructions](inference-engine/README.md).
The binary files are located in the `<DLDT_DIR>/inference-engine/bin/intel64/Release`
The binary files are located in the `<OPENVINO_DIR>/inference-engine/bin/intel64/Release`
directory.
To run the Image Classification sample application with an input image on the prepared IR:
1. Go to the samples build directory:
```sh
cd <DLDT_DIR>/inference-engine/bin/intel64/Release
cd <OPENVINO_DIR>/inference-engine/bin/intel64/Release
2. Run the sample executable with specifying the `car.png` file from the
`<DLDT_DIR>/inference-engine/samples/sample_data/` directory as an input
`<OPENVINO_DIR>/scripts/demo/` directory as an input
image, the IR of your model and a plugin for a hardware device to perform
inference on:
**For CPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
```
**For GPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
```
**For MYRIAD:**
@@ -214,14 +214,14 @@ To run the Image Classification sample application with an input image on the pr
Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires
performing [additional hardware configuration steps](inference-engine/README.md#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2).
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
```
When the Sample Application completes, you will have the label and confidence for the top-10 categories printed on the screen. Below is a sample output with inference results on CPU:
```sh
Top 10 results:
Image /home/user/dldt/inference-engine/samples/sample_data/car.png
Image /home/user/openvino/scripts/demo/car.png
classid probability label
------- ----------- -----

View File

@@ -109,7 +109,7 @@ if(UNIX)
PATTERN *.bat EXCLUDE
PATTERN speech_libs_and_demos EXCLUDE)
elseif(WIN32)
install(DIRECTORY samples
install(DIRECTORY samples/
DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
COMPONENT cpp_samples
USE_SOURCE_PERMISSIONS
@@ -197,6 +197,7 @@ function(register_extra_plugins)
# automatically import plugins from the 'plugins' folder
file(GLOB local_extra_plugins "plugins/*")
list(APPEND local_extra_plugins "${OpenVINO_MAIN_SOURCE_DIR}/docs/template_plugin")
foreach(plugin_path IN LISTS IE_EXTRA_PLUGINS local_extra_plugins)
get_filename_component(plugin_dir "${plugin_path}" NAME)

View File

@@ -83,24 +83,24 @@ if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
if (WIN32 AND X86_64)
#TODO: add target_path to be platform specific as well, to avoid following if
RESOLVE_DEPENDENCY(TBB
ARCHIVE_WIN "tbb2020_20200214_win.zip"
ARCHIVE_WIN "tbb2020_20200415_win.zip"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(ANDROID) # Should be before LINUX due LINUX is detected as well
RESOLVE_DEPENDENCY(TBB
ARCHIVE_ANDROID "tbb2020_20191023_android.tgz"
ARCHIVE_ANDROID "tbb2020_20200404_android.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2020_20200327_lin_strip.tgz"
ARCHIVE_LIN "tbb2020_20200415_lin_strip.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2020_20191023_mac.tgz"
ARCHIVE_MAC "tbb2020_20200404_mac.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")

View File

@@ -90,8 +90,8 @@ function(ie_add_plugin)
ie_cpack_add_component(${install_component} REQUIRED DEPENDS core)
install(TARGETS ${IE_PLUGIN_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component}
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component}
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component}
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ${install_component}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component})
endif()
endfunction()

View File

@@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
# Default packages
#
set(FIRMWARE_PACKAGE_VERSION 1076)
set(FIRMWARE_PACKAGE_VERSION 1119)
#
# CMake variables to override default firmware files
@@ -82,7 +82,7 @@ foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
VERBATIM)
install(FILES ${${var_name}}
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT myriad)
endforeach()

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier : Apache-2.0
//
#include <stdlib.h>

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier : Apache-2.0
//
#include <stdlib.h>

View File

@@ -28,8 +28,8 @@ export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/ta
# install
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/

View File

@@ -29,15 +29,18 @@ def build_argparser():
args = parser.add_argument_group("Options")
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
required=True, type=str)
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to image file.",
required=True, type=str, nargs="+")
required=True, type=str, nargs="+")
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. Absolute path to a shared library with the kernels implementations.",
type=str, default=None)
help="Optional. Required for CPU custom layers. "
"Absolute path to a shared library with the kernels implementations.",
type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample will look for a suitable plugin for device specified (CPU by default)",
default="CPU", type=str)
help="Optional. Specify the target device to infer on; "
"CPU, GPU, FPGA or MYRIAD is acceptable. "
"Sample will look for a suitable plugin for device specified (CPU by default)",
default="CPU", type=str)
args.add_argument("--labels", help="Optional. Labels mapping file", default=None, type=str)
args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
@@ -59,9 +62,10 @@ def main():
# ------------- 2. Load Plugin for inference engine and extensions library if specified --------------
log.info("Device info:")
versions = ie.get_versions(args.device)
print("{}{}".format(" "*8, args.device))
print("{}MKLDNNPlugin version ......... {}.{}".format(" "*8, versions[args.device].major, versions[args.device].minor))
print("{}Build ........... {}".format(" "*8, versions[args.device].build_number))
print("{}{}".format(" " * 8, args.device))
print("{}MKLDNNPlugin version ......... {}.{}".format(" " * 8, versions[args.device].major,
versions[args.device].minor))
print("{}Build ........... {}".format(" " * 8, versions[args.device].build_number))
if args.cpu_extension and "CPU" in args.device:
ie.add_extension(args.cpu_extension, "CPU")
@@ -79,8 +83,15 @@ def main():
# -----------------------------------------------------------------------------------------------------
# --------------------------- 3. Read and preprocess input --------------------------------------------
input_blob = next(iter(net.inputs))
n, c, h, w = net.inputs[input_blob].shape
print("inputs number: " + str(len(net.inputs.keys())))
for input_key in net.inputs:
print("input shape: " + str(net.inputs[input_key].shape))
print("input key: " + input_key)
if len(net.inputs[input_key].layout) == 4:
n, c, h, w = net.inputs[input_key].shape
images = np.ndarray(shape=(n, c, h, w))
images_hw = []
for i in range(n):
@@ -94,13 +105,14 @@ def main():
log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW
images[i] = image
# -----------------------------------------------------------------------------------------------------
# --------------------------- 4. Configure input & output ---------------------------------------------
# --------------------------- Prepare input blobs -----------------------------------------------------
log.info("Preparing input blobs")
assert (len(net.inputs.keys()) == 1 or len(net.inputs.keys()) == 2), "Sample supports topologies only with 1 or 2 inputs"
input_blob = next(iter(net.inputs))
assert (len(net.inputs.keys()) == 1 or len(
net.inputs.keys()) == 2), "Sample supports topologies only with 1 or 2 inputs"
out_blob = next(iter(net.outputs))
input_name, input_info_name = "", ""
@@ -112,9 +124,21 @@ def main():
elif len(net.inputs[input_key].layout) == 2:
input_info_name = input_key
net.inputs[input_key].precision = 'FP32'
if net.inputs[input_key].shape[1] != 3 and net.inputs[input_key].shape[1] != 6 or net.inputs[input_key].shape[0] != 1:
if net.inputs[input_key].shape[1] != 3 and net.inputs[input_key].shape[1] != 6 or \
net.inputs[input_key].shape[0] != 1:
log.error('Invalid input info. Should be 3 or 6 values length.')
data = {}
data[input_name] = images
if input_info_name != "":
infos = np.ndarray(shape=(n, c), dtype=float)
for i in range(n):
infos[i, 0] = h
infos[i, 1] = w
infos[i, 2] = 1.0
data[input_info_name] = infos
# --------------------------- Prepare output blobs ----------------------------------------------------
log.info('Preparing output blobs')
@@ -141,7 +165,7 @@ def main():
log.info("Loading model to the device")
exec_net = ie.load_network(network=net, device_name=args.device)
log.info("Creating infer request and starting inference")
res = exec_net.infer(inputs={input_blob: images})
res = exec_net.infer(inputs=data)
# -----------------------------------------------------------------------------------------------------
# --------------------------- Read and postprocess output ---------------------------------------------
@@ -159,8 +183,8 @@ def main():
ymin = np.int(ih * proposal[4])
xmax = np.int(iw * proposal[5])
ymax = np.int(ih * proposal[6])
print("[{},{}] element, prob = {:.6} ({},{})-({},{}) batch id : {}"\
.format(number, label, confidence, xmin, ymin, xmax, ymax, imid), end="")
print("[{},{}] element, prob = {:.6} ({},{})-({},{}) batch id : {}" \
.format(number, label, confidence, xmin, ymin, xmax, ymax, imid), end="")
if proposal[2] > 0.5:
print(" WILL BE PRINTED!")
if not imid in boxes.keys():
@@ -181,7 +205,8 @@ def main():
# -----------------------------------------------------------------------------------------------------
log.info("Execution successful\n")
log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool")
log.info(
"This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool")
if __name__ == '__main__':

View File

@@ -171,9 +171,9 @@ cdef class IECore:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# exec_net = ie.load_network(network=net, device_name="CPU", num_requsts=2)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)
# ```
cpdef ExecutableNetwork load_network(self, IENetwork network, str device_name, config=None, int num_requests=1):
cdef ExecutableNetwork exec_net = ExecutableNetwork()
@@ -197,8 +197,8 @@ cdef class IECore:
# @return An `ExecutableNetwork` object
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
    #  exec_net = ie.load_network(network=net, device_name="MYRIAD", num_requests=2)
# # export executable network
# exec_net.export(path_to_file_to_save)
@@ -226,8 +226,8 @@ cdef class IECore:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU")
# ```
def query_network(self, IENetwork network, str device_name, config=None):
@@ -238,12 +238,19 @@ cdef class IECore:
return c_map_to_dict(res)
## Sets a configuration for a plugin
# NOTE: When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# \note When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# @param config: a dictionary of configuration parameters as keys and their values
# @param device_name: a device name of a target plugin
# @return None
#
# Usage examples: See the `set_affinity` method of the `IENetwork` class
# Usage examples:\n
# ```python
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name="CPU")
# ```
def set_config(self, config: dict, device_name: str):
cdef map[string, string] c_config = dict_to_c_map(config)
self.impl.setConfig(c_config, device_name.encode())
@@ -316,7 +323,9 @@ cdef class IECore:
## Gets a configuration dedicated to device behavior. The method targets to extract information
# which can be set via set_config method.
# NOTE: When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# \note When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# @param device_name: A name of a device to get a config value.
# @param config_name: A config name to request.
# @return A config value corresponding to a config key.
@@ -452,8 +461,8 @@ cdef class ExecutableNetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie_core = IECore()
# net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie_core.load_network(net, device, num_requests=2)
# res = exec_net.infer({'data': img})
# res
@@ -531,9 +540,9 @@ cdef class ExecutableNetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie_core = IECore()
# exec_net = ie_core.load_network(net, device, num_requsts=2)
# net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie_core.load_network(net, device, num_requests=2)
# exec_graph = exec_net.get_exec_graph_info()
# ```
def get_exec_graph_info(self):
@@ -549,7 +558,7 @@ cdef class ExecutableNetwork:
# Usage example:\n
# ```python
# ie = IECore()
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(net, "CPU")
# exec_net.get_metric("NETWORK_NAME")
# ```
@@ -564,9 +573,9 @@ cdef class ExecutableNetwork:
# Usage example:\n
# ```python
# ie = IECore()
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(net, "CPU")
# exec_net.get_metric("DEVICE_ID")
# config = exec_net.get_config("CPU_BIND_THREAD")
# ```
def get_config(self, config_name: str):
return deref(self.impl).getConfig(config_name.encode())
@@ -576,8 +585,8 @@ cdef class ExecutableNetwork:
# @return None
#
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
    #  exec_net = ie.load_network(network=net, device_name="MYRIAD", num_requests=2)
# exec_net.export(path_to_file_to_save)
# ```
@@ -620,8 +629,9 @@ cdef class InferRequest:
cdef void user_callback(self, int status) with gil:
if self._py_callback:
self._py_callback(status, self._py_data)
# Set flag at first since user can call wait in callback
self._py_callback_called.set()
self._py_callback(status, self._py_data)
## Description: Sets a callback function that is called on success or failure of an asynchronous request
#
@@ -632,8 +642,8 @@ cdef class InferRequest:
# Usage example:\n
# ```python
# callback = lambda status, py_data: print("Request with id {} finished with status {}".format(py_data, status))
# net = IENetwork("./model.xml", "./model.bin")
# ie = IECore()
# net = ie.read_network(model="./model.xml", weights="./model.bin")
# exec_net = ie.load_network(net, "CPU", num_requests=4)
# for id, req in enumerate(exec_net.requests):
# req.set_completion_callback(py_callback=callback, py_data=id)
@@ -662,7 +672,7 @@ cdef class InferRequest:
#
# Usage example:\n
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].infer({input_blob: image})
# res = exec_net.requests[0].outputs['prob']
# np.flip(np.sort(np.squeeze(res)),0)
@@ -683,7 +693,7 @@ cdef class InferRequest:
#
# Usage example:\n
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].async_infer({input_blob: image})
# request_status = exec_net.requests[0].wait()
# res = exec_net.requests[0].outputs['prob']
@@ -697,7 +707,8 @@ cdef class InferRequest:
## Waits for the result to become available. Blocks until specified timeout elapses or the result
# becomes available, whichever comes first.
# NOTE: There are special values of the timeout parameter:
#
# \note There are special values of the timeout parameter:
# * 0 - Immediately returns the inference status. It does not block or interrupt execution.
# To find statuses meaning, please refer to InferenceEngine::StatusCode in Inference Engine C++ documentation
# * -1 - Waits until inference result becomes available (default value)
@@ -714,6 +725,11 @@ cdef class InferRequest:
if status != StatusCode.RESULT_NOT_READY:
return status
if not self._py_callback_called.is_set():
if timeout == WaitMode.RESULT_READY:
timeout = None
if timeout is not None:
# Convert milliseconds to seconds
timeout = float(timeout)/1000
if not self._py_callback_called.wait(timeout):
return StatusCode.REQUEST_BUSY
return StatusCode.OK
@@ -724,12 +740,14 @@ cdef class InferRequest:
return deref(self.impl).wait(<int64_t> timeout)
## Queries performance measures per layer to get feedback of what is the most time consuming layer.
# NOTE: Performance counters data and format depends on the plugin
#
# \note Performance counters data and format depends on the plugin
#
# @return Dictionary containing per-layer execution information.
#
# Usage example:
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].infer({input_blob: image})
# exec_net.requests[0].get_perf_counts()
# {'Conv2D': {'exec_type': 'jit_avx2_1x1',
@@ -780,18 +798,20 @@ cdef class InferRequest:
## Sets new batch size for certain infer request when dynamic batching is enabled in executable network
# that created this request.
# NOTE: Support of dynamic batch size depends on the target plugin.
#
# \note Support of dynamic batch size depends on the target plugin.
#
# @param size: New batch size to be used by all the following inference calls for this request
# @return None
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# # Set max batch size
# net.batch = 10
# plugin.set_config({"DYN_BATCH_ENABLED": "YES"})
# exec_net = plugin.load(network=net)
# ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name=device)
# exec_net = ie.load_network(network=net, device_name=device)
# # Set batch size for certain network.
# # NOTE: Input data shape will not be changed, but will be used partially in inference which increases performance
# exec_net.requests[0].set_batch(2)
@@ -855,7 +875,11 @@ cdef class IENetLayer:
def type(self):
return deref(self._ptr).type.decode()
## Layer base operating precision. Provides getter and setter interfaces.
## \note This property is deprecated.
# Please, use out_data property to access DataPtr objects for all output ports, which contains full
# information about layer's output data including precision.
#
# Layer base operating precision. Provides getter and setter interfaces.
@property
def precision(self):
warnings.filterwarnings("always", category=DeprecationWarning)
@@ -874,8 +898,8 @@ cdef class IENetLayer:
# The affinity attribute provides getter and setter interfaces, so the layer affinity can be modified directly.
# For example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU")
# layers = net.layers
# for layer, device in layers_map.items():
@@ -922,8 +946,10 @@ cdef class IENetLayer:
input_to_list.append(deref(layer.second).name.decode())
return input_to_list
## Deprecated: use out_data property to access DataPtr objects for all output ports, which contains full
## \note This property is deprecated.
# Please, use out_data property to access DataPtr objects for all output ports, which contains full
# information about layer's output data including layout
#
# Returns the layout of the layer output data on 1st port
@property
def layout(self):
@@ -936,8 +962,10 @@ cdef class IENetLayer:
cdef C.DataPtr c_input = deref(self._ptr).outData[0]
return layout_int_to_str_map[deref(c_input).getLayout()]
## Deprecated: use out_data property to access DataPtr objects for all output ports, which contains full
## \note This property is deprecated.
# Please, use out_data property to access DataPtr objects for all output ports, which contains full
# information about layer's output data including shape
#
# Return the list of dimension of the layer output data on 1st port
@property
def shape(self):
@@ -988,7 +1016,10 @@ cdef class IENetLayer:
weights_buffer.reset(blob.second)
blobs_map[blob.first.decode()] = weights_buffer.to_numpy()
return blobs_map
## Dictionary with layer weights, biases or custom blobs if any
## \note This property is deprecated.
# Please use blobs property instead.
#
# Dictionary with layer weights, biases or custom blobs if any
@property
def weights(self):
warnings.filterwarnings("always", category=DeprecationWarning)
@@ -1003,6 +1034,9 @@ cdef class IENetLayer:
cdef class IENetwork:
## Class constructor
#
# \note Reading networks using IENetwork constructor is deprecated.
# Please, use IECore.read_network() method instead.
#
# @param model: A `.xml` file of the IR or PyCapsule containing smart pointer to nGraph function.
# In case of passing a `.xml` file attribute value can be a string path or bytes with file content
# depending on `init_from_buffer` attribute value
@@ -1100,8 +1134,9 @@ cdef class IENetwork:
## Batch size of the network. Provides getter and setter interfaces to get and modify the
# network batch size. For example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# print(et.batch_size)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# print(net.batch_size)
# net.batch_size = 4
# print(net.batch_size)
# print(net.inputs['data'].shape)
@@ -1109,7 +1144,9 @@ cdef class IENetwork:
@property
def batch_size(self):
return self.impl.getBatch()
## Deprecated: network precision does not make sence, use precision on egdes.
## \note This property is deprecated:
# network precision does not make sense, use precision on edges.
#
# Precision of the network
@property
def precision(self):
@@ -1139,13 +1176,16 @@ cdef class IENetwork:
layers[deref(l).name.decode()] = net_l
return layers
## Deprecated: new Calibration Tool doesn't generate statistics
## \note This property is deprecated.
# New Calibration Tool doesn't generate statistics
#
# Returns `LayersStatsMap` object containing dictionary that maps network layer names to calibration statistics
# represented by `LayerStats` objects.
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# net.stats.update({"conv1_2d" : LayserStats(min=(-25, -1, 0), max=(63, 124, 70)),
# "conv2_2d" : LayserStats(min=(-5, -1, 0, 1, -7, 2), max=(63, 124, 70, 174, 99, 106))
# })
@@ -1163,7 +1203,7 @@ cdef class IENetwork:
max=tuple(it.second["max".encode()]))
return py_stats_map
## NOTE: The function is deprecated. Please use the `IENetwork()` class constructor
## \note The function is deprecated. Please use the `IENetwork()` class constructor
# to create valid instance of `IENetwork`.
#
# Reads the model from the `.xml` and `.bin` files of the IR.
@@ -1192,7 +1232,8 @@ cdef class IENetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# net.add_outputs(["conv5_1', conv2_1', (split_2, 1)])]
# ```
def add_outputs(self, outputs):
@@ -1216,14 +1257,16 @@ cdef class IENetwork:
#
# Usage example:
# ```python
# net = IENetwork(model=path_to_model, weights=path_to_weights)
# ie = IECore()
# net = ie.read_network(model=path_to_xml, weights=path_to_bin)
# net.serialize(path_to_xml, path_to_bin)
# ```
def serialize(self, path_to_xml, path_to_bin: str = ""):
self.impl.serialize(path_to_xml.encode(), path_to_bin.encode())
## Reshapes the network to change spatial dimensions, batch size, or any dimension.
# NOTE: Before using this method, make sure that the target shape is applicable for the network.
#
# \note Before using this method, make sure that the target shape is applicable for the network.
# Changing the network shape to an arbitrary value may lead to unpredictable behaviour.
#
# @param input_shapes: A dictionary that maps input layer names to tuples with the target shape
@@ -1231,10 +1274,11 @@ cdef class IENetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# input_layer = next(iter(net.inputs))
# n, c, h, w = net.inputs[input_layer]
# net.reshape({input_layer: (n, c, h*2, w*2)}]
# net.reshape({input_layer: (n, c, h*2, w*2)})
# ```
def reshape(self, input_shapes: dict):
cdef map[string, vector[size_t]] c_input_shapes;
@@ -1255,9 +1299,11 @@ cdef class IENetwork:
# return self.impl.getFunction()
## This class is the main plugin interface and serves to initialize and configure the plugin.
#
#\note This class is deprecated: Use IECore instead
#
cdef class IEPlugin:
## Deprecated: Use IECore instead
# Class constructor
## Class constructor
#
# @param device: Target device name. Supported devices: CPU, GPU, FPGA, MYRIAD, HETERO, MULTI
# @param plugin_dirs: List of paths to plugin directories

View File

@@ -437,10 +437,10 @@ PyObject *InferenceEnginePython::IEExecNetwork::getMetric(const std::string &met
return parse_parameter(parameter);
}
PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &metric_name) {
PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &name) {
InferenceEngine::Parameter parameter;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->GetMetric(metric_name, parameter, &response));
IE_CHECK_CALL(actual->GetConfig(name, parameter, &response));
return parse_parameter(parameter);
}

View File

@@ -136,7 +136,7 @@ struct IEExecNetwork {
std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
PyObject* getMetric(const std::string & metric_name);
PyObject* getConfig(const std::string & metric_name);
PyObject* getConfig(const std::string & name);
int wait(int num_requests, int64_t timeout);
int getIdleRequestId();

View File

@@ -0,0 +1,20 @@
import pathlib
import os

import pytest

# Directory containing this conftest; test-data paths are resolved relative to it.
test_root = pathlib.Path(__file__).parent


@pytest.fixture(scope='session')
def models_dir():
    """Path to the bundled IR models used by the tests."""
    return test_root / 'test_data' / 'models'


@pytest.fixture(scope='session')
def images_dir():
    """Path to the bundled test images."""
    return test_root / 'test_data' / 'images'


@pytest.fixture(scope='session')
def device():
    """Target device name from the TEST_DEVICE env var, defaulting to CPU.

    Fix: single environment lookup with `or` instead of the original
    conditional that read TEST_DEVICE twice; an unset or empty variable
    still yields "CPU", exactly as before.
    """
    return os.environ.get("TEST_DEVICE") or "CPU"

View File

@@ -0,0 +1,55 @@
import os

import pytest

from openvino.inference_engine import CDataPtr, IECore

# FP32 sample IR whose single output layer is named 'fc_out'.
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')


def _load_exec_net(device):
    # Common setup shared by every test: read the sample IR and load it
    # onto the target device with five infer requests.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    return core.load_network(network, device, num_requests=5)


def test_name(device):
    # The output is exposed as a CDataPtr carrying the producing layer name.
    exec_net = _load_exec_net(device)
    assert isinstance(exec_net.outputs['fc_out'], CDataPtr)
    assert exec_net.outputs['fc_out'].name == "fc_out", "Incorrect name for layer 'fc_out'"


def test_precision(device):
    exec_net = _load_exec_net(device)
    assert isinstance(exec_net.outputs['fc_out'], CDataPtr)
    assert exec_net.outputs['fc_out'].precision == "FP32", "Incorrect precision for layer 'fc_out'"


def test_no_precision_setter(device):
    # CDataPtr is const: assigning precision must raise AttributeError.
    exec_net = _load_exec_net(device)
    with pytest.raises(AttributeError) as e:
        exec_net.outputs['fc_out'].precision = "I8"
    assert "attribute 'precision' of 'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value)


def test_layout(device):
    exec_net = _load_exec_net(device)
    assert exec_net.outputs['fc_out'].layout == "NC", "Incorrect layout for layer 'fc_out"


def test_no_layout_setter(device):
    # CDataPtr is const: assigning layout must raise AttributeError.
    exec_net = _load_exec_net(device)
    with pytest.raises(AttributeError) as e:
        exec_net.outputs['fc_out'].layout = "CN"
    assert "attribute 'layout' of 'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value)


def test_initialized(device):
    exec_net = _load_exec_net(device)
    assert exec_net.outputs['fc_out'].initialized, "Incorrect value for initialized property for layer 'fc_out"

View File

@@ -0,0 +1,42 @@
import os

import pytest

from openvino.inference_engine import IECore, IENetLayer, DataPtr

# FP32 sample IR used by every test in this module.
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')


def layer_out_data():
    # Helper: reads the sample network and returns the first output DataPtr
    # of layer '19' (a fresh IECore and network are built on every call).
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    return net.layers['19'].out_data[0]


def test_name():
    # DataPtr.name reflects the producing layer's name.
    assert layer_out_data().name == "19", "Incorrect name for layer '19'"


def test_precision():
    assert layer_out_data().precision == "FP32", "Incorrect precision for layer '19'"


def test_precision_setter():
    # precision is writable on a DataPtr taken from a live network.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.layers['19'].out_data[0].precision = "I8"
    assert net.layers['19'].out_data[0].precision == "I8", "Incorrect precision for layer '19'"


def test_incorrect_precision_setter():
    # Unsupported precision strings are rejected with ValueError.
    with pytest.raises(ValueError) as e:
        layer_out_data().precision = "123"
    assert "Unsupported precision 123! List of supported precisions:" in str(e.value)


def test_layout():
    # NOTE(review): the assertion message below is missing its closing quote;
    # it is a runtime string and is left unchanged.
    assert layer_out_data().layout == "NCHW", "Incorrect layout for layer '19"


def test_initialized():
    assert layer_out_data().initialized, "Incorrect value for initialized property for layer '19"

View File

@@ -0,0 +1,283 @@
import numpy as np
import os
import pytest

from openvino.inference_engine import ie_api as ie

# MYRIAD only supports FP16, so pick the matching IR flavour up front.
if os.environ.get("TEST_DEVICE") != "MYRIAD":
    SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
    SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')
else:
    SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.xml')
    SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.bin')

IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'test_data', 'images', 'cat3.bmp')

# # computed with caffe
# REF_IMAGE_RESULT = np.array([[34.6295814514, 18.9434795380, 43.2669448853, 0.4420155287, -108.4574050903,
#                               -314.8240051270, 231.0738067627, -106.3504943848, 108.5880966187, 92.7254943848]])


def read_image():
    """Load the bundled test image as a (1, 3, 32, 32) float batch in [0, 1].

    Returns an NCHW array ready to feed the sample network.
    Raises FileNotFoundError when the image file is missing.
    """
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(IMAGE_PATH)
    if image is None:
        raise FileNotFoundError("Input image not found")

    # Fix: cv2.resize takes dsize as (width, height); the original passed
    # (h, w). Harmless here because the target is square (32x32), but wrong
    # in general.
    image = cv2.resize(image, (w, h)) / 255
    image = image.transpose((2, 0, 1))  # HWC -> CHW
    image = image.reshape((n, c, h, w))
    return image
def test_infer(device):
    # Synchronous inference on the sample image must classify it as class 3.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device)
    img = read_image()
    res = exec_net.infer({'data': img})
    assert np.argmax(res['fc_out'][0]) == 3
    del exec_net
    del ie_core


def test_infer_net_from_buffer(device):
    # A network read from in-memory buffers must produce the same result as
    # one read from the IR files on disk.
    ie_core = ie.IECore()
    with open(SAMPLENET_BIN, 'rb') as f:
        bin = f.read()
    with open(SAMPLENET_XML, 'rb') as f:
        xml = f.read()
    net = ie_core.read_network(model=xml, weights=bin, init_from_buffer=True)
    net2 = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device)
    exec_net2 = ie_core.load_network(net2, device)
    img = read_image()
    res = exec_net.infer({'data': img})
    res2 = exec_net2.infer({'data': img})
    del exec_net
    del exec_net2
    del ie_core
    assert np.allclose(res['fc_out'], res2['fc_out'], atol=1E-4, rtol=1E-4)


def test_infer_wrong_input_name(device):
    # Feeding an unknown input name must fail with a clear message.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device)
    img = read_image()
    with pytest.raises(AssertionError) as e:
        exec_net.infer({'_data_': img})
    assert "No input with name _data_ found in network" in str(e.value)
    del exec_net
    del ie_core


def test_inputs(device):
    # The loaded network exposes its single input 'data' as a DataPtr.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device, num_requests=5)
    assert len(exec_net.inputs) == 1
    assert "data" in exec_net.inputs
    assert isinstance(exec_net.inputs['data'], ie.DataPtr)
    del exec_net
    del ie_core


def test_outputs(device):
    # The loaded network exposes its single output 'fc_out' as a CDataPtr.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device, num_requests=5)
    assert len(exec_net.outputs) == 1
    assert "fc_out" in exec_net.outputs
    assert isinstance(exec_net.outputs['fc_out'], ie.CDataPtr)
    del exec_net
    del ie_core


def test_access_requests(device):
    # `requests` holds exactly the number of InferRequest objects asked for.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device, num_requests=5)
    assert len(exec_net.requests) == 5
    assert isinstance(exec_net.requests[0], ie.InferRequest)
    del exec_net
    del ie_core
def test_async_infer_one_req(device):
    # start_async + wait on a single request yields the expected class.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    request_handler = exec_net.start_async(request_id=0, inputs={'data': img})
    request_handler.wait()
    res = request_handler.outputs['fc_out']
    assert np.argmax(res[0]) == 3
    del exec_net
    del ie_core


def test_async_infer_many_req(device):
    # Each of the five requests can be started and awaited independently.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device, num_requests=5)
    img = read_image()
    for id in range(5):
        request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
        request_handler.wait()
        res = request_handler.outputs['fc_out']
        assert np.argmax(res[0]) == 3
    del exec_net
    del ie_core


def test_async_infer_many_req_get_idle(device):
    # Drive twice as many inferences as there are requests, picking an idle
    # request each time and blocking for a completion when none is idle.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    num_requests = 5
    exec_net = ie_core.load_network(net, device, num_requests=num_requests)
    img = read_image()
    check_id = set()
    for id in range(2*num_requests):
        request_id = exec_net.get_idle_request_id()
        if request_id == -1:
            # No idle request: wait until at least one finishes, then retry.
            status = exec_net.wait(num_requests=1, timeout=ie.WaitMode.RESULT_READY)
            assert(status == ie.StatusCode.OK)
            request_id = exec_net.get_idle_request_id()
        assert(request_id >= 0)
        request_handler = exec_net.start_async(request_id=request_id, inputs={'data': img})
        check_id.add(request_id)
    # Wait for every in-flight request before checking results.
    status = exec_net.wait(timeout=ie.WaitMode.RESULT_READY)
    assert status == ie.StatusCode.OK
    for id in range(num_requests):
        if id in check_id:
            assert np.argmax(exec_net.requests[id].outputs['fc_out'][0]) == 3
    del exec_net
    del ie_core
def test_wait_before_start(device):
    # wait() on a request that was never started reports INFER_NOT_STARTED;
    # after start_async() the same request reports OK.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    num_requests = 5
    exec_net = ie_core.load_network(net, device, num_requests=num_requests)
    img = read_image()
    requests = exec_net.requests
    for id in range(num_requests):
        status = requests[id].wait()
        assert status == ie.StatusCode.INFER_NOT_STARTED
        request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
        status = requests[id].wait()
        assert status == ie.StatusCode.OK
        assert np.argmax(request_handler.outputs['fc_out'][0]) == 3
    del exec_net
    del ie_core


def test_wrong_request_id(device):
    # A request_id beyond the created pool must be rejected.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    with pytest.raises(ValueError) as e:
        exec_net.start_async(request_id=20, inputs={'data': img})
    assert "Incorrect request_id specified!" in str(e.value)
    del exec_net
    del ie_core


def test_wrong_num_requests(device):
    # Negative num_requests is rejected by load_network.
    with pytest.raises(ValueError) as e:
        ie_core = ie.IECore()
        net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        ie_core.load_network(net, device, num_requests=-1)
    assert "Incorrect number of requests specified: -1. Expected positive integer number or zero for auto detection" \
           in str(e.value)
    del ie_core


def test_wrong_num_requests_core(device):
    # Same negative-num_requests check, kept as a separate case.
    with pytest.raises(ValueError) as e:
        ie_core = ie.IECore()
        net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        # Fix: dropped the unused `exec_net =` binding — load_network raises
        # before returning, so the name was never used.
        ie_core.load_network(net, device, num_requests=-1)
    assert "Incorrect number of requests specified: -1. Expected positive integer number or zero for auto detection" \
           in str(e.value)
    del ie_core
def test_plugin_accessible_after_deletion(device):
    # The ExecutableNetwork must keep the plugin usable even after the
    # IECore that created it has been deleted.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device)
    del ie_core
    img = read_image()
    res = exec_net.infer({'data': img})
    assert np.argmax(res['fc_out'][0]) == 3
    del exec_net


def test_exec_graph(device):
    # get_exec_graph_info() returns a network that can be serialized to XML.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device)
    img = read_image()
    res = exec_net.infer({'data': img})
    exec_graph = exec_net.get_exec_graph_info()
    exec_graph_file = 'exec_graph.xml'
    exec_graph.serialize(exec_graph_file)
    assert os.path.exists(exec_graph_file)
    os.remove(exec_graph_file)
    del exec_net
    del exec_graph
    del ie_core


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "MYRIAD", reason="Device specific test. "
                                                                             "Only MYRIAD plugin implements network export")
def test_export_import():
    # Round-trip: export a loaded network to a file, import it back, and
    # verify inference still gives the expected class.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, "MYRIAD")
    exported_net_file = 'exported_model.bin'
    exec_net.export(exported_net_file)
    assert os.path.exists(exported_net_file)
    exec_net = ie_core.import_network(exported_net_file, "MYRIAD")
    os.remove(exported_net_file)
    img = read_image()
    res = exec_net.infer({'data': img})
    assert np.argmax(res['fc_out'][0]) == 3
    del exec_net
    del ie_core
def test_multi_out_data(device):
    # Regression test CVS-23965
    # Check that CDataPtr for all output layers not copied between outputs map items
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.add_outputs(['28'])
    exec_net = ie_core.load_network(net, device)
    assert "fc_out" in exec_net.outputs and "28" in exec_net.outputs
    assert isinstance(exec_net.outputs["fc_out"], ie.CDataPtr)
    assert isinstance(exec_net.outputs["28"], ie.CDataPtr)
    assert exec_net.outputs["fc_out"].name == "fc_out" and exec_net.outputs["fc_out"].shape == [1, 10]
    assert exec_net.outputs["28"].name == "28" and exec_net.outputs["28"].shape == [1, 5184]
    del ie_core
    # Fix: removed a stray trailing `pass` statement (dead code).


def test_get_metric(device):
    # The NETWORK_NAME metric reports the model name from the IR.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    # Fix: load on the `device` fixture instead of the hard-coded "CPU" —
    # this test has no CPU-only skip marker, so on other TEST_DEVICE values
    # it silently exercised the wrong plugin.
    exec_net = ie_core.load_network(net, device)
    network_name = exec_net.get_metric("NETWORK_NAME")
    assert network_name == "test_model"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_get_config(device):
    # PERF_COUNT config defaults to "NO" when not explicitly enabled.
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device)
    config = exec_net.get_config("PERF_COUNT")
    assert config == "NO"

View File

@@ -0,0 +1,182 @@
import os
import pytest
from sys import platform
import numpy as np

from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork

# Sample IR and the per-OS plugin-registration XML files used by the tests.
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')

plugins_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'plugins.xml')
plugins_win_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'plugins_windows.xml')
plugins_osx_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'plugins_macos.xml')


def test_init_ie_core_no_cfg():
    """IECore is constructible without a plugin-configuration file."""
    ie = IECore()
    assert isinstance(ie, IECore)


def test_init_ie_core_with_cfg():
    """IECore accepts a custom plugins XML path."""
    ie = IECore(plugins_xml)
    assert isinstance(ie, IECore)


def test_get_version(device):
    """get_versions() returns a per-device dict of version records."""
    ie = IECore()
    version = ie.get_versions(device)
    assert isinstance(version, dict), "Returned version must be a dictionary"
    # Fix: the original assertion message contained a bare "{}" placeholder
    # that was never formatted; fill it with the device name.
    assert device in version, "{} plugin version wasn't found in versions".format(device)
    assert hasattr(version[device], "major"), "Returned version has no field 'major'"
    assert hasattr(version[device], "minor"), "Returned version has no field 'minor'"
    assert hasattr(version[device], "description"), "Returned version has no field 'description'"
    assert hasattr(version[device], "build_number"), "Returned version has no field 'build_number'"
def test_load_network(device):
    # load_network returns an ExecutableNetwork handle.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie.load_network(net, device)
    assert isinstance(exec_net, ExecutableNetwork)


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_load_network_wrong_device():
    # Unknown device names are rejected with a RuntimeError.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    with pytest.raises(RuntimeError) as e:
        ie.load_network(net, "BLA")
    assert 'Device with "BLA" name is not registered in the InferenceEngine' in str(e.value)


def test_query_network(device):
    # query_network maps every layer of the model to a supporting device.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    query_res = ie.query_network(net, device)
    assert net.layers.keys() == query_res.keys(), "Not all network layers present in query_network results"
    assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_register_plugin():
    # A plugin library can be registered under an arbitrary device alias.
    ie = IECore()
    ie.register_plugin("MKLDNNPlugin", "BLA")
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie.load_network(net, "BLA")
    assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to the registered plugin with name 'BLA'"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_register_plugins():
    # Plugins can be registered in bulk from a per-OS XML description.
    ie = IECore()
    if platform == "linux" or platform == "linux2":
        ie.register_plugins(plugins_xml)
    elif platform == "darwin":
        ie.register_plugins(plugins_osx_xml)
    elif platform == "win32":
        ie.register_plugins(plugins_win_xml)
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie.load_network(net, "CUSTOM")
    # NOTE(review): "registred" below is a typo in the assertion message;
    # it is a runtime string and is left unchanged.
    assert isinstance(exec_net,
                      ExecutableNetwork), "Cannot load the network to the registered plugin with name 'CUSTOM' " \
                                          "registred in the XML file"


@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well")
def test_unregister_plugin(device):
    # After unregister_plugin the device must no longer accept networks.
    ie = IECore()
    ie.unregister_plugin(device)
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    with pytest.raises(RuntimeError) as e:
        ie.load_network(net, device)
    assert 'Device with "{}" name is not registered in the InferenceEngine'.format(device) in str(e.value)


@pytest.mark.skip(reason="Need to figure out segmentation fault cause.")
def test_available_devices(device):
    # The current TEST_DEVICE must appear in IECore.available_devices.
    ie = IECore()
    devices = ie.available_devices
    assert device in devices, "Current device '{}' is not listed in available devices '{}'".format(device,
                                                                                                  ', '.join(devices))


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_list_of_str():
    # OPTIMIZATION_CAPABILITIES metric: a list of strings.
    ie = IECore()
    param = ie.get_metric("CPU", "OPTIMIZATION_CAPABILITIES")
    assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \
                                    "metric must be a list but {} is returned".format(type(param))
    assert all(isinstance(v, str) for v in param), "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' " \
                                                   "metric are strings!"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_tuple_of_two_ints():
    # RANGE_FOR_STREAMS metric: a tuple of ints.
    ie = IECore()
    param = ie.get_metric("CPU", "RANGE_FOR_STREAMS")
    assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \
                                     "metric must be tuple but {} is returned".format(type(param))
    assert all(isinstance(v, int) for v in param), "Not all of the parameter values for 'RANGE_FOR_STREAMS' " \
                                                   "metric are integers!"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_tuple_of_three_ints():
    # RANGE_FOR_ASYNC_INFER_REQUESTS metric: a tuple of ints.
    ie = IECore()
    param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
    assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \
                                     "metric must be tuple but {} is returned".format(type(param))
    assert all(isinstance(v, int) for v in param), "Not all of the parameter values for " \
                                                   "'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_str():
    # FULL_DEVICE_NAME metric: a human-readable device string.
    ie = IECore()
    param = ie.get_metric("CPU", "FULL_DEVICE_NAME")
    assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \
                                   "metric must be string but {} is returned".format(type(param))
def test_read_network_from_xml():
    # Reading a model from IR files yields an IENetwork instance.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert isinstance(network, IENetwork)


def test_incorrect_xml():
    # A missing model file is reported with a descriptive error.
    core = IECore()
    with pytest.raises(Exception) as e:
        core.read_network(model="./model.xml", weights=SAMPLENET_BIN)
    assert "Path to the model ./model.xml doesn't exists or it's a directory" in str(e.value)


def test_incorrect_bin():
    # A missing weights file is reported with a descriptive error.
    core = IECore()
    with pytest.raises(Exception) as e:
        core.read_network(model=SAMPLENET_XML, weights="./model.bin")
    assert "Path to the weights ./model.bin doesn't exists or it's a directory" in str(e.value)


def test_read_net_from_buffer():
    # The reader also accepts in-memory model/weights buffers.
    core = IECore()
    with open(SAMPLENET_BIN, 'rb') as f:
        weights_blob = f.read()
    with open(SAMPLENET_XML, 'rb') as f:
        model_blob = f.read()
    network = core.read_network(model=model_blob, weights=weights_blob, init_from_buffer=True)
    assert isinstance(network, IENetwork)


def test_net_from_buffer_valid():
    # A network read from buffers must carry exactly the same weights as one
    # read from the IR files.
    core = IECore()
    with open(SAMPLENET_BIN, 'rb') as f:
        weights_blob = f.read()
    with open(SAMPLENET_XML, 'rb') as f:
        model_blob = f.read()
    from_buffer = core.read_network(model=model_blob, weights=weights_blob, init_from_buffer=True)
    from_files = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    for name, layer in from_buffer.layers.items():
        reference = from_files.layers[name]
        for blob, data in layer.blobs.items():
            assert np.allclose(data, reference.blobs[blob]), \
                "Incorrect weights for layer {} and blob {}".format(name, blob)

View File

@@ -0,0 +1,128 @@
import warnings
import os
import numpy

from openvino.inference_engine import DataPtr, IECore

# FP32 sample IR; layer '19' is the Convolution layer probed by the tests.
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')


def test_name():
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].name == "19"


def test_type():
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].type == "Convolution"


def test_precision_getter(recwarn):
    # Reading the deprecated per-layer precision emits one DeprecationWarning.
    warnings.simplefilter("always")
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].precision == "FP32"
    assert len(recwarn) == 1
    assert recwarn.pop(DeprecationWarning)


def test_precision_setter(recwarn):
    # NOTE(review): only one warning is expected here despite both a set and
    # a subsequent get — presumably only one accessor warns; confirm.
    warnings.simplefilter("always")
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.layers['19'].precision = "I8"
    assert net.layers['19'].precision == "I8"
    assert len(recwarn) == 1
    assert recwarn.pop(DeprecationWarning)


def test_affinuty_getter():
    # NOTE(review): "affinuty" is a typo for "affinity"; the name is kept
    # unchanged because pytest discovers tests by name.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].affinity == ""


def test_affinity_setter():
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.layers['19'].affinity = "CPU"
    assert net.layers['19'].affinity == "CPU"


def test_blobs():
    # `blobs` exposes the layer's trained tensors as non-empty numpy arrays.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert isinstance(net.layers['19'].blobs["biases"], numpy.ndarray)
    assert isinstance(net.layers['19'].blobs["weights"], numpy.ndarray)
    assert net.layers['19'].blobs["biases"].size != 0
    assert net.layers['19'].blobs["weights"].size != 0


def test_weights(recwarn):
    # Deprecated `weights` alias: the test expects four warnings for the
    # four accesses below.
    warnings.simplefilter("always")
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert isinstance(net.layers['19'].weights["biases"], numpy.ndarray)
    assert isinstance(net.layers['19'].weights["weights"], numpy.ndarray)
    assert net.layers['19'].weights["biases"].size != 0
    assert net.layers['19'].weights["weights"].size != 0
    assert len(recwarn) == 4
    assert recwarn.pop(DeprecationWarning)


def test_params_getter():
    # `params` reflects the layer attributes straight from the IR.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].params == {'dilations': '1,1', 'group': '1', 'kernel': '5,5', 'output': '16', 'pads_begin': '2,2',
                                       'pads_end': '2,2', 'strides': '1,1'}


def test_params_setter():
    # Updating and reassigning `params` persists the extra key.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    params = net.layers['19'].params
    params.update({'PrimitivesPriority': 'cpu:ref_any'})
    net.layers['19'].params = params
    assert net.layers['19'].params == {'dilations': '1,1', 'group': '1', 'kernel': '5,5', 'output': '16',
                                       'pads_begin': '2,2',
                                       'pads_end': '2,2', 'strides': '1,1', 'PrimitivesPriority': 'cpu:ref_any'}
def test_layer_parents():
    # Layer '19' is fed directly by the network input 'data'.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].parents == ['data']


def test_layer_children():
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].children == ['21']


def test_layout(recwarn):
    # Accessing the deprecated `layout` property emits exactly one
    # DeprecationWarning.
    warnings.simplefilter("always")
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].layout == 'NCHW'
    assert len(recwarn) == 1
    assert recwarn.pop(DeprecationWarning)


def test_shape(recwarn):
    # Accessing the deprecated `shape` property emits exactly one warning.
    warnings.simplefilter("always")
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].shape == [1, 16, 32, 32]
    assert len(recwarn) == 1
    # Fix(consistency): also check the warning category, mirroring
    # test_layout above — presumably a DeprecationWarning; confirm on device.
    assert recwarn.pop(DeprecationWarning)


def test_out_data():
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert isinstance(net.layers['19'].out_data[0], DataPtr)


def test_in_data():
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert isinstance(net.layers['19'].in_data[0], DataPtr)

View File

@@ -0,0 +1,292 @@
import os
import pytest
import warnings
import numpy as np

from openvino.inference_engine import IENetwork, IENetLayer, DataPtr, LayersStatsMap, LayerStats, IECore

# SampLeNet IR used throughout; the "ngraph format" copies exercise the
# new-style IR reader. (The NGRPAPH_* constant names keep the original typo.)
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')

NGRPAPH_COMPATIBLE_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_ngraph_format.xml')
NGRPAPH_COMPATIBLE_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_ngraph_format.bin')


def test_read_from_ir_deprecated():
    # IENetwork.from_ir() still works but emits two DeprecationWarnings:
    # one for from_ir itself and one for the constructor it delegates to.
    with warnings.catch_warnings(record=True) as w:
        net = IENetwork.from_ir(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        assert isinstance(net, IENetwork)
        assert len(w) == 2
        for warns in w:
            assert issubclass(warns.category, DeprecationWarning)
        assert "from_ir() method of IENetwork is deprecated." in str(w[0].message)
        assert "Reading network using constructor is deprecated. " \
               "Please, use IECore.read_network() method instead" in str(w[1].message)


def test_create_ie_network_deprecated():
    # Direct construction works but warns once about deprecation.
    with warnings.catch_warnings(record=True) as w:
        net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        assert isinstance(net, IENetwork)
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        assert "Reading network using constructor is deprecated. " \
               "Please, use IECore.read_network() method instead" in str(w[0].message)


def test_incorrect_xml_deprecated():
    # Missing model file: the constructor both warns and raises.
    with warnings.catch_warnings(record=True) as w:
        with pytest.raises(Exception) as e:
            IENetwork(model="./model.xml", weights=SAMPLENET_BIN)
        assert "Path to the model ./model.xml doesn't exists or it's a directory" in str(e.value)
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        assert "Reading network using constructor is deprecated. " \
               "Please, use IECore.read_network() method instead" in str(w[0].message)


def test_incorrect_bin_deprecated():
    # Missing weights file: the constructor both warns and raises.
    with warnings.catch_warnings(record=True) as w:
        with pytest.raises(Exception) as e:
            IENetwork(model=SAMPLENET_XML, weights="./model.bin")
        assert "Path to the weights ./model.bin doesn't exists or it's a directory" in str(e.value)
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        assert "Reading network using constructor is deprecated. " \
               "Please, use IECore.read_network() method instead" in str(w[0].message)
def test_name():
    # Network name comes from the IR's <net name="..."> attribute.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.name == "model"


def test_inputs():
    # The 'data' input exposes layout/precision/shape as read from the IR.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert isinstance(net.inputs['data'], DataPtr)
    assert net.inputs['data'].layout == "NCHW"
    assert net.inputs['data'].precision == "FP32"
    assert net.inputs['data'].shape == [1, 3, 32, 32]


def test_input_precision_setter():
    # Fix: this test and test_input_layout_setter had their bodies swapped —
    # the "precision" test exercised `layout` and vice versa. Each test now
    # checks the property its name promises; the overall coverage is
    # unchanged.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.inputs['data'].precision == "FP32"
    net.inputs['data'].precision = "I8"
    assert net.inputs['data'].precision == "I8"


def test_input_layout_setter():
    # Input layout is writable (body restored from the accidental swap —
    # see note in test_input_precision_setter).
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.inputs['data'].layout == "NCHW"
    net.inputs['data'].layout = "NHWC"
    assert net.inputs['data'].layout == "NHWC"


def test_input_unsupported_precision_setter():
    # Unsupported precision strings are rejected with ValueError.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    with pytest.raises(ValueError) as e:
        net.inputs['data'].precision = "BLA"
    assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)


def test_input_unsupported_layout_setter():
    # Unsupported layout strings are rejected with ValueError.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    with pytest.raises(ValueError) as e:
        net.inputs['data'].layout = "BLA"
    assert "Unsupported layout BLA! List of supported layouts: " in str(e.value)
def test_outputs():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net.outputs['fc3'], DataPtr)
assert net.outputs['fc3'].layout == "NC"
assert net.outputs['fc3'].precision == "FP32"
assert net.outputs['fc3'].shape == [1, 10]
def test_output_precision_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.outputs['fc3'].precision == "FP32"
net.outputs['fc3'].precision = "I8"
assert net.outputs['fc3'].precision == "I8"
def test_output_unsupported_precision_setter():
    # An unknown precision string on an output must raise ValueError.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    with pytest.raises(ValueError) as excinfo:
        network.outputs['fc3'].precision = "BLA"
    assert "Unsupported precision BLA! List of supported precisions: " in str(excinfo.value)
def test_add_ouputs():  # sic: historical typo in the name, kept so test IDs stay stable
    # Layers can be promoted to network outputs one at a time or as a list.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    network.add_outputs('fc1')
    network.add_outputs(['fc2'])
    assert sorted(network.outputs) == ['fc1', 'fc2', 'fc3']
def test_add_outputs_with_port():
    # Outputs can also be addressed as (layer_name, port_index) tuples.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    network.add_outputs(('fc1', 0))
    network.add_outputs([('fc2', 0)])
    assert sorted(network.outputs) == ['fc1', 'fc2', 'fc3']
def test_add_outputs_with_and_without_port():
    # Plain names and (name, port) tuples can be mixed across calls.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    network.add_outputs('fc1')
    network.add_outputs([('fc2', 0)])
    assert sorted(network.outputs) == ['fc1', 'fc2', 'fc3']
def test_batch_size_getter():
    # The sample IR is declared with a default batch size of 1.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert 1 == network.batch_size
def test_batch_size_setter():
    # Assigning batch_size rewrites the leading dimension of the input shape.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    network.batch_size = 4
    assert 4 == network.batch_size
    assert [4, 3, 32, 32] == network.inputs['data'].shape
def test_batch_size_after_reshape():
    # reshape() keeps batch_size in sync with the first input dimension.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    for batch in (4, 8):
        network.reshape({'data': [batch, 3, 32, 32]})
        assert network.batch_size == batch
        assert network.inputs['data'].shape == [batch, 3, 32, 32]
def test_layers():
    # `layers` iterates the sample network in topological order.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    expected_order = ['data', 'conv1', 'relu_conv1', 'pool1', 'conv2',
                      'relu_conv2', 'pool2', 'fc1', 'relu_fc1', 'fc2', 'relu_fc2', 'fc3']
    assert list(network.layers) == expected_order
    assert isinstance(network.layers['conv1'], IENetLayer)
def test_get_stats_deprecated():
    # Reading IENetwork.stats must still work but emit exactly one
    # DeprecationWarning per access.
    with warnings.catch_warnings(record=True) as w:
        ie = IECore()
        net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        stats = net.stats
        assert isinstance(stats, LayersStatsMap)
        # One access to `net.stats` above -> exactly one recorded warning.
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        assert "stats property of IENetwork is deprecated." in str(w[-1].message)
def test_set_new_stats_deprecated():
    # Writing new per-layer statistics through the deprecated `stats` map
    # must round-trip, while every `net.stats` access emits a warning.
    with warnings.catch_warnings(record=True) as w:
        ie = IECore()
        net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        new_stats = LayerStats(min=(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0),
                               max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
        stats = net.stats
        stats.update({"fc3": new_stats})
        assert net.stats["fc3"].min == new_stats.min
        assert net.stats["fc3"].max == new_stats.max
        # Three accesses to `net.stats` above -> three recorded warnings.
        assert len(w) == 3
        for warns in w:
            assert issubclass(warns.category, DeprecationWarning)
            assert "stats property of IENetwork is deprecated." in str(warns.message)
def test_update_stats_deprecated():
    # Overwriting previously-set statistics through the deprecated `stats`
    # map must keep the latest values; each `net.stats` access warns.
    with warnings.catch_warnings(record=True) as w:
        ie = IECore()
        net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        initial_stats = LayerStats(min=(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0),
                                   max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
        stats = net.stats
        stats.update({"fc3": initial_stats})
        new_stats = LayerStats(min=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0),
                               max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
        stats.update({"fc3": new_stats})
        # The second update wins.
        assert net.stats["fc3"].min == new_stats.min
        assert net.stats["fc3"].max == new_stats.max
        # Three accesses to `net.stats` above -> three recorded warnings.
        assert len(w) == 3
        for warns in w:
            assert issubclass(warns.category, DeprecationWarning)
            assert "stats property of IENetwork is deprecated." in str(warns.message)
def test_serialize():
    """serialize() writes an IR pair that read_network() can load back.

    Uses try/finally so the temporary IR files are removed even when an
    assertion fails (the original leaked them on failure, polluting the
    working directory for subsequent runs).
    """
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.serialize("./serialized_net.xml", "./serialized_net.bin")
    try:
        serialized_net = ie.read_network(model="./serialized_net.xml", weights="./serialized_net.bin")
        # The round-tripped network must contain exactly the same layers.
        assert net.layers.keys() == serialized_net.layers.keys()
    finally:
        for path in ("./serialized_net.xml", "./serialized_net.bin"):
            if os.path.exists(path):
                os.remove(path)
def test_reshape():
    # A static reshape of the input to batch 2 must not raise.
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    network.reshape({"data": (2, 3, 32, 32)})
def test_read_net_from_buffer_deprecated():
    # Building IENetwork from in-memory buffers still works but is deprecated.
    # (Locals renamed: the original `bin`/`xml` shadowed the builtin `bin`.)
    with warnings.catch_warnings(record=True) as caught:
        with open(SAMPLENET_BIN, 'rb') as f:
            weights_buffer = f.read()
        with open(SAMPLENET_XML, 'rb') as f:
            model_buffer = f.read()
        net = IENetwork(model=model_buffer, weights=weights_buffer, init_from_buffer=True)
        assert isinstance(net, IENetwork)
        assert len(caught) == 1
        assert issubclass(caught[-1].category, DeprecationWarning)
        assert "Reading network using constructor is deprecated. " \
               "Please, use IECore.read_network() method instead" in str(caught[0].message)
def test_net_from_buffer_valid_deprecated():
    # A network built from in-memory buffers must carry the same weights as
    # one read from files; both deprecated constructor calls emit warnings.
    with warnings.catch_warnings(record=True) as w:
        with open(SAMPLENET_BIN, 'rb') as f:
            bin = f.read()
        with open(SAMPLENET_XML, 'rb') as f:
            xml = f.read()
        net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
        net2 = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        # Compare every blob of every layer between the two networks.
        for name, l in net.layers.items():
            for blob, data in l.blobs.items():
                assert np.allclose(data, net2.layers[name].blobs[blob]), \
                    "Incorrect weights for layer {} and blob {}".format(name, blob)
        # One DeprecationWarning per IENetwork constructor call above.
        assert len(w) == 2
        for warns in w:
            assert issubclass(warns.category, DeprecationWarning)
            assert "Reading network using constructor is deprecated. " \
                   "Please, use IECore.read_network() method instead" in str(warns.message)
def test_multi_out_data():
    # Regression test CVS-23965:
    # DataPtr objects for output layers must not be shared between items of
    # the outputs map — each output gets its own DataPtr with its own shape.
    # (Removed a redundant trailing `pass` left over from an earlier edit.)
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.add_outputs(['fc2'])
    assert "fc2" in net.outputs and "fc3" in net.outputs
    assert isinstance(net.outputs["fc2"], DataPtr)
    assert isinstance(net.outputs["fc3"], DataPtr)
    assert net.outputs["fc2"].name == "fc2" and net.outputs["fc2"].shape == [1, 84]
    assert net.outputs["fc3"].name == "fc3" and net.outputs["fc3"].shape == [1, 10]

View File

@@ -0,0 +1,87 @@
import os
import pytest
from openvino.inference_engine import IENetwork, IEPlugin, ExecutableNetwork
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')
def test_init_plugin(device):
    # Constructing an IEPlugin for the target device must succeed.
    created = IEPlugin(device, None)
    assert isinstance(created, IEPlugin)
def test_device_attr(device):
    # The plugin must remember which device it was created for.
    created = IEPlugin(device, None)
    assert device == created.device
def test_get_version(device):
    """The plugin must report a non-empty version string."""
    plugin = IEPlugin(device, None)
    # Clearer than the original double-negative `assert not len(...) == 0`.
    assert len(plugin.version) != 0
def test_load_network(device):
    # Loading SampLeNet onto the device yields an ExecutableNetwork.
    plugin = IEPlugin(device, None)
    network = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    loaded = plugin.load(network)
    assert isinstance(loaded, ExecutableNetwork)
def test_load_network_many_requests(device):
    # num_requests controls how many infer requests are pre-allocated.
    plugin = IEPlugin(device)
    network = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    loaded = plugin.load(network, num_requests=5)
    assert 5 == len(loaded.requests)
def test_get_supported_layers(device):
    # Every layer of SampLeNet is expected to be supported by the device.
    plugin = IEPlugin(device)
    net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    supported = plugin.get_supported_layers(net)
    layers = ['conv1', 'conv2', 'data', 'fc1', 'fc2', 'fc3', 'pool1', 'pool2',
              'relu_conv1', 'relu_conv2', 'relu_fc1', 'relu_fc2']
    if device == "GPU":
        # The GPU plugin does not report the input ("data") layer as supported.
        layers.remove("data")
    assert sorted(supported) == layers
@pytest.mark.skip(reason="Plugin specific test.")  # fixed typo: was "Plugiin"
def test_set_config(device):
    # TARGET_FALLBACK is a HETERO-plugin configuration key listing the
    # device priority order for layer fallback.
    plugin = IEPlugin("HETERO:CPU")
    plugin.set_config({"TARGET_FALLBACK": "CPU,GPU"})
@pytest.mark.skip(reason="Sporadically fail in CI, not reproducible locally")
def test_set_initial_affinity():
    # HETERO:CPU must assign every SampLeNet layer to the CPU fallback.
    plugin = IEPlugin("HETERO:CPU", None)
    net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    plugin.set_initial_affinity(net)
    for l, params in net.layers.items():
        assert params.affinity == "CPU", "Incorrect affinity for {}".format(l)
def test_set_initial_affinity_wrong_device(device):
    # On a plain (non-HETERO) plugin the affinity call must raise.
    with pytest.raises(RuntimeError) as excinfo:
        plugin = IEPlugin("CPU", None)
        network = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        plugin.set_initial_affinity(network)
    assert "set_initial_affinity method applicable only for HETERO device" in str(excinfo.value)
def test_add_cpu_extenstion_wrong_device():  # note: "extenstion" typo is historical
    # add_cpu_extension must be rejected on non-CPU plugins.
    with pytest.raises(RuntimeError) as e:
        plugin = IEPlugin("GPU", None)
        plugin.add_cpu_extension("./")
    if "Cannot find plugin to use" in str(e.value):
        # No GPU plugin available in this environment at all.
        pytest.skip("No GPU found. Skipping test")
    else:
        assert "add_cpu_extension method applicable only for CPU or HETERO devices" in str(e.value)
def test_unknown_plugin():
    # A bogus device name must be rejected with a descriptive ValueError.
    with pytest.raises(ValueError) as excinfo:
        IEPlugin("BLA")
    assert "Unknown plugin: BLA, expected one of:" in str(excinfo.value)

View File

@@ -0,0 +1,401 @@
import numpy as np
import os
import pytest
from openvino.inference_engine import ie_api as ie
# MYRIAD runs FP16-only models, so select the IR flavour by target device.
if os.environ.get("TEST_DEVICE") != "MYRIAD":
    test_net_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
    test_net_bin = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')
else:
    test_net_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.xml')
    test_net_bin = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.bin')

IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'test_data', 'images', 'cat3.bmp')

# Reference output computed with Caffe on the same model
# (presumably compared against inference results elsewhere in this module).
REF_IMAGE_RESULT = np.array([[34.6295814514, 18.9434795380, 43.2669448853, 0.4420155287, -108.4574050903,
                              -314.8240051270, 231.0738067627, -106.3504943848, 108.5880966187, 92.7254943848]])
def read_image():
    """Load the test image as a normalized NCHW float batch of shape (1, 3, 32, 32).

    Returns:
        numpy array of shape (1, 3, 32, 32) with values scaled to [0, 1].

    Raises:
        FileNotFoundError: if the image file cannot be read.
    """
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(IMAGE_PATH)
    # cv2.imread returns None on failure; the check must come BEFORE any
    # arithmetic — the original divided by 255 first, so a missing file
    # raised TypeError instead of the intended FileNotFoundError.
    if image is None:
        raise FileNotFoundError("Input image not found")
    image = image / 255
    # cv2.resize takes (width, height); the target is square so order is safe.
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # HWC -> CHW
    image = image.reshape((n, c, h, w))
    return image
def load_sample_model(device, num_requests=1):
    # Helper: read the test IR and compile it for `device`.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    return core.load_network(network, device, num_requests=num_requests)
def test_inputs(device):
    # Each infer request exposes one input blob "data" shaped (1, 3, 32, 32).
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    executable_network = ie_core.load_network(net, device, num_requests=2)
    assert len(executable_network.requests) == 2
    for req in executable_network.requests:
        assert len(req.inputs) == 1
        assert "data" in req.inputs
        assert req.inputs['data'].shape == (1, 3, 32, 32)
    # Explicit deletes keep destruction order deterministic across plugins.
    del executable_network
    del ie_core
    del net
def test_outputs(device):
    # Each infer request exposes one output blob "fc_out" shaped (1, 10).
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    executable_network = ie_core.load_network(net, device, num_requests=2)
    assert len(executable_network.requests) == 2
    for req in executable_network.requests:
        assert len(req.outputs) == 1
        assert "fc_out" in req.outputs
        assert req.outputs['fc_out'].shape == (1, 10)
    # Explicit deletes keep destruction order deterministic across plugins.
    del executable_network
    del ie_core
    del net
def test_inputs_list(device):
    # The private _inputs_list mirrors the single "data" input name.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=2)
    for request in compiled.requests:
        assert len(request._inputs_list) == 1
        assert "data" in request._inputs_list
    del core
def test_outputs_list(device):
    # The private _outputs_list mirrors the single "fc_out" output name.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=2)
    for request in compiled.requests:
        assert len(request._outputs_list) == 1
        assert "fc_out" in request._outputs_list
    del core
def test_access_input_buffer(device):
    # Internal API: _get_blob_buffer returns a view of the input blob
    # exposable as a numpy array with the expected geometry.
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    executable_network = ie_core.load_network(net, device, num_requests=1)
    buffer = executable_network.requests[0]._get_blob_buffer("data".encode()).to_numpy()
    assert buffer.shape == (1, 3, 32, 32)
    # Byte strides of a dense float32 NCHW buffer of that shape.
    assert buffer.strides == (12288, 4096, 128, 4)
    assert buffer.dtype == np.float32
    del executable_network
    del ie_core
    del net
def test_access_output_buffer(device):
    # Internal API: _get_blob_buffer returns a view of the output blob
    # exposable as a numpy array with the expected geometry.
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    executable_network = ie_core.load_network(net, device, num_requests=1)
    buffer = executable_network.requests[0]._get_blob_buffer("fc_out".encode()).to_numpy()
    assert buffer.shape == (1, 10)
    # Byte strides of a dense float32 (1, 10) buffer.
    assert buffer.strides == (40, 4)
    assert buffer.dtype == np.float32
    del executable_network
    del ie_core
    del net
def test_write_to_inputs_directly(device):
    # Input blobs are writable in place via slice assignment.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    frame = read_image()
    compiled.requests[0].inputs["data"][:] = frame
    assert np.allclose(compiled.requests[0].inputs["data"], frame)
    del compiled
    del core
    del network
def test_write_to_inputs_copy(device):
    # Writing through a named request reference updates the shared blob.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    frame = read_image()
    req = compiled.requests[0]
    req.inputs["data"][:] = frame
    assert np.allclose(compiled.requests[0].inputs["data"], frame)
    del compiled
    del core
    del network
def test_infer(device):
    # Synchronous inference on the cat image must yield class index 3.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    frame = read_image()
    req = compiled.requests[0]
    req.infer({'data': frame})
    scores = req.outputs['fc_out']
    assert np.argmax(scores[0]) == 3
    del compiled
    del core
    del network
def test_async_infer_default_timeout(device):
    # wait() with no argument blocks until the async request completes.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    frame = read_image()
    req = compiled.requests[0]
    req.async_infer({'data': frame})
    req.wait()
    assert np.argmax(req.outputs['fc_out'][0]) == 3
    del compiled
    del core
    del network
def test_async_infer_wait_finish(device):
    # wait(RESULT_READY) blocks until the async request delivers a result.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    frame = read_image()
    req = compiled.requests[0]
    req.async_infer({'data': frame})
    req.wait(ie.WaitMode.RESULT_READY)
    assert np.argmax(req.outputs['fc_out'][0]) == 3
    del compiled
    del core
    del network
def test_async_infer_wait_time(device):
    # wait(milliseconds) with a generous timeout must also complete the run.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    frame = read_image()
    req = compiled.requests[0]
    req.async_infer({'data': frame})
    req.wait(100)
    assert np.argmax(req.outputs['fc_out'][0]) == 3
    del compiled
    del core
    del network
def test_async_infer_wait_status(device):
    # After a completed blocking wait(), a non-blocking STATUS_ONLY poll
    # must report StatusCode.OK.
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    request = exec_net.requests[0]
    request.async_infer({'data': img})
    request.wait(ie.WaitMode.RESULT_READY)
    res = request.outputs['fc_out']
    assert np.argmax(res[0]) == 3
    status = request.wait(ie.WaitMode.STATUS_ONLY)
    assert status == ie.StatusCode.OK
    del exec_net
    del ie_core
    del net
def test_async_infer_fill_inputs(device):
    # async_infer() without arguments reuses data already written to inputs.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    frame = read_image()
    req = compiled.requests[0]
    req.inputs['data'][:] = frame
    req.async_infer()
    assert req.wait() == ie.StatusCode.OK
    assert np.argmax(req.outputs['fc_out'][0]) == 3
    del compiled
    del core
    del network
def test_infer_modify_outputs(device):
    # `outputs` must return a fresh copy on each access: mutating a
    # previously returned dict must not corrupt the request's real
    # output blobs.
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    request = exec_net.requests[0]
    outputs0 = exec_net.infer({'data': img})
    status_end = request.wait()
    assert status_end == ie.StatusCode.OK
    assert np.argmax(outputs0['fc_out'][0]) == 3
    # Clobber the returned copy; the request's data must stay intact.
    outputs0['fc_out'][0] = 0
    outputs1 = request.outputs
    assert np.argmax(outputs1['fc_out'][0]) == 3
    outputs1['fc_out'][0] = 1
    outputs2 = request.outputs
    assert np.argmax(outputs2['fc_out'][0]) == 3
    del exec_net
    del ie_core
    del net
def test_async_infer_callback(device):
    # The completion callback must fire for a finished async request.
    def static_vars(**kwargs):
        # Small decorator that attaches mutable attributes to a function,
        # used here as a call flag visible from the outer scope.
        def decorate(func):
            for k in kwargs:
                setattr(func, k, kwargs[k])
            return func
        return decorate

    @static_vars(callback_called=0)
    def callback(self, status):
        callback.callback_called = 1

    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    request = exec_net.requests[0]
    request.set_completion_callback(callback)
    request.async_infer({'data': img})
    status = request.wait()
    assert status == ie.StatusCode.OK
    res = request.outputs['fc_out']
    assert np.argmax(res[0]) == 3
    # The callback must have run by the time wait() returned OK.
    assert callback.callback_called == 1
    del exec_net
    del ie_core
def test_async_infer_callback_wait_before_start(device):
    # wait() before any async_infer() must report INFER_NOT_STARTED, and
    # the completion callback must still fire once the request runs.
    def static_vars(**kwargs):
        # Small decorator that attaches mutable attributes to a function,
        # used here as a call flag visible from the outer scope.
        def decorate(func):
            for k in kwargs:
                setattr(func, k, kwargs[k])
            return func
        return decorate

    @static_vars(callback_called=0)
    def callback(self, status):
        callback.callback_called = 1

    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    request = exec_net.requests[0]
    request.set_completion_callback(callback)
    # Nothing started yet -> INFER_NOT_STARTED.
    status = request.wait()
    assert status == ie.StatusCode.INFER_NOT_STARTED
    request.async_infer({'data': img})
    status = request.wait()
    assert status == ie.StatusCode.OK
    res = request.outputs['fc_out']
    assert np.argmax(res[0]) == 3
    assert callback.callback_called == 1
    del exec_net
    del ie_core
def test_async_infer_callback_wait_in_callback(device):
    # Calling wait(STATUS_ONLY) from inside the completion callback must be
    # safe and must observe a terminal (OK) status.
    class InferReqWrap:
        def __init__(self, request):
            self.request = request
            self.request.set_completion_callback(self.callback)
            # No inference started yet -> polling reports INFER_NOT_STARTED.
            self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
            assert self.status_code == ie.StatusCode.INFER_NOT_STARTED

        def callback(self, statusCode, userdata):
            # Non-blocking status poll issued from within the callback itself.
            self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)

        def execute(self, input_data):
            self.request.async_infer(input_data)
            status = self.request.wait(ie.WaitMode.RESULT_READY)
            assert status == ie.StatusCode.OK
            # The status captured inside the callback must also be OK.
            assert self.status_code == ie.StatusCode.OK

    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    request_wrap = InferReqWrap(exec_net.requests[0])
    request_wrap.execute({'data': img})
    del exec_net
    del ie_core
def test_get_perf_counts(device):
    # With PERF_COUNT enabled, per-layer profiling info becomes available
    # after a synchronous inference.
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    ie_core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = ie_core.load_network(net, device)
    img = read_image()
    request = exec_net.requests[0]
    request.infer({'data': img})
    pc = request.get_perf_counts()
    # Layer '29' of the test model is a FullyConnected layer and must have run.
    assert pc['29']["status"] == "EXECUTED"
    assert pc['29']["layer_type"] == "FullyConnected"
    del exec_net
    del ie_core
    del net
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Can't run test on device {},"
                    "Dynamic batch fully supported only on CPU".format(os.environ.get("TEST_DEVICE", "CPU")))
def test_set_batch_size(device):
    # Dynamic batching: load with batch 10, then shrink to 1 at request level.
    xml = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
    bin = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')
    ie_core = ie.IECore()
    ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
    net = ie_core.read_network(xml, bin)
    net.batch_size = 10
    data = np.zeros(shape=net.inputs['data'].shape)
    exec_net = ie_core.load_network(net, device)
    # Only the first batch slot carries real data; the rest stay zero.
    data[0] = read_image()[0]
    request = exec_net.requests[0]
    request.set_batch(1)
    request.infer({'data': data})
    assert np.allclose(int(request.outputs['fc3'][0][0]), -1), "Incorrect data for 1st batch"
    del exec_net
    del ie_core
    del net
def test_set_zero_batch_size(device):
    # Batch size 0 is invalid and must raise ValueError.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    with pytest.raises(ValueError) as excinfo:
        compiled.requests[0].set_batch(0)
    assert "Batch size should be positive integer number but 0 specified" in str(excinfo.value)
    del compiled
    del core
    del network
def test_set_negative_batch_size(device):
    # Negative batch sizes are invalid and must raise ValueError.
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    compiled = core.load_network(network, device, num_requests=1)
    with pytest.raises(ValueError) as excinfo:
        compiled.requests[0].set_batch(-1)
    assert "Batch size should be positive integer number but -1 specified" in str(excinfo.value)
    del compiled
    del core
    del network

View File

@@ -0,0 +1,39 @@
from openvino.inference_engine import IENetwork
# ngraph python bindings are optional; skip the whole module when missing.
try:
    from ngraph.impl.op import Parameter, Relu
    from ngraph.impl import Function, Shape, Type
    ngraph_available = True
except ImportError:
    # Only import failures should disable these tests. The original bare
    # `except:` also swallowed SystemExit/KeyboardInterrupt and any genuine
    # error raised inside ngraph's own import machinery.
    ngraph_available = False
import numpy as np
import pytest
if not ngraph_available:
    pytest.skip("NGraph is not installed, skip", allow_module_level=True)
@pytest.mark.skip(reason="nGraph python API has been removed in 2020.2 LTS release")
def test_CreateIENetworkFromNGraph():
    # Build a minimal Param->Relu ngraph Function and wrap it into IENetwork.
    element_type = Type.f32
    param = Parameter(element_type, Shape([1, 3, 22, 22]))
    relu = Relu(param)
    func = Function([relu], [param], 'test')
    caps = Function.to_capsule(func)
    cnnNetwork = IENetwork(caps)
    # `is not None` instead of `!= None`: identity, not equality (PEP 8 / E711).
    assert cnnNetwork is not None
    assert cnnNetwork.get_function() is not None
    # Two layers: the Parameter input and the Relu itself.
    assert len(cnnNetwork.layers) == 2
@pytest.mark.skip(reason="nGraph python API has been removed in 2020.2 LTS release")
def test_GetIENetworkFromNGraph():
    # Round-trip: the ngraph Function wrapped into IENetwork must be
    # retrievable back through get_function()/from_capsule.
    element_type = Type.f32
    param = Parameter(element_type, Shape([1, 3, 22, 22]))
    relu = Relu(param)
    func = Function([relu], [param], 'test')
    caps = Function.to_capsule(func)
    cnnNetwork = IENetwork(caps)
    # `is not None` instead of `!= None`: identity, not equality (PEP 8 / E711).
    assert cnnNetwork is not None
    assert cnnNetwork.get_function() is not None
    caps2 = cnnNetwork.get_function()
    func2 = Function.from_capsule(caps2)
    assert func2 is not None

View File

@@ -23,13 +23,13 @@
namespace InferenceEngine {
/**
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1
* @brief This is a wrapper class used to build and parse a network from the given IR.
*
* All the methods here can throw exceptions.
*/
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3")
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1")
CNNNetReader {
public:
/**

View File

@@ -79,14 +79,14 @@ public:
virtual ~CNNNetwork() {}
/**
* @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3
* @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1
* @copybrief ICNNNetwork::getPrecision
*
* Wraps ICNNNetwork::getPrecision
*
* @return A precision type
*/
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1")
virtual Precision getPrecision() const;
/**
@@ -200,7 +200,7 @@ public:
}
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @copybrief ICNNNetwork::getLayerByName
*
* Wraps ICNNNetwork::getLayerByName
@@ -208,11 +208,11 @@ public:
* @param layerName Given name of the layer
* @return Status code of the operation. InferenceEngine::OK if succeeded
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
CNNLayerPtr getLayerByName(const char* layerName) const;
/**
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Begin layer iterator
*
* Order of layers is implementation specific,
@@ -221,25 +221,25 @@ public:
* @return Iterator pointing to a layer
*/
IE_SUPPRESS_DEPRECATED_START
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1")
details::CNNNetworkIterator begin() const;
/**
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief End layer iterator
* @return Iterator pointing to a layer
*/
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1")
details::CNNNetworkIterator end() const;
IE_SUPPRESS_DEPRECATED_END
/**
* @deprecated Use CNNNetwork::layerCount() instead. The method will be removed in 2020.3
* @deprecated Use CNNNetwork::layerCount() instead. The method will be removed in 2021.1
* @brief Number of layers in network object
*
* @return Number of layers.
*/
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::layerCount() instead. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::layerCount() instead. The method will be removed in 2021.1")
size_t size() const;
/**

View File

@@ -153,6 +153,7 @@ public:
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* This method will be removed in 2021.1 release.
* @copybrief IExecutableNetwork::GetMappedTopology
*
* Wraps IExecutableNetwork::GetMappedTopology.

View File

@@ -78,8 +78,8 @@ public:
IExecutableNetwork::Ptr ret;
IE_SUPPRESS_DEPRECATED_START
CALL_STATUS_FNC(LoadNetwork, ret, network, config);
IE_SUPPRESS_DEPRECATED_END
return ExecutableNetwork(ret, actual);
IE_SUPPRESS_DEPRECATED_END
}
/**
@@ -94,9 +94,9 @@ public:
IExecutableNetwork::Ptr ret;
IE_SUPPRESS_DEPRECATED_START
CALL_STATUS_FNC(LoadNetwork, ret, network, config);
IE_SUPPRESS_DEPRECATED_END
if (ret.get() == nullptr) THROW_IE_EXCEPTION << "Internal error: pointer to executable network is null";
return ExecutableNetwork(ret, actual);
IE_SUPPRESS_DEPRECATED_END
}
/**
@@ -137,8 +137,8 @@ public:
IExecutableNetwork::Ptr ret;
IE_SUPPRESS_DEPRECATED_START
CALL_STATUS_FNC(ImportNetwork, ret, modelFileName, config);
IE_SUPPRESS_DEPRECATED_END
return ExecutableNetwork(ret, actual);
IE_SUPPRESS_DEPRECATED_END
}
/**

View File

@@ -70,7 +70,7 @@ public:
explicit SharedObjectLoader(LPCSTR pluginName) {
ExcludeCurrentDirectory();
shared_object = LoadLibrary(pluginName);
shared_object = LoadLibraryA(pluginName);
if (!shared_object) {
char cwd[1024];
THROW_IE_EXCEPTION << "Cannot load library '" << pluginName << "': " << GetLastError()

View File

@@ -48,7 +48,7 @@
#endif
#define INFERENCE_ENGINE_NN_BUILDER_DEPRECATED \
INFERENCE_ENGINE_DEPRECATED("Use ngraph API. NN Builder API will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use ngraph API. NN Builder API will be removed in 2020.4")
#define INFERENCE_ENGINE_NN_BUILDER_API_CLASS(...) \
INFERENCE_ENGINE_NN_BUILDER_DEPRECATED \
INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
@@ -63,7 +63,7 @@
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#else
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) \
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3") \
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1") \
INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#endif

View File

@@ -51,6 +51,7 @@ public:
/**
* @deprecated IErrorListener is not used anymore. An exception is thrown in case of any unexpected situations.
* The function will be removed in 2021.1 release.
* @brief Sets logging callback
*
* Logging is used to track what is going on inside the plugins, Inference Engine library

View File

@@ -47,7 +47,7 @@ public:
* @param _precision Precision of the data
* @param layout Data layout
*/
INFERENCE_ENGINE_DEPRECATED("Use Data(const std::string &, const TensorDesc&). The ctor will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use Data(const std::string &, const TensorDesc&). The ctor will be removed in 2021.1")
Data(const std::string& name, const SizeVector& a_dims, Precision _precision, Layout layout = NCHW);
/**

View File

@@ -12,6 +12,7 @@
namespace InferenceEngine {
/**
* @deprecated IErrorListener is not used anymore. An exception is thrown / StatusCode set in case of any unexpected situations
* The class will be removed in 2021.1 release.
* @brief This class represents a custom error listener.
*/
class

View File

@@ -34,6 +34,7 @@ public:
};
/**
* @deprecated Implement IExtension interface. The interface will be removed in 2021.1 release.
* @brief The SOCreatorTrait class specialization for IExtension case, defines the name of the fabric method for
* creating IExtension object in DLL
*/
@@ -72,6 +73,7 @@ public:
/**
* @deprecated IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* The method will be removed in 2021.1 release.
* @brief Sets a log callback that is used to track what is going on inside
*
* @param listener Logging listener
@@ -96,7 +98,8 @@ public:
void Release() noexcept override {}
/**
* @deprecated Use IExtension::getImplTypes to get implementation types for a particular node
* @deprecated Use IExtension::getImplTypes to get implementation types for a particular node.
* The method will removed in 2021.1 release.
* @brief Gets the array with types of layers which are included in the extension
*
* @param types Types array
@@ -112,7 +115,8 @@ public:
}
/**
* @deprecated Use IExtension::getImplementation to get a concrete implementation
* @deprecated Use IExtension::getImplementation to get a concrete implementation.
* The method will be removed in 2021.1 release.
* @brief Gets the factory with implementations for a given layer
*
* @param factory Factory with implementations
@@ -130,6 +134,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation
* The method will be removed in 2021.1 release.
* @brief Gets shape propagation implementation for the given string-type of CNNLayer
*
* @param impl the vector with implementations which is ordered by priority
@@ -146,6 +151,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation
* The method will be removed in 2021.1 release.
* @brief Gets the array with types of layers which are included in the extension
*
* @param types Types array
@@ -194,7 +200,7 @@ protected:
};
/**
* @deprecated Use a common Extension class
* @deprecated Use a common Extension class. The interface will be removed in 2021.1 release.
* @brief This class is a C++ helper to work with objects created using extensions.
*/
class INFERENCE_ENGINE_DEPRECATED("Use a common Extension interface") ShapeInferExtension :
@@ -205,7 +211,9 @@ public:
*
* @param name Full or relative path to extension library
*/
IE_SUPPRESS_DEPRECATED_START_WIN
explicit ShapeInferExtension(const file_name_t& name): actual(name) {}
IE_SUPPRESS_DEPRECATED_END_WIN
/**
* @brief Gets the extension version information
@@ -218,6 +226,7 @@ public:
/**
* @brief IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* The method will be removed in 2021.1 release.
* @brief Sets a log callback that is used to track what is going on inside
*
* @param listener Logging listener

View File

@@ -20,13 +20,13 @@
namespace InferenceEngine {
/**
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1
* @brief This class is the main interface to build and parse a network from a given IR
*
* All methods here do not throw exceptions and return a StatusCode and ResponseDesc object.
* Alternatively, to use methods that throw exceptions, refer to the CNNNetReader wrapper class.
*/
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3")
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1")
ICNNNetReader : public details::IRelease {
public:
/**

View File

@@ -54,14 +54,14 @@ public:
virtual std::shared_ptr<const ngraph::Function> getFunction() const noexcept = 0;
/**
* @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3
 * @deprecated Network precision does not make sense, use precision on edges. The method will be removed in 2021.1
* @brief Returns the main network operating precision.
*
* This may be MIXED if not homogeneous.
*
* @return A precision type
*/
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1")
virtual Precision getPrecision() const noexcept = 0;
/**
@@ -94,14 +94,14 @@ public:
virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;
/**
* @deprecated Use ICNNNetwork::getName() instead. The method will be removed in 2020.3
* @deprecated Use ICNNNetwork::getName() instead. The method will be removed in 2021.1
* @brief Gets the network name. The name is stored in the given pName string.
*
* @param pName - will receive actual network name, specified in IR file,
* pName should point to valid memory address before invoking this function
* @param len - size in bytes of pName buffer, actual name is trimmed by this size
*/
INFERENCE_ENGINE_DEPRECATED("Use ICNNNetwork::getName() instead. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use ICNNNetwork::getName() instead. The method will be removed in 2021.1")
virtual void getName(char* pName, size_t len) const noexcept = 0;
/**
@@ -119,7 +119,7 @@ public:
virtual size_t layerCount() const noexcept = 0;
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Returns a smart pointer reference to a Data node given its name.
*
* If the Data node is missing, returns reference to a default initialized new empty data pointer with given name.
@@ -127,16 +127,16 @@ public:
* @param dname Name of the Data node
* @return Data node smart pointer
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
virtual DataPtr& getData(const char* dname) noexcept = 0;
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Insert a layer into the network. A user is responsible to connect it to other data elements.
*
* @param layer Const reference to a layer smart pointer
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
virtual void addLayer(const CNNLayerPtr& layer) noexcept = 0;
/**
@@ -151,7 +151,7 @@ public:
ResponseDesc* resp = nullptr) noexcept = 0;
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Gets network layer with the given name
*
* @param layerName Given name of the layer
@@ -159,7 +159,7 @@ public:
* @param resp Pointer to the response message that holds a description of an error if any occurred
* @return Status code of the operation. InferenceEngine::OK if succeeded
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
virtual StatusCode getLayerByName(const char* layerName, CNNLayerPtr& out, ResponseDesc* resp) const noexcept = 0;
/**

View File

@@ -86,10 +86,12 @@ public:
float mn = (std::numeric_limits<float>::max)();
float mx = (std::numeric_limits<float>::min)();
IE_SUPPRESS_DEPRECATED_START_WIN
for (int i = 0; i < statCount; i++) {
_minOutputs.push_back(mn);
_maxOutputs.push_back(mx);
}
IE_SUPPRESS_DEPRECATED_END_WIN
}
public:

View File

@@ -102,7 +102,8 @@ public:
virtual StatusCode Export(std::ostream& networkModel, ResponseDesc* resp) noexcept = 0;
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* The method will be removed in 2021.1 release.
* @brief Get the mapping of IR layer names to implemented kernels
*
* @param deployedTopology Map of PrimitiveInfo objects that represent the deployed topology

View File

@@ -147,6 +147,7 @@ public:
/**
* @deprecated Implement IExtension::getImplTypes and IExtension::getImplementation
* The interface will be removed in 2021.1 release.
* @interface ILayerImplFactory
* @brief This class provides interface for extension factories
*/
@@ -178,6 +179,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation.
* The interface will be removed in 2021.1 release.
* @class IShapeInferImpl
* @brief This class provides interface for the implementation with the custom execution code
*/
@@ -212,6 +214,7 @@ class IShapeInferExtension : public InferenceEngine::details::IRelease {
public:
/**
* @deprecated IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* The method will be removed in 2021.1 release.
* @brief Sets logging callback.
*
* Logging is used to track what is going on inside.
@@ -225,7 +228,6 @@ public:
/**
* @brief Gets extension version information and stores in versionInfo
*
* @param versionInfo Pointer to version info, will be set by plugin
*/
virtual void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept = 0;
@@ -237,6 +239,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation.
* The method will be removed in 2021.1 release.
* @brief Fills passed array with types of layers which shape infer implementations are included in the extension
*
* @param types Array to store the layer types
@@ -249,6 +252,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation.
* The method will be removed in 2021.1 release.
* @brief Gets shape propagation implementation for the given string-type of CNNLayer
*
* @param impl the vector with implementations which is ordered by priority
@@ -271,6 +275,7 @@ class INFERENCE_ENGINE_API_CLASS(IExtension) : public IShapeInferExtension {
public:
/**
* @deprecated Use IExtension::getImplementation to get a concrete implementation
* The method will be removed in 2021.1 release.
* @brief Provides a factory for a specified CNNLayer
* @param factory A factory returned from an extension plugin
* @param cnnLayer A CNNLayer object to provide factory for
@@ -281,12 +286,16 @@ public:
INFERENCE_ENGINE_DEPRECATED("Use IExtension::getImplementation to get a concrete implementation")
virtual StatusCode getFactoryFor(ILayerImplFactory*& factory, const CNNLayer* cnnLayer,
ResponseDesc* resp) noexcept {
(void)factory;
(void)cnnLayer;
(void)resp;
return NOT_IMPLEMENTED;
}
IE_SUPPRESS_DEPRECATED_END
/**
* @deprecated Use IExtension::getImplTypes to get implementation types for a particular node
* The method will be removed in 2021.1 release.
* @brief Fills passed array with types of layers which kernel implementations are included in the extension
*
* @param types Array to store the layer types
@@ -296,6 +305,9 @@ public:
*/
INFERENCE_ENGINE_DEPRECATED("Use IExtension::getImplTypes to get implementation types for a particular node")
virtual StatusCode getPrimitiveTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
(void)types;
(void)size;
(void)resp;
return NOT_IMPLEMENTED;
}
@@ -322,6 +334,7 @@ public:
* @return vector of strings
*/
virtual std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) {
(void)node;
return {};
}
@@ -332,6 +345,8 @@ public:
* @return shared pointer to implementation
*/
virtual ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
(void)node;
(void)implType;
return nullptr;
}
};
@@ -345,6 +360,7 @@ using IExtensionPtr = std::shared_ptr<IExtension>;
/**
* @deprecated Migrate to IR v10 and implement shape inference in the ngraph::op::Op::validate_and_infer_types method
* This API will be removed in 2021.1 release.
* @brief A shared pointer to a IShapeInferExtension interface
*/
using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
@@ -360,6 +376,7 @@ INFERENCE_EXTENSION_API(StatusCode) CreateExtension(IExtension*& ext, ResponseDe
/**
* @deprecated Migrate to IR v10 and implement shape inference in the ngraph::op::Op::validate_and_infer_types method
* This API will be removed in 2021.1 release.
* @brief Creates the default instance of the shape infer extension
*
* @param ext Shape Infer Extension interface

View File

@@ -32,7 +32,7 @@ class Node;
namespace InferenceEngine {
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This is an internal common Layer parameter parsing arguments
*/
struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
@@ -47,10 +47,8 @@ struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
std::string type;
/**
* deprecated Use precision of CNNLayer::outData and CNNLayer::insData
* @brief Layer precision
*/
INFERENCE_ENGINE_DEPRECATED("Use precision of CNNLayer::outData and CNNLayer::insData")
Precision precision;
/**
@@ -85,7 +83,7 @@ struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This is a base abstraction Layer - all DNN Layers inherit from this class
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CNNLayer) {
@@ -127,7 +125,9 @@ public:
/**
* @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
*/
IE_SUPPRESS_DEPRECATED_START_WIN
Ptr _fusedWith;
IE_SUPPRESS_DEPRECATED_END_WIN
/**
* @brief Convenience user values to store in this object as extra data
@@ -174,25 +174,18 @@ public:
*
* @param layer Reference to the layer to be fused with
*/
IE_SUPPRESS_DEPRECATED_START_WIN
void fuse(Ptr& layer) {
_fusedWith = layer;
}
IE_SUPPRESS_DEPRECATED_END_WIN
/**
* @brief Returns the first element of the input data for this layer
*
* @return A smart pointer to the input data element
*/
virtual const DataPtr input() const {
if (insData.empty()) {
THROW_IE_EXCEPTION << "Internal error: input data is empty";
}
auto lockedFirstInsData = insData[0].lock();
if (!lockedFirstInsData) {
THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
}
return lockedFirstInsData;
}
virtual const DataPtr input() const;
/**
* @brief Checks if the input data and layer data are legitimate
@@ -206,30 +199,13 @@ public:
* @return float value if parsing was successful
* @throws InferenceEngineException in case of parsing error
*/
static float ie_parse_float(const std::string& str) {
if (str == "-inf") {
return -std::numeric_limits<float>::infinity();
} else if (str == "inf") {
return std::numeric_limits<float>::infinity();
} else {
float res;
std::stringstream val_stream(str);
val_stream.imbue(std::locale("C"));
val_stream >> res;
if (!val_stream.eof()) THROW_IE_EXCEPTION;
return res;
}
}
static float ie_parse_float(const std::string& str);
/**
* @brief serialize float with c_locale formating
* used for default values serializing
*/
static std::string ie_serialize_float(float value) {
std::stringstream val_stream;
val_stream.imbue(std::locale("C"));
val_stream << value;
return val_stream.str();
}
static std::string ie_serialize_float(float value);
/**
* @brief Gets float value for the given parameter
@@ -238,15 +214,7 @@ public:
* @param def default value of the parameter if not found
* @return float value
*/
float GetParamAsFloat(const char* param, float def) const {
std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
try {
return ie_parse_float(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to float.";
}
}
float GetParamAsFloat(const char* param, float def) const;
/**
* @brief Returns a float value for the given layer parameter
@@ -254,15 +222,7 @@ public:
* @param param Name of the layer parameter
* @return A float value for the specified parameter
*/
float GetParamAsFloat(const char* param) const {
std::string val = GetParamAsString(param);
try {
return ie_parse_float(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to float.";
}
}
float GetParamAsFloat(const char* param) const;
/**
* @brief Returns a vector of float values for the given parameter or returns the default value
@@ -271,23 +231,7 @@ public:
* @param def Default value of the parameter if not found
* @return vector of float values
*/
std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<float> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
float val = ie_parse_float(str);
result.push_back(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to floats.";
}
}
return result;
}
std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const;
/**
* @brief Returns a vector of float values for the given parameter
@@ -295,22 +239,7 @@ public:
* @param param Name of the layer parameter
* @return vector of float values
*/
std::vector<float> GetParamAsFloats(const char* param) const {
std::string vals = GetParamAsString(param);
std::vector<float> result;
std::istringstream stream(vals);
std::string str;
while (getline(stream, str, ',')) {
try {
float val = ie_parse_float(str);
result.push_back(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to floats.";
}
}
return result;
}
std::vector<float> GetParamAsFloats(const char* param) const;
/**
* @brief Returns an integer value for the given parameter or returns the default value
@@ -319,15 +248,7 @@ public:
* @param def Default value of the parameter if not found
* @return An int value for the specified parameter
*/
int GetParamAsInt(const char* param, int def) const {
std::string val = GetParamAsString(param, std::to_string(def).c_str());
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to int.";
}
}
int GetParamAsInt(const char* param, int def) const;
/**
* @brief Returns an integer value for the given parameter
@@ -335,15 +256,7 @@ public:
* @param param Name of the layer parameter
* @return An int value for the specified parameter
*/
int GetParamAsInt(const char* param) const {
std::string val = GetParamAsString(param);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to int.";
}
}
int GetParamAsInt(const char* param) const;
/**
* @brief Returns a vector of int values for the given parameter or returns the default value
@@ -352,22 +265,7 @@ public:
* @param def Default value of the parameter if not found
* @return vector of int values
*/
std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<int> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stoi(str));
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to int.";
}
}
return result;
}
std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const;
/**
* @brief Returns a vector of int values for the given parameter
@@ -375,21 +273,8 @@ public:
* @param param Name of the layer parameter
* @return vector of int values
*/
std::vector<int> GetParamAsInts(const char* param) const {
std::string vals = GetParamAsString(param);
std::vector<int> result;
std::istringstream stream(vals);
std::string str;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stoi(str));
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to int.";
}
}
return result;
}
std::vector<int> GetParamAsInts(const char* param) const;
/**
* @brief Returns an unsigned integer value for the given parameter or returns the default value
*
@@ -397,20 +282,7 @@ public:
* @param def Default value of the parameter if not found
* @return An unsigned integer value for the specified parameter
*/
unsigned int GetParamAsUInt(const char* param, unsigned int def) const {
std::string val = GetParamAsString(param, std::to_string(def).c_str());
std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
". Value " + val + " cannot be casted to int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
unsigned int GetParamAsUInt(const char* param, unsigned int def) const;
/**
* @brief Returns an unsigned integer value for the given parameter
@@ -418,20 +290,7 @@ public:
* @param param Name of the layer parameter
* @return An unsigned integer value for the specified parameter
*/
unsigned int GetParamAsUInt(const char* param) const {
std::string val = GetParamAsString(param);
std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
". Value " + val + " cannot be casted to unsigned int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
unsigned int GetParamAsUInt(const char* param) const;
/**
* @brief Returns a vector of unsigned int values for the given parameter or returns the default value
@@ -440,27 +299,7 @@ public:
* @param def Default value of the parameter if not found
* @return vector of unsigned int values
*/
std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<unsigned int> result;
std::istringstream stream(vals);
std::string str;
std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
name + ". Value " + vals + " cannot be casted to unsigned int.";
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
int value = std::stoi(str);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
result.push_back(static_cast<unsigned int>(value));
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
return result;
}
std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const;
/**
* @brief Returns a vector of unsigned int values for the given parameter
@@ -468,26 +307,8 @@ public:
* @param param Name of the layer parameter
* @return vector of unsigned int values
*/
std::vector<unsigned int> GetParamAsUInts(const char* param) const {
std::string vals = GetParamAsString(param);
std::vector<unsigned int> result;
std::istringstream stream(vals);
std::string str;
std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
name + ". Value " + vals + " cannot be casted to int.";
while (getline(stream, str, ',')) {
try {
int value = std::stoi(str);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
result.push_back(static_cast<unsigned int>(value));
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
return result;
}
std::vector<unsigned int> GetParamAsUInts(const char* param) const;
/**
* @brief Returns a boolean value for the given parameter.
*
@@ -496,44 +317,15 @@ public:
* @param def Default value of the parameter if not found
* @return A bool value for the specified parameter
*/
bool GetParamAsBool(const char* param, bool def) const {
std::string val = GetParamAsString(param, std::to_string(def).c_str());
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return std::tolower(value);
});
bool GetParamAsBool(const char* param, bool def) const;
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, def) != 0);
}
return result;
}
/**
* @brief Returns a boolean value for the given parameter
*
* @param param Name of the layer parameter
* @return A bool value for the specified parameter
*/
bool GetParamAsBool(const char* param) const {
std::string val = GetParamAsString(param);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return std::tolower(value);
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param) != 0);
}
return result;
}
bool GetParamAsBool(const char* param) const;
/**
* @brief Returns a string value for the given parameter or returns the default one
@@ -542,13 +334,7 @@ public:
* @param def Default value of the parameter if not found
* @return A string value
*/
std::string GetParamAsString(const char* param, const char* def) const {
auto it = params.find(param);
if (it == params.end() || it->second.empty()) {
return def;
}
return (*it).second;
}
std::string GetParamAsString(const char* param, const char* def) const;
/**
* @brief Checks the param presence in the layer
@@ -556,13 +342,7 @@ public:
* @param param Name of the layer parameter
* @return a bool depending param presence
*/
bool CheckParamPresence(const char* param) const {
auto it = params.find(param);
if (it == params.end()) {
return false;
}
return true;
}
bool CheckParamPresence(const char* param) const;
/**
* @brief Returns a string value for the given parameter.
@@ -571,13 +351,7 @@ public:
* @param param Name of the layer parameter
* @return A string value
*/
std::string GetParamAsString(const char* param) const {
auto it = params.find(param);
if (it == params.end()) {
THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
}
return (*it).second;
}
std::string GetParamAsString(const char* param) const;
/**
* @brief Gets the parameter as a std::vector<std::string>
@@ -585,21 +359,7 @@ public:
* @param def The default values if case of parameter is not found
* @return The parameter as strings.
*/
std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<std::string> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
result.push_back(str);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
}
}
return result;
}
std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const;
/**
* @brief Map of pairs: (parameter name, parameter value)
@@ -622,7 +382,7 @@ IE_SUPPRESS_DEPRECATED_END
IE_SUPPRESS_DEPRECATED_START_WIN
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(WeightableLayer): public CNNLayer {
@@ -665,7 +425,7 @@ public:
unsigned int& prop_name##_y = prop_name.at(Y_AXIS)
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard 3D Convolution Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ConvolutionLayer): public WeightableLayer {
@@ -745,7 +505,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard deconvolution layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeconvolutionLayer): public ConvolutionLayer {
@@ -757,7 +517,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard deformable convolution layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
@@ -774,7 +534,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard pooling layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PoolingLayer): public CNNLayer {
@@ -856,7 +616,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard binary convolution layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BinaryConvolutionLayer): public WeightableLayer {
@@ -961,7 +721,7 @@ public:
#undef DEFINE_PROP
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a fully connected layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(FullyConnectedLayer): public WeightableLayer {
@@ -980,7 +740,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents concatenation layer
*
* Takes as input several data elements and merges them to one using the supplied axis
@@ -1004,7 +764,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a layer that evenly splits the input into the supplied outputs
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SplitLayer): public CNNLayer {
@@ -1023,7 +783,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Linear Response Normalization (LRN) Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NormLayer): public CNNLayer {
@@ -1058,7 +818,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents standard softmax Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SoftMaxLayer): public CNNLayer {
@@ -1076,7 +836,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents standard GRN Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GRNLayer): public CNNLayer {
@@ -1096,7 +856,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents standard MVN Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(MVNLayer): public CNNLayer {
@@ -1121,7 +881,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Rectified Linear activation layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReLULayer): public CNNLayer {
@@ -1140,7 +900,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Clamp activation layer
*
* Clamps all tensor elements into the range [min_value, max_value]
@@ -1165,7 +925,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a ReLU6 activation layer
*
* Clamps all tensor elements into the range [0, 6.0]
@@ -1186,7 +946,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents an element wise operation layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(EltwiseLayer): public CNNLayer {
@@ -1237,7 +997,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard crop layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CropLayer): public CNNLayer {
@@ -1264,7 +1024,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard reshape layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReshapeLayer): public CNNLayer {
@@ -1291,7 +1051,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Tile Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TileLayer): public CNNLayer {
@@ -1314,7 +1074,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Layer which performs Scale and Shift
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScaleShiftLayer): public WeightableLayer {
@@ -1334,7 +1094,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents TensorIterator layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TensorIterator): public CNNLayer {
@@ -1372,7 +1132,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Base class for recurrent cell layers
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNCellBase): public WeightableLayer {
@@ -1431,7 +1191,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief LSTM Cell layer
*
* G - number of gates (=4)
@@ -1477,7 +1237,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief GRU Cell layer
*
* G - number of gates (=3)
@@ -1519,7 +1279,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief RNN Cell layer
*
* G - number of gates (=1)
@@ -1556,7 +1316,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Sequence of recurrent cells
*
* N - batch size
@@ -1612,7 +1372,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Layer which performs Scale and Shift
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PReLULayer): public WeightableLayer {
@@ -1635,7 +1395,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Power Layer
*
* Formula is: output = (offset + scale * input) ^ power
@@ -1664,7 +1424,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Batch Normalization Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BatchNormalizationLayer): public WeightableLayer {
@@ -1683,7 +1443,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a general matrix multiplication operation layer
*
* Formula is: dst := alpha*src1*src2 + beta*src3
@@ -1715,7 +1475,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Pad layer
*
* Adds paddings to input tensor
@@ -1753,7 +1513,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Gather layer
*
* Gather slices from Dictionary according to Indexes
@@ -1773,7 +1533,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Strided Slice layer
*
* Strided Slice picks from input tensor according parameters
@@ -1814,7 +1574,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Shuffle Channels layer
* Shuffle Channels picks from input tensor according parameters
*/
@@ -1839,7 +1599,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Depth To Space layer
* Depth To Space picks from input tensor according parameters
*/
@@ -1859,7 +1619,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Space To Depth layer
* Space To Depth picks from input tensor according parameters
*/
@@ -1879,7 +1639,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Space To Batch layer
*
* Space To Batch picks from input tensor according parameters
@@ -1909,7 +1669,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Batch To Space layer
*
* Batch To Space picks from input tensor according parameters
@@ -1942,7 +1702,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents SparseFillEmptyRows layer
*
* SparseFillEmptyRows fills empty rows in a sparse tensor
@@ -1958,7 +1718,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents SparseSegmentMean(SqrtN, Sum) layers
* SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
*/
@@ -1973,7 +1733,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents ExperimentalSparseWeightedReduce layer
* ExperimentalSparseWeightedReduce layer reduces data along sparse segments of a tensor.
*/
@@ -1988,7 +1748,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents SparseToDense layer
* SparseToDense layer converts a sparse tensor to a dense tensor.
*/
@@ -2003,7 +1763,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents Bucketize layer
* Bucketize layer bucketizes the input based on the boundaries.
*/
@@ -2023,7 +1783,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Reverse Sequence layer
*
* Reverse Sequence modifies input tensor according parameters
@@ -2049,7 +1809,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a OneHot layer
* Converts input into OneHot representation.
*/
@@ -2084,7 +1844,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard RangeLayer layer
*
* RangeLayer modifies input tensor dimensions according parameters
@@ -2100,7 +1860,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Fill layer
*
* RFill modifies input tensor according parameters
@@ -2116,7 +1876,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a SelectLayer layer
*
* SelectLayer layer takes elements from the second (“then”) or the third (“else”) input based on condition mask
@@ -2134,7 +1894,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Broadcast layer
*
* Broadcast modifies input tensor dimensions according parameters
@@ -2150,7 +1910,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a quantization operation layer
*
* Element-wise linear quantization of floating point input values into a descrete set of floating point values
@@ -2171,7 +1931,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Math layers
*
* Math modifies input tensor dimensions according parameters
@@ -2187,7 +1947,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Reduce layers
*
* Reduce modifies input tensor according parameters
@@ -2208,7 +1968,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard TopK layer
*
* TopK picks top K values from input tensor according parameters
@@ -2237,7 +1997,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents Unique layer.
*
* The Unique operation searches for unique elements in 1-D input
@@ -2266,7 +2026,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard NonMaxSuppression layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NonMaxSuppressionLayer): public CNNLayer {
@@ -2289,7 +2049,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Scatter layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScatterLayer): public CNNLayer {

View File

@@ -71,10 +71,10 @@ struct QueryNetworkResult {
};
/**
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2020.3
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2021.1
* @brief This class is a main plugin interface
*/
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core instead. Will be removed in 2020.3")
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core instead. Will be removed in 2021.1")
INFERENCE_ENGINE_API_CLASS(IInferencePlugin)
: public details::IRelease {
public:
@@ -87,6 +87,7 @@ public:
/**
* @deprecated IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* This API will be removed in 2021.1 release.
* @brief Sets logging callback
*
* Logging is used to track what is going on inside

View File

@@ -18,11 +18,11 @@
namespace InferenceEngine {
/**
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2020.3
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2021.1
* @brief This is a class to load a suitable plugin
*/
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core instead which dispatches plugin automatically."
"Will be removed in 2020.3") INFERENCE_ENGINE_API_CLASS(PluginDispatcher) {
"Will be removed in 2021.1") INFERENCE_ENGINE_API_CLASS(PluginDispatcher) {
public:
/**
* @brief A constructor

View File

@@ -21,7 +21,7 @@ namespace details {
IE_SUPPRESS_DEPRECATED_START
/**
* @deprecated Use InferenceEngine::Core instead.
* @deprecated Use InferenceEngine::Core instead. This API will be removed in 2021.1 release.
* @brief This class defines the name of the fabric for creating an IInferencePlugin object in DLL
*/
template <>

View File

@@ -21,6 +21,7 @@ namespace InferenceEngine {
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* This structure will be removed in 2021.1 release.
* @brief Structure with information about Primitive
*/
struct INFERENCE_ENGINE_DEPRECATED("Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph") PrimitiveInfo {

View File

@@ -20,6 +20,7 @@ namespace InferenceEngine {
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* This API will be removed in 2021.1 release.
* @struct TensorInfo
* @brief This structure describes tensor information
*/

View File

@@ -35,7 +35,8 @@
namespace InferenceEngine {
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Gets the top n results from a tblob
*
* @param n Top n count
@@ -44,7 +45,7 @@ namespace InferenceEngine {
*/
template <class T>
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& output) {
SizeVector dims = input.getTensorDesc().getDims();
size_t input_rank = dims.size();
@@ -81,7 +82,8 @@ inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& o
}
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Gets the top n results from a blob
*
* @param n Top n count
@@ -89,7 +91,7 @@ inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& o
* @param output Vector of indexes for the top n places
*/
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& output) {
IE_SUPPRESS_DEPRECATED_START
switch (input.getTensorDesc().getPrecision()) {
@@ -112,7 +114,8 @@ inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& outpu
#undef TBLOB_TOP_RESULT
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Copies a 8-bit RGB image to the blob.
*
* Throws an exception in case of dimensions or input size mismatch
@@ -124,7 +127,7 @@ inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& outpu
*/
template <typename data_t>
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t>* blob) {
SizeVector dims = blob->getTensorDesc().getDims();
if (4 != dims.size())
@@ -162,7 +165,8 @@ void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t
}
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Splits the RGB channels to either I16 Blob or float blob.
*
* The image buffer is assumed to be packed with no support for strides.
@@ -172,7 +176,7 @@ void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t
* @param input Blob to contain the split image (to 3 channels)
*/
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, Blob& input) {
IE_SUPPRESS_DEPRECATED_START
TBlob<float>* float_input = dynamic_cast<TBlob<float>*>(&input);
@@ -187,7 +191,8 @@ inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSiz
}
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Copies data from a certain precision to float
*
* @param dst Pointer to an output float buffer, must be allocated before the call
@@ -195,7 +200,7 @@ inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSiz
*/
template <typename T>
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
if (!dst) {
return;

View File

@@ -82,6 +82,16 @@ else()
endif()
endif()
if(APPLE)
set(CMAKE_MACOSX_RPATH ON)
endif()
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
set(CMAKE_C_VISIBILITY_PRESET hidden)
set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
####################################
## to use C++11; can overwritten via cmake command line
if(NOT DEFINED CMAKE_CXX_STANDARD)

View File

@@ -156,7 +156,7 @@ int main(int argc, char *argv[]) {
if (FLAGS_d.find("CPU") != std::string::npos && !FLAGS_l.empty()) {
// CPU (MKLDNN) extensions is loaded as a shared library and passed as a pointer to base extension
const auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU (MKLDNN) extensions is loaded " << FLAGS_l << slog::endl;
}

0
inference-engine/samples/build_samples.sh Normal file → Executable file
View File

View File

@@ -77,7 +77,7 @@ int main(int argc, char *argv[]) {
if (!FLAGS_l.empty()) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
}
if (!FLAGS_c.empty()) {

View File

@@ -85,30 +85,28 @@ int main(int argc, char *argv[]) {
std::vector<std::string> availableDevices = ie.GetAvailableDevices();
// --------------------------- 3. Query and print supported metrics and config keys--------------------
std::set<std::string> printedDevices;
std::cout << "Available devices: " << std::endl;
for (auto && device : availableDevices) {
std::string deviceFamilyName = device.substr(0, device.find_first_of('.'));
if (printedDevices.find(deviceFamilyName) == printedDevices.end())
printedDevices.insert(deviceFamilyName);
else
continue;
std::cout << "\tDevice: " << deviceFamilyName << std::endl;
std::cout << "\tDevice: " << device << std::endl;
std::cout << "\tMetrics: " << std::endl;
std::vector<std::string> supportedMetrics = ie.GetMetric(deviceFamilyName, METRIC_KEY(SUPPORTED_METRICS));
std::vector<std::string> supportedMetrics = ie.GetMetric(device, METRIC_KEY(SUPPORTED_METRICS));
for (auto && metricName : supportedMetrics) {
std::cout << "\t\t" << metricName << " : " << std::flush;
printParameterValue(ie.GetMetric(device, metricName));
if (metricName != METRIC_KEY(AVAILABLE_DEVICES)) {
std::cout << "\t\t" << metricName << " : " << std::flush;
printParameterValue(ie.GetMetric(device, metricName));
}
}
std::cout << "\tDefault values for device configuration keys: " << std::endl;
std::vector<std::string> supportedConfigKeys = ie.GetMetric(deviceFamilyName, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
for (auto && configKey : supportedConfigKeys) {
std::cout << "\t\t" << configKey << " : " << std::flush;
printParameterValue(ie.GetConfig(deviceFamilyName, configKey));
if (std::find(supportedMetrics.begin(), supportedMetrics.end(),
METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != supportedMetrics.end()) {
std::cout << "\tDefault values for device configuration keys: " << std::endl;
std::vector<std::string> supportedConfigKeys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
for (auto && configKey : supportedConfigKeys) {
std::cout << "\t\t" << configKey << " : " << std::flush;
printParameterValue(ie.GetConfig(device, configKey));
}
}
std::cout << std::endl;

View File

@@ -37,7 +37,7 @@ int main(int argc, char* argv[]) {
if (device_name.find("CPU") != std::string::npos) {
inPlaceExtension = std::make_shared<InPlaceExtension>();
// register sample's custom kernel (CustomReLU)
ie.AddExtension(inPlaceExtension, "CPU");
ie.AddExtension(inPlaceExtension);
}
// -----------------------------------------------------------------------------------------------------

View File

@@ -86,7 +86,7 @@ int main(int argc, char *argv[]) {
if (!FLAGS_l.empty()) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
}

View File

@@ -69,7 +69,7 @@ int main(int argc, char *argv[]) {
if (!FLAGS_l.empty()) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
}
if (!FLAGS_c.empty()) {

View File

View File

@@ -24,6 +24,8 @@ endif()
add_subdirectory(hetero_plugin)
add_subdirectory(multi_device)
add_subdirectory(transformations)
add_subdirectory(inference_engine)

View File

@@ -42,9 +42,9 @@ add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
# install
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cldnn_global_custom_kernels
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT gpu)
install(FILES "${clDNN_SOURCE_DIR}/kernel_selector/core/cache/cache.json"
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT gpu)

View File

@@ -65,6 +65,27 @@ HeteroInferRequest::HeteroInferRequest(InferenceEngine::InputsDataMap networkInp
}
}
void HeteroInferRequest::SetBlob(const char* name, const InferenceEngine::Blob::Ptr& data) {
InferenceEngine::InferRequestInternal::SetBlob(name, data);
assert(!_inferRequests.empty());
for (auto &&desc : _inferRequests) {
auto &r = desc._request;
assert(nullptr != r);
InputInfo::Ptr foundInput;
DataPtr foundOutput;
try {
// if `name` is input blob
if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) {
r->SetBlob(name, data, foundInput->getPreProcess());
}
} catch (const InferenceEngine::details::InferenceEngineException & ex) {
std::string message = ex.what();
if (message.find(NOT_FOUND_str) == std::string::npos)
throw ex;
}
}
}
void HeteroInferRequest::InferImpl() {
updateInOutIfNeeded();
size_t i = 0;

View File

@@ -39,6 +39,8 @@ public:
void InferImpl() override;
void SetBlob(const char* name, const InferenceEngine::Blob::Ptr& data) override;
void GetPerformanceCounts(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> &perfMap) const override;
void updateInOutIfNeeded();

View File

@@ -170,7 +170,7 @@ endif()
target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
POSSIBLE_PLUGINS HeteroPlugin clDNNPlugin GNAPlugin MKLDNNPlugin myriadPlugin)
POSSIBLE_PLUGINS MultiDevicePlugin HeteroPlugin clDNNPlugin GNAPlugin MKLDNNPlugin myriadPlugin)
# Create NN Builder
@@ -272,6 +272,10 @@ if(THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
install(FILES "${TBB}/LICENSE"
DESTINATION ${IE_CPACK_IE_DIR}/external/tbb
COMPONENT tbb)
install(FILES "${TBB}/cmake/TBBConfig.cmake"
"${TBB}/cmake/TBBConfigVersion.cmake"
DESTINATION ${IE_CPACK_IE_DIR}/external/tbb/cmake
COMPONENT tbb)
endif()
ie_cpack_add_component(core REQUIRED DEPENDS ${core_components})
@@ -279,8 +283,8 @@ ie_cpack_add_component(core REQUIRED DEPENDS ${core_components})
install(DIRECTORY "${IE_MAIN_SOURCE_DIR}/include" DESTINATION ${IE_CPACK_IE_DIR}
COMPONENT core)
install(TARGETS ${TARGET_NAME} ${TARGET_NAME}_nn_builder
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
install(FILES "${OpenVINO_BINARY_DIR}/share/ie_parallel.cmake"
"${OpenVINO_BINARY_DIR}/share/InferenceEngineConfig.cmake"
@@ -288,5 +292,5 @@ install(FILES "${OpenVINO_BINARY_DIR}/share/ie_parallel.cmake"
DESTINATION ${IE_CPACK_IE_DIR}/share
COMPONENT core)
install(FILES $<TARGET_FILE_DIR:${TARGET_NAME}>/plugins.xml
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT core)

View File

@@ -23,6 +23,7 @@
#include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
#include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
#include <transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
#include <ngraph/opsets/opset2.hpp>
#include "ngraph_ops/eltwise.hpp"
#include "graph_tools.hpp"
@@ -70,6 +71,7 @@ static std::shared_ptr<ngraph::Function> copyFunction(const std::shared_ptr<cons
for (auto n : specialized_function->get_ops()) {
goe_elimination.run_on_node(n);
}
specialized_function->set_friendly_name(func->get_friendly_name());
return specialized_function;
}
@@ -190,14 +192,14 @@ void CNNNetworkNGraphImpl::getName(char* pName, size_t len) const noexcept {
// Description buffer will preserve garbage if external pointer not initialized
if (len < 1) return;
memset(pName, 0, len);
DescriptionBuffer(pName, len) << _ngraph_function->get_name();
DescriptionBuffer(pName, len) << _ngraph_function->get_friendly_name();
}
const std::string& CNNNetworkNGraphImpl::getName() const noexcept {
if (cnnNetwork) {
return cnnNetwork->getName();
}
return _ngraph_function->get_name();
return _ngraph_function->get_friendly_name();
}
InputInfo::Ptr CNNNetworkNGraphImpl::getInput(const std::string& inputName) const noexcept {
@@ -335,9 +337,7 @@ void CNNNetworkNGraphImpl::reshape() {
// Disable reshape for generic nodes
::ngraph::op::GenericIE::DisableReshape noReshape(_ngraph_function);
StatusCode ret = reshape({}, &desc);
if (ret != OK)
THROW_IE_EXCEPTION << desc.msg;
reshape({}, true);
}
StatusCode
@@ -346,100 +346,123 @@ CNNNetworkNGraphImpl::reshape(const std::map<std::string, std::vector<size_t>>&
IE_PROFILING_AUTO_SCOPE(reshape)
if (cnnNetwork)
return cnnNetwork->reshape(inputShapes, responseDesc);
auto params = _ngraph_function->get_parameters();
try {
auto params = _ngraph_function->get_parameters();
for (size_t i = 0; i < params.size(); i++) {
const auto& param = params[i];
if (inputShapes.find(param->get_friendly_name()) == inputShapes.end())
continue;
::ngraph::PartialShape shape(inputShapes.at(param->get_friendly_name()));
auto newParam = std::make_shared<::ngraph::op::Parameter>(param->get_element_type(), shape);
newParam->set_friendly_name(param->get_friendly_name());
_ngraph_function->replace_parameter(i, newParam);
}
_ngraph_function->validate_nodes_and_infer_types();
if (cnnNetwork) {
convertToCNNNetworkImpl();
} else {
auto specialized_ngraph_function = cloneFunction(true, inputShapes);
// Call this transformation because OneHot IE and nGraph have different output precisions
{
IE_PROFILING_AUTO_SCOPE(ConvertOneHot);
::ngraph::pass::ConvertOneHotToOneHotIE().run_on_function(specialized_ngraph_function);
}
specialized_ngraph_function->validate_nodes_and_infer_types();
#if 0
for (const auto &op : specialized_ngraph_function->get_ordered_ops()) {
cout << "[ " << op->description() << " ] " << op->get_friendly_name() << endl;
cout << " Inputs: ";
for (const auto &in : op->inputs()) {
cout << "[" << in.get_element_type().get_type_name() << "]";
if (in.get_partial_shape().is_dynamic()) {
cout << "dyn_shape";
} else {
cout << "{";
bool first = true;
for (auto i : in.get_shape()) {
if (!first) cout << ",";
cout << i;
first = false;
}
cout << "} ";
}
}
cout << endl << " Outputs: ";
for (const auto &in : op->outputs()) {
cout << "[" << in.get_element_type().get_type_name() << "]";
if (in.get_partial_shape().is_dynamic()) {
cout << "dyn_shape";
} else {
cout << "{";
bool first = true;
for (auto i : in.get_shape()) {
if (!first) cout << ",";
cout << i;
first = false;
}
cout << "} ";
}
}
cout << endl;
}
#endif
std::unordered_set<std::string> opName;
for (const auto & layer : specialized_ngraph_function->get_ordered_ops()) {
if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
IE_ASSERT(layer->get_inputs().size() == 1);
const auto& input = layer->get_inputs()[0];
std::string outName = input.get_output().get_node()->get_friendly_name();
if (input.get_output().get_node()->get_output_size() != 1)
outName += "." + std::to_string(input.get_output().get_index());
addOutput(outName);
continue;
}
if (opName.find(layer->get_friendly_name()) != opName.end())
THROW_IE_EXCEPTION << "All operations in nGraph function should have unique friendly names!";
opName.insert(layer->get_friendly_name());
for (const auto& output : layer->outputs()) {
std::string outName = layer->get_friendly_name();
if (layer->outputs().size() != 1)
outName += "." + std::to_string(output.get_index());
createDataForResult(output, outName, _data[outName]);
}
}
}
reshape(inputShapes);
} catch (std::exception& ex) {
// Try to restore original parameters
auto new_params = _ngraph_function->get_parameters();
for (size_t i = 0; i < params.size(); i++) {
if (new_params[i] != params[i])
_ngraph_function->replace_parameter(i, params[i]);
}
return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what();
}
return OK;
}
// Re-validates the nGraph function after (optionally) replacing input
// Parameters with new shapes, then rebuilds the cached output data map.
// `force == true` re-runs shape inference even if no parameter changed.
void CNNNetworkNGraphImpl::reshape(const std::map<std::string, std::vector<size_t>>& inputShapes, bool force) {
    auto params = _ngraph_function->get_parameters();
    for (size_t i = 0; i < params.size() && !inputShapes.empty(); i++) {
        const auto& param = params[i];
        // Only parameters explicitly listed in the request are touched.
        if (inputShapes.find(param->get_friendly_name()) == inputShapes.end())
            continue;
        ::ngraph::PartialShape shape(inputShapes.at(param->get_friendly_name()));
        // Check that shapes will be changed: a dynamic current shape always
        // counts as changed; a static one only when the dimensions differ.
        bool shapesChanged = param->get_partial_shape().is_dynamic();
        if (!shapesChanged && param->get_partial_shape().is_static()) {
            auto oldShape = param->get_shape();
            auto newShape = ::ngraph::Shape(inputShapes.at(param->get_friendly_name()));
            shapesChanged = oldShape != newShape;
        }
        if (!shapesChanged)
            continue;
        // At least one shape differs, so the revalidation below must run.
        force = true;
        auto newParam = std::make_shared<::ngraph::opset1::Parameter>(param->get_element_type(), shape);
        newParam->set_friendly_name(param->get_friendly_name());
        _ngraph_function->replace_parameter(i, newParam);
    }
    // No shape changed and the caller did not force a refresh.
    if (!force) return;
    _ngraph_function->validate_nodes_and_infer_types();
    if (cnnNetwork) {
        // A converted CNNNetwork representation already exists; regenerate it.
        convertToCNNNetworkImpl();
    } else {
        auto specialized_ngraph_function = cloneFunction(true, inputShapes);
        // Call this transformation because OneHot IE and nGraph have different output precisions
        {
            IE_PROFILING_AUTO_SCOPE(ConvertOneHot);
            ::ngraph::pass::ConvertOneHotToOneHotIE().run_on_function(specialized_ngraph_function);
        }
        specialized_ngraph_function->validate_nodes_and_infer_types();
#if 0
        // Debug-only dump of every op with its input/output types and shapes.
        for (const auto &op : specialized_ngraph_function->get_ordered_ops()) {
            cout << "[ " << op->description() << " ] " << op->get_friendly_name() << endl;
            cout << " Inputs: ";
            for (const auto &in : op->inputs()) {
                cout << "[" << in.get_element_type().get_type_name() << "]";
                if (in.get_partial_shape().is_dynamic()) {
                    cout << "dyn_shape";
                } else {
                    cout << "{";
                    bool first = true;
                    for (auto i : in.get_shape()) {
                        if (!first) cout << ",";
                        cout << i;
                        first = false;
                    }
                    cout << "} ";
                }
            }
            cout << endl << " Outputs: ";
            for (const auto &in : op->outputs()) {
                cout << "[" << in.get_element_type().get_type_name() << "]";
                if (in.get_partial_shape().is_dynamic()) {
                    cout << "dyn_shape";
                } else {
                    cout << "{";
                    bool first = true;
                    for (auto i : in.get_shape()) {
                        if (!first) cout << ",";
                        cout << i;
                        first = false;
                    }
                    cout << "} ";
                }
            }
            cout << endl;
        }
#endif
        std::unordered_set<std::string> opName;
        for (const auto & layer : specialized_ngraph_function->get_ordered_ops()) {
            if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
                // Result nodes become network outputs, named after the producing
                // node (with a ".<index>" suffix for multi-output producers).
                IE_ASSERT(layer->get_inputs().size() == 1);
                const auto& input = layer->get_inputs()[0];
                std::string outName = input.get_output().get_node()->get_friendly_name();
                if (input.get_output().get_node()->get_output_size() != 1)
                    outName += "." + std::to_string(input.get_output().get_index());
                addOutput(outName);
                continue;
            }
            // Friendly names are used as keys into _data, so duplicates are fatal.
            if (opName.find(layer->get_friendly_name()) != opName.end())
                THROW_IE_EXCEPTION << "All operations in nGraph function should have unique friendly names!";
            opName.insert(layer->get_friendly_name());
            for (const auto& output : layer->outputs()) {
                std::string outName = layer->get_friendly_name();
                if (layer->outputs().size() != 1)
                    outName += "." + std::to_string(output.get_index());
                createDataForResult(output, outName, _data[outName]);
            }
        }
    }
}
StatusCode CNNNetworkNGraphImpl::AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension,
InferenceEngine::ResponseDesc* resp) noexcept {
InferenceEngine::ResponseDesc* resp) noexcept {
if (!cnnNetwork) {
::ngraph::op::GenericIE::addExtension(_ngraph_function, extension);
}

View File

@@ -250,6 +250,11 @@ public:
iplugin_api_ptr->SetCore(mutableCore);
}
// Add registered extensions to new plugin
for (const auto& ext : extensions) {
plugin->AddExtension(ext, nullptr);
}
InferencePlugin cppPlugin(plugin);
// configuring
@@ -361,6 +366,14 @@ public:
THROW_IE_EXCEPTION << "Cannot add opset with name: " << it.first << ". Opset with the same name already exists.";
opsetNames.insert(it.first);
}
for (auto& plugin : plugins) {
IE_SUPPRESS_DEPRECATED_START
try {
plugin.second.AddExtension(extension);
} catch (...) {}
IE_SUPPRESS_DEPRECATED_END
}
extensions.emplace_back(extension);
}
@@ -488,6 +501,17 @@ CNNNetwork Core::ReadNetwork(const std::string& modelPath, const std::string& bi
return CNNNetwork(cnnReader);
}
/**
* Hold original blob in order to avoid situations when original blob is allocated on stack
*/
class WeightsHolderBlob: public TBlob<uint8_t> {
private:
    // Shares ownership of the source blob so its buffer outlives this wrapper;
    // the TBlob base aliases (does not copy) that buffer.
    Blob::CPtr originBlob;

public:
    // Wraps `weights` without copying: the TBlob base points directly at the
    // original tensor buffer while `originBlob` keeps the owner alive.
    WeightsHolderBlob(const Blob::CPtr& weights): TBlob<uint8_t>(weights->getTensorDesc(), weights->cbuffer().as<uint8_t*>()), originBlob(weights) {}
};
CNNNetwork Core::ReadNetwork(const std::string& model, const Blob::CPtr& weights) const {
IE_PROFILING_AUTO_SCOPE(Core::ReadNetwork)
IE_SUPPRESS_DEPRECATED_START
@@ -501,8 +525,7 @@ CNNNetwork Core::ReadNetwork(const std::string& model, const Blob::CPtr& weights
}
TBlob<uint8_t>::Ptr weights_ptr;
if (weights) {
uint8_t* ptr = weights->cbuffer().as<uint8_t*>();
weights_ptr = make_shared_blob<uint8_t>(weights->getTensorDesc(), ptr);
weights_ptr = std::make_shared<WeightsHolderBlob>(weights);
}
rt = cnnReader->SetWeights(weights_ptr, &desc);
if (rt != OK) THROW_IE_EXCEPTION << desc.msg;

View File

@@ -36,7 +36,7 @@ std::string getPluginName(const std::string& deviceName) {
static std::map<std::string, std::string> plugunFromDeviceMap = {
{"CPU", "MKLDNNPlugin"}, {"GPU", "clDNNPlugin"}, {"FPGA", "dliaPlugin"},
{"MYRIAD", "myriadPlugin"}, {"HDDL", "HDDLPlugin"}, {"GNA", "GNAPlugin"},
{"HETERO", "HeteroPlugin"}, {"MULTI", "MultiDevicePlugin"}};
{"HETERO", "HeteroPlugin"}, {"MULTI", "MultiDevicePlugin"}, {"KMB", "kmbPlugin"}};
auto val = plugunFromDeviceMap.find(deviceName);
if (val == plugunFromDeviceMap.end()) {

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -12,6 +12,7 @@
namespace InferenceEngine {
ITaskExecutor::Ptr ExecutorManagerImpl::getExecutor(std::string id) {
std::lock_guard<std::mutex> guard(taskExecutorMutex);
auto foundEntry = executors.find(id);
if (foundEntry == executors.end()) {
auto newExec = std::make_shared<CPUStreamsExecutor>(IStreamsExecutor::Config{id});
@@ -22,6 +23,7 @@ ITaskExecutor::Ptr ExecutorManagerImpl::getExecutor(std::string id) {
}
IStreamsExecutor::Ptr ExecutorManagerImpl::getIdleCPUStreamsExecutor(const IStreamsExecutor::Config& config) {
std::lock_guard<std::mutex> guard(streamExecutorMutex);
for (const auto& it : cpuStreamsExecutors) {
const auto& executor = it.second;
if (executor.use_count() != 1)
@@ -52,6 +54,8 @@ size_t ExecutorManagerImpl::getIdleCPUStreamsExecutorsNumber() {
}
void ExecutorManagerImpl::clear(const std::string& id) {
std::lock_guard<std::mutex> stream_guard(streamExecutorMutex);
std::lock_guard<std::mutex> task_guard(taskExecutorMutex);
if (id.empty()) {
executors.clear();
cpuStreamsExecutors.clear();
@@ -66,8 +70,47 @@ void ExecutorManagerImpl::clear(const std::string& id) {
}
}
// Singleton state: _instance is allocated lazily in getInstance() under
// _mutex and is intentionally never deleted (see rationale in getInstance()).
std::mutex ExecutorManager::_mutex;
ExecutorManager* ExecutorManager::_instance = nullptr;
ExecutorManager* ExecutorManager::getInstance() {
    /*
     * The singleton is allocated manually on the heap (and never freed)
     * instead of using either of the two usual patterns:
     *
     * 1) A function-local static:
     *
     * getInstance() {
     *     static ExecutorManager _instance;
     *     return &_instance;
     * }
     *
     * is destroyed in an unspecified order relative to other objects on
     * program exit. Some IE classes (e.g. MKLDNN::Engine) use this singleton
     * from their destructors, but they have no dependency the C++ runtime
     * can see, so the local static could be destroyed before
     * MKLDNN::~Engine runs; any use of the destroyed object would then
     * crash or throw.
     *
     * 2) A static data member:
     *
     * ExecutorManager ExecutorManager::_instance;
     * getInstance() {
     *     return &_instance;
     * }
     *
     * breaks when the IE module is linked both statically and dynamically
     * (as some test cases do): both copies export an object with the same
     * name, leading to double construction and double destruction, which
     * for some compilers (e.g. gcc 5.4) crashes with "double free".
     *
     * Hence the deliberate, never-deleted heap allocation below.
     */
    std::lock_guard<std::mutex> guard(_mutex);
    if (_instance == nullptr) {
        _instance = new ExecutorManager();
    }
    return _instance;
}
// Thin facade: delegates executor lookup/creation to the wrapped impl.
ITaskExecutor::Ptr ExecutorManager::getExecutor(std::string id) {
    return _impl.getExecutor(id);
}

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -67,6 +67,6 @@ ie_developer_export_targets(${TARGET_NAME})
# install
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)

View File

@@ -6,6 +6,7 @@
#include <cpp/ie_cnn_network.h>
#include <ie_icnn_network.hpp>
#include <cnn_network_impl.hpp>
#include <file_utils.h>
#include <deque>

View File

@@ -6,9 +6,293 @@
using namespace InferenceEngine;
//
// ie_layers.h
//
// Returns the layer's first input data object.
// Throws when the layer has no inputs, or when the weakly-referenced
// data object has already been destroyed.
const DataPtr CNNLayer::input() const {
    if (insData.empty()) {
        THROW_IE_EXCEPTION << "Internal error: input data is empty";
    }
    const auto firstInput = insData[0].lock();
    if (firstInput == nullptr) {
        THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
    }
    return firstInput;
}
// Parses a float from its IR textual form. "inf"/"-inf" are accepted as
// spellings of the IEEE infinities; everything else is parsed with the
// "C" locale so '.' is always the decimal separator. Throws on any
// trailing garbage after the number.
float CNNLayer::ie_parse_float(const std::string& str) {
    if (str == "-inf") {
        return -std::numeric_limits<float>::infinity();
    }
    if (str == "inf") {
        return std::numeric_limits<float>::infinity();
    }
    std::stringstream parser(str);
    parser.imbue(std::locale("C"));
    float parsed;
    parser >> parsed;
    // A successful parse must consume the whole token.
    if (!parser.eof()) THROW_IE_EXCEPTION;
    return parsed;
}
// Serializes a float for the IR, using the "C" locale so the decimal
// separator is '.' regardless of the process-wide locale.
std::string CNNLayer::ie_serialize_float(float value) {
    std::stringstream serializer;
    serializer.imbue(std::locale("C"));
    serializer << value;
    return serializer.str();
}
// Reads `param` from the layer attribute map as a float, falling back to
// `def` when the attribute is absent (or present but empty).
float CNNLayer::GetParamAsFloat(const char* param, float def) const {
    std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
    try {
        return ie_parse_float(val);
    } catch (...) {
        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
                           << val << " cannot be casted to float.";
    }
}
// Mandatory variant: GetParamAsString throws when `param` is absent,
// and a malformed value is reported as a float-cast failure.
float CNNLayer::GetParamAsFloat(const char* param) const {
    std::string val = GetParamAsString(param);
    try {
        return ie_parse_float(val);
    } catch (...) {
        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
                           << val << " cannot be casted to float.";
    }
}
// Parses `param` as a comma-separated list of floats.
// Returns `def` when the attribute is absent or its value is empty.
std::vector<float> CNNLayer::GetParamAsFloats(const char* param, std::vector<float> def) const {
    std::string vals = GetParamAsString(param, "");
    std::vector<float> result;
    std::istringstream stream(vals);
    std::string str;
    if (vals.empty()) return def;
    while (getline(stream, str, ',')) {
        try {
            float val = ie_parse_float(str);
            result.push_back(val);
        } catch (...) {
            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
                               << ". Value " << vals << " cannot be casted to floats.";
        }
    }
    return result;
}
// Mandatory variant of the comma-separated float list parser: throws when
// the attribute is missing or any token fails to parse.
std::vector<float> CNNLayer::GetParamAsFloats(const char* param) const {
    std::string vals = GetParamAsString(param);
    std::vector<float> result;
    std::istringstream stream(vals);
    std::string str;
    while (getline(stream, str, ',')) {
        try {
            float val = ie_parse_float(str);
            result.push_back(val);
        } catch (...) {
            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
                               << ". Value " << vals << " cannot be casted to floats.";
        }
    }
    return result;
}
// Reads `param` as an int, falling back to `def` when absent.
// std::stoi throws on garbage or out-of-int-range values, which is
// remapped to an IE exception below.
int CNNLayer::GetParamAsInt(const char* param, int def) const {
    std::string val = GetParamAsString(param, std::to_string(def).c_str());
    try {
        return std::stoi(val);
    } catch (...) {
        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
                           << val << " cannot be casted to int.";
    }
}
// Mandatory variant: throws when `param` is absent or not parseable as int.
int CNNLayer::GetParamAsInt(const char* param) const {
    std::string val = GetParamAsString(param);
    try {
        return std::stoi(val);
    } catch (...) {
        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
                           << val << " cannot be casted to int.";
    }
}
// Parses `param` as a comma-separated list of ints.
// Returns `def` when the attribute is absent or its value is empty.
std::vector<int> CNNLayer::GetParamAsInts(const char* param, std::vector<int> def) const {
    std::string vals = GetParamAsString(param, "");
    std::vector<int> result;
    std::istringstream stream(vals);
    std::string str;
    if (vals.empty()) return def;
    while (getline(stream, str, ',')) {
        try {
            result.push_back(std::stoi(str));
        } catch (...) {
            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
                               << ". Value " << vals << " cannot be casted to int.";
        }
    }
    return result;
}
// Mandatory variant of the comma-separated int list parser: throws when
// the attribute is missing or any token fails std::stoi.
std::vector<int> CNNLayer::GetParamAsInts(const char* param) const {
    std::string vals = GetParamAsString(param);
    std::vector<int> result;
    std::istringstream stream(vals);
    std::string str;
    while (getline(stream, str, ',')) {
        try {
            result.push_back(std::stoi(str));
        } catch (...) {
            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
                               << ". Value " << vals << " cannot be casted to int.";
        }
    }
    return result;
}
// Reads `param` as an unsigned int, falling back to `def` when absent.
// Negative values are rejected because the result is unsigned.
// NOTE(review): the parse goes through std::stoi, so values above INT_MAX
// also fail — presumably acceptable for IR attributes; confirm if needed.
unsigned int CNNLayer::GetParamAsUInt(const char* param, unsigned int def) const {
    std::string val = GetParamAsString(param, std::to_string(def).c_str());
    // Fixed: the message previously said "casted to int" although this getter
    // produces an unsigned int (now consistent with the no-default overload).
    std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
                          ". Value " + val + " cannot be casted to unsigned int.";
    try {
        int value = std::stoi(val);
        if (value < 0) {
            THROW_IE_EXCEPTION << message;
        }
        return static_cast<unsigned int>(value);
    } catch (...) {
        THROW_IE_EXCEPTION << message;
    }
}
// Mandatory variant: throws when `param` is absent, malformed, or negative.
// NOTE(review): std::stoi also rejects values above INT_MAX — verify that
// IR attributes never need the full unsigned range.
unsigned int CNNLayer::GetParamAsUInt(const char* param) const {
    std::string val = GetParamAsString(param);
    std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
                          ". Value " + val + " cannot be casted to unsigned int.";
    try {
        int value = std::stoi(val);
        if (value < 0) {
            THROW_IE_EXCEPTION << message;
        }
        return static_cast<unsigned int>(value);
    } catch (...) {
        THROW_IE_EXCEPTION << message;
    }
}
// Parses `param` as a comma-separated list of non-negative integers.
// Returns `def` when the attribute is absent or its value is empty;
// throws when any token is malformed or negative.
std::vector<unsigned int> CNNLayer::GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
    std::string vals = GetParamAsString(param, "");
    std::vector<unsigned int> result;
    std::istringstream stream(vals);
    std::string str;
    if (vals.empty()) return def;
    while (getline(stream, str, ',')) {
        // Fixed: the error message used to be built once before the loop,
        // while `str` was still empty, so it never named the offending token.
        std::string message = "Cannot parse parameter " + std::string(param) + " " + str +
                              " from IR for layer " + name + ". Value " + vals +
                              " cannot be casted to unsigned int.";
        try {
            int value = std::stoi(str);
            if (value < 0) {
                THROW_IE_EXCEPTION << message;
            }
            result.push_back(static_cast<unsigned int>(value));
        } catch (...) {
            THROW_IE_EXCEPTION << message;
        }
    }
    return result;
}
// Mandatory variant of the comma-separated unsigned-int list parser:
// throws when the attribute is missing, or any token is malformed/negative.
std::vector<unsigned int> CNNLayer::GetParamAsUInts(const char* param) const {
    std::string vals = GetParamAsString(param);
    std::vector<unsigned int> result;
    std::istringstream stream(vals);
    std::string str;
    while (getline(stream, str, ',')) {
        // Fixed: the message is now built per token (it previously captured
        // `str` while still empty) and names the actual target type
        // ("unsigned int" instead of "int").
        std::string message = "Cannot parse parameter " + std::string(param) + " " + str +
                              " from IR for layer " + name + ". Value " + vals +
                              " cannot be casted to unsigned int.";
        try {
            int value = std::stoi(str);
            if (value < 0) {
                THROW_IE_EXCEPTION << message;
            }
            result.push_back(static_cast<unsigned int>(value));
        } catch (...) {
            THROW_IE_EXCEPTION << message;
        }
    }
    return result;
}
// Reads `param` as a bool, falling back to `def` when absent.
// `def` is serialized via std::to_string (yielding "1"/"0") so the same
// parsing path handles both the default and a stored value.
bool CNNLayer::GetParamAsBool(const char* param, bool def) const {
    std::string val = GetParamAsString(param, std::to_string(def).c_str());
    std::string loweredCaseValue;
    // Lower-case first so spellings like "True"/"FALSE" pass boolalpha parsing.
    std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
        return static_cast<char>(std::tolower(value));
    });
    bool result = false;
    if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
        // attempting parse using non alpha bool: any non-zero integer is true
        return (GetParamAsInt(param, def) != 0);
    }
    return result;
}
// Mandatory variant: throws (via GetParamAsString/GetParamAsInt) when
// `param` is absent or cannot be interpreted as a boolean.
bool CNNLayer::GetParamAsBool(const char* param) const {
    std::string val = GetParamAsString(param);
    std::string loweredCaseValue;
    // Lower-case first so spellings like "True"/"FALSE" pass boolalpha parsing.
    std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
        return static_cast<char>(std::tolower(value));
    });
    bool result = false;
    if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
        // attempting parse using non alpha bool: any non-zero integer is true
        return (GetParamAsInt(param) != 0);
    }
    return result;
}
// Returns the raw string value of `param`; an absent attribute and a
// present-but-empty one are both mapped to `def`.
std::string CNNLayer::GetParamAsString(const char* param, const char* def) const {
    const auto found = params.find(param);
    if (found == params.end() || found->second.empty()) {
        return def;
    }
    return found->second;
}
// True when the layer's attribute map contains `param`, regardless of
// whether its value is empty.
bool CNNLayer::CheckParamPresence(const char* param) const {
    return params.find(param) != params.end();
}
// Mandatory variant: throws when the attribute is missing.
// Unlike the defaulted overload, a present-but-empty value is returned as-is.
std::string CNNLayer::GetParamAsString(const char* param) const {
    auto it = params.find(param);
    if (it == params.end()) {
        THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
    }
    return (*it).second;
}
// Splits the attribute value on ','; returns `def` when the attribute is
// absent or its value is empty.
std::vector<std::string> CNNLayer::GetParamAsStrings(const char* param, std::vector<std::string> def) const {
    std::string vals = GetParamAsString(param, "");
    std::vector<std::string> result;
    std::istringstream stream(vals);
    std::string str;
    if (vals.empty()) return def;
    while (getline(stream, str, ',')) {
        try {
            // push_back can only fail on allocation; remap to an IE exception
            // to match the other GetParamAs* getters.
            result.push_back(str);
        } catch (...) {
            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
        }
    }
    return result;
}
// Out-of-line (empty) destructor definitions; presumably kept in this .cpp
// so the destructor code is emitted in one translation unit only.
CNNLayer::~CNNLayer() {}
WeightableLayer::~WeightableLayer() {}

Some files were not shown because too many files have changed in this diff Show More