Compare commits

...

10 Commits

Author SHA1 Message Date
Alexey Suhov
f26da46e3b Publishing 2020.3.1 LTS content (#3108) 2020-11-12 19:35:17 +03:00
Alexander Zhogov
cd95d8d3bb Azure CI: Disable Ninja on Mac due to errors (#809) 2020-06-06 18:29:31 +03:00
azhogov
5c6a0cb922 Azure: Add Ninja 2020-06-06 16:29:54 +03:00
azhogov
2e634cafc9 Add CODEOWNERS and CONTRIBUTING.md 2020-06-06 16:15:24 +03:00
Alexander Zhogov
28f258e18d Enable public CI (#789)
* Enable public CI

* Exclude failed nGraph UT by *GPU*:*CPU*

* Disable absent tests

* Exclude failed nGraph UT constant.shared_data
2020-06-05 15:55:45 +03:00
Alexey Suhov
2fe9b15230 change repo name to openvino in readme files 2020-06-03 00:08:25 +03:00
Alexey Suhov
9221f41b01 fix permissions for shell scripts 2020-06-02 22:32:00 +03:00
Alexey Suhov
85de6ee857 Publishing 2020.3 content 2020-06-02 21:59:45 +03:00
Moshe David
acad2e01e5 w (#394)
Co-authored-by: modav <modav@microsoft.com>
2020-05-26 00:28:09 +03:00
Ian Hunter
94dd082199 Fix link to Linux Guide (#494) 2020-05-14 13:52:13 +03:00
459 changed files with 13977 additions and 2263 deletions

55
.github/workflows/mo.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: MO
on:
push:
paths:
- 'model-optimizer/**'
pull_request:
paths:
- 'model-optimizer/**'
jobs:
Pylint-UT:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
with:
python-version: 3.6
- name: Cache pip
uses: actions/cache@v1
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('model-optimizer/requirements*.txt') }}
restore-keys: |
${{ runner.os }}-pip-
${{ runner.os }}-
# tensorflow 1.15 causes modules import
# errors, most likely due to https://github.com/PyCQA/pylint/issues/2603
# for tensorflow.core.framework and tensorflow.contrib
- name: Install dependencies
run: |
python -m pip install --upgrade pip setuptools
# For Pylint
pip install tensorflow==1.14.0 tensorboard==1.14.0 tensorflow-estimator==1.14.0
# For UT
pip install unittest-xml-reporting==3.0.2
# MO requirements
pip install -r requirements.txt
pip install -r requirements_dev.txt
working-directory: model-optimizer
- name: Pylint
run: pylint -d C,R,W mo/ mo.py extensions/
working-directory: model-optimizer
- name: UT
run: |
export PYTHONPATH=$PYTHONPATH:`pwd`
export MO_ROOT=`pwd`
env
mkdir ../mo-ut-logs
python3 -m xmlrunner discover -p *_test.py --output=../mo-ut-logs
working-directory: model-optimizer

View File

@@ -78,8 +78,7 @@ function(build_ngraph)
if (NOT ANDROID)
ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE TRUE)
# ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE)
set(NGRAPH_ONNX_IMPORT_ENABLE TRUE CACHE BOOL "" FORCE)
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE)
else()
ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE)
ngraph_set(NGRAPH_TEST_UTIL_ENABLE FALSE)
@@ -118,4 +117,49 @@ build_ngraph()
add_subdirectory(inference-engine)
add_subdirectory(docs)
# cpack
# install setupvars
ie_cpack_add_component(setupvars REQUIRED)
if(UNIX)
install(PROGRAMS scripts/setupvars/setupvars.sh
DESTINATION bin
COMPONENT setupvars)
elseif(WIN32)
install(PROGRAMS scripts/setupvars/setupvars.bat
DESTINATION bin
COMPONENT setupvars)
endif()
# install install_dependencies
if(UNIX)
ie_cpack_add_component(install_dependencies REQUIRED)
install(DIRECTORY scripts/install_dependencies/
DESTINATION install_dependencies
COMPONENT install_dependencies)
endif()
# install files for demo
ie_cpack_add_component(demo_scripts REQUIRED DEPENDS core)
if(UNIX)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.bat EXCLUDE)
elseif(WIN32)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.sh EXCLUDE)
endif()
ie_cpack(${IE_CPACK_COMPONENTS_ALL})

66
CODEOWNERS Normal file
View File

@@ -0,0 +1,66 @@
# See help here: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
* @openvinotoolkit/openvino-maintainers
CODEOWNERS @openvinotoolkit/openvino-admins @openvinotoolkit/openvino-maintainers
# CI:
Jenkinsfile @openvinotoolkit/openvino-admins
azure-pipelines.yml @openvinotoolkit/openvino-admins
/.github/ @openvinotoolkit/openvino-admins
# QA Tests:
/tests/ @openvinotoolkit/openvino-tests-maintainers
# IE Core:
/inference-engine/ @openvinotoolkit/openvino-ie-maintainers
/inference-engine/src/transformations/ @GlebKazantaev @ichuraev
/inference-engine/src/legacy_api/ @openvinotoolkit/openvino-ngraph-maintainers
/inference-engine/src/readers/ @openvinotoolkit/openvino-ngraph-maintainers
# IE CPU:
/inference-engine/src/mkldnn_plugin/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/src/low_precision_transformations/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/thirdparty/mkl-dnn/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
# IE GPU:
/inference-engine/src/cldnn_engine/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/thirdparty/clDNN/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
# IE VPU:
/inference-engine/src/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/include/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/thirdparty/movidius/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/tests_deprecated/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/functional/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/behavior/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/functional/plugin/myriad/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/unit/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tools/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/scripts/run_tests_myriad_multistick.sh @openvinotoolkit/openvino-ie-vpu-maintainers
# IE GNA:
/inference-engine/src/gna_plugin/ @openvinotoolkit/openvino-ie-gna-maintainers
/inference-engine/include/gna/ @openvinotoolkit/openvino-ie-gna-maintainers
# IE MULTI:
/inference-engine/src/multi_device/ @openvinotoolkit/openvino-ie-multi-maintainers
/inference-engine/include/multi-device/ @openvinotoolkit/openvino-ie-multi-maintainers
# IE Tests:
/inference-engine/tests/ @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/ @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/functional/inference_engine/ngraph_reader/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
/inference-engine/tests/functional/inference_engine/transformations/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
# MO:
/model-optimizer/ @openvinotoolkit/openvino-mo-maintainers
# nGraph:
/ngraph/ @openvinotoolkit/openvino-ngraph-maintainers
# Tools
/tools/ @openvinotoolkit/openvino-tools-maintainers

18
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,18 @@
# How to Contribute
We welcome community contributions to the OpenVINO™ repository.
If you have an idea how to improve the product, please share it
with us doing the following steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide relevant unit tests and one or more sample
* Submit a pull request at https://github.com/openvinotoolkit/openvino/pulls
## OpenVINO™ Coding Style Guide
We basically use the Google style (https://google.github.io/styleguide/cppguide.html) with some exceptions:
* 4 spaces instead of 2 spaces for indentations
* Limitation of 160 symbols for the line length
* Exceptions are allowed
* Using namespace are allowed in cpp and prohibited in headers
* Underscore symbol before member in classes/structures
* thisStyleForFunctions()
* theSameStyleForVariables

View File

@@ -1,5 +1,5 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2020.1-green.svg)](https://github.com/opencv/dldt/releases/tag/2020.1)
[![Stable release](https://img.shields.io/badge/version-2020.3-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2020.3.0)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
This toolkit allows developers to deploy pre-trained deep learning models
@@ -30,23 +30,13 @@ and release your contribution under these terms.
* [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
## How to Contribute
We welcome community contributions to the Deep Learning Deployment Toolkit
repository. If you have an idea how to improve the product, please share it
with us doing the following steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide relevant unit tests and one or more sample
* Submit a pull request at https://github.com/opencv/dldt/pulls
We will review your contribution and, if any additional fixes or modifications
are necessary, may give some feedback to guide you. Your pull request will be
merged into GitHub* repositories if accepted.
See [CONTRIBUTING](./CONTRIBUTING.md) for details. Thank you!
## Support
Please report questions, issues and suggestions using:
* The `openvino` [tag on StackOverflow]\*
* [GitHub* Issues](https://github.com/opencv/dldt/issues)
* [GitHub* Issues](https://github.com/openvinotoolkit/openvino/issues)
* [Forum](https://software.intel.com/en-us/forums/computer-vision)
---

345
azure-pipelines.yml Normal file
View File

@@ -0,0 +1,345 @@
jobs:
- job: Lin
# About 150% of total time
timeoutInMinutes: 75
pool:
#vmImage: 'ubuntu-18.04'
name: LIN_VMSS_VENV_F8S_WU2
variables:
BUILD_TYPE: Release
BIN_DIR: ../bin/intel64/$(BUILD_TYPE)
steps:
- script: |
whoami
uname -a
which python3
gcc --version
lsb_release
env
cat /proc/cpuinfo
cat /proc/meminfo
vmstat -s
df
displayName: 'System properties'
- script: |
sudo apt --assume-yes install libusb-1.0-0-dev
python3 -m pip install -r ./inference-engine/ie_bridges/python/requirements.txt
# For running Python API tests
python3 -m pip install -r ./inference-engine/ie_bridges/python/src/requirements-dev.txt
displayName: 'Install dependencies'
- script: |
wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
unzip ninja-linux.zip
sudo cp -v ninja /usr/local/bin/
displayName: 'Install Ninja'
- script: git submodule update --init --recursive --jobs 8
displayName: 'Clone submodules'
- script: |
mkdir dldt-build
cd dldt-build
displayName: 'Create build directory'
- task: CMake@1
inputs:
workingDirectory: dldt-build
# CMake must get Python 3.x version by default
cmakeArgs: .. -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON
- script: ninja
workingDirectory: dldt-build
displayName: 'Build Lin'
- script: ls -alR ../bin/
workingDirectory: dldt-build
displayName: 'List files'
- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*GPU*:*CPU*:constant.shared_data
workingDirectory: dldt-build
displayName: 'nGraph UT'
continueOnError: false
- script: $(BIN_DIR)/InferenceEngineUnitTests
workingDirectory: dldt-build
displayName: 'IE UT old'
continueOnError: false
- script: $(BIN_DIR)/ieUnitTests
workingDirectory: dldt-build
displayName: 'IE UT'
continueOnError: false
- script: $(BIN_DIR)/cpuUnitTests
workingDirectory: dldt-build
displayName: 'CPU UT'
continueOnError: false
- script: $(BIN_DIR)/gnaUnitTests
workingDirectory: dldt-build
displayName: 'GNA UT'
continueOnError: false
- script: $(BIN_DIR)/vpuUnitTests
workingDirectory: dldt-build
displayName: 'VPU UT'
continueOnError: false
- script: $(BIN_DIR)/ieFuncTests
workingDirectory: dldt-build
displayName: 'IE FuncTests'
continueOnError: false
- script: $(BIN_DIR)/cpuFuncTests
workingDirectory: dldt-build
displayName: 'CPU FuncTests'
continueOnError: false
- script: $(BIN_DIR)/MklDnnBehaviorTests
workingDirectory: dldt-build
displayName: 'MklDnnBehaviorTests'
continueOnError: false
enabled: false
- script: git clone https://github.com/openvinotoolkit/testdata.git
displayName: 'Clone testdata'
enabled: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
workingDirectory: dldt-build
displayName: 'MklDnnFunctionalTests'
continueOnError: false
enabled: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/InferenceEngineCAPITests
workingDirectory: dldt-build
displayName: 'IE CAPITests'
continueOnError: false
enabled: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
export LD_LIBRARY_PATH=`pwd`/$(BIN_DIR)/lib
export PYTHONPATH=`pwd`/$(BIN_DIR)/lib/python_api/python3.6
env
cd ../inference-engine/ie_bridges/python/tests
pytest
workingDirectory: dldt-build
displayName: 'Python API Tests'
continueOnError: false
enabled: false
- job: Mac
# About 150% of total time
timeoutInMinutes: 130
pool:
vmImage: 'macOS-10.15'
variables:
BUILD_TYPE: Release
BIN_DIR: ../bin/intel64/$(BUILD_TYPE)
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
- script: |
whoami
uname -a
which python3
gcc --version
xcrun --sdk macosx --show-sdk-version
env
sysctl -a
displayName: 'System properties'
- script: |
brew install cython
brew install automake
displayName: 'Install dependencies'
- script: brew install ninja
displayName: 'Install Ninja'
- script: git submodule update --init --recursive --jobs 8
displayName: 'Clone submodules'
- script: |
mkdir dldt-build
cd dldt-build
displayName: 'Create build directory'
- script: |
export PATH="/usr/local/opt/cython/bin:$PATH"
export CC=gcc
export CXX=g++
# Disable errors with Ninja
#export CXXFLAGS="-Wno-error=unused-command-line-argument"
#export CFLAGS="-Wno-error=unused-command-line-argument"
cmake .. -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON
workingDirectory: dldt-build
displayName: 'CMake'
- script: make -j3
workingDirectory: dldt-build
displayName: 'Build Mac'
- script: ls -alR ../bin/
workingDirectory: dldt-build
displayName: 'List files'
- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*GPU*:*CPU*:constant.shared_data
workingDirectory: dldt-build
displayName: 'nGraph UT'
continueOnError: false
- script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_filter=-*MKLDNNGraph*
workingDirectory: dldt-build
displayName: 'IE UT old'
continueOnError: false
- script: $(BIN_DIR)/ieUnitTests
workingDirectory: dldt-build
displayName: 'IE UT'
continueOnError: false
- script: $(BIN_DIR)/cpuUnitTests
workingDirectory: dldt-build
displayName: 'CPU UT'
continueOnError: false
- script: $(BIN_DIR)/vpuUnitTests
workingDirectory: dldt-build
displayName: 'VPU UT'
continueOnError: false
- script: $(BIN_DIR)/ieFuncTests
workingDirectory: dldt-build
displayName: 'IE FuncTests'
continueOnError: false
- script: $(BIN_DIR)/cpuFuncTests
workingDirectory: dldt-build
displayName: 'CPU FuncTests'
continueOnError: false
- script: $(BIN_DIR)/MklDnnBehaviorTests
workingDirectory: dldt-build
displayName: 'MklDnnBehaviorTests'
continueOnError: false
enabled: false
- script: git clone https://github.com/openvinotoolkit/testdata.git
displayName: 'Clone testdata'
enabled: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
workingDirectory: dldt-build
displayName: 'MklDnnFunctionalTests'
continueOnError: false
enabled: false
- script: |
export DATA_PATH=`pwd`/../testdata
export MODELS_PATH=`pwd`/../testdata
$(BIN_DIR)/InferenceEngineCAPITests
workingDirectory: dldt-build
displayName: 'IE CAPITests'
continueOnError: false
enabled: false
- job: Win
# About 150% of total time
timeoutInMinutes: 120
pool:
#vmImage: 'vs2017-win2016'
name: WIN_VMSS_VENV_F8S_WU2
variables:
BUILD_TYPE: Release
BUILD_DIR: D:\dldt-build
BIN_DIR: ..\bin\intel64
MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
steps:
- script: |
where python3
wmic computersystem get TotalPhysicalMemory
wmic cpu list
wmic logicaldisk get description,name
wmic VOLUME list
set
displayName: 'System properties'
- script: |
certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip ninja-win.zip
powershell -command "Expand-Archive -Force ninja-win.zip"
displayName: Install Ninja
- script: git submodule update --init --recursive --jobs 8
displayName: 'Clone submodules'
- script: |
rd /Q /S $(BUILD_DIR)
mkdir $(BUILD_DIR)\bin
rd /Q /S dldt-build
mkdir dldt-build
displayName: 'Create build directory'
- script: |
set PATH=$(Build.Repository.LocalPath)\ninja-win;%PATH%
call "$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(Build.Repository.LocalPath)
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
- script: |
set PATH=$(Build.Repository.LocalPath)\ninja-win;%PATH%
call "$(MSVS_VARS_PATH)" && ninja
workingDirectory: $(BUILD_DIR)
displayName: 'Build Win'
- script: dir ..\bin\ /s /b
workingDirectory: dldt-build
displayName: 'List files'
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*GPU*:*CPU*:constant.shared_data
workingDirectory: dldt-build
displayName: 'nGraph UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\InferenceEngineUnitTests
workingDirectory: dldt-build
displayName: 'IE UT old'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\ieUnitTests
workingDirectory: dldt-build
displayName: 'IE UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\cpuUnitTests
workingDirectory: dldt-build
displayName: 'CPU UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\gnaUnitTests
workingDirectory: dldt-build
displayName: 'GNA UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\vpuUnitTests
workingDirectory: dldt-build
displayName: 'VPU UT'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\ieFuncTests
workingDirectory: dldt-build
displayName: 'IE FuncTests'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\cpuFuncTests
workingDirectory: dldt-build
displayName: 'CPU FuncTests'
continueOnError: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
$(BIN_DIR)\MklDnnBehaviorTests
workingDirectory: dldt-build
displayName: 'MklDnnBehaviorTests'
continueOnError: false
enabled: false
- script: git clone https://github.com/openvinotoolkit/testdata.git
workingDirectory: $(BUILD_DIR)
displayName: 'Clone testdata'
enabled: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;$(Build.Repository.LocalPath)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
set DATA_PATH=$(BUILD_DIR)\testdata
set MODELS_PATH=$(BUILD_DIR)\testdata
$(BIN_DIR)\MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
workingDirectory: dldt-build
displayName: 'MklDnnFunctionalTests'
continueOnError: false
enabled: false
- script: |
set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;$(Build.Repository.LocalPath)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
set DATA_PATH=$(BUILD_DIR)\testdata
set MODELS_PATH=$(BUILD_DIR)\testdata
$(BIN_DIR)\InferenceEngineCAPITests
workingDirectory: dldt-build
displayName: 'IE CAPITests'
continueOnError: false
enabled: false

View File

@@ -28,7 +28,6 @@
- [Add Inference Engine to Your Project](#add-inference-engine-to-your-project)
- [(Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2](#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2)
- [For Linux, Raspbian Stretch* OS](#for-linux-raspbian-stretch-os)
- [For Windows](#for-windows-1)
- [Next Steps](#next-steps)
- [Additional Resources](#additional-resources)
@@ -60,12 +59,12 @@ The software was validated on:
- [CMake]\* 3.11 or higher
- GCC\* 4.8 or higher to build the Inference Engine
- Python 2.7 or higher for Inference Engine Python API wrapper
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441].
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352].
### Build Steps
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -78,7 +77,7 @@ The software was validated on:
```
3. By default, the build enables the Inference Engine GPU plugin to infer models
on your Intel® Processor Graphics. This requires you to
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352]
before running the build. If you don't want to use the GPU plugin, use the
`-DENABLE_CLDNN=OFF` CMake build option and skip the installation of the
Intel® Graphics Compute Runtime for OpenCL™ Driver.
@@ -172,10 +171,10 @@ Native compilation of the Inference Engine is the most straightforward solution.
sudo apt-get install -y git cmake libusb-1.0-0-dev
```
2. Go to the cloned `dldt` repository:
2. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
3. Initialize submodules:
@@ -262,15 +261,15 @@ with the following content:
5. Run Docker\* container with mounted source code folder from host:
```bash
docker run -it -v /absolute/path/to/dldt:/dldt ie_cross_armhf /bin/bash
docker run -it -v /absolute/path/to/openvino:/openvino ie_cross_armhf /bin/bash
```
6. While in the container:
1. Go to the cloned `dldt` repository:
1. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
2. Create a build folder:
@@ -291,8 +290,8 @@ with the following content:
```
7. Press **Ctrl+D** to exit from Docker. You can find the resulting binaries
in the `dldt/bin/armv7l/` directory and the OpenCV*
installation in the `dldt/inference-engine/temp`.
in the `openvino/bin/armv7l/` directory and the OpenCV*
installation in the `openvino/inference-engine/temp`.
>**NOTE**: Native applications that link to cross-compiled Inference Engine
library require an extra compilation flag `-march=armv7-a`.
@@ -381,8 +380,8 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
6. Before running the samples, add paths to the TBB and OpenCV binaries used for
the build to the `%PATH%` environment variable. By default, TBB binaries are
downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<dldt_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
downloaded by the CMake-based script to the `<openvino_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<openvino_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
folder.
### Additional Build Options
@@ -437,7 +436,7 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by openvino cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
@@ -461,7 +460,7 @@ The software was validated on:
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -545,7 +544,7 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
2. Clone submodules
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
@@ -610,7 +609,7 @@ before running the Inference Engine build:
For CMake projects, set the `InferenceEngine_DIR` environment variable:
```sh
export InferenceEngine_DIR=/path/to/dldt/build/
export InferenceEngine_DIR=/path/to/openvino/build/
```
Then you can find Inference Engine by `find_package`:
@@ -660,26 +659,12 @@ sudo ldconfig
rm 97-myriad-usbboot.rules
```
### For Windows
For Intel® Movidius™ Neural Compute Stick and Intel® Neural Compute Stick 2,
install the Movidius™ VSC driver:
1. Go to the `<DLDT_ROOT_DIR>/inference-engine/thirdparty/movidius/MovidiusDriver`
directory, where the `DLDT_ROOT_DIR` is the directory to which the DLDT
repository was cloned.
2. Right click on the `Movidius_VSC_Device.inf` file and choose **Install** from
the pop-up menu.
You have installed the driver for your Intel® Movidius™ Neural Compute Stick
or Intel® Neural Compute Stick 2.
## Next Steps
Congratulations, you have built the Inference Engine. To get started with the
OpenVINO™, proceed to the Get Started guides:
* [Get Started with Deep Learning Deployment Toolkit on Linux*](../get-started-linux.md)
* [Get Started with Deep Learning Deployment Toolkit on Linux*](get-started-linux.md)
## Notice
@@ -706,7 +691,7 @@ This target collects all dependencies, prepares the nGraph package and copies it
[Intel® Distribution of OpenVINO™]:https://software.intel.com/en-us/openvino-toolkit
[CMake]:https://cmake.org/download/
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]:https://github.com/intel/compute-runtime/releases/tag/19.41.14441
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352]:https://github.com/intel/compute-runtime/releases/tag/20.13.16352
[MKL-DNN repository]:https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_lnx_2019.0.5.20190502.tgz
[MKL-DNN repository for Windows]:(https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_win_2019.0.5.20190502.zip)
[OpenBLAS]:https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download

View File

@@ -36,9 +36,13 @@ function(ie_cpack_set_library_dir)
endif()
if(WIN32)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${CMAKE_BUILD_TYPE}/${ARCH} PARENT_SCOPE)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
else()
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
endif()
endfunction()
@@ -59,8 +63,10 @@ macro(ie_cpack)
set(CPACK_GENERATOR "TGZ")
if(WIN32)
set(CPACK_PACKAGE_NAME inference-engine_${CMAKE_BUILD_TYPE})
string(REPLACE "\\" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
else()
set(CPACK_PACKAGE_NAME inference-engine)
string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
endif()
set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF)
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
@@ -194,7 +200,7 @@ else()
endif()
if(APPLE)
set(CMAKE_MACOSX_RPATH 1)
set(CMAKE_MACOSX_RPATH ON)
endif(APPLE)
# Use solution folders

View File

@@ -138,6 +138,14 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
endfunction(RESOLVE_DEPENDENCY)
function (resolve_model_dependency network archive network_model_path)
RESOLVE_DEPENDENCY(${network_model_path}
ARCHIVE "models_archives/${archive}"
TARGET_PATH "${MODELS_PATH}/${network}")
string (REPLACE ${MODELS_PATH} "" relative_path ${${network_model_path}})
set(${network_model_path} ".${relative_path}" PARENT_SCOPE)
endfunction()
function(reset_deps_cache)
#
# Reset the dependencies cache if it was set by dependency solver

View File

@@ -154,7 +154,7 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
if(DEFINED ENV{IE_PATH_TO_DEPS})
set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
else()
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.2/inference_engine/${RELATIVE_URL}")
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.3/inference_engine/${RELATIVE_URL}")
endif()
#no message on recursive calls

View File

@@ -223,12 +223,13 @@ if(WIN32)
# 161 unrecognized pragma
# 177 variable was declared but never referenced
# 556 not matched type of assigned function pointer
# 1744: field of class type without a DLL interface used in a class with a DLL interface
# 2586 decorated name length exceeded, name was truncated
# 2651: attribute does not apply to any entity
# 3180 unrecognized OpenMP pragma
# 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
# 15335 was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override
ie_add_compiler_flags(/Qdiag-disable:161,177,556,2586,2651,3180,11075,15335)
ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,2586,2651,3180,11075,15335)
endif()
# Debug information flags

47
docs/CMakeLists.txt Normal file
View File

@@ -0,0 +1,47 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(NOT ENABLE_DOCKER)
add_subdirectory(examples)
# Detect nGraph
find_package(ngraph QUIET)
if(NOT ngraph_FOUND)
set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
endif()
# Detect InferenceEngine
find_package(InferenceEngine QUIET)
if(NOT InferenceEngine_FOUND)
set(InferenceEngine_DIR ${CMAKE_BINARY_DIR})
endif()
add_subdirectory(template_extension)
endif()
# OpenVINO docs
set(OPENVINO_DOCS_PATH "" CACHE PATH "Path to openvino-documentation local repository")
set(args "")
if(OPENVINO_DOCS_PATH)
set(args "${args} ovinodoc_path:${OPENVINO_DOCS_PATH}")
endif()
file(GLOB_RECURSE docs_files "${OpenVINO_MAIN_SOURCE_DIR}/docs")
file(GLOB_RECURSE include_files "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/include")
file(GLOB_RECURSE ovino_files "${OPENVINO_DOCS_PATH}")
add_custom_target(ie_docs
COMMAND ./build_docs.sh ${args}
WORKING_DIRECTORY "${OpenVINO_MAIN_SOURCE_DIR}/docs/build_documentation"
COMMENT "Generating OpenVINO documentation"
SOURCES ${docs_files} ${include_files} ${ovino_files}
VERBATIM)
find_program(browser NAMES xdg-open)
if(browser)
add_custom_target(ie_docs_open
COMMAND ${browser} "${OpenVINO_MAIN_SOURCE_DIR}/doc/html/index.html"
DEPENDS ie_docs
COMMENT "Open OpenVINO documentation"
VERBATIM)
endif()

View File

@@ -0,0 +1,14 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Static library that compiles the documentation code snippets, ensuring the
# examples shown in the docs stay buildable.
set(TARGET_NAME ie_docs_examples)
file(GLOB SOURCES *.cpp)
# FIX: use ${TARGET_NAME} consistently instead of repeating the literal name
add_library(${TARGET_NAME} STATIC ${SOURCES})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api)
#add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
#add_clang_format_target(clang_format_${TARGET_NAME} FOR_TARGETS ${TARGET_NAME})

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <threading/ie_itask_executor.hpp>
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include <memory>
using namespace InferenceEngine;
// Example synchronous infer request for a custom accelerator device.
// Declares the five pipeline stages that AcceleratorAsyncInferRequest below
// schedules on dedicated executors; stage bodies are omitted in this snippet.
class AcceleratorSyncRequest : public InferRequestInternal {
public:
using Ptr = std::shared_ptr<AcceleratorSyncRequest>;
// Pipeline stages, run in this order by the asynchronous wrapper
void Preprocess();
void WriteToDevice();
void RunOnDevice();
void ReadFromDevice();
void PostProcess();
};
// ! [async_infer_request:define_pipeline]
// Inherits from AsyncInferRequestThreadSafeDefault
// Asynchronous request that runs the five stages of AcceleratorSyncRequest
// on five dedicated executors via the base class's `_pipeline` mechanism.
class AcceleratorAsyncInferRequest : public AsyncInferRequestThreadSafeDefault {
// Store the pointer to the synchronous request and five executors
AcceleratorAsyncInferRequest(const AcceleratorSyncRequest::Ptr& syncRequest,
const ITaskExecutor::Ptr& preprocessExecutor,
const ITaskExecutor::Ptr& writeToDeviceExecutor,
const ITaskExecutor::Ptr& runOnDeviceExecutor,
const ITaskExecutor::Ptr& readFromDeviceExecutor,
const ITaskExecutor::Ptr& postProcessExecutor) :
// NOTE(review): the base-class task/callback executors are passed as nullptr —
// presumably unused because `_pipeline` is populated explicitly below;
// confirm against AsyncInferRequestThreadSafeDefault's contract.
AsyncInferRequestThreadSafeDefault(syncRequest, nullptr, nullptr),
_accSyncRequest{syncRequest},
_preprocessExecutor{preprocessExecutor},
_writeToDeviceExecutor{writeToDeviceExecutor},
_runOnDeviceExecutor{runOnDeviceExecutor},
_readFromDeviceExecutor{readFromDeviceExecutor},
_postProcessExecutor{postProcessExecutor}
{
// Five pipeline stages of synchronous infer request are run by different executors
_pipeline = {
{ _preprocessExecutor , [this] {
_accSyncRequest->Preprocess();
}},
{ _writeToDeviceExecutor , [this] {
_accSyncRequest->WriteToDevice();
}},
{ _runOnDeviceExecutor , [this] {
_accSyncRequest->RunOnDevice();
}},
{ _readFromDeviceExecutor , [this] {
_accSyncRequest->ReadFromDevice();
}},
{ _postProcessExecutor , [this] {
_accSyncRequest->PostProcess();
}},
};
}
// As all stages use _accSyncRequest member we should wait for all stages tasks before the destructor destroy this member.
~AcceleratorAsyncInferRequest() {
StopAndWait();
}
AcceleratorSyncRequest::Ptr _accSyncRequest;
ITaskExecutor::Ptr _preprocessExecutor, _writeToDeviceExecutor, _runOnDeviceExecutor, _readFromDeviceExecutor, _postProcessExecutor;
};
// ! [async_infer_request:define_pipeline]

View File

@@ -0,0 +1,53 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <threading/ie_cpu_streams_executor.hpp>
#include <memory>
#include <future>
#include <iostream>
// Demonstrates running an InferenceEngine::Task on an ITaskExecutor and
// waiting for its completion (including exception propagation) via
// std::promise / std::future.
void example1() {
// ! [itask_executor:define_pipeline]
// std::promise is move only object so to satisfy copy callable constraint we use std::shared_ptr
auto promise = std::make_shared<std::promise<void>>();
// When the promise is created we can get std::future to wait the result
auto future = promise->get_future();
// Rather simple task
InferenceEngine::Task task = [] {std::cout << "Some Output" << std::endl; };
// Create an executor
InferenceEngine::ITaskExecutor::Ptr taskExecutor = std::make_shared<InferenceEngine::CPUStreamsExecutor>();
if (taskExecutor == nullptr) {
// ProcessError(e);
return;
}
// We capture the task and the promise. When the task is executed in the task executor context
// we manually call std::promise::set_value() method
taskExecutor->run([task, promise] {
std::exception_ptr currentException;
try {
task();
} catch(...) {
// If there is some exceptions store the pointer to current exception
currentException = std::current_exception();
}
if (nullptr == currentException) {
promise->set_value(); // <-- If there is no problems just call std::promise::set_value()
} else {
promise->set_exception(currentException); // <-- If there is an exception forward it to std::future object
}
});
// To wait the task completion we call std::future::wait method
future.wait(); // The current thread will be blocked here and wait when std::promise::set_value()
// or std::promise::set_exception() method will be called.
// If the future store the exception it will be rethrown in std::future::get method
try {
future.get();
} catch(std::exception& /*e*/) {
// ProcessError(e);
}
// ! [itask_executor:define_pipeline]
}

View File

@@ -0,0 +1,18 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:extension]
# Builds the template extension as a shared library loadable by the
# Inference Engine; links against the nGraph and Inference Engine packages.
set(TARGET_NAME "template_extension")
find_package(ngraph REQUIRED)
find_package(InferenceEngine REQUIRED)
file(GLOB_RECURSE SRC *.cpp)
add_library(${TARGET_NAME} SHARED ${SRC})
# IMPLEMENT_INFERENCE_EXTENSION_API marks the extension entry points for export
target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_API)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES}
${NGRAPH_LIBRARIES})
# [cmake:extension]

View File

@@ -0,0 +1,124 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <details/ie_exception.hpp>
#include <ie_layouts.h>
using namespace TemplateExtension;
//! [cpu_implementation:ctor]
/**
 * @brief Validates the nGraph node and caches the data needed for execution.
 *
 * Accepts only a TemplateExtension::Operation with a single static 4D FP32
 * input and output. On success stores the `add` attribute and the tensor
 * shapes; on failure records the message in `error`, which is later reported
 * from getSupportedConfigurations().
 */
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
    try {
        auto castedNode = std::dynamic_pointer_cast<Operation>(node);
        if (!castedNode)
            THROW_IE_EXCEPTION << "Cannot create implementation for unknown operation!";
        if (castedNode->inputs().size() != 1 || castedNode->outputs().size() != 1)
            THROW_IE_EXCEPTION << "Cannot create implementation for operation with incorrect number of inputs or outputs!";
        if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic())
            THROW_IE_EXCEPTION << "Cannot create implementation for op with dynamic shapes!";
        if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4)
            THROW_IE_EXCEPTION << "Operation supports only 4d tensors for input and output.";
        if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
            THROW_IE_EXCEPTION << "Operation supports only FP32 tensors.";
        add = castedNode->getAddAttr();
        // BUGFIX: cache the tensor shapes. getSupportedConfigurations() builds
        // its LayerConfigs from inShape/outShape, which were previously left
        // default-constructed (empty).
        inShape = castedNode->get_input_shape(0);
        outShape = castedNode->get_output_shape(0);
    } catch (InferenceEngine::details::InferenceEngineException& ex) {
        error = ex.what();
    }
}
//! [cpu_implementation:ctor]
//! [cpu_implementation:getSupportedConfigurations]
/**
 * @brief Reports the tensor layouts this kernel supports.
 *
 * Fills @p conf with two FP32 configurations built from inShape/outShape:
 * a planar (0-1-2-3 ordered) one and a blocked nChw8c one. If the
 * constructor recorded an error, copies it into @p resp and returns
 * GENERAL_ERROR instead.
 */
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept {
// Builds one LayerConfig; `planar` selects plain layout vs blocked nChw8c
auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
InferenceEngine::DataConfig inData;
InferenceEngine::DataConfig outData;
InferenceEngine::SizeVector order = {0, 1, 2, 3};
// Allow any offset before data
size_t offset((std::numeric_limits<size_t>::max)());
if (planar) {
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
config.outConfs.push_back(outData);
} else {
// Add blocked (nChw8c) format
// Round-up integer division; guards against b == 0
auto div_up = [](const int a, const int b) -> int {
if (!b)
return 0;
return (a + b - 1) / b;
};
// Channel dimension is split into blocks of 8: dims[1] -> ceil(C/8), extra dim 8
order.push_back(1);
InferenceEngine::SizeVector inBlkDims = inShape;
inBlkDims[1] = div_up(inBlkDims[1], 8);
inBlkDims.push_back(8);
InferenceEngine::SizeVector outBlkDims = outShape;
outBlkDims[1] = div_up(outBlkDims[1], 8);
outBlkDims.push_back(8);
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
config.outConfs.push_back(outData);
}
return config;
};
// Constructor-time validation failure is reported here (this method is noexcept)
if (!error.empty()) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
// Add planar format
conf.emplace_back(createConfig(inShape, outShape, true));
// Add blocked format nChw8c
conf.emplace_back(createConfig(inShape, outShape, false));
return InferenceEngine::OK;
}
//! [cpu_implementation:getSupportedConfigurations]
//! [cpu_implementation:init]
/**
 * @brief Validates the layer configuration chosen by the plugin.
 *
 * Accepts a config with exactly one 4D FP32 input and one 4D FP32 output.
 * On failure, the exception message is copied into @p resp (when provided)
 * and GENERAL_ERROR is returned.
 */
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
    try {
        if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
            THROW_IE_EXCEPTION << "Operation cannot be initialized with incorrect number of inputs/outputs!";
        }
        if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
            THROW_IE_EXCEPTION << "Operation can be initialized only with 4d input/output tensors!";
        }
        if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
            config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
            THROW_IE_EXCEPTION << "Operation supports only FP32 precisions!";
        }
    } catch (InferenceEngine::details::InferenceEngineException& ex) {
        if (resp) {
            // BUGFIX: report the caught exception's text. Previously the
            // unrelated `error` member (set only by the constructor, typically
            // empty here) was copied, losing the real validation message.
            strncpy(resp->msg, ex.what(), sizeof(resp->msg) - 1);
            resp->msg[sizeof(resp->msg)-1] = 0;
        }
        return InferenceEngine::GENERAL_ERROR;
    }
    return InferenceEngine::OK;
}
//! [cpu_implementation:init]
//! [cpu_implementation:execute]
/**
 * @brief Runs the kernel: writes `input[i] + add` into the output blob.
 * @return InferenceEngine::OK (the element-wise loop cannot fail).
 */
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
                                                      std::vector<InferenceEngine::Blob::Ptr> &outputs,
                                                      InferenceEngine::ResponseDesc *resp) noexcept {
    const auto& in = inputs[0];
    const auto& out = outputs[0];
    // Skip any padding placed before the actual tensor data
    const float* src = in->cbuffer().as<const float*>() + in->getTensorDesc().getBlockingDesc().getOffsetPadding();
    float* dst = out->buffer().as<float*>() + out->getTensorDesc().getBlockingDesc().getOffsetPadding();
    const size_t count = in->size();
    for (size_t idx = 0; idx < count; ++idx) {
        dst[idx] = src[idx] + add;
    }
    return InferenceEngine::OK;
}
//! [cpu_implementation:execute]

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>
namespace TemplateExtension {
//! [cpu_implementation:header]
// CPU kernel for TemplateExtension::Operation, exposed to the Inference
// Engine through the ILayerExecImpl extension interface.
class OpImplementation : public InferenceEngine::ILayerExecImpl {
public:
// Validates the node and caches the data used by the callbacks below
explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept override;
private:
int64_t add;            // value added to every input element (Operation's attribute)
ngraph::Shape inShape;  // input shape used to build layer configurations
ngraph::Shape outShape; // output shape used to build layer configurations
std::string error;      // non-empty when the constructor rejected the node
};
//! [cpu_implementation:header]
} // namespace TemplateExtension

View File

@@ -0,0 +1,73 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "extension.hpp"
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <ngraph/factory.hpp>
#include <ngraph/opsets/opset.hpp>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
using namespace TemplateExtension;
//! [extension:GetVersion]
// Points `versionInfo` at a static, immutable description of this extension;
// the static object outlives all callers, so the pointer stays valid.
void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept {
static InferenceEngine::Version ExtensionDescription = {
{1, 0}, // extension API version
"1.0",
"template_ext" // extension description message
};
versionInfo = &ExtensionDescription;
}
//! [extension:GetVersion]
//! [extension:getOpSets]
std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
std::map<std::string, ngraph::OpSet> opsets;
ngraph::OpSet opset;
opset.insert<Operation>();
opsets["custom_opset"] = opset;
return opsets;
}
//! [extension:getOpSets]
//! [extension:getImplTypes]
// Lists the device types this extension can execute `node` on: only the
// custom Operation is supported, and only on "CPU".
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) {
    const bool isCustomOp = (std::dynamic_pointer_cast<Operation>(node) != nullptr);
    if (!isCustomOp) {
        return {};
    }
    return {"CPU"};
}
//! [extension:getImplTypes]
//! [extension:getImplementation]
// Creates the CPU kernel for the custom Operation; every other node/implType
// combination is not handled by this extension and yields nullptr.
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) {
    const bool matches = (implType == "CPU") && (std::dynamic_pointer_cast<Operation>(node) != nullptr);
    if (matches) {
        return std::make_shared<OpImplementation>(node);
    }
    return nullptr;
}
//! [extension:getImplementation]
//! [extension:CreateExtension]
// Exported function
// Exported factory function: creates the extension instance for the
// Inference Engine. On failure a null-terminated error message is written
// into resp (when provided) and GENERAL_ERROR is returned.
INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
                                                                                      InferenceEngine::ResponseDesc *resp) noexcept {
    try {
        ext = new Extension();
        return OK;
    } catch (std::exception &ex) {
        if (resp) {
            std::string err = ((std::string) "Couldn't create extension: ") + ex.what();
            // BUGFIX: std::string::copy does NOT null-terminate the destination;
            // terminate explicitly so resp->msg is always a valid C string.
            size_t copied = err.copy(resp->msg, sizeof(resp->msg) - 1);
            resp->msg[copied] = '\0';
        }
        return InferenceEngine::GENERAL_ERROR;
    }
}
//! [extension:CreateExtension]

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <ie_api.h>
#include <ngraph/ngraph.hpp>
#include <memory>
#include <vector>
#include <string>
#include <map>
//! [extension:header]
namespace TemplateExtension {
// Extension entry point: registers the "custom_opset" operation set and
// provides CPU implementations for the custom Operation (see extension.cpp).
class Extension : public InferenceEngine::IExtension {
public:
Extension() = default;
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
void Unload() noexcept override {}
// The extension is heap-allocated by CreateExtension(), so Release() deletes it
void Release() noexcept override { delete this; }
std::map<std::string, ngraph::OpSet> getOpSets() override;
std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override;
};
} // namespace TemplateExtension
//! [extension:header]

View File

@@ -0,0 +1,38 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op.hpp"
using namespace TemplateExtension;
constexpr ngraph::NodeTypeInfo Operation::type_info;
//! [op:ctor]
// Constructs the node from its single input and the `add` attribute, then
// triggers shape/type inference (calls validate_and_infer_types).
Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) {
constructor_validate_and_infer_types();
}
//! [op:ctor]
//! [op:validate]
void Operation::validate_and_infer_types() {
// Operation doesn't change shapes and element type: output 0 mirrors input 0
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}
//! [op:validate]
//! [op:copy]
// Clones the node for graph transformations; requires exactly one new input
// and preserves the `add` attribute.
std::shared_ptr<ngraph::Node> Operation::copy_with_new_args(const ngraph::NodeVector &new_args) const {
if (new_args.size() != 1) {
throw ngraph::ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Operation>(new_args.at(0), add);
}
//! [op:copy]
//! [op:visit_attributes]
// Serializes/deserializes the node's single attribute `add`.
bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) {
visitor.on_attribute("add", add);
return true;
}
//! [op:visit_attributes]

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ngraph/ngraph.hpp>
//! [op:header]
namespace TemplateExtension {
class Operation : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info{"Template", 0};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
Operation() = default;
Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add);
void validate_and_infer_types() override;
std::shared_ptr<ngraph::Node> copy_with_new_args(const ngraph::NodeVector& new_args) const override;
bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
int64_t getAddAttr() { return add; }
private:
int64_t add;
};
//! [op:header]
} // namespace TemplateExtension

View File

@@ -0,0 +1,31 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:main]
# Standalone build of the template plugin on top of an existing
# InferenceEngineDeveloperPackage produced by the main OpenVINO build.
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
endif()
project(InferenceEngineTemplatePlugin)
# Remember the plugin root so subdirectories can reference its include/ folder
set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DIR})
find_package(InferenceEngineDeveloperPackage REQUIRED)
add_subdirectory(src)
if(ENABLE_TESTS)
include(CTest)
enable_testing()
endif()
# [cmake:main]
# install
# ATTENTION: uncomment to install component
# ie_cpack(template)

View File

@@ -0,0 +1,18 @@
# template-plugin
Template plugin for the Inference Engine which demonstrates the basics of how an Inference Engine plugin can be built and implemented on top of the Inference Engine Developer Package and Plugin API.
## How to build
```bash
$ cd $DLDT_HOME
$ mkdir $DLDT_HOME/build
$ cd $DLDT_HOME/build
$ cmake -DENABLE_TESTS=ON -DENABLE_BEH_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON ..
$ make -j8
$ cd $TEMPLATE_PLUGIN_HOME
$ mkdir $TEMPLATE_PLUGIN_HOME/build
$ cd $TEMPLATE_PLUGIN_HOME/build
$ cmake -DInferenceEngineDeveloperPackage_DIR=$DLDT_HOME/build ..
$ make -j8
```

View File

@@ -0,0 +1,59 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
 * @brief A header that defines advanced configuration properties for the Template plugin.
 * These properties should be used in the SetConfig() and LoadNetwork() methods of plugins.
 *
 * @file template_config.hpp
 */
#pragma once
#include <string>
#include "ie_plugin_config.hpp"
namespace InferenceEngine {
namespace TemplateMetrics {
/**
 * @def TEMPLATE_METRIC_VALUE(name)
 * @brief Shortcut for defining Template metric values
 */
#define TEMPLATE_METRIC_VALUE(name) InferenceEngine::TemplateMetrics::name
#define DECLARE_TEMPLATE_METRIC_VALUE(name) static constexpr auto name = #name
// ! [public_header:metrics]
/**
 * @brief Defines whether current Template device instance supports hardware blocks for fast convolution computations.
 */
DECLARE_TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION);
// ! [public_header:metrics]
} // namespace TemplateMetrics
namespace TemplateConfigParams {
/**
 * @def TEMPLATE_CONFIG_KEY(name)
 * @brief Shortcut for defining Template device configuration keys
 */
#define TEMPLATE_CONFIG_KEY(name) InferenceEngine::TemplateConfigParams::_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_KEY(name) DECLARE_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(TEMPLATE_##name)
/**
 * @brief Placeholder configuration key for the Template plugin.
 * NOTE(review): the previous description was copied from another plugin's
 * config header (it described TEMPLATE-specific layout transformations with
 * "YES"/"NO" values that this sample does not implement); document the real
 * semantics once the key is given a meaning.
 */
DECLARE_TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY);
} // namespace TemplateConfigParams
} // namespace InferenceEngine

View File

@@ -0,0 +1,43 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:plugin]
set(TARGET_NAME "templatePlugin")
if(ENABLE_LTO)
ie_enable_lto()
endif()
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
# adds a shared library with plugin
ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "TEMPLATE"
SOURCES ${SOURCES} ${HEADERS}
SKIP_INSTALL # ATTENTION: uncomment to install component
VERSION_DEFINES_FOR template_plugin.cpp)
# Public headers live under <plugin root>/include (IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR
# is set by the top-level CMakeLists of the plugin)
target_include_directories(${TARGET_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"
"${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include")
target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine IE::inference_engine_transformations ${NGRAPH_LIBRARIES} ${INTEL_ITT_LIBS})
# ATTENTION: uncomment to register a plugin in the plugins.xml file
# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
#                     POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]
# ATTENTION: uncomment to install component
# install
# set(component_name template)
# ie_cpack_add_component(${component_name} REQUIRED)
# install(TARGETS ${TARGET_NAME}
#         RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH}
#         ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH}
#         LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
#         COMPONENT ${component_name})

View File

@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <ie_profiling.hpp>
#include "template_async_infer_request.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [async_infer_request:ctor]
// Builds the three-stage asynchronous pipeline:
//   1) preprocessing + starting the inference — on the CPU task executor
//   2) waiting for the device result          — on the dedicated wait executor
//   3) postprocessing                         — on the CPU task executor again
TemplateAsyncInferRequest::TemplateAsyncInferRequest(
const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest), _waitExecutor(waitExecutor) {
_pipeline = {
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(PreprocessingAndStartPipeline)
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(WaitPipeline)
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(Postprocessing)
_inferRequest->inferPostprocess();
}}
};
}
// ! [async_infer_request:ctor]
// ! [async_infer_request:dtor]
// Stops the pipeline and waits for in-flight stage tasks: the pipeline
// lambdas capture `this` and dereference `_inferRequest`, so they must
// finish before the members are destroyed.
TemplateAsyncInferRequest::~TemplateAsyncInferRequest() {
InferenceEngine::AsyncInferRequestThreadSafeDefault::StopAndWait();
}
// ! [async_infer_request:dtor]

View File

@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include "template_infer_request.hpp"
namespace TemplatePlugin {
// ! [async_infer_request:header]
// Asynchronous wrapper around TemplateInferRequest; the .cpp builds a
// three-stage pipeline (preprocess+start / wait / postprocess) over the
// provided executors.
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~TemplateAsyncInferRequest() override;
private:
TemplateInferRequest::Ptr _inferRequest;           // the wrapped synchronous request
InferenceEngine::ITaskExecutor::Ptr _waitExecutor; // runs the device-wait stage
};
// ! [async_infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,45 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <vector>
#include <algorithm>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <file_utils.h>
#include <cpp_interfaces/exception2status.hpp>
#include "template_config.hpp"
using namespace TemplatePlugin;
// Default configuration: keeps the in-class field defaults declared in
// template_config.hpp (deviceId = 0, perfCount = true).
Configuration::Configuration() { }
// Starts from `defaultCfg` and overrides fields with the supplied key/value
// pairs. Unknown keys either throw (throwOnUnsupported) or are ignored.
Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) {
*this = defaultCfg;
for (auto&& c : config) {
const auto& key = c.first;
const auto& value = c.second;
if (CONFIG_KEY(DEVICE_ID) == key) {
// NOTE(review): std::stoi throws std::invalid_argument for a non-numeric
// value, which escapes as a non-IE exception — consider validating first.
deviceId = std::stoi(value);
} else if (CONFIG_KEY(PERF_COUNT) == key) {
// Anything other than the literal "YES" disables performance counters
perfCount = (CONFIG_VALUE(YES) == value);
} else if (throwOnUnsupported) {
THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << key;
}
}
}
// Returns the current value of a supported configuration key as a Parameter;
// DEVICE_ID is reported as a string, PERF_COUNT as a bool. Unsupported keys
// raise a NOT_FOUND exception.
InferenceEngine::Parameter Configuration::Get(const std::string& name) const {
    if (name == CONFIG_KEY(PERF_COUNT)) {
        return {perfCount};
    }
    if (name == CONFIG_KEY(DEVICE_ID)) {
        return {std::to_string(deviceId)};
    }
    THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << name;
}

View File

@@ -0,0 +1,40 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <string>
#include <map>
#include <unordered_map>
#include <ie_parameter.hpp>
namespace TemplatePlugin {
// Generic name -> value map keyed by blob/tensor name
template<typename T>
using IOMap = std::unordered_map<std::string, T>;
// ! [configuration:header]
using ConfigMap = std::map<std::string, std::string>;
// Plugin configuration parsed from the string key/value map passed through
// SetConfig()/LoadNetwork(); see template_config.cpp for the parsing rules.
struct Configuration {
Configuration();
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true);
InferenceEngine::Parameter Get(const std::string& name) const;
// Plugin configuration parameters
int deviceId = 0;       // CONFIG_KEY(DEVICE_ID)
bool perfCount = true;  // CONFIG_KEY(PERF_COUNT)
};
// ! [configuration:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,167 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <atomic>
#include <set>
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include <ie_metric_helpers.hpp>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <network_serializer.h>
#include <threading/ie_executor_manager.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ngraph/specialize_function.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <transformations/convert_divide.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
/**
 * @brief Compiles an ngraph-backed network for the Template device.
 *
 * Only IR v10 (ngraph::Function-based) networks are supported. IE exceptions
 * from the compilation pipeline are rethrown unchanged; any other exception
 * is wrapped into an IE exception.
 */
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
                                                     const Configuration& cfg):
    _name(network.getName()),
    _cfg(cfg),
    _waitExecutor(InferenceEngine::ExecutorManager::getInstance()->getExecutor("Template")) {
    // TODO: if your plugin supports device ID (more that single instance of device can be on host machine)
    // you should select proper device based on KEY_DEVICE_ID or automatic behavior
    // In this case, _waitExecutor should also be created per device.
    try {
        if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
            CompileGraph(ngraphFunction);
        } else {
            THROW_IE_EXCEPTION << "TEMPLATE plugin can compile only IR v10 networks";
        }
    }
    catch (const InferenceEngineException &) {
        // BUGFIX: rethrow the original exception object. The previous
        // `throw e;` copy-constructed a new exception, which can slice a
        // derived exception type down to InferenceEngineException.
        throw;
    }
    catch (const std::exception & e) {
        THROW_IE_EXCEPTION << "Standard exception from compilation library: " << e.what();
    }
    catch (...) {
        THROW_IE_EXCEPTION << "Generic exception is thrown";
    }
}
// ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream]
// Constructs an executable network from a previously exported model stream
// (counterpart of ExportImpl); deserialization is intentionally left empty.
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
const Configuration& cfg) :
_cfg(cfg) {
// TODO: since Import network is not a mandatory functionality, this ctor can just be removed
}
// ! [executable_network:ctor_import_stream]
// ! [executable_network:compile_graph]
/**
 * @brief Compiles the ngraph function for the Template device.
 *
 * Steps: deep-copy the function (so the caller's network stays untouched),
 * run common + device-specific transformations on the copy, then walk the
 * transformed ops to build the device representation (left as TODO).
 */
void TemplatePlugin::ExecutableNetwork::CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction) {
// TODO: perform actual graph compilation taking `_cfg` into account
// 1.Copy ngraph::Function first to apply some transformations later in
// ExecutableNetwork::CompileGraph, which modify original ngraph::Function
const bool shareConsts = false, constFolding = false;
std::vector<::ngraph::element::Type> new_types;
std::vector<::ngraph::PartialShape> new_shapes;
// Parameter types/shapes are kept as-is: specialize_function is used here
// purely as a deep copy (no constant folding, constants not shared)
for (const auto &parameter : ngraphFunction->get_parameters()) {
new_shapes.emplace_back(parameter->get_partial_shape());
new_types.emplace_back(parameter->get_element_type());
}
auto copyFunction = ngraph::specialize_function(std::const_pointer_cast<ngraph::Function>(ngraphFunction),
new_types, new_shapes, std::vector<void *>(new_types.size(), nullptr), constFolding, shareConsts);
// 2. Perform common and device-specific transformations
ngraph::pass::Manager passManager;
// Example: register standard ngraph transformation from ngraph::ngraph
passManager.register_pass<ngraph::pass::ConstantFolding>();
// Example: register inference engine optimization transformation for IE::inference_engine_transformations
passManager.register_pass<ngraph::pass::ConvertDivide>();
// Register any other transformations
// ..
// After `run_passes`, we have the transformed function, where operations match device operations,
// and we can create device hardware-dependent graph
passManager.run_passes(copyFunction);
// 3. Iterate over operations and create hardware-specific ngraph
for (const auto& op : copyFunction->get_ordered_ops()) {
// TODO: map ngraph `op` to device operation
}
// 4. Perform any other steps like allocation and filling device buffers, and so on
}
// ! [executable_network:compile_graph]
// ! [executable_network:create_infer_request_impl]
// Factory for the synchronous infer request; passes a shared pointer to this
// executable network so the request can reach the compiled graph.
InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
}
// ! [executable_network:create_infer_request_impl]
// ! [executable_network:create_infer_request]
// Creates a synchronous request, wraps it into TemplateAsyncInferRequest and
// exposes it through the reference-counted IInferRequest interface; the
// custom deleter calls Release() instead of delete because the object is
// managed by the IE C-style reference counting.
void TemplatePlugin::ExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) {
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
auto asyncThreadSafeImpl = std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor, _waitExecutor, _callbackExecutor);
asyncRequest.reset(new InferenceEngine::InferRequestBase<TemplateAsyncInferRequest>(asyncThreadSafeImpl),
[](InferenceEngine::IInferRequest *p) { p->Release(); });
asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
}
// ! [executable_network:create_infer_request]
// ! [executable_network:get_config]
// Returns the value of a supported config key (DEVICE_ID / PERF_COUNT) by
// delegating to the stored Configuration; anything else throws.
// TODO: return more supported values for config keys
void TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const {
    const bool supported = (name == CONFIG_KEY(DEVICE_ID)) || (name == CONFIG_KEY(PERF_COUNT));
    if (!supported) {
        THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
    }
    result = _cfg.Get(name);
}
// ! [executable_network:get_config]
// ! [executable_network:get_metric]
// Reports metric values for this compiled network. Supported metrics:
// SUPPORTED_METRICS, SUPPORTED_CONFIG_KEYS, NETWORK_NAME and
// OPTIMAL_NUMBER_OF_INFER_REQUESTS; any other name throws.
void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
// TODO: return more supported values for metrics
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector<std::string>{
METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT)});
} else if (METRIC_KEY(NETWORK_NAME) == name) {
result = IE_SET_METRIC(NETWORK_NAME, _name);
} else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
// TODO: fill with actual number
unsigned int value = 1;
result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
}
}
// ! [executable_network:get_metric]
// ! [executable_network:export_impl]
// Serializes the compiled model to the given stream; counterpart of the
// istream constructor above. Intentionally left unimplemented.
void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& dlaModel) {
// TODO: Code which exports graph to std::ostream
}
// ! [executable_network:export_impl]

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include <tuple>
#include <memory>
#include <string>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <ie_common.h>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <cnn_network_impl.hpp>
#include <threading/ie_itask_executor.hpp>
#include <ngraph/function.hpp>
#include "template_config.hpp"
#include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"
namespace TemplatePlugin {
class Engine;
/**
* @class ExecutableNetwork
* @brief Interface of executable network
*/
// ! [executable_network:header]
// Holds a network compiled for the Template device. The thread-safe default
// base class provides the public ExecutableNetwork plumbing; this class adds
// compilation, infer-request creation, and config/metric reporting.
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
    // Compiles `network` according to `cfg`.
    ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
                      const Configuration& cfg);
    // Restores a previously exported network from `model` (see ExportImpl).
    ExecutableNetwork(std::istream & model,
                      const Configuration& cfg);
    ~ExecutableNetwork() override = default;
    // Methods from a base class ExecutableNetworkThreadSafeDefault
    void ExportImpl(std::ostream& model) override;
    InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                                                                      InferenceEngine::OutputsDataMap networkOutputs) override;
    void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
    void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
    void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
    // Counter used by TemplateInferRequest to derive unique request names;
    // atomic because infer requests may be created from several threads.
    std::atomic<std::size_t> _requestId = {0};
    // Network name reported via the NETWORK_NAME metric.
    std::string _name;
    // Effective configuration this network was compiled with.
    Configuration _cfg;
private:
    void CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction);
    std::shared_ptr<Engine> _plugin;
    InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [executable_network:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,224 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>
#include <ie_blob.h>
#include <ie_plugin.hpp>
#include <description_buffer.hpp>
#include <debug.h>
#include <ie_layouts.h>
#include <threading/ie_executor_manager.hpp>
#include <blob_transform.hpp>
#include <ie_parallel.hpp>
#include <ie_memcpy.h>
#include <precision_utils.h>
#include <template/template_config.hpp>
#include "template_infer_request.hpp"
#include "template_executable_network.hpp"
#include "template_plugin.hpp"
using namespace TemplatePlugin;
using Time = std::chrono::high_resolution_clock;
using ns = std::chrono::nanoseconds;
using fsec = std::chrono::duration<float>;
// ! [infer_request:ctor]
// Builds an infer request for `executableNetwork`: registers per-stage
// profiling tasks and allocates host (and, if needed, device) buffers.
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
                                           const InferenceEngine::OutputsDataMap& networkOutputs,
                                           const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
    InferRequestInternal(networkInputs, networkOutputs),
    _executableNetwork(executableNetwork) {
    // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
    // Reserve a unique request id atomically. The original code read the
    // counter and incremented it in two separate steps, so two requests
    // created concurrently could observe the same id.
    auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1));
    std::string name = _executableNetwork->_name + "_Req" + requestID;
    _profilingTask = { {
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Preprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Postprocess") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_StartPipline") },
        { ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_WaitPipline") },
    } };
    allocateDeviceBuffers();
    allocateInputBlobs();
    allocateOutputBlobs();
}
// ! [infer_request:ctor]
// ! [infer_request:dtor]
TemplateInferRequest::~TemplateInferRequest() {
    // Decrements the shared request counter, so the id of a destroyed request
    // may be handed out again to a request created later.
    // NOTE(review): confirm that id reuse is intended for profiling task names.
    _executableNetwork->_requestId--;
}
// ! [infer_request:dtor]
// Placeholder for device-side buffer allocation; nothing to do for a
// host-only template device.
void TemplateInferRequest::allocateDeviceBuffers() {
    // TODO: allocate device buffers if Template device is a remote one
}
// Allocates a host blob per network input. When the input layout is NHWC an
// extra NCHW blob is created as the conversion target used by
// inferPreprocess(); otherwise both map entries refer to the same blob.
// Throws for precisions other than FP32, FP16, I16, U8.
void TemplateInferRequest::allocateInputBlobs() {
    for (auto &networkInput : _networkInputs) {
        SizeVector dims = networkInput.second->getTensorDesc().getDims();
        Precision precision = networkInput.second->getTensorDesc().getPrecision();
        Layout input_layout = networkInput.second->getInputData()->getLayout();
        Blob::Ptr inputBlob;
        Blob::Ptr inputBlobNCHW;
        switch (precision) {
        case Precision::FP32 :
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::FP16 :
        case Precision::I16 :
            // FP16 payload is carried in 16-bit integer elements.
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::U8 :
            inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, input_layout });
            if (input_layout == Layout::NHWC) {
                inputBlobNCHW = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, Layout::NCHW });
            }
            break;
        default:
            // Fixed: the original message streamed `precision` twice.
            THROW_IE_EXCEPTION << "Unsupported network precision: " << precision
                               << "! Supported precisions are: FP32, FP16, I16, U8";
        }
        // allocate the input blob
        inputBlob->allocate();
        _inputs[networkInput.first] = inputBlob;
        if (inputBlobNCHW != inputBlob) {
            inputBlobNCHW->allocate();
        }
        _inputsNCHW[networkInput.first] = inputBlobNCHW;
    }
}
// Allocates a host blob per network output, plus an NCHW-laid-out companion
// blob for NHWC outputs (the postprocessing conversion source/target).
// Throws for precisions other than FP32 and FP16.
void TemplateInferRequest::allocateOutputBlobs() {
    // Fail fast before doing any allocation work: the original performed this
    // check only after the loop, by which point an empty map had silently
    // produced no blobs.
    if (_networkOutputs.empty() || _networkInputs.empty()) {
        THROW_IE_EXCEPTION << "Internal error: no information about network's output/input";
    }
    for (auto &networkOutput : _networkOutputs) {
        SizeVector dims = networkOutput.second->getTensorDesc().getDims();
        Precision precision = networkOutput.second->getPrecision();
        Blob::Ptr outputBlob;
        Blob::Ptr outputBlobNCHW;
        switch (precision) {
        case Precision::FP32 :
            outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, networkOutput.second->getLayout() });
            if (networkOutput.second->getLayout() == Layout::NHWC) {
                outputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
            }
            break;
        case Precision::FP16 :
            // FP16 payload is carried in 16-bit integer elements.
            outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, networkOutput.second->getLayout() });
            if (networkOutput.second->getLayout() == Layout::NHWC) {
                outputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
            }
            break;
        default:
            THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Unsupported output precision: "
                               << precision << "! Supported precisions are: FP32, FP16";
        }
        // allocate the output blob
        outputBlob->allocate();
        _outputs[networkOutput.first] = outputBlob;
        if (outputBlobNCHW != outputBlob) {
            outputBlobNCHW->allocate();
        }
        _outputsNCHW[networkOutput.first] = outputBlobNCHW;
    }
}
// ! [infer_request:infer_impl]
// Synchronous inference: runs all pipeline stages back to back on the
// calling thread. Async infer requests invoke the same stages individually.
void TemplateInferRequest::InferImpl() {
    // TODO: fill with actual list of pipeline stages, which are executed synchronously for sync infer requests
    inferPreprocess();
    startPipeline();
    waitPipeline();
    inferPostprocess();
}
// ! [infer_request:infer_impl]
// ! [infer_request:infer_preprocess]
void TemplateInferRequest::inferPreprocess() {
auto prev = Time::now();
// execute input pre-processing.
InferRequestInternal::execDataPreprocessing(_inputs);
for (auto &input : InferRequestInternal::_inputs) {
auto& src = input.second;
auto& dst = _inputsNCHW[input.first];
if (src != dst) {
if (src->getTensorDesc().getPrecision() == dst->getTensorDesc().getPrecision()
&& src->getTensorDesc().getDims() == dst->getTensorDesc().getDims()
&& src->getTensorDesc().getLayout() == dst->getTensorDesc().getLayout()) {
_inputsNCHW[input.first] = input.second;
} else { // Convert Layout to NCHW
InferenceEngine::blob_copy(src, dst);
}
}
}
// TODO: Preprocessing on inputs if needed: work _inputsNCHW
_inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:infer_preprocess]
// Kicks off asynchronous device execution. A real plugin would submit the
// staged input blobs here and record transfer/execute timings.
void TemplateInferRequest::startPipeline() {
    IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[StartPipeline])
    // TODO: Start pipeline and fill _inputTransferTime, _executeTime, _outputTransferTime
}
void TemplateInferRequest::waitPipeline() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[WaitPipeline])
auto prev = Time::now();
// TODO: Wait pipeline using driver API or other synronizations methods
_inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
void TemplateInferRequest::inferPostprocess() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[Postprocess])
auto prev = Time::now();
// TODO: perform post-processing and convert to NHWC layout
_outputPostProcessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:get_performance_counts]
void TemplateInferRequest::GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap) const {
InferenceEngineProfileInfo info;
info.execution_index = 0;
info.status = InferenceEngineProfileInfo::EXECUTED;
info.cpu_uSec = info.realTime_uSec = _inputPreprocessTime / 1000;
perfMap["1. input preprocessing"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _inputTransferTime / 1000;
perfMap["2. input transfer to a device"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _executeTime / 1000;
perfMap["3. execution time"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _outputTransferTime / 1000;
perfMap["4. output transfer from a device"] = info;
info.cpu_uSec = info.realTime_uSec = _outputPostProcessTime / 1000;
perfMap["5. output postprocessing"] = info;
}
// ! [infer_request:get_performance_counts]

View File

@@ -0,0 +1,74 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <array>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <ie_common.h>
#include <ie_profiling.hpp>
#include <cpp_interfaces/impl/ie_infer_request_internal.hpp>
#include <cpp_interfaces/impl/ie_executable_network_internal.hpp>
#include <threading/ie_itask_executor.hpp>
#include "template_config.hpp"
namespace TemplatePlugin {
class ExecutableNetwork;
// ! [infer_request:header]
// Synchronous infer request for the Template device. Owns staging blobs in
// NCHW layout and per-stage timing used for performance counters. The
// pipeline methods are also invoked one-by-one by the async wrapper.
class TemplateInferRequest : public InferenceEngine::InferRequestInternal {
public:
    typedef std::shared_ptr<TemplateInferRequest> Ptr;
    TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
                         const InferenceEngine::OutputsDataMap& networkOutputs,
                         const std::shared_ptr<ExecutableNetwork>& executableNetwork);
    ~TemplateInferRequest() override;
    void InferImpl() override;
    void GetPerformanceCounts(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& perfMap) const override;
    // pipeline methods-stages which are used in async infer request implementation and assigned to particular executor
    void inferPreprocess();
    void startPipeline();
    void waitPipeline();
    void inferPostprocess();
    // Keeps the owning executable network alive for the request's lifetime.
    std::shared_ptr<ExecutableNetwork> _executableNetwork;
private:
    void allocateDeviceBuffers();
    void allocateInputBlobs();
    void allocateOutputBlobs();
    // Indices into _profilingTask, one per pipeline stage.
    enum {
        Preprocess,
        Postprocess,
        StartPipeline,
        WaitPipeline,
        numOfStages
    };
    std::array<InferenceEngine::ProfilingTask, numOfStages> _profilingTask;
    // NCHW staging blobs; entries alias _inputs/_outputs when no layout
    // conversion is required.
    InferenceEngine::BlobMap _inputsNCHW;
    InferenceEngine::BlobMap _outputsNCHW;
    // for performance counts (all durations in nanoseconds)
    double _inputPreprocessTime = 0.0;
    double _inputTransferTime = 0.0;
    double _executeTime = 0.0;
    double _outputTransferTime = 0.0;
    double _outputPostProcessTime = 0.0;
};
// ! [infer_request:header]
} // namespace TemplatePlugin

View File

@@ -0,0 +1,194 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <memory>
#include <vector>
#include <sstream>
#include <regex>
#include <string>
#include <map>
#include <ie_metric_helpers.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ie_plugin_config.hpp>
#include <ie_util_internal.hpp>
#include <inference_engine.hpp>
#include <file_utils.h>
#include <cpp_interfaces/base/ie_plugin_base.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include <graph_tools.hpp>
#include <ie_input_info.hpp>
#include <ie_layouts.h>
#include <hetero/hetero_plugin_config.hpp>
#include <template/template_config.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_infer_request.hpp"
using namespace TemplatePlugin;
// ! [plugin:ctor]
// Sets the device name under which the plugin registers with the IE Core.
Plugin::Plugin() {
    // TODO: fill with actual device name
    _pluginName = "TEMPLATE";
}
// ! [plugin:ctor]
// ! [plugin:load_exe_network_impl]
// Validates input/output precisions, clones and constant-folds the network,
// and compiles it into an ExecutableNetwork for the Template device.
// Throws when an output is not FP32/FP16 or an input is not FP32/FP16/I16/U8.
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::ICore * core,
                                                                           const InferenceEngine::ICNNNetwork & network,
                                                                           const ConfigMap &config) {
    // Merge the load-time config on top of the plugin-wide defaults.
    auto cfg = Configuration{ config, _cfg };
    InferenceEngine::InputsDataMap networkInputs;
    InferenceEngine::OutputsDataMap networkOutputs;
    network.getInputsInfo(networkInputs);
    network.getOutputsInfo(networkOutputs);
    // TODO: check with precisions supported by Template device
    // Iterate by const reference: the original range-for copied every map
    // entry (a pair with a shared_ptr) on each iteration.
    for (const auto& networkOutput : networkOutputs) {
        auto output_precision = networkOutput.second->getPrecision();
        if (output_precision != Precision::FP32 &&
            output_precision != Precision::FP16) {
            THROW_IE_EXCEPTION << "Template device supports only FP16 and FP32 output precision.";
        }
    }
    for (const auto& networkInput : networkInputs) {
        auto input_precision = networkInput.second->getTensorDesc().getPrecision();
        if (input_precision != InferenceEngine::Precision::FP32 &&
            input_precision != InferenceEngine::Precision::FP16 &&
            input_precision != InferenceEngine::Precision::I16 &&
            input_precision != InferenceEngine::Precision::U8) {
            THROW_IE_EXCEPTION << "Input image format " << input_precision << " is not supported yet.\n"
                               << "Supported formats are: FP32, FP16, I16 and U8.";
        }
    }
    // Clone so the executable network owns its copy, then fold constants.
    auto clonedNetwork = cloneNet(network);
    ConstTransformer transformator(clonedNetwork.get());
    transformator.fullTrim();
    return std::make_shared<ExecutableNetwork>(*clonedNetwork, cfg);
}
// ! [plugin:load_exe_network_impl]
// ! [plugin:import_network_impl]
// Restores an executable network previously serialized via ExportImpl.
// The import-time `config` entries override whatever was stored in the stream.
InferenceEngine::ExecutableNetwork Plugin::ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) {
    // TODO: Import network from stream is not mandatory functionality;
    // Can just throw an exception and remove the code below
    Configuration exportedCfg;
    // some code below which reads exportedCfg from `model` stream
    // ..
    auto cfg = Configuration(config, exportedCfg);
    IExecutableNetwork::Ptr executableNetwork;
    auto exec_network_impl = std::make_shared<ExecutableNetwork>(model, cfg);
    // Wrap the internal implementation in the public interface; the custom
    // deleter releases the wrapper through its own IRelease API rather than
    // plain `delete`.
    executableNetwork.reset(new ExecutableNetworkBase<ExecutableNetworkInternal>(exec_network_impl),
                            [](InferenceEngine::details::IRelease *p) {p->Release(); });
    return InferenceEngine::ExecutableNetwork{ executableNetwork };
}
// ! [plugin:import_network_impl]
// ! [plugin:query_network]
void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, QueryNetworkResult &res) const {
Configuration cfg{config, _cfg, false};
res.rc = StatusCode::OK;
if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
auto ops = ngraphFunction->get_ordered_ops();
for (auto&& op : ops) {
// TODO: investigate if an op is actually supported by Template device
bool supported = true;
if (supported) {
res.supportedLayersMap.insert({ op->get_friendly_name(), GetName() });
}
}
} else {
THROW_IE_EXCEPTION << "TEMPLATE plugin can query only IR v10 networks";
}
}
// ! [plugin:query_network]
// ! [plugin:add_extension]
// Extension registration hook; the Template device has no extension support,
// so the argument is deliberately ignored.
void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
    // TODO: add extensions if plugin supports extensions
}
// ! [plugin:add_extension]
// ! [plugin:set_config]
// Merges `config` into the current plugin-wide configuration; unknown or
// malformed keys are rejected by the Configuration constructor.
void Plugin::SetConfig(const ConfigMap &config) {
    _cfg = Configuration{config, _cfg};
}
// ! [plugin:set_config]
// ! [plugin:get_config]
// Returns the current value of a single config key; Configuration::Get
// throws for unsupported keys. `options` is unused.
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const {
    return _cfg.Get(name);
}
// ! [plugin:get_config]
// ! [plugin:get_metric]
// Reports plugin-level metrics. Unknown metric names raise an IE exception.
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const {
    if (METRIC_KEY(SUPPORTED_METRICS) == name) {
        std::vector<std::string> supportedMetrics = {
            METRIC_KEY(AVAILABLE_DEVICES),
            METRIC_KEY(SUPPORTED_METRICS),
            METRIC_KEY(SUPPORTED_CONFIG_KEYS),
            METRIC_KEY(FULL_DEVICE_NAME),
            METRIC_KEY(OPTIMIZATION_CAPABILITIES),
            METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
        IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
    } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
        // Renamed from the original `confiKeys` typo.
        std::vector<std::string> configKeys = {
            CONFIG_KEY(DEVICE_ID),
            CONFIG_KEY(PERF_COUNT) };
        IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
    } else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
        // TODO: fill list of available devices
        std::vector<std::string> availableDevices = { "" };
        IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
    } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
        // Fixed: the original local `std::string name` shadowed the `name`
        // parameter of this function.
        std::string deviceName = "Template Device Full Name";
        IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, deviceName);
    } else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
        // TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
        std::vector<std::string> capabilities = { METRIC_VALUE(FP32), TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION) };
        IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
    } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
        // TODO: fill with actual values (min, max, step)
        using uint = unsigned int;
        IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
    } else {
        THROW_IE_EXCEPTION << "Unsupported device metric: " << name;
    }
}
// ! [plugin:get_metric]
IE_SUPPRESS_DEPRECATED_START
// ! [plugin:create_plugin_engine]
// Plugin entry point resolved by the IE Core loader. Creates the plugin
// instance and reports failures through `resp` instead of throwing.
INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, ResponseDesc *resp) noexcept {
    try {
        plugin = make_ie_compatible_plugin({2, 1, CI_BUILD_NUMBER, "templatePlugin"},
                                           std::make_shared<Plugin>());
        return OK;
    } catch (const std::exception &ex) {   // catch by const reference
        return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
    } catch (...) {
        // A non-std exception escaping a noexcept function would call
        // std::terminate; translate it into a generic error instead.
        return DescriptionBuffer(GENERAL_ERROR, resp) << "Unknown exception while creating templatePlugin";
    }
}
// ! [plugin:create_plugin_engine]
IE_SUPPRESS_DEPRECATED_END

View File

@@ -0,0 +1,48 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <inference_engine.hpp>
#include <description_buffer.hpp>
#include <cpp_interfaces/impl/ie_plugin_internal.hpp>
#include <ie_error.hpp>
#include <memory>
#include <string>
#include <map>
#include <unordered_map>
#include <vector>
#include "template_executable_network.hpp"
#include "template_config.hpp"
//! [plugin:header]
namespace TemplatePlugin {
// Entry-point class of the Template device plugin: compiles, imports and
// queries networks, and exposes plugin-wide config/metric handling.
class Plugin : public InferenceEngine::InferencePluginInternal {
public:
    using Ptr = std::shared_ptr<Plugin>;
    Plugin();
    ~Plugin() override = default;
    // Merges `config` into the plugin-wide configuration.
    void SetConfig(const std::map<std::string, std::string> &config) override;
    // Reports which operations of `network` the device can execute.
    void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
                      const std::map<std::string, std::string>& config,
                      InferenceEngine::QueryNetworkResult &res) const override;
    // Compiles `network` into an executable network for this device.
    InferenceEngine::ExecutableNetworkInternal::Ptr
    LoadExeNetworkImpl(const InferenceEngine::ICore * core, const InferenceEngine::ICNNNetwork &network,
                       const std::map<std::string, std::string> &config) override;
    void AddExtension(InferenceEngine::IExtensionPtr extension) override;
    InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
    InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
    // Restores an executable network serialized by ExecutableNetwork::ExportImpl.
    InferenceEngine::ExecutableNetwork ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;
private:
    // Plugin-wide defaults, merged with per-network configs at load time.
    Configuration _cfg;
};
} // namespace TemplatePlugin
//! [plugin:header]

View File

@@ -1,7 +1,7 @@
# Get Started with OpenVINO™ Deep Learning Deployment Toolkit (DLDT) on Linux*
This guide provides you with the information that will help you to start using
the DLDT on Linux\*. With this guide, you will learn how to:
the OpenVINO™ toolkit on Linux\*. With this guide, you will learn how to:
1. [Configure the Model Optimizer](#configure-the-model-optimizer)
2. [Prepare a model for sample inference](#prepare-a-model-for-sample-inference)
@@ -10,13 +10,13 @@ the DLDT on Linux\*. With this guide, you will learn how to:
3. [Run the Image Classification Sample Application with the model](#run-the-image-classification-sample-application)
## Prerequisites
1. This guide assumes that you have already cloned the `dldt` repo and
1. This guide assumes that you have already cloned the `openvino` repo and
successfully built the Inference Engine and Samples using the
[build instructions](inference-engine/README.md).
2. The original structure of the repository directories remains unchanged.
> **NOTE**: Below, the directory to which the `dldt` repository is cloned is
referred to as `<DLDT_DIR>`.
> **NOTE**: Below, the directory to which the `openvino` repository is cloned is
referred to as `<OPENVINO_DIR>`.
## Configure the Model Optimizer
@@ -53,7 +53,7 @@ If you see error messages, check for any missing dependencies.
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script to configure the Model Optimizer for Caffe,
TensorFlow, MXNet, Kaldi\*, and ONNX:
@@ -68,7 +68,7 @@ Configure individual frameworks separately **ONLY** if you did not select
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script for your model framework. You can run more than one script:
@@ -162,20 +162,20 @@ as `<models_dir>` below) with the Model Downloader:
**For CPU (FP32):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
```
**For GPU and MYRIAD (FP16):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
```
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `<ir_dir>` directory.
3. Copy the `squeezenet1.1.labels` file from the `<DLDT_DIR>/inference-engine/samples/sample_data/`
3. Copy the `squeezenet1.1.labels` file from the `<OPENVINO_DIR>/scripts/demo/`
folder to the model IR directory. This file contains the classes that ImageNet
uses so that the inference results show text instead of classification numbers:
```sh
cp <DLDT_DIR>/inference-engine/samples/sample_data/squeezenet1.1.labels <ir_dir>
cp <OPENVINO_DIR>/scripts/demo/squeezenet1.1.labels <ir_dir>
```
Now you are ready to run the Image Classification Sample Application.
@@ -184,28 +184,28 @@ Now you are ready to run the Image Classification Sample Application.
The Inference Engine sample applications are automatically compiled when you
built the Inference Engine using the [build instructions](inference-engine/README.md).
The binary files are located in the `<DLDT_DIR>/inference-engine/bin/intel64/Release`
The binary files are located in the `<OPENVINO_DIR>/inference-engine/bin/intel64/Release`
directory.
To run the Image Classification sample application with an input image on the prepared IR:
1. Go to the samples build directory:
```sh
cd <DLDT_DIR>/inference-engine/bin/intel64/Release
cd <OPENVINO_DIR>/inference-engine/bin/intel64/Release
```
2. Run the sample executable with specifying the `car.png` file from the
`<DLDT_DIR>/inference-engine/samples/sample_data/` directory as an input
`<OPENVINO_DIR>/scripts/demo/` directory as an input
image, the IR of your model and a plugin for a hardware device to perform
inference on:
**For CPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
```
**For GPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
```
**For MYRIAD:**
@@ -214,14 +214,14 @@ To run the Image Classification sample application with an input image on the pr
Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires
performing [additional hardware configuration steps](inference-engine/README.md#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2).
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
```
When the Sample Application completes, you will have the label and confidence for the top-10 categories printed on the screen. Below is a sample output with inference results on CPU:
```sh
Top 10 results:
Image /home/user/dldt/inference-engine/samples/sample_data/car.png
Image /home/user/openvino/scripts/demo/car.png
classid probability label
------- ----------- -----

View File

@@ -109,7 +109,7 @@ if(UNIX)
PATTERN *.bat EXCLUDE
PATTERN speech_libs_and_demos EXCLUDE)
elseif(WIN32)
install(DIRECTORY samples
install(DIRECTORY samples/
DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
COMPONENT cpp_samples
USE_SOURCE_PERMISSIONS
@@ -197,6 +197,7 @@ function(register_extra_plugins)
# automatically import plugins from the 'plugins' folder
file(GLOB local_extra_plugins "plugins/*")
list(APPEND local_extra_plugins "${OpenVINO_MAIN_SOURCE_DIR}/docs/template_plugin")
foreach(plugin_path IN LISTS IE_EXTRA_PLUGINS local_extra_plugins)
get_filename_component(plugin_dir "${plugin_path}" NAME)

View File

@@ -0,0 +1,32 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
########################################################################
#
# Perform search of TBB package corresponding with specified search order.
#
# TBBROOT var is set into external package path or has a default value
# with IE own version of TBB. Search order is next:
# 1) ${TBBROOT}/cmake
# 2) ${TBBROOT} with IE own version of TBBConfig.cmake (actual for TBB < 2017.7)
#
## Path to IE own version of TBBConfig.cmake old TBB version without cmake config.
# Select the platform subdirectory holding the bundled TBBConfig.cmake
# fallback, then search for TBB: first an external package at ${TBBROOT}/cmake,
# then the in-tree config for TBB versions shipped without one.
if(APPLE)
    set(IE_OWN_TBB_CONFIG tbb/mac)
elseif(UNIX)
    set(IE_OWN_TBB_CONFIG tbb/lnx)
elseif(WIN32)
    # Fixed: the original tested the undefined variable `WIN`, so the bundled
    # config was never selected on Windows; WIN32 is the CMake platform variable.
    set(IE_OWN_TBB_CONFIG tbb/win)
else()
    unset(IE_OWN_TBB_CONFIG)
endif()

find_package(TBB
    CONFIG
    NO_DEFAULT_PATH
    PATHS ${TBBROOT}/cmake
          ${CMAKE_CURRENT_LIST_DIR}/${IE_OWN_TBB_CONFIG}
)
find_package_handle_standard_args(TBB CONFIG_MODE)

View File

@@ -77,33 +77,29 @@ endif ()
## TBB package
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
reset_deps_cache(TBBROOT TBB_DIR)
reset_deps_cache(TBBROOT)
if(NOT DEFINED TBB_DIR AND NOT DEFINED ENV{TBB_DIR})
if (WIN32 AND X86_64)
#TODO: add target_path to be platform specific as well, to avoid following if
RESOLVE_DEPENDENCY(TBB
ARCHIVE_WIN "tbb2020_20200214_win.zip"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(ANDROID) # Should be before LINUX due LINUX is detected as well
RESOLVE_DEPENDENCY(TBB
ARCHIVE_ANDROID "tbb2020_20191023_android.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2020_20200327_lin_strip.tgz"
ARCHIVE_WIN "tbb2020_20200415_win.zip"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
elseif(ANDROID) # Should be before LINUX due LINUX is detected as well
RESOLVE_DEPENDENCY(TBB
ARCHIVE_ANDROID "tbb2020_20200404_android.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
elseif(LINUX AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2020_20200415_lin_strip.tgz"
TARGET_PATH "${TEMP}/tbb")
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2020_20191023_mac.tgz"
ARCHIVE_MAC "tbb2020_20200404_mac.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
ENVIRONMENT "TBBROOT")
else()
message(FATAL_ERROR "TBB is not available on current platform")
endif()
@@ -116,12 +112,11 @@ if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
endif()
update_deps_cache(TBBROOT "${TBB}" "Path to TBB root folder")
update_deps_cache(TBB_DIR "${TBBROOT}/cmake" "Path to TBB package folder")
if (WIN32)
log_rpath_from_dir(TBB "${TBB_DIR}/../bin")
log_rpath_from_dir(TBB "${TBB}/bin")
else ()
log_rpath_from_dir(TBB "${TBB_DIR}/../lib")
log_rpath_from_dir(TBB "${TBB}/lib")
endif ()
debug_message(STATUS "tbb=" ${TBB})
endif ()

View File

@@ -2,6 +2,20 @@
# SPDX-License-Identifier: Apache-2.0
#
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
find_package(TBB COMPONENTS tbb tbbmalloc)
if (TBB_FOUND)
if (${TBB_VERSION} VERSION_LESS 2020)
ext_message(WARNING "TBB version is less than OpenVINO recommends to use.\
Some TBB related features like NUMA-aware tbb::task_arena\
execution will be disabled.")
endif()
else ()
ext_message(WARNING "TBB was not found by the configured TBB_DIR/TBBROOT path. \
SEQ method will be used.")
endif ()
endif()
function(set_ie_threading_interface_for TARGET_NAME)
get_target_property(target_type ${TARGET_NAME} TYPE)
if(target_type STREQUAL "INTERFACE_LIBRARY")
@@ -48,7 +62,6 @@ function(set_ie_threading_interface_for TARGET_NAME)
set(IE_THREAD_DEFINE "IE_THREAD_SEQ")
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
find_package(TBB COMPONENTS tbb tbbmalloc)
if (TBB_FOUND)
set(IE_THREAD_DEFINE "IE_THREAD_TBB")
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${TBB_IMPORTED_TARGETS})

View File

@@ -90,8 +90,8 @@ function(ie_add_plugin)
ie_cpack_add_component(${install_component} REQUIRED DEPENDS core)
install(TARGETS ${IE_PLUGIN_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component}
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component}
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component}
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ${install_component}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component})
endif()
endfunction()

View File

@@ -0,0 +1,196 @@
#===============================================================================
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# Linux config file for find_package(TBB ...): locates the Intel TBB shared
# libraries/headers under TBBROOT (or on system library paths as a fallback)
# and exposes each requested component as an imported target TBB::<component>.
# Sets: TBB_VERSION, TBB_IMPORTED_TARGETS, TBB_<component>_FOUND.
#
# TBB_FOUND should not be set explicitly. It is defined automatically by CMake.
# Handling of TBB_VERSION is in TBBConfigVersion.cmake.
# Default component set when find_package() was called without COMPONENTS;
# every default component is treated as required.
if (NOT TBB_FIND_COMPONENTS)
set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_FIND_REQUIRED_${_tbb_component} 1)
endforeach()
endif()
# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
if (NOT _tbbmalloc_proxy_ix EQUAL -1)
list(FIND TBB_FIND_COMPONENTS tbbmalloc _tbbmalloc_ix)
if (_tbbmalloc_ix EQUAL -1)
list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
endif()
endif()
# Fall back to the TBBROOT environment variable when the cache variable is unset.
if (NOT TBBROOT)
if(DEFINED ENV{TBBROOT})
set (TBBROOT $ENV{TBBROOT})
endif()
endif()
set(_tbb_root ${TBBROOT})
# Architecture subdirectory inside ${TBBROOT}/lib, chosen by pointer size.
set(_tbb_x32_subdir ia32)
set(_tbb_x64_subdir intel64)
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(_tbb_arch_subdir ${_tbb_x64_subdir})
else()
set(_tbb_arch_subdir ${_tbb_x32_subdir})
endif()
# Prefer the C++ compiler's id/version; fall back to the C compiler's.
if (CMAKE_CXX_COMPILER_LOADED)
set(_tbb_compiler_id ${CMAKE_CXX_COMPILER_ID})
set(_tbb_compiler_ver ${CMAKE_CXX_COMPILER_VERSION})
elseif (CMAKE_C_COMPILER_LOADED)
set(_tbb_compiler_id ${CMAKE_C_COMPILER_ID})
set(_tbb_compiler_ver ${CMAKE_C_COMPILER_VERSION})
endif()
# For non-GCC compilers try to find version of system GCC to choose right compiler subdirectory.
# Note: this overwrites _tbb_compiler_ver with the system gcc version.
if (NOT _tbb_compiler_id STREQUAL "GNU")
execute_process(COMMAND gcc --version OUTPUT_VARIABLE _tbb_gcc_ver_output ERROR_QUIET)
string(REGEX REPLACE ".*gcc.*([0-9]+\\.[0-9]+)\\.[0-9]+.*" "\\1" _tbb_compiler_ver "${_tbb_gcc_ver_output}")
if (NOT _tbb_compiler_ver)
message(FATAL_ERROR "This Intel TBB package is intended to be used only environment with available 'gcc'")
endif()
unset(_tbb_gcc_ver_output)
endif()
if (EXISTS "${_tbb_root}/lib/${_tbb_arch_subdir}")
# Packaged layout: pick the newest per-gcc subdirectory not exceeding the
# detected compiler version.
set(_tbb_lib ${_tbb_root}/lib/${_tbb_arch_subdir})
set(_tbb_inc ${_tbb_root}/include)
file(GLOB _tbb_gcc_versions_available RELATIVE ${_tbb_lib} ${_tbb_lib}/*)
# NOTE(review): _tbb_gcc_versions_available may be empty, which leaves
# _tbb_compiler_subdir unset below — consider guarding against that.
foreach (_tbb_gcc_version ${_tbb_gcc_versions_available})
# Assumes subdirectory names carry the version starting at offset 3
# (e.g. "gcc4.8" -> "4.8") — TODO confirm the package layout.
string(SUBSTRING ${_tbb_gcc_version} 3 -1 _tbb_gcc_version_number)
if (NOT _tbb_compiler_ver VERSION_LESS _tbb_gcc_version_number)
set(_tbb_compiler_subdir ${_tbb_gcc_version})
endif()
endforeach()
else()
if (TBBROOT)
set(__tbb_hint_path "${TBBROOT}")
else()
set(__tbb_hint_path "/non/existing/path")
endif()
# try to find TBB in the system
find_library(_tbb_lib NAMES tbb
HINTS "${__tbb_hint_path}"
PATH_SUFFIXES lib lib64)
find_path(_tbb_inc NAMES tbb.h
HINTS "${__tbb_hint_path}"
PATH_SUFFIXES include tbb include/tbb)
unset(__tbb_hint_path)
if (NOT _tbb_lib OR NOT _tbb_inc)
# NOTE(review): the mode keyword is quoted, so this is a plain notice, NOT a
# fatal error — configuration continues with TBB not found (the caller can
# then fall back to SEQ threading). Confirm this is intentional before
# changing it to message(FATAL_ERROR ...).
message("FATAL_ERROR" "Cannot find TBB")
endif()
# Strip the file/last-dir component to get the parent directories.
get_filename_component(_tbb_lib "${_tbb_lib}" PATH)
get_filename_component(_tbb_inc "${_tbb_inc}" PATH)
set(_tbb_arch_subdir "")
set(_tbb_compiler_subdir "")
endif()
unset(_tbb_gcc_version_number)
unset(_tbb_compiler_id)
unset(_tbb_compiler_ver)
# Now we check that all the needed component are present
get_filename_component(_tbb_lib_path "${_tbb_lib}/${_tbb_compiler_subdir}" ABSOLUTE)
if (TBB_FOUND)
return()
endif()
# detect version
find_file(_tbb_def_header tbb_stddef.h HINTS "${_tbb_root}/include/tbb")
if (_tbb_def_header)
file(READ "${_tbb_def_header}" _tbb_def_content)
string(REGEX MATCH "TBB_VERSION_MAJOR[ ]*[0-9]*" _tbb_version_major ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_major ${_tbb_version_major})
string(REGEX MATCH "TBB_VERSION_MINOR[ ]*[0-9]" _tbb_version_minor ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_minor ${_tbb_version_minor})
set(TBB_VERSION "${_tbb_version_major}.${_tbb_version_minor}")
else()
set(TBB_VERSION "")
endif()
# Probe both library soversion suffixes seen on disk (.so.2 and .so.12) and
# register an imported target per component found.
foreach (_tbb_soversion 2 12)
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(_tbb_release_lib
"${_tbb_lib_path}/lib${_tbb_component}.so.${_tbb_soversion}")
set(_tbb_debug_lib
"${_tbb_lib_path}/lib${_tbb_component}_debug.so.${_tbb_soversion}")
# oneDNN change: check library existence (BUILD_MODE related only, not both)
string(TOUPPER "${CMAKE_BUILD_TYPE}" UPPERCASE_CMAKE_BUILD_TYPE)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL "DEBUG")
if (EXISTS "${_tbb_debug_lib}")
set(_lib_exists TRUE)
elseif (EXISTS "${_tbb_release_lib}")
message(FATAL_ERROR
"Intel TBB release library is found here: ${_tbb_release_lib}. "
"But the debug library
(lib${_tbb_component}_debug.so.${_tbb_soversion}) is missing.")
endif()
else()
if (EXISTS "${_tbb_release_lib}")
set(_lib_exists TRUE)
endif()
endif()
if (_lib_exists)
if (NOT TARGET TBB::${_tbb_component})
add_library(TBB::${_tbb_component} SHARED IMPORTED)
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_CONFIGURATIONS "RELEASE;DEBUG"
IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"
IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"
INTERFACE_INCLUDE_DIRECTORIES "${_tbb_inc}")
# Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc
if (_tbb_component STREQUAL tbbmalloc_proxy)
set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc)
endif()
list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component})
set(TBB_${_tbb_component}_FOUND 1)
endif()
# NOTE(review): this break() exits the component loop as soon as one library
# exists for the current soversion — verify the remaining components are
# still registered as intended.
break()
endif()
endforeach()
endforeach()
# NOTE(review): _lib_exists is never reset per component and _tbb_component
# here holds the last value from the loop, so this check only fires when no
# library was found at all — confirm that is the intended semantics.
if (NOT _lib_exists AND TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component})
message(FATAL_ERROR "Missed required Intel TBB component: ${_tbb_component}")
endif()
unset(_tbb_x32_subdir)
unset(_tbb_x64_subdir)
unset(_tbb_arch_subdir)
unset(_tbb_compiler_subdir)
unset(_tbbmalloc_proxy_ix)
unset(_tbbmalloc_ix)
unset(_tbb_lib_path)
unset(_tbb_release_lib)
unset(_tbb_debug_lib)

View File

@@ -0,0 +1,114 @@
#===============================================================================
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# macOS config file for find_package(TBB ...): locates the Intel TBB dylibs
# directly under ${TBBROOT}/lib (no arch/compiler subdirectories) and exposes
# each requested component as an imported target TBB::<component>.
# Sets: TBB_VERSION, TBB_IMPORTED_TARGETS, TBB_<component>_FOUND.
#
# TBB_FOUND should not be set explicitly. It is defined automatically by CMake.
# Handling of TBB_VERSION is in TBBConfigVersion.cmake.
# Default component set when find_package() was called without COMPONENTS;
# every default component is treated as required.
if (NOT TBB_FIND_COMPONENTS)
set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_FIND_REQUIRED_${_tbb_component} 1)
endforeach()
endif()
# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
if (NOT _tbbmalloc_proxy_ix EQUAL -1)
list(FIND TBB_FIND_COMPONENTS tbbmalloc _tbbmalloc_ix)
if (_tbbmalloc_ix EQUAL -1)
list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
endif()
endif()
# Fall back to the TBBROOT environment variable when the cache variable is unset.
if (NOT TBBROOT)
if(DEFINED ENV{TBBROOT})
set (TBBROOT $ENV{TBBROOT})
else()
# NOTE(review): the mode keyword is quoted, so this is a plain notice, NOT a
# fatal error — configuration continues with an empty TBBROOT and the later
# EXISTS checks simply fail. Confirm this is intentional before changing it
# to message(FATAL_ERROR ...).
message("FATAL_ERROR" "TBBROOT is unset")
endif()
endif()
set(_tbb_root ${TBBROOT})
# On macOS the libraries live directly in ${TBBROOT}/lib, so both the arch and
# compiler subdirectories are "." regardless of pointer size.
set(_tbb_x32_subdir .)
set(_tbb_x64_subdir .)
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(_tbb_arch_subdir ${_tbb_x64_subdir})
else()
set(_tbb_arch_subdir ${_tbb_x32_subdir})
endif()
set(_tbb_compiler_subdir .)
get_filename_component(_tbb_lib_path "${_tbb_root}/lib/${_tbb_arch_subdir}/${_tbb_compiler_subdir}" ABSOLUTE)
if (TBB_FOUND)
return()
endif()
# detect version
find_file(_tbb_def_header tbb_stddef.h HINTS "${_tbb_root}/include/tbb")
if (_tbb_def_header)
file(READ "${_tbb_def_header}" _tbb_def_content)
string(REGEX MATCH "TBB_VERSION_MAJOR[ ]*[0-9]*" _tbb_version_major ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_major ${_tbb_version_major})
string(REGEX MATCH "TBB_VERSION_MINOR[ ]*[0-9]" _tbb_version_minor ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_minor ${_tbb_version_minor})
set(TBB_VERSION "${_tbb_version_major}.${_tbb_version_minor}")
else()
set(TBB_VERSION "")
endif()
# A component is registered only when BOTH its release and debug dylibs exist;
# otherwise a required component triggers a fatal error.
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(_tbb_release_lib "${_tbb_lib_path}/lib${_tbb_component}.dylib")
set(_tbb_debug_lib "${_tbb_lib_path}/lib${_tbb_component}_debug.dylib")
if (EXISTS "${_tbb_release_lib}" AND EXISTS "${_tbb_debug_lib}")
if (NOT TARGET TBB::${_tbb_component})
add_library(TBB::${_tbb_component} SHARED IMPORTED)
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_CONFIGURATIONS "RELEASE;DEBUG"
IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"
IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"
INTERFACE_INCLUDE_DIRECTORIES "${_tbb_root}/include")
# Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc
if (_tbb_component STREQUAL tbbmalloc_proxy)
set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc)
endif()
list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component})
set(TBB_${_tbb_component}_FOUND 1)
endif()
elseif (TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component})
message(FATAL_ERROR "Missed required Intel TBB component: ${_tbb_component}")
endif()
endforeach()
unset(_tbb_x32_subdir)
unset(_tbb_x64_subdir)
unset(_tbb_arch_subdir)
unset(_tbb_compiler_subdir)
unset(_tbbmalloc_proxy_ix)
unset(_tbbmalloc_ix)
unset(_tbb_lib_path)
unset(_tbb_release_lib)
unset(_tbb_debug_lib)

View File

@@ -0,0 +1,140 @@
#===============================================================================
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# Windows (MSVC-only) config file for find_package(TBB ...): locates the Intel
# TBB import libraries under ${TBBROOT}/lib/<arch>/<vcNN> and exposes each
# requested component as an imported target TBB::<component>.
# Sets: TBB_VERSION, TBB_IMPORTED_TARGETS, TBB_<component>_FOUND.
#
# TBB_FOUND should not be set explicitly. It is defined automatically by CMake.
# Handling of TBB_VERSION is in TBBConfigVersion.cmake.
# Default component set when find_package() was called without COMPONENTS;
# every default component is treated as required.
if (NOT TBB_FIND_COMPONENTS)
set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_FIND_REQUIRED_${_tbb_component} 1)
endforeach()
endif()
# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
if (NOT _tbbmalloc_proxy_ix EQUAL -1)
list(FIND TBB_FIND_COMPONENTS tbbmalloc _tbbmalloc_ix)
if (_tbbmalloc_ix EQUAL -1)
list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
endif()
endif()
# Fall back to the TBBROOT environment variable when the cache variable is unset.
if (NOT TBBROOT)
if(DEFINED ENV{TBBROOT})
set (TBBROOT $ENV{TBBROOT})
else()
# NOTE(review): the mode keyword is quoted, so this is a plain notice, NOT a
# fatal error — configuration continues with an empty TBBROOT and the later
# EXISTS checks simply fail. Confirm this is intentional before changing it
# to message(FATAL_ERROR ...).
message("FATAL_ERROR" "TBBROOT is unset")
endif()
endif()
set(_tbb_root ${TBBROOT})
# Architecture subdirectory inside ${TBBROOT}/lib, chosen by pointer size.
set(_tbb_x32_subdir ia32)
set(_tbb_x64_subdir intel64)
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(_tbb_arch_subdir ${_tbb_x64_subdir})
else()
set(_tbb_arch_subdir ${_tbb_x32_subdir})
endif()
if (NOT MSVC)
message(FATAL_ERROR "This Intel TBB package is intended to be used only in the project with MSVC")
endif()
# Detect the most relevant MSVC subdirectory
# (MSVC_VERSION 1700 -> vc11, 1800 -> vc12, >= 1900 -> vc14).
set(_tbb_msvc_1700_subdir vc11)
set(_tbb_msvc_1800_subdir vc12)
set(_tbb_msvc_1900_subdir vc14)
set(_tbb_msvc_ver ${MSVC_VERSION})
if (MSVC_VERSION VERSION_LESS 1700)
message(FATAL_ERROR "This Intel TBB package is intended to be used only in the project with MSVC version 1700 (vc11) or higher")
elseif (MSVC_VERSION VERSION_GREATER 1900)
set(_tbb_msvc_ver 1900)
endif()
set(_tbb_compiler_subdir ${_tbb_msvc_${_tbb_msvc_ver}_subdir})
unset(_tbb_msvc_1700_subdir)
unset(_tbb_msvc_1800_subdir)
unset(_tbb_msvc_1900_subdir)
# Windows Store (UWP) builds use the "_ui" flavor of the vc directory.
if (WINDOWS_STORE)
set(_tbb_compiler_subdir ${_tbb_compiler_subdir}_ui)
endif()
# Set convenience variables to locate TBB files (these are used for a PSXE install).
get_filename_component(_tbb_lib_path "${_tbb_root}/lib/${_tbb_arch_subdir}/${_tbb_compiler_subdir}" ABSOLUTE)
get_filename_component(_tbb_inc_path "${_tbb_root}/include/" ABSOLUTE)
if (TBB_FOUND)
return()
endif()
# detect version
find_file(_tbb_def_header tbb_stddef.h HINTS "${_tbb_root}/include/tbb")
if (_tbb_def_header)
file(READ "${_tbb_def_header}" _tbb_def_content)
string(REGEX MATCH "TBB_VERSION_MAJOR[ ]*[0-9]*" _tbb_version_major ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_major ${_tbb_version_major})
string(REGEX MATCH "TBB_VERSION_MINOR[ ]*[0-9]" _tbb_version_minor ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_minor ${_tbb_version_minor})
set(TBB_VERSION "${_tbb_version_major}.${_tbb_version_minor}")
else()
set(TBB_VERSION "")
endif()
# A component is registered only when BOTH its release and debug .lib files
# exist; otherwise a required component triggers a fatal error.
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(_tbb_release_lib "${_tbb_lib_path}/${_tbb_component}.lib")
set(_tbb_debug_lib "${_tbb_lib_path}/${_tbb_component}_debug.lib")
if (EXISTS "${_tbb_release_lib}" AND EXISTS "${_tbb_debug_lib}")
if (NOT TARGET TBB::${_tbb_component})
add_library(TBB::${_tbb_component} SHARED IMPORTED)
# __TBB_NO_IMPLICIT_LINKAGE disables TBB's header-driven auto-linking so
# that linkage is controlled by these imported targets.
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_CONFIGURATIONS "RELEASE;DEBUG"
IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"
IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"
INTERFACE_INCLUDE_DIRECTORIES "${_tbb_inc_path}"
IMPORTED_IMPLIB_RELEASE "${_tbb_release_lib}"
IMPORTED_IMPLIB_DEBUG "${_tbb_debug_lib}"
INTERFACE_COMPILE_DEFINITIONS "__TBB_NO_IMPLICIT_LINKAGE=1")
# Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc
if (_tbb_component STREQUAL tbbmalloc_proxy)
set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc)
endif()
list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component})
set(TBB_${_tbb_component}_FOUND 1)
endif()
elseif (TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component})
message(FATAL_ERROR "Missed required Intel TBB component: ${_tbb_component}")
endif()
endforeach()
unset(_tbb_x32_subdir)
unset(_tbb_x64_subdir)
unset(_tbb_arch_subdir)
unset(_tbb_compiler_subdir)
unset(_tbbmalloc_proxy_ix)
unset(_tbbmalloc_ix)
unset(_tbb_lib_path)
unset(_tbb_release_lib)
unset(_tbb_debug_lib)

View File

@@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
# Default packages
#
set(FIRMWARE_PACKAGE_VERSION 1076)
set(FIRMWARE_PACKAGE_VERSION 1119)
#
# CMake variables to override default firmware files
@@ -82,7 +82,7 @@ foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
VERBATIM)
install(FILES ${${var_name}}
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT myriad)
endforeach()

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier : Apache-2.0
//
#include <stdlib.h>

View File

@@ -1,5 +1,5 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier : Apache-2.0
//
#include <stdlib.h>

View File

@@ -28,8 +28,8 @@ export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/ta
# install
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/

View File

@@ -29,15 +29,18 @@ def build_argparser():
args = parser.add_argument_group("Options")
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
required=True, type=str)
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to image file.",
required=True, type=str, nargs="+")
required=True, type=str, nargs="+")
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. Absolute path to a shared library with the kernels implementations.",
type=str, default=None)
help="Optional. Required for CPU custom layers. "
"Absolute path to a shared library with the kernels implementations.",
type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample will look for a suitable plugin for device specified (CPU by default)",
default="CPU", type=str)
help="Optional. Specify the target device to infer on; "
"CPU, GPU, FPGA or MYRIAD is acceptable. "
"Sample will look for a suitable plugin for device specified (CPU by default)",
default="CPU", type=str)
args.add_argument("--labels", help="Optional. Labels mapping file", default=None, type=str)
args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
@@ -59,9 +62,10 @@ def main():
# ------------- 2. Load Plugin for inference engine and extensions library if specified --------------
log.info("Device info:")
versions = ie.get_versions(args.device)
print("{}{}".format(" "*8, args.device))
print("{}MKLDNNPlugin version ......... {}.{}".format(" "*8, versions[args.device].major, versions[args.device].minor))
print("{}Build ........... {}".format(" "*8, versions[args.device].build_number))
print("{}{}".format(" " * 8, args.device))
print("{}MKLDNNPlugin version ......... {}.{}".format(" " * 8, versions[args.device].major,
versions[args.device].minor))
print("{}Build ........... {}".format(" " * 8, versions[args.device].build_number))
if args.cpu_extension and "CPU" in args.device:
ie.add_extension(args.cpu_extension, "CPU")
@@ -79,8 +83,15 @@ def main():
# -----------------------------------------------------------------------------------------------------
# --------------------------- 3. Read and preprocess input --------------------------------------------
input_blob = next(iter(net.inputs))
n, c, h, w = net.inputs[input_blob].shape
print("inputs number: " + str(len(net.inputs.keys())))
for input_key in net.inputs:
print("input shape: " + str(net.inputs[input_key].shape))
print("input key: " + input_key)
if len(net.inputs[input_key].layout) == 4:
n, c, h, w = net.inputs[input_key].shape
images = np.ndarray(shape=(n, c, h, w))
images_hw = []
for i in range(n):
@@ -94,13 +105,14 @@ def main():
log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW
images[i] = image
# -----------------------------------------------------------------------------------------------------
# --------------------------- 4. Configure input & output ---------------------------------------------
# --------------------------- Prepare input blobs -----------------------------------------------------
log.info("Preparing input blobs")
assert (len(net.inputs.keys()) == 1 or len(net.inputs.keys()) == 2), "Sample supports topologies only with 1 or 2 inputs"
input_blob = next(iter(net.inputs))
assert (len(net.inputs.keys()) == 1 or len(
net.inputs.keys()) == 2), "Sample supports topologies only with 1 or 2 inputs"
out_blob = next(iter(net.outputs))
input_name, input_info_name = "", ""
@@ -112,9 +124,21 @@ def main():
elif len(net.inputs[input_key].layout) == 2:
input_info_name = input_key
net.inputs[input_key].precision = 'FP32'
if net.inputs[input_key].shape[1] != 3 and net.inputs[input_key].shape[1] != 6 or net.inputs[input_key].shape[0] != 1:
if net.inputs[input_key].shape[1] != 3 and net.inputs[input_key].shape[1] != 6 or \
net.inputs[input_key].shape[0] != 1:
log.error('Invalid input info. Should be 3 or 6 values length.')
data = {}
data[input_name] = images
if input_info_name != "":
infos = np.ndarray(shape=(n, c), dtype=float)
for i in range(n):
infos[i, 0] = h
infos[i, 1] = w
infos[i, 2] = 1.0
data[input_info_name] = infos
# --------------------------- Prepare output blobs ----------------------------------------------------
log.info('Preparing output blobs')
@@ -141,7 +165,7 @@ def main():
log.info("Loading model to the device")
exec_net = ie.load_network(network=net, device_name=args.device)
log.info("Creating infer request and starting inference")
res = exec_net.infer(inputs={input_blob: images})
res = exec_net.infer(inputs=data)
# -----------------------------------------------------------------------------------------------------
# --------------------------- Read and postprocess output ---------------------------------------------
@@ -159,8 +183,8 @@ def main():
ymin = np.int(ih * proposal[4])
xmax = np.int(iw * proposal[5])
ymax = np.int(ih * proposal[6])
print("[{},{}] element, prob = {:.6} ({},{})-({},{}) batch id : {}"\
.format(number, label, confidence, xmin, ymin, xmax, ymax, imid), end="")
print("[{},{}] element, prob = {:.6} ({},{})-({},{}) batch id : {}" \
.format(number, label, confidence, xmin, ymin, xmax, ymax, imid), end="")
if proposal[2] > 0.5:
print(" WILL BE PRINTED!")
if not imid in boxes.keys():
@@ -181,7 +205,8 @@ def main():
# -----------------------------------------------------------------------------------------------------
log.info("Execution successful\n")
log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool")
log.info(
"This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool")
if __name__ == '__main__':

View File

@@ -171,9 +171,9 @@ cdef class IECore:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# exec_net = ie.load_network(network=net, device_name="CPU", num_requsts=2)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)
# ```
cpdef ExecutableNetwork load_network(self, IENetwork network, str device_name, config=None, int num_requests=1):
cdef ExecutableNetwork exec_net = ExecutableNetwork()
@@ -197,8 +197,8 @@ cdef class IECore:
# @return An `ExecutableNetwork` object
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(network=net, device_name="MYRIAD", num_requsts=2)
# # export executable network
# exec_net.export(path_to_file_to_save)
@@ -226,8 +226,8 @@ cdef class IECore:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU")
# ```
def query_network(self, IENetwork network, str device_name, config=None):
@@ -238,12 +238,19 @@ cdef class IECore:
return c_map_to_dict(res)
## Sets a configuration for a plugin
# NOTE: When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# \note When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# @param config: a dictionary of configuration parameters as keys and their values
# @param device_name: a device name of a target plugin
# @return None
#
# Usage examples: See the `set_affinity` method of the `IENetwork` class
# Usage examples:\n
# ```python
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name="CPU")
# ```
def set_config(self, config: dict, device_name: str):
cdef map[string, string] c_config = dict_to_c_map(config)
self.impl.setConfig(c_config, device_name.encode())
@@ -316,7 +323,9 @@ cdef class IECore:
## Gets a configuration dedicated to device behavior. The method targets to extract information
# which can be set via set_config method.
# NOTE: When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# \note When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# @param device_name: A name of a device to get a config value.
# @param config_name: A config name to request.
# @return A config value corresponding to a config key.
@@ -452,8 +461,8 @@ cdef class ExecutableNetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie_core = IECore()
# net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie_core.load_network(net, device, num_requests=2)
# res = exec_net.infer({'data': img})
# res
@@ -531,9 +540,9 @@ cdef class ExecutableNetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie_core = IECore()
# exec_net = ie_core.load_network(net, device, num_requsts=2)
# net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie_core.load_network(net, device, num_requests=2)
# exec_graph = exec_net.get_exec_graph_info()
# ```
def get_exec_graph_info(self):
@@ -549,7 +558,7 @@ cdef class ExecutableNetwork:
# Usage example:\n
# ```python
# ie = IECore()
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(net, "CPU")
# exec_net.get_metric("NETWORK_NAME")
# ```
@@ -564,9 +573,9 @@ cdef class ExecutableNetwork:
# Usage example:\n
# ```python
# ie = IECore()
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(net, "CPU")
# exec_net.get_metric("DEVICE_ID")
# config = exec_net.get_config("CPU_BIND_THREAD")
# ```
def get_config(self, config_name: str):
return deref(self.impl).getConfig(config_name.encode())
@@ -576,8 +585,8 @@ cdef class ExecutableNetwork:
# @return None
#
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(network=net, device_name="MYRIAD", num_requsts=2)
# exec_net.export(path_to_file_to_save)
# ```
@@ -620,8 +629,9 @@ cdef class InferRequest:
cdef void user_callback(self, int status) with gil:
if self._py_callback:
self._py_callback(status, self._py_data)
# Set flag at first since user can call wait in callback
self._py_callback_called.set()
self._py_callback(status, self._py_data)
## Description: Sets a callback function that is called on success or failure of an asynchronous request
#
@@ -632,8 +642,8 @@ cdef class InferRequest:
# Usage example:\n
# ```python
# callback = lambda status, py_data: print("Request with id {} finished with status {}".format(py_data, status))
# net = IENetwork("./model.xml", "./model.bin")
# ie = IECore()
# net = ie.read_network(model="./model.xml", weights="./model.bin")
# exec_net = ie.load_network(net, "CPU", num_requests=4)
# for id, req in enumerate(exec_net.requests):
# req.set_completion_callback(py_callback=callback, py_data=id)
@@ -662,7 +672,7 @@ cdef class InferRequest:
#
# Usage example:\n
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].infer({input_blob: image})
# res = exec_net.requests[0].outputs['prob']
# np.flip(np.sort(np.squeeze(res)),0)
@@ -683,7 +693,7 @@ cdef class InferRequest:
#
# Usage example:\n
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].async_infer({input_blob: image})
# request_status = exec_net.requests[0].wait()
# res = exec_net.requests[0].outputs['prob']
@@ -697,7 +707,8 @@ cdef class InferRequest:
## Waits for the result to become available. Blocks until specified timeout elapses or the result
# becomes available, whichever comes first.
# NOTE: There are special values of the timeout parameter:
#
# \note There are special values of the timeout parameter:
# * 0 - Immediately returns the inference status. It does not block or interrupt execution.
# To find statuses meaning, please refer to InferenceEngine::StatusCode in Inference Engine C++ documentation
# * -1 - Waits until inference result becomes available (default value)
@@ -714,6 +725,11 @@ cdef class InferRequest:
if status != StatusCode.RESULT_NOT_READY:
return status
if not self._py_callback_called.is_set():
if timeout == WaitMode.RESULT_READY:
timeout = None
if timeout is not None:
# Convert milliseconds to seconds
timeout = float(timeout)/1000
if not self._py_callback_called.wait(timeout):
return StatusCode.REQUEST_BUSY
return StatusCode.OK
@@ -724,12 +740,14 @@ cdef class InferRequest:
return deref(self.impl).wait(<int64_t> timeout)
## Queries performance measures per layer to get feedback of what is the most time consuming layer.
# NOTE: Performance counters data and format depends on the plugin
#
# \note Performance counters data and format depends on the plugin
#
# @return Dictionary containing per-layer execution information.
#
# Usage example:
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].infer({input_blob: image})
# exec_net.requests[0].get_perf_counts()
# {'Conv2D': {'exec_type': 'jit_avx2_1x1',
@@ -780,18 +798,20 @@ cdef class InferRequest:
## Sets new batch size for certain infer request when dynamic batching is enabled in executable network
# that created this request.
# NOTE: Support of dynamic batch size depends on the target plugin.
#
# \note Support of dynamic batch size depends on the target plugin.
#
# @param size: New batch size to be used by all the following inference calls for this request
# @return None
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# # Set max batch size
# net.batch = 10
# plugin.set_config({"DYN_BATCH_ENABLED": "YES"})
# exec_net = plugin.load(network=net)
# ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name=device)
# exec_net = ie.load_network(network=net, device_name=device)
# # Set batch size for certain network.
# # NOTE: Input data shape will not be changed, but will be used partially in inference which increases performance
# exec_net.requests[0].set_batch(2)
@@ -855,7 +875,11 @@ cdef class IENetLayer:
def type(self):
return deref(self._ptr).type.decode()
## Layer base operating precision. Provides getter and setter interfaces.
## \note This property is deprecated.
# Please, use out_data property to access DataPtr objects for all output ports, which contains full
# information about layer's output data including precision.
#
# Layer base operating precision. Provides getter and setter interfaces.
@property
def precision(self):
warnings.filterwarnings("always", category=DeprecationWarning)
@@ -874,8 +898,8 @@ cdef class IENetLayer:
# The affinity attribute provides getter and setter interfaces, so the layer affinity can be modified directly.
# For example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU")
# layers = net.layers
# for layer, device in layers_map.items():
@@ -922,8 +946,10 @@ cdef class IENetLayer:
input_to_list.append(deref(layer.second).name.decode())
return input_to_list
## Deprecated: use out_data property to access DataPtr objects for all output ports, which contains full
## \note This property is deprecated.
# Please, use out_data property to access DataPtr objects for all output ports, which contains full
# information about layer's output data including layout
#
# Returns the layout of the layer output data on 1st port
@property
def layout(self):
@@ -936,8 +962,10 @@ cdef class IENetLayer:
cdef C.DataPtr c_input = deref(self._ptr).outData[0]
return layout_int_to_str_map[deref(c_input).getLayout()]
## Deprecated: use out_data property to access DataPtr objects for all output ports, which contains full
## \note This property is deprecated.
# Please, use out_data property to access DataPtr objects for all output ports, which contains full
# information about layer's output data including shape
#
# Return the list of dimension of the layer output data on 1st port
@property
def shape(self):
@@ -988,7 +1016,10 @@ cdef class IENetLayer:
weights_buffer.reset(blob.second)
blobs_map[blob.first.decode()] = weights_buffer.to_numpy()
return blobs_map
## Dictionary with layer weights, biases or custom blobs if any
## \note This property is deprecated.
# Please use blobs property instead.
#
# Dictionary with layer weights, biases or custom blobs if any
@property
def weights(self):
warnings.filterwarnings("always", category=DeprecationWarning)
@@ -1003,6 +1034,9 @@ cdef class IENetLayer:
cdef class IENetwork:
## Class constructor
#
# \note Reading networks using IENetwork constructor is deprecated.
# Please, use IECore.read_network() method instead.
#
# @param model: A `.xml` file of the IR or PyCapsule containing smart pointer to nGraph function.
# In case of passing a `.xml` file attribute value can be a string path or bytes with file content
# depending on `init_from_buffer` attribute value
@@ -1100,8 +1134,9 @@ cdef class IENetwork:
## Batch size of the network. Provides getter and setter interfaces to get and modify the
# network batch size. For example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# print(et.batch_size)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# print(net.batch_size)
# net.batch_size = 4
# print(net.batch_size)
# print(net.inputs['data'].shape)
@@ -1109,7 +1144,9 @@ cdef class IENetwork:
@property
def batch_size(self):
return self.impl.getBatch()
## Deprecated: network precision does not make sence, use precision on egdes.
## \note This property is deprecated:
# network precision does not make sense, use precision on edges.
#
# Precision of the network
@property
def precision(self):
@@ -1139,13 +1176,16 @@ cdef class IENetwork:
layers[deref(l).name.decode()] = net_l
return layers
## Deprecated: new Calibration Tool doesn't generate statistics
## \note This property is deprecated.
# New Calibration Tool doesn't generate statistics
#
# Returns `LayersStatsMap` object containing dictionary that maps network layer names to calibration statistics
# represented by `LayerStats` objects.
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
#       net.stats.update({"conv1_2d" : LayerStats(min=(-25, -1, 0), max=(63, 124, 70)),
#                         "conv2_2d" : LayerStats(min=(-5, -1, 0, 1, -7, 2), max=(63, 124, 70, 174, 99, 106))
# })
@@ -1163,7 +1203,7 @@ cdef class IENetwork:
max=tuple(it.second["max".encode()]))
return py_stats_map
## NOTE: The function is deprecated. Please use the `IENetwork()` class constructor
## \note The function is deprecated. Please use the `IENetwork()` class constructor
# to create valid instance of `IENetwork`.
#
# Reads the model from the `.xml` and `.bin` files of the IR.
@@ -1192,7 +1232,8 @@ cdef class IENetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
#  net.add_outputs(['conv5_1', 'conv2_1', ('split_2', 1)])
# ```
def add_outputs(self, outputs):
@@ -1216,14 +1257,16 @@ cdef class IENetwork:
#
# Usage example:
# ```python
# net = IENetwork(model=path_to_model, weights=path_to_weights)
# ie = IECore()
# net = ie.read_network(model=path_to_xml, weights=path_to_bin)
# net.serialize(path_to_xml, path_to_bin)
# ```
def serialize(self, path_to_xml, path_to_bin: str = ""):
self.impl.serialize(path_to_xml.encode(), path_to_bin.encode())
## Reshapes the network to change spatial dimensions, batch size, or any dimension.
# NOTE: Before using this method, make sure that the target shape is applicable for the network.
#
# \note Before using this method, make sure that the target shape is applicable for the network.
# Changing the network shape to an arbitrary value may lead to unpredictable behaviour.
#
# @param input_shapes: A dictionary that maps input layer names to tuples with the target shape
@@ -1231,10 +1274,11 @@ cdef class IENetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# input_layer = next(iter(net.inputs))
# n, c, h, w = net.inputs[input_layer]
# net.reshape({input_layer: (n, c, h*2, w*2)}]
# net.reshape({input_layer: (n, c, h*2, w*2)})
# ```
def reshape(self, input_shapes: dict):
cdef map[string, vector[size_t]] c_input_shapes;
@@ -1255,9 +1299,11 @@ cdef class IENetwork:
# return self.impl.getFunction()
## This class is the main plugin interface and serves to initialize and configure the plugin.
#
#\note This class is deprecated: Use IECore instead
#
cdef class IEPlugin:
## Deprecated: Use IECore instead
# Class constructor
## Class constructor
#
# @param device: Target device name. Supported devices: CPU, GPU, FPGA, MYRIAD, HETERO, MULTI
# @param plugin_dirs: List of paths to plugin directories

View File

@@ -437,10 +437,10 @@ PyObject *InferenceEnginePython::IEExecNetwork::getMetric(const std::string &met
return parse_parameter(parameter);
}
PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &metric_name) {
PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &name) {
InferenceEngine::Parameter parameter;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->GetMetric(metric_name, parameter, &response));
IE_CHECK_CALL(actual->GetConfig(name, parameter, &response));
return parse_parameter(parameter);
}

View File

@@ -136,7 +136,7 @@ struct IEExecNetwork {
std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
PyObject* getMetric(const std::string & metric_name);
PyObject* getConfig(const std::string & metric_name);
PyObject* getConfig(const std::string & name);
int wait(int num_requests, int64_t timeout);
int getIdleRequestId();

View File

@@ -0,0 +1,20 @@
import pathlib
import os
import pytest
test_root = pathlib.Path(__file__).parent
@pytest.fixture(scope='session')
def models_dir():
return test_root / 'test_data' / 'models'
@pytest.fixture(scope='session')
def images_dir():
return test_root / 'test_data' / 'images'
@pytest.fixture(scope='session')
def device():
    """Target device for the test session.

    Taken from the TEST_DEVICE environment variable; falls back to "CPU"
    when the variable is unset or empty (the original double lookup with a
    conditional expression had exactly this semantics).
    """
    return os.environ.get("TEST_DEVICE") or "CPU"

View File

@@ -0,0 +1,55 @@
import os
import pytest
from openvino.inference_engine import CDataPtr, IECore
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')
def test_name(device):
    """The output DataPtr of an executable network exposes the layer name."""
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    executable = core.load_network(network, device, num_requests=5)
    out_data = executable.outputs['fc_out']
    assert isinstance(out_data, CDataPtr)
    assert out_data.name == "fc_out", "Incorrect name for layer 'fc_out'"
def test_precision(device):
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie.load_network(net, device, num_requests=5)
assert isinstance(exec_net.outputs['fc_out'], CDataPtr)
assert exec_net.outputs['fc_out'].precision == "FP32", "Incorrect precision for layer 'fc_out'"
def test_no_precision_setter(device):
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie.load_network(net, device, num_requests=5)
with pytest.raises(AttributeError) as e:
exec_net.outputs['fc_out'].precision = "I8"
assert "attribute 'precision' of 'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value)
def test_layout(device):
    """Output DataPtr of the loaded network reports layout "NC" for 'fc_out'."""
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie.load_network(net, device, num_requests=5)
    # Bug fix: the assertion message had an unbalanced quote ('fc_out).
    assert exec_net.outputs['fc_out'].layout == "NC", "Incorrect layout for layer 'fc_out'"
def test_no_layout_setter(device):
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie.load_network(net, device, num_requests=5)
with pytest.raises(AttributeError) as e:
exec_net.outputs['fc_out'].layout = "CN"
assert "attribute 'layout' of 'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value)
def test_initialized(device):
    """Output DataPtr of the loaded network reports itself as initialized."""
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie.load_network(net, device, num_requests=5)
    # Bug fix: the assertion message had an unbalanced quote ('fc_out).
    assert exec_net.outputs['fc_out'].initialized, "Incorrect value for initialized property for layer 'fc_out'"

View File

@@ -0,0 +1,42 @@
import os
import pytest
from openvino.inference_engine import IECore, IENetLayer, DataPtr
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')
def layer_out_data():
    # Helper (deliberately not a pytest fixture): reads the sample network
    # and returns the DataPtr of the first output port of layer '19'.
    # NOTE(review): every call re-reads the model from disk, so each test
    # below gets an independent network object.
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    return net.layers['19'].out_data[0]
def test_name():
assert layer_out_data().name == "19", "Incorrect name for layer '19'"
def test_precision():
assert layer_out_data().precision == "FP32", "Incorrect precision for layer '19'"
def test_precision_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.layers['19'].out_data[0].precision = "I8"
assert net.layers['19'].out_data[0].precision == "I8", "Incorrect precision for layer '19'"
def test_incorrect_precision_setter():
with pytest.raises(ValueError) as e:
layer_out_data().precision = "123"
assert "Unsupported precision 123! List of supported precisions:" in str(e.value)
def test_layout():
    """First output port of layer '19' has layout "NCHW"."""
    # Bug fix: the assertion message had an unbalanced quote ('19).
    assert layer_out_data().layout == "NCHW", "Incorrect layout for layer '19'"
def test_initialized():
    """First output port of layer '19' reports itself as initialized."""
    # Bug fix: the assertion message had an unbalanced quote ('19).
    assert layer_out_data().initialized, "Incorrect value for initialized property for layer '19'"

View File

@@ -0,0 +1,283 @@
import numpy as np
import os
import pytest
from openvino.inference_engine import ie_api as ie
if os.environ.get("TEST_DEVICE") != "MYRIAD":
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')
else:
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.bin')
IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'test_data', 'images', 'cat3.bmp')
# # computed with caffe
# REF_IMAGE_RESULT = np.array([[34.6295814514, 18.9434795380, 43.2669448853, 0.4420155287, -108.4574050903,
# -314.8240051270, 231.0738067627, -106.3504943848, 108.5880966187, 92.7254943848]])
def read_image():
    # Loads the test image and converts it into the NCHW float blob the
    # sample network expects. Raises FileNotFoundError if the image is missing.
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(IMAGE_PATH)
    if image is None:
        raise FileNotFoundError("Input image not found")
    # NOTE(review): cv2.resize takes dsize as (width, height); (h, w) is
    # passed here, which only works because h == w == 32 — confirm before
    # changing to a non-square shape. Division by 255 scales pixels to [0, 1].
    image = cv2.resize(image, (h, w)) / 255
    # HWC -> CHW, then add the leading batch dimension.
    image = image.transpose((2, 0, 1))
    image = image.reshape((n, c, h, w))
    return image
def test_infer(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device)
img = read_image()
res = exec_net.infer({'data': img})
assert np.argmax(res['fc_out'][0]) == 3
del exec_net
del ie_core
def test_infer_net_from_buffer(device):
    """A network read from in-memory buffers infers identically to one read from files."""
    ie_core = ie.IECore()
    # Renamed locals: 'bin' and 'xml' shadowed the Python built-in / stdlib names.
    with open(SAMPLENET_BIN, 'rb') as f:
        bin_data = f.read()
    with open(SAMPLENET_XML, 'rb') as f:
        xml_data = f.read()
    net = ie_core.read_network(model=xml_data, weights=bin_data, init_from_buffer=True)
    net2 = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    exec_net = ie_core.load_network(net, device)
    exec_net2 = ie_core.load_network(net2, device)
    img = read_image()
    res = exec_net.infer({'data': img})
    res2 = exec_net2.infer({'data': img})
    del exec_net
    del exec_net2
    del ie_core
    assert np.allclose(res['fc_out'], res2['fc_out'], atol=1E-4, rtol=1E-4)
def test_infer_wrong_input_name(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device)
img = read_image()
with pytest.raises(AssertionError) as e:
exec_net.infer({'_data_': img})
assert "No input with name _data_ found in network" in str(e.value)
del exec_net
del ie_core
def test_inputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device, num_requests=5)
assert len(exec_net.inputs) == 1
assert "data" in exec_net.inputs
assert isinstance(exec_net.inputs['data'], ie.DataPtr)
del exec_net
del ie_core
def test_outputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device, num_requests=5)
assert len(exec_net.outputs) == 1
assert "fc_out" in exec_net.outputs
assert isinstance(exec_net.outputs['fc_out'], ie.CDataPtr)
del exec_net
del ie_core
def test_access_requests(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device, num_requests=5)
assert len(exec_net.requests) == 5
assert isinstance(exec_net.requests[0], ie.InferRequest)
del exec_net
del ie_core
def test_async_infer_one_req(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request_handler = exec_net.start_async(request_id=0, inputs={'data': img})
request_handler.wait()
res = request_handler.outputs['fc_out']
assert np.argmax(res[0]) == 3
del exec_net
del ie_core
def test_async_infer_many_req(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device, num_requests=5)
img = read_image()
for id in range(5):
request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
request_handler.wait()
res = request_handler.outputs['fc_out']
assert np.argmax(res[0]) == 3
del exec_net
del ie_core
def test_async_infer_many_req_get_idle(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
num_requests = 5
exec_net = ie_core.load_network(net, device, num_requests=num_requests)
img = read_image()
check_id = set()
for id in range(2*num_requests):
request_id = exec_net.get_idle_request_id()
if request_id == -1:
status = exec_net.wait(num_requests=1, timeout=ie.WaitMode.RESULT_READY)
assert(status == ie.StatusCode.OK)
request_id = exec_net.get_idle_request_id()
assert(request_id >= 0)
request_handler = exec_net.start_async(request_id=request_id, inputs={'data': img})
check_id.add(request_id)
status = exec_net.wait(timeout=ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
for id in range(num_requests):
if id in check_id:
assert np.argmax(exec_net.requests[id].outputs['fc_out'][0]) == 3
del exec_net
del ie_core
def test_wait_before_start(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
num_requests = 5
exec_net = ie_core.load_network(net, device, num_requests=num_requests)
img = read_image()
requests = exec_net.requests
for id in range(num_requests):
status = requests[id].wait()
assert status == ie.StatusCode.INFER_NOT_STARTED
request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
status = requests[id].wait()
assert status == ie.StatusCode.OK
assert np.argmax(request_handler.outputs['fc_out'][0]) == 3
del exec_net
del ie_core
def test_wrong_request_id(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
with pytest.raises(ValueError) as e:
exec_net.start_async(request_id=20, inputs={'data': img})
assert "Incorrect request_id specified!" in str(e.value)
del exec_net
del ie_core
def test_wrong_num_requests(device):
with pytest.raises(ValueError) as e:
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
ie_core.load_network(net, device, num_requests=-1)
assert "Incorrect number of requests specified: -1. Expected positive integer number or zero for auto detection" \
in str(e.value)
del ie_core
def test_wrong_num_requests_core(device):
with pytest.raises(ValueError) as e:
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device, num_requests=-1)
assert "Incorrect number of requests specified: -1. Expected positive integer number or zero for auto detection" \
in str(e.value)
del ie_core
def test_plugin_accessible_after_deletion(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device)
del ie_core
img = read_image()
res = exec_net.infer({'data': img})
assert np.argmax(res['fc_out'][0]) == 3
del exec_net
def test_exec_graph(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device)
img = read_image()
res = exec_net.infer({'data': img})
exec_graph = exec_net.get_exec_graph_info()
exec_graph_file = 'exec_graph.xml'
exec_graph.serialize(exec_graph_file)
assert os.path.exists(exec_graph_file)
os.remove(exec_graph_file)
del exec_net
del exec_graph
del ie_core
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "MYRIAD", reason="Device specific test. "
"Only MYRIAD plugin implements network export")
def test_export_import():
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, "MYRIAD")
exported_net_file = 'exported_model.bin'
exec_net.export(exported_net_file)
assert os.path.exists(exported_net_file)
exec_net = ie_core.import_network(exported_net_file, "MYRIAD")
os.remove(exported_net_file)
img = read_image()
res = exec_net.infer({'data': img})
assert np.argmax(res['fc_out'][0]) == 3
del exec_net
del ie_core
def test_multi_out_data(device):
    # Regression test CVS-23965
    # Check that CDataPtr for all output layers not copied between outputs map items
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.add_outputs(['28'])
    exec_net = ie_core.load_network(net, device)
    assert "fc_out" in exec_net.outputs and "28" in exec_net.outputs
    assert isinstance(exec_net.outputs["fc_out"], ie.CDataPtr)
    assert isinstance(exec_net.outputs["28"], ie.CDataPtr)
    assert exec_net.outputs["fc_out"].name == "fc_out" and exec_net.outputs["fc_out"].shape == [1, 10]
    assert exec_net.outputs["28"].name == "28" and exec_net.outputs["28"].shape == [1, 5184]
    # Release the executable network before the core, matching the sibling
    # tests in this module; also removed a stray trailing `pass`.
    del exec_net
    del ie_core
def test_get_metric(device):
    """NETWORK_NAME metric of a loaded executable network matches the model name."""
    ie_core = ie.IECore()
    net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    # Consistency fix: load on the device under test instead of hard-coded
    # "CPU", so this test honours the TEST_DEVICE fixture like its siblings.
    exec_net = ie_core.load_network(net, device)
    network_name = exec_net.get_metric("NETWORK_NAME")
    assert network_name == "test_model"
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_get_config(device):
ie_core = ie.IECore()
net = ie_core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie_core.load_network(net, device)
config = exec_net.get_config("PERF_COUNT")
assert config == "NO"

View File

@@ -0,0 +1,182 @@
import os
import pytest
from sys import platform
import numpy as np
from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')
plugins_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'plugins.xml')
plugins_win_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'plugins_windows.xml')
plugins_osx_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'plugins_macos.xml')
def test_init_ie_core_no_cfg():
ie = IECore()
assert isinstance(ie, IECore)
def test_init_ie_core_with_cfg():
ie = IECore(plugins_xml)
assert isinstance(ie, IECore)
def test_get_version(device):
    """ie.get_versions(device) returns a per-device dict with all version fields."""
    ie = IECore()
    version = ie.get_versions(device)
    assert isinstance(version, dict), "Returned version must be a dictionary"
    # Bug fix: the '{}' placeholder was never filled — .format(device) was missing.
    assert device in version, "{} plugin version wasn't found in versions".format(device)
    assert hasattr(version[device], "major"), "Returned version has no field 'major'"
    assert hasattr(version[device], "minor"), "Returned version has no field 'minor'"
    assert hasattr(version[device], "description"), "Returned version has no field 'description'"
    assert hasattr(version[device], "build_number"), "Returned version has no field 'build_number'"
def test_load_network(device):
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie.load_network(net, device)
assert isinstance(exec_net, ExecutableNetwork)
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_load_network_wrong_device():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
with pytest.raises(RuntimeError) as e:
ie.load_network(net, "BLA")
assert 'Device with "BLA" name is not registered in the InferenceEngine' in str(e.value)
def test_query_network(device):
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
query_res = ie.query_network(net, device)
assert net.layers.keys() == query_res.keys(), "Not all network layers present in query_network results"
assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers"
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_register_plugin():
ie = IECore()
ie.register_plugin("MKLDNNPlugin", "BLA")
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie.load_network(net, "BLA")
assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to the registered plugin with name 'BLA'"
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_register_plugins():
ie = IECore()
if platform == "linux" or platform == "linux2":
ie.register_plugins(plugins_xml)
elif platform == "darwin":
ie.register_plugins(plugins_osx_xml)
elif platform == "win32":
ie.register_plugins(plugins_win_xml)
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = ie.load_network(net, "CUSTOM")
assert isinstance(exec_net,
ExecutableNetwork), "Cannot load the network to the registered plugin with name 'CUSTOM' " \
"registred in the XML file"
@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well")
def test_unregister_plugin(device):
ie = IECore()
ie.unregister_plugin(device)
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
with pytest.raises(RuntimeError) as e:
ie.load_network(net, device)
assert 'Device with "{}" name is not registered in the InferenceEngine'.format(device) in str(e.value)
@pytest.mark.skip(reason="Need to figure out segmentation fault cause.")
def test_available_devices(device):
ie = IECore()
devices = ie.available_devices
assert device in devices, "Current device '{}' is not listed in available devices '{}'".format(device,
', '.join(devices))
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_list_of_str():
ie = IECore()
param = ie.get_metric("CPU", "OPTIMIZATION_CAPABILITIES")
assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \
"metric must be a list but {} is returned".format(type(param))
assert all(isinstance(v, str) for v in param), "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' " \
"metric are strings!"
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_tuple_of_two_ints():
ie = IECore()
param = ie.get_metric("CPU", "RANGE_FOR_STREAMS")
assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \
"metric must be tuple but {} is returned".format(type(param))
assert all(isinstance(v, int) for v in param), "Not all of the parameter values for 'RANGE_FOR_STREAMS' " \
"metric are integers!"
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_tuple_of_three_ints():
ie = IECore()
param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \
"metric must be tuple but {} is returned".format(type(param))
assert all(isinstance(v, int) for v in param), "Not all of the parameter values for " \
"'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!"
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
def test_get_metric_str():
ie = IECore()
param = ie.get_metric("CPU", "FULL_DEVICE_NAME")
assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \
"metric must be string but {} is returned".format(type(param))
def test_read_network_from_xml():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net, IENetwork)
def test_incorrect_xml():
ie = IECore()
with pytest.raises(Exception) as e:
ie.read_network(model="./model.xml", weights=SAMPLENET_BIN)
assert "Path to the model ./model.xml doesn't exists or it's a directory" in str(e.value)
def test_incorrect_bin():
ie = IECore()
with pytest.raises(Exception) as e:
ie.read_network(model=SAMPLENET_XML, weights="./model.bin")
assert "Path to the weights ./model.bin doesn't exists or it's a directory" in str(e.value)
def test_read_net_from_buffer():
    """IECore.read_network accepts model and weights passed as in-memory buffers."""
    ie = IECore()
    # Renamed locals: 'bin' and 'xml' shadowed the Python built-in / stdlib names.
    with open(SAMPLENET_BIN, 'rb') as f:
        bin_data = f.read()
    with open(SAMPLENET_XML, 'rb') as f:
        xml_data = f.read()
    net = ie.read_network(model=xml_data, weights=bin_data, init_from_buffer=True)
    assert isinstance(net, IENetwork)
def test_net_from_buffer_valid():
ie = IECore()
with open(SAMPLENET_BIN, 'rb') as f:
bin = f.read()
with open(SAMPLENET_XML, 'rb') as f:
xml = f.read()
net = ie.read_network(model=xml, weights=bin, init_from_buffer=True)
net2 = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
for name, layer in net.layers.items():
for blob, data in layer.blobs.items():
assert np.allclose(data, net2.layers[name].blobs[blob]), \
"Incorrect weights for layer {} and blob {}".format(name, blob)

View File

@@ -0,0 +1,128 @@
import warnings
import os
import numpy
from openvino.inference_engine import DataPtr, IECore
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')
def test_name():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].name == "19"
def test_type():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].type == "Convolution"
def test_precision_getter(recwarn):
warnings.simplefilter("always")
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].precision == "FP32"
assert len(recwarn) == 1
assert recwarn.pop(DeprecationWarning)
def test_precision_setter(recwarn):
warnings.simplefilter("always")
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.layers['19'].precision = "I8"
assert net.layers['19'].precision == "I8"
assert len(recwarn) == 1
assert recwarn.pop(DeprecationWarning)
def test_affinuty_getter():
    # Affinity of a freshly read layer defaults to the empty string.
    # NOTE(review): function name has a typo ("affinuty" -> "affinity");
    # kept as-is here because pytest selects tests by name (-k / node ids).
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.layers['19'].affinity == ""
def test_affinity_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.layers['19'].affinity = "CPU"
assert net.layers['19'].affinity == "CPU"
def test_blobs():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net.layers['19'].blobs["biases"], numpy.ndarray)
assert isinstance(net.layers['19'].blobs["weights"], numpy.ndarray)
assert net.layers['19'].blobs["biases"].size != 0
assert net.layers['19'].blobs["weights"].size != 0
def test_weights(recwarn):
warnings.simplefilter("always")
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net.layers['19'].weights["biases"], numpy.ndarray)
assert isinstance(net.layers['19'].weights["weights"], numpy.ndarray)
assert net.layers['19'].weights["biases"].size != 0
assert net.layers['19'].weights["weights"].size != 0
assert len(recwarn) == 4
assert recwarn.pop(DeprecationWarning)
def test_params_getter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].params == {'dilations': '1,1', 'group': '1', 'kernel': '5,5', 'output': '16', 'pads_begin': '2,2',
'pads_end': '2,2', 'strides': '1,1'}
def test_params_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
params = net.layers['19'].params
params.update({'PrimitivesPriority': 'cpu:ref_any'})
net.layers['19'].params = params
assert net.layers['19'].params == {'dilations': '1,1', 'group': '1', 'kernel': '5,5', 'output': '16',
'pads_begin': '2,2',
'pads_end': '2,2', 'strides': '1,1', 'PrimitivesPriority': 'cpu:ref_any'}
def test_layer_parents():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].parents == ['data']
def test_layer_children():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].children == ['21']
def test_layout(recwarn):
warnings.simplefilter("always")
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].layout == 'NCHW'
assert len(recwarn) == 1
assert recwarn.pop(DeprecationWarning)
def test_shape(recwarn):
warnings.simplefilter("always")
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.layers['19'].shape == [1, 16, 32, 32]
assert len(recwarn) == 1
def test_out_data():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net.layers['19'].out_data[0], DataPtr)
def test_in_data():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net.layers['19'].in_data[0], DataPtr)

View File

@@ -0,0 +1,292 @@
import os
import pytest
import warnings
import numpy as np
from openvino.inference_engine import IENetwork, IENetLayer, DataPtr, LayersStatsMap, LayerStats, IECore
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')
NGRPAPH_COMPATIBLE_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_ngraph_format.xml')
NGRPAPH_COMPATIBLE_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_ngraph_format.bin')
def test_read_from_ir_deprecated():
with warnings.catch_warnings(record=True) as w:
net = IENetwork.from_ir(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net, IENetwork)
assert len(w) == 2
for warns in w:
assert issubclass(warns.category, DeprecationWarning)
assert "from_ir() method of IENetwork is deprecated." in str(w[0].message)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[1].message)
def test_create_ie_network_deprecated():
with warnings.catch_warnings(record=True) as w:
net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net, IENetwork)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_incorrect_xml_deprecated():
with warnings.catch_warnings(record=True) as w:
with pytest.raises(Exception) as e:
IENetwork(model="./model.xml", weights=SAMPLENET_BIN)
assert "Path to the model ./model.xml doesn't exists or it's a directory" in str(e.value)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_incorrect_bin_deprecated():
with warnings.catch_warnings(record=True) as w:
with pytest.raises(Exception) as e:
IENetwork(model=SAMPLENET_XML, weights="./model.bin")
assert "Path to the weights ./model.bin doesn't exists or it's a directory" in str(e.value)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "Reading network using constructor is deprecated. " \
"Please, use IECore.read_network() method instead" in str(w[0].message)
def test_name():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.name == "model"
def test_inputs():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net.inputs['data'], DataPtr)
assert net.inputs['data'].layout == "NCHW"
assert net.inputs['data'].precision == "FP32"
assert net.inputs['data'].shape == [1, 3, 32, 32]
def test_input_precision_setter():
    """Setting an input's precision must be reflected by the getter.

    Bug fix: the original body exercised `layout` (swapped with the sibling
    test_input_layout_setter), leaving the precision setter untested here.
    """
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.inputs['data'].precision == "FP32"
    net.inputs['data'].precision = "I8"
    assert net.inputs['data'].precision == "I8"
def test_input_layout_setter():
    """Setting an input's layout must be reflected by the getter.

    Bug fix: the original body exercised `precision` (swapped with the sibling
    test_input_precision_setter), leaving the layout setter untested here.
    """
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    assert net.inputs['data'].layout == "NCHW"
    net.inputs['data'].layout = "NHWC"
    assert net.inputs['data'].layout == "NHWC"
def test_input_unsupported_precision_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
with pytest.raises(ValueError) as e:
net.inputs['data'].precision = "BLA"
assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
def test_input_unsupported_layout_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
with pytest.raises(ValueError) as e:
net.inputs['data'].layout = "BLA"
assert "Unsupported layout BLA! List of supported layouts: " in str(e.value)
def test_outputs():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert isinstance(net.outputs['fc3'], DataPtr)
assert net.outputs['fc3'].layout == "NC"
assert net.outputs['fc3'].precision == "FP32"
assert net.outputs['fc3'].shape == [1, 10]
def test_output_precision_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.outputs['fc3'].precision == "FP32"
net.outputs['fc3'].precision = "I8"
assert net.outputs['fc3'].precision == "I8"
def test_output_unsupported_precision_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
with pytest.raises(ValueError) as e:
net.outputs['fc3'].precision = "BLA"
assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
def test_add_ouputs():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.add_outputs('fc1')
net.add_outputs(['fc2'])
assert sorted(net.outputs) == ['fc1', 'fc2', 'fc3']
def test_add_outputs_with_port():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.add_outputs(('fc1', 0))
net.add_outputs([('fc2', 0)])
assert sorted(net.outputs) == ['fc1', 'fc2', 'fc3']
def test_add_outputs_with_and_without_port():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.add_outputs('fc1')
net.add_outputs([('fc2', 0)])
assert sorted(net.outputs) == ['fc1', 'fc2', 'fc3']
def test_batch_size_getter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
assert net.batch_size == 1
def test_batch_size_setter():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.batch_size = 4
assert net.batch_size == 4
assert net.inputs['data'].shape == [4, 3, 32, 32]
def test_batch_size_after_reshape():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.reshape({'data' : [4, 3, 32, 32]})
assert net.batch_size == 4
assert net.inputs['data'].shape == [4, 3, 32, 32]
net.reshape({'data' : [8, 3, 32, 32]})
assert net.batch_size == 8
assert net.inputs['data'].shape == [8, 3, 32, 32]
def test_layers():
    """Layers iterate in topological order and are wrapped as IENetLayer."""
    core = IECore()
    network = core.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    expected_order = ['data', 'conv1', 'relu_conv1', 'pool1', 'conv2',
                      'relu_conv2', 'pool2', 'fc1', 'relu_fc1', 'fc2', 'relu_fc2', 'fc3']
    assert list(network.layers) == expected_order
    assert isinstance(network.layers['conv1'], IENetLayer)
def test_get_stats_deprecated():
with warnings.catch_warnings(record=True) as w:
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
stats = net.stats
assert isinstance(stats, LayersStatsMap)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "stats property of IENetwork is deprecated." in str(w[-1].message)
def test_set_new_stats_deprecated():
with warnings.catch_warnings(record=True) as w:
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
new_stats = LayerStats(min=(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0),
max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
stats = net.stats
stats.update({"fc3": new_stats})
assert net.stats["fc3"].min == new_stats.min
assert net.stats["fc3"].max == new_stats.max
assert len(w) == 3
for warns in w:
assert issubclass(warns.category, DeprecationWarning)
assert "stats property of IENetwork is deprecated." in str(warns.message)
def test_update_stats_deprecated():
with warnings.catch_warnings(record=True) as w:
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
initial_stats = LayerStats(min=(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0),
max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
stats = net.stats
stats.update({"fc3": initial_stats})
new_stats = LayerStats(min=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0),
max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
stats.update({"fc3": new_stats})
assert net.stats["fc3"].min == new_stats.min
assert net.stats["fc3"].max == new_stats.max
assert len(w) == 3
for warns in w:
assert issubclass(warns.category, DeprecationWarning)
assert "stats property of IENetwork is deprecated." in str(warns.message)
def test_serialize():
    """Round-trip a network through serialize()/read_network and compare layer sets.

    Robustness fix: removal of the serialized files is done in a `finally`
    block so a failing read/assert no longer leaks serialized_net.* into the
    working directory (the original unconditionally removed them only after
    the assertion succeeded).
    """
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.serialize("./serialized_net.xml", "./serialized_net.bin")
    try:
        serialized_net = ie.read_network(model="./serialized_net.xml", weights="./serialized_net.bin")
        assert net.layers.keys() == serialized_net.layers.keys()
    finally:
        for path in ("./serialized_net.xml", "./serialized_net.bin"):
            if os.path.exists(path):
                os.remove(path)
def test_reshape():
ie = IECore()
net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
net.reshape({"data": (2, 3, 32, 32)})
def test_read_net_from_buffer_deprecated():
    """Constructing IENetwork from in-memory buffers still warns about deprecation."""
    with warnings.catch_warnings(record=True) as caught:
        # Renamed locals: the original shadowed the builtin `bin`.
        with open(SAMPLENET_BIN, 'rb') as f:
            weights_buffer = f.read()
        with open(SAMPLENET_XML, 'rb') as f:
            model_buffer = f.read()
        net = IENetwork(model=model_buffer, weights=weights_buffer, init_from_buffer=True)
        assert isinstance(net, IENetwork)
        assert len(caught) == 1
        assert issubclass(caught[-1].category, DeprecationWarning)
        assert "Reading network using constructor is deprecated. " \
               "Please, use IECore.read_network() method instead" in str(caught[0].message)
def test_net_from_buffer_valid_deprecated():
    """A network built from buffers must carry the same blobs as one read from files."""
    with warnings.catch_warnings(record=True) as caught:
        with open(SAMPLENET_BIN, 'rb') as f:
            weights_buffer = f.read()
        with open(SAMPLENET_XML, 'rb') as f:
            model_buffer = f.read()
        net_from_buffer = IENetwork(model=model_buffer, weights=weights_buffer, init_from_buffer=True)
        net_from_files = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
        # Compare every blob of every layer between the two construction paths.
        for layer_name, layer in net_from_buffer.layers.items():
            reference = net_from_files.layers[layer_name]
            for blob_name, blob_data in layer.blobs.items():
                assert np.allclose(blob_data, reference.blobs[blob_name]), \
                    "Incorrect weights for layer {} and blob {}".format(layer_name, blob_name)
        # Both IENetwork constructors above warn once each.
        assert len(caught) == 2
        for warning in caught:
            assert issubclass(warning.category, DeprecationWarning)
            assert "Reading network using constructor is deprecated. " \
                   "Please, use IECore.read_network() method instead" in str(warning.message)
def test_multi_out_data():
    # Regression test CVS-23965
    # Check that DataPtr for all output layers not copied between outputs map items
    ie = IECore()
    net = ie.read_network(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
    net.add_outputs(['fc2'])
    assert "fc2" in net.outputs and "fc3" in net.outputs
    assert isinstance(net.outputs["fc2"], DataPtr)
    assert isinstance(net.outputs["fc3"], DataPtr)
    assert net.outputs["fc2"].name == "fc2" and net.outputs["fc2"].shape == [1, 84]
    assert net.outputs["fc3"].name == "fc3" and net.outputs["fc3"].shape == [1, 10]
    # Fix: removed a stray trailing `pass` — dead code after the assertions.

View File

@@ -0,0 +1,87 @@
import os
import pytest
from openvino.inference_engine import IENetwork, IEPlugin, ExecutableNetwork
SAMPLENET_XML = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
SAMPLENET_BIN = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')
def test_init_plugin(device):
plugin = IEPlugin(device, None)
assert isinstance(plugin, IEPlugin)
def test_device_attr(device):
plugin = IEPlugin(device, None)
assert plugin.device == device
def test_get_version(device):
plugin = IEPlugin(device, None)
assert not len(plugin.version) == 0
def test_load_network(device):
plugin = IEPlugin(device, None)
net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = plugin.load(net)
assert isinstance(exec_net, ExecutableNetwork)
def test_load_network_many_requests(device):
plugin = IEPlugin(device)
net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
exec_net = plugin.load(net, num_requests=5)
assert len(exec_net.requests) == 5
def test_get_supported_layers(device):
plugin = IEPlugin(device)
net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
supported = plugin.get_supported_layers(net)
layers = ['conv1', 'conv2', 'data', 'fc1', 'fc2', 'fc3', 'pool1', 'pool2',
'relu_conv1', 'relu_conv2', 'relu_fc1', 'relu_fc2']
if device == "GPU":
layers.remove("data")
assert sorted(supported) == layers
# Fix: corrected "Plugiin" typo in the user-visible skip reason.
@pytest.mark.skip(reason="Plugin specific test.")
def test_set_config(device):
    """HETERO plugin accepts a TARGET_FALLBACK configuration entry."""
    plugin = IEPlugin("HETERO:CPU")
    plugin.set_config({"TARGET_FALLBACK": "CPU,GPU"})
@pytest.mark.skip(reason="Sporadically fail in CI, not reproducible locally")
def test_set_initial_affinity():
plugin = IEPlugin("HETERO:CPU", None)
net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
plugin.set_initial_affinity(net)
for l, params in net.layers.items():
assert params.affinity == "CPU", "Incorrect affinity for {}".format(l)
def test_set_initial_affinity_wrong_device(device):
with pytest.raises(RuntimeError) as e:
plugin = IEPlugin("CPU", None)
net = IENetwork(model=SAMPLENET_XML, weights=SAMPLENET_BIN)
plugin.set_initial_affinity(net)
assert "set_initial_affinity method applicable only for HETERO device" in str(e.value)
def test_add_cpu_extenstion_wrong_device():
with pytest.raises(RuntimeError) as e:
plugin = IEPlugin("GPU", None)
plugin.add_cpu_extension("./")
if "Cannot find plugin to use" in str(e.value):
pytest.skip("No GPU found. Skipping test")
else:
assert "add_cpu_extension method applicable only for CPU or HETERO devices" in str(e.value)
def test_unknown_plugin():
    """Creating IEPlugin with a bogus device name raises ValueError."""
    with pytest.raises(ValueError) as excinfo:
        IEPlugin("BLA")
    assert "Unknown plugin: BLA, expected one of:" in str(excinfo.value)

View File

@@ -0,0 +1,401 @@
import numpy as np
import os
import pytest
from openvino.inference_engine import ie_api as ie
# MYRIAD accepts only FP16 models; every other TEST_DEVICE uses the FP32 pair.
if os.environ.get("TEST_DEVICE") != "MYRIAD":
    test_net_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.xml')
    test_net_bin = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model.bin')
else:
    test_net_xml = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.xml')
    test_net_bin = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'test_model_fp16.bin')
# Input picture fed to the network in the inference tests below.
IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'test_data', 'images', 'cat3.bmp')
# computed with caffe
REF_IMAGE_RESULT = np.array([[34.6295814514, 18.9434795380, 43.2669448853, 0.4420155287, -108.4574050903,
                              -314.8240051270, 231.0738067627, -106.3504943848, 108.5880966187, 92.7254943848]])
def read_image():
    """Load the test image, scale to [0, 1], resize to 32x32 and return it as an NCHW array.

    Bug fix: cv2.imread returns None on failure, and the original divided by
    255 *before* the None check — so a missing file raised TypeError instead of
    the intended FileNotFoundError. The check now precedes any arithmetic.
    """
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(IMAGE_PATH)
    if image is None:
        raise FileNotFoundError("Input image not found")
    image = image / 255
    # cv2.resize takes dsize as (width, height); h == w == 32 so the result
    # is unchanged, but the argument order is now correct.
    image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))
    image = image.reshape((n, c, h, w))
    return image
def load_sample_model(device, num_requests=1):
    """Read the test model and load it onto `device` with `num_requests` infer requests."""
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    return core.load_network(network, device, num_requests=num_requests)
def test_inputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
assert len(executable_network.requests) == 2
for req in executable_network.requests:
assert len(req.inputs) == 1
assert "data" in req.inputs
assert req.inputs['data'].shape == (1, 3, 32, 32)
del executable_network
del ie_core
del net
def test_outputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
assert len(executable_network.requests) == 2
for req in executable_network.requests:
assert len(req.outputs) == 1
assert "fc_out" in req.outputs
assert req.outputs['fc_out'].shape == (1, 10)
del executable_network
del ie_core
del net
def test_inputs_list(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
for req in executable_network.requests:
assert len(req._inputs_list) == 1
assert "data" in req._inputs_list
del ie_core
def test_outputs_list(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
for req in executable_network.requests:
assert len(req._outputs_list) == 1
assert "fc_out" in req._outputs_list
del ie_core
def test_access_input_buffer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
buffer = executable_network.requests[0]._get_blob_buffer("data".encode()).to_numpy()
assert buffer.shape == (1, 3, 32, 32)
assert buffer.strides == (12288, 4096, 128, 4)
assert buffer.dtype == np.float32
del executable_network
del ie_core
del net
def test_access_output_buffer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
buffer = executable_network.requests[0]._get_blob_buffer("fc_out".encode()).to_numpy()
assert buffer.shape == (1, 10)
assert buffer.strides == (40, 4)
assert buffer.dtype == np.float32
del executable_network
del ie_core
del net
def test_write_to_inputs_directly(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
img = read_image()
executable_network.requests[0].inputs["data"][:] = img
assert np.allclose(executable_network.requests[0].inputs["data"], img)
del executable_network
del ie_core
del net
def test_write_to_inputs_copy(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = executable_network.requests[0]
request.inputs["data"][:] = img
assert np.allclose(executable_network.requests[0].inputs["data"], img)
del executable_network
del ie_core
del net
def test_infer(device):
    """Synchronous infer on the cat image must predict class index 3."""
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    exec_net = core.load_network(network, device, num_requests=1)
    image = read_image()
    request = exec_net.requests[0]
    request.infer({'data': image})
    result = request.outputs['fc_out']
    assert np.argmax(result[0]) == 3
    # Explicitly release in dependency order, mirroring the suite's convention.
    del exec_net
    del core
    del network
def test_async_infer_default_timeout(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait()
res = request.outputs['fc_out']
assert np.argmax(res[0]) == 3
del exec_net
del ie_core
del net
def test_async_infer_wait_finish(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait(ie.WaitMode.RESULT_READY)
res = request.outputs['fc_out']
assert np.argmax(res[0]) == 3
del exec_net
del ie_core
del net
def test_async_infer_wait_time(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait(100)
res = request.outputs['fc_out']
assert np.argmax(res[0]) == 3
del exec_net
del ie_core
del net
def test_async_infer_wait_status(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait(ie.WaitMode.RESULT_READY)
res = request.outputs['fc_out']
assert np.argmax(res[0]) == 3
status = request.wait(ie.WaitMode.STATUS_ONLY)
assert status == ie.StatusCode.OK
del exec_net
del ie_core
del net
def test_async_infer_fill_inputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.inputs['data'][:] = img
request.async_infer()
status_end = request.wait()
assert status_end == ie.StatusCode.OK
res = request.outputs['fc_out']
assert np.argmax(res[0]) == 3
del exec_net
del ie_core
del net
def test_infer_modify_outputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
outputs0 = exec_net.infer({'data': img})
status_end = request.wait()
assert status_end == ie.StatusCode.OK
assert np.argmax(outputs0['fc_out'][0]) == 3
outputs0['fc_out'][0] = 0
outputs1 = request.outputs
assert np.argmax(outputs1['fc_out'][0]) == 3
outputs1['fc_out'][0] = 1
outputs2 = request.outputs
assert np.argmax(outputs2['fc_out'][0]) == 3
del exec_net
del ie_core
del net
def test_async_infer_callback(device):
    """A completion callback registered via set_completion_callback fires after async infer."""
    # Closure-held state replaces the original decorator-injected function
    # attribute; the observable outcome (flag flips 0 -> 1) is identical.
    callback_state = {'called': 0}

    def on_complete(self, status):
        callback_state['called'] = 1

    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    exec_net = core.load_network(network, device, num_requests=1)
    image = read_image()
    request = exec_net.requests[0]
    request.set_completion_callback(on_complete)
    request.async_infer({'data': image})
    status = request.wait()
    assert status == ie.StatusCode.OK
    result = request.outputs['fc_out']
    assert np.argmax(result[0]) == 3
    assert callback_state['called'] == 1
    del exec_net
    del core
def test_async_infer_callback_wait_before_start(device):
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(callback_called=0)
def callback(self, status):
callback.callback_called = 1
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.set_completion_callback(callback)
status = request.wait()
assert status == ie.StatusCode.INFER_NOT_STARTED
request.async_infer({'data': img})
status = request.wait()
assert status == ie.StatusCode.OK
res = request.outputs['fc_out']
assert np.argmax(res[0]) == 3
assert callback.callback_called == 1
del exec_net
del ie_core
def test_async_infer_callback_wait_in_callback(device):
    # Verifies that calling wait(STATUS_ONLY) from *inside* the completion
    # callback is safe and observes the finished state of the request.
    class InferReqWrap:
        def __init__(self, request):
            self.request = request
            self.request.set_completion_callback(self.callback)
            # Before any inference is started, a STATUS_ONLY wait must
            # report INFER_NOT_STARTED.
            self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
            assert self.status_code == ie.StatusCode.INFER_NOT_STARTED
        def callback(self, statusCode, userdata):
            # Re-query the request status from within the callback itself;
            # execute() later checks this observed value.
            self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
        def execute(self, input_data):
            self.request.async_infer(input_data)
            status = self.request.wait(ie.WaitMode.RESULT_READY)
            assert status == ie.StatusCode.OK
            # The callback must have observed OK as well.
            assert self.status_code == ie.StatusCode.OK
    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    img = read_image()
    request_wrap = InferReqWrap(exec_net.requests[0])
    request_wrap.execute({'data': img})
    del exec_net
    del ie_core
def test_get_perf_counts(device):
    """With PERF_COUNT enabled, per-layer counters are reported after inference."""
    core = ie.IECore()
    network = core.read_network(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = core.load_network(network, device)
    image = read_image()
    request = exec_net.requests[0]
    request.infer({'data': image})
    perf_counters = request.get_perf_counts()
    assert perf_counters['29']["status"] == "EXECUTED"
    assert perf_counters['29']["layer_type"] == "FullyConnected"
    del exec_net
    del core
    del network
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Can't run test on device {},"
                    "Dynamic batch fully supported only on CPU".format(os.environ.get("TEST_DEVICE", "CPU")))
def test_set_batch_size(device):
    """With DYN_BATCH_ENABLED, set_batch(1) restricts inference to the first batch item."""
    # Renamed locals: the original shadowed the builtin `bin`.
    model_path = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.xml')
    weights_path = os.path.join(os.path.dirname(__file__), 'test_data', 'models', 'SampLeNet.bin')
    core = ie.IECore()
    core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
    network = core.read_network(model_path, weights_path)
    network.batch_size = 10
    input_data = np.zeros(shape=network.inputs['data'].shape)
    exec_net = core.load_network(network, device)
    # Only slot 0 carries real data; the remaining 9 batch items stay zero.
    input_data[0] = read_image()[0]
    request = exec_net.requests[0]
    request.set_batch(1)
    request.infer({'data': input_data})
    assert np.allclose(int(request.outputs['fc3'][0][0]), -1), "Incorrect data for 1st batch"
    del exec_net
    del core
    del network
def test_set_zero_batch_size(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
request = exec_net.requests[0]
with pytest.raises(ValueError) as e:
request.set_batch(0)
assert "Batch size should be positive integer number but 0 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_set_negative_batch_size(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
request = exec_net.requests[0]
with pytest.raises(ValueError) as e:
request.set_batch(-1)
assert "Batch size should be positive integer number but -1 specified" in str(e.value)
del exec_net
del ie_core
del net

View File

@@ -0,0 +1,39 @@
from openvino.inference_engine import IENetwork
try:
    from ngraph.impl.op import Parameter, Relu
    from ngraph.impl import Function, Shape, Type
    ngraph_available = True
except ImportError:
    # Fix: a bare `except:` also swallowed unrelated failures (e.g. a broken
    # ngraph install raising something other than ImportError) and silently
    # skipped the whole module. Only a genuinely missing package should.
    ngraph_available = False
import numpy as np
import pytest
# Skip the entire module when ngraph's Python bindings are absent.
if not ngraph_available:
    pytest.skip("NGraph is not installed, skip", allow_module_level=True)
@pytest.mark.skip(reason="nGraph python API has been removed in 2020.2 LTS release")
def test_CreateIENetworkFromNGraph():
    """Build a Parameter->Relu ngraph Function and wrap it in an IENetwork."""
    element_type = Type.f32
    param = Parameter(element_type, Shape([1, 3, 22, 22]))
    relu = Relu(param)
    func = Function([relu], [param], 'test')
    caps = Function.to_capsule(func)
    cnnNetwork = IENetwork(caps)
    # Fix: `is not None` is the correct identity check (PEP 8); `!= None`
    # goes through __eq__, which arbitrary objects may override.
    assert cnnNetwork is not None
    assert cnnNetwork.get_function() is not None
    # Parameter + Relu -> exactly two layers in the converted network.
    assert len(cnnNetwork.layers) == 2
@pytest.mark.skip(reason="nGraph python API has been removed in 2020.2 LTS release")
def test_GetIENetworkFromNGraph():
    """Round-trip an ngraph Function through IENetwork via PyCapsule handles."""
    element_type = Type.f32
    param = Parameter(element_type, Shape([1, 3, 22, 22]))
    relu = Relu(param)
    func = Function([relu], [param], 'test')
    caps = Function.to_capsule(func)
    cnnNetwork = IENetwork(caps)
    # Fix: replaced `!= None` comparisons with PEP 8 `is not None` identity
    # checks; `!= None` goes through __eq__, which objects may override.
    assert cnnNetwork is not None
    assert cnnNetwork.get_function() is not None
    caps2 = cnnNetwork.get_function()
    func2 = Function.from_capsule(caps2)
    assert func2 is not None

View File

@@ -23,13 +23,13 @@
namespace InferenceEngine {
/**
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1
* @brief This is a wrapper class used to build and parse a network from the given IR.
*
* All the methods here can throw exceptions.
*/
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3")
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1")
CNNNetReader {
public:
/**

View File

@@ -79,14 +79,14 @@ public:
virtual ~CNNNetwork() {}
/**
* @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3
* @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1
* @copybrief ICNNNetwork::getPrecision
*
* Wraps ICNNNetwork::getPrecision
*
* @return A precision type
*/
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1")
virtual Precision getPrecision() const;
/**
@@ -200,7 +200,7 @@ public:
}
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @copybrief ICNNNetwork::getLayerByName
*
* Wraps ICNNNetwork::getLayerByName
@@ -208,11 +208,11 @@ public:
* @param layerName Given name of the layer
* @return Status code of the operation. InferenceEngine::OK if succeeded
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
CNNLayerPtr getLayerByName(const char* layerName) const;
/**
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Begin layer iterator
*
* Order of layers is implementation specific,
@@ -221,25 +221,25 @@ public:
* @return Iterator pointing to a layer
*/
IE_SUPPRESS_DEPRECATED_START
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1")
details::CNNNetworkIterator begin() const;
/**
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief End layer iterator
* @return Iterator pointing to a layer
*/
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::getFunction() and work with ngraph::Function directly. The method will be removed in 2021.1")
details::CNNNetworkIterator end() const;
IE_SUPPRESS_DEPRECATED_END
/**
* @deprecated Use CNNNetwork::layerCount() instead. The method will be removed in 2020.3
* @deprecated Use CNNNetwork::layerCount() instead. The method will be removed in 2021.1
* @brief Number of layers in network object
*
* @return Number of layers.
*/
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::layerCount() instead. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use CNNNetwork::layerCount() instead. The method will be removed in 2021.1")
size_t size() const;
/**

View File

@@ -153,6 +153,7 @@ public:
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* This method will be removed in 2021.1 release.
* @copybrief IExecutableNetwork::GetMappedTopology
*
* Wraps IExecutableNetwork::GetMappedTopology.

View File

@@ -78,8 +78,8 @@ public:
IExecutableNetwork::Ptr ret;
IE_SUPPRESS_DEPRECATED_START
CALL_STATUS_FNC(LoadNetwork, ret, network, config);
IE_SUPPRESS_DEPRECATED_END
return ExecutableNetwork(ret, actual);
IE_SUPPRESS_DEPRECATED_END
}
/**
@@ -94,9 +94,9 @@ public:
IExecutableNetwork::Ptr ret;
IE_SUPPRESS_DEPRECATED_START
CALL_STATUS_FNC(LoadNetwork, ret, network, config);
IE_SUPPRESS_DEPRECATED_END
if (ret.get() == nullptr) THROW_IE_EXCEPTION << "Internal error: pointer to executable network is null";
return ExecutableNetwork(ret, actual);
IE_SUPPRESS_DEPRECATED_END
}
/**
@@ -137,8 +137,8 @@ public:
IExecutableNetwork::Ptr ret;
IE_SUPPRESS_DEPRECATED_START
CALL_STATUS_FNC(ImportNetwork, ret, modelFileName, config);
IE_SUPPRESS_DEPRECATED_END
return ExecutableNetwork(ret, actual);
IE_SUPPRESS_DEPRECATED_END
}
/**

View File

@@ -70,7 +70,7 @@ public:
explicit SharedObjectLoader(LPCSTR pluginName) {
ExcludeCurrentDirectory();
shared_object = LoadLibrary(pluginName);
shared_object = LoadLibraryA(pluginName);
if (!shared_object) {
char cwd[1024];
THROW_IE_EXCEPTION << "Cannot load library '" << pluginName << "': " << GetLastError()

View File

@@ -48,7 +48,7 @@
#endif
#define INFERENCE_ENGINE_NN_BUILDER_DEPRECATED \
INFERENCE_ENGINE_DEPRECATED("Use ngraph API. NN Builder API will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use ngraph API. NN Builder API will be removed in 2020.4")
#define INFERENCE_ENGINE_NN_BUILDER_API_CLASS(...) \
INFERENCE_ENGINE_NN_BUILDER_DEPRECATED \
INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
@@ -63,7 +63,7 @@
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#else
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) \
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3") \
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1") \
INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#endif

View File

@@ -51,6 +51,7 @@ public:
/**
* @deprecated IErrorListener is not used anymore. An exception is thrown in case of any unexpected situations.
* The function will be removed in 2021.1 release.
* @brief Sets logging callback
*
* Logging is used to track what is going on inside the plugins, Inference Engine library

View File

@@ -47,7 +47,7 @@ public:
* @param _precision Precision of the data
* @param layout Data layout
*/
INFERENCE_ENGINE_DEPRECATED("Use Data(const std::string &, const TensorDesc&). The ctor will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use Data(const std::string &, const TensorDesc&). The ctor will be removed in 2021.1")
Data(const std::string& name, const SizeVector& a_dims, Precision _precision, Layout layout = NCHW);
/**

View File

@@ -12,6 +12,7 @@
namespace InferenceEngine {
/**
* @deprecated IErrorListener is not used anymore. An exception is thrown / StatusCode set in case of any unexpected situations
* The class will be removed in 2021.1 release.
* @brief This class represents a custom error listener.
*/
class

View File

@@ -34,6 +34,7 @@ public:
};
/**
* @deprecated Implement IExtension interface. The interface will be removed in 2021.1 release.
* @brief The SOCreatorTrait class specialization for IExtension case, defines the name of the fabric method for
* creating IExtension object in DLL
*/
@@ -72,6 +73,7 @@ public:
/**
* @deprecated IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* The method will be removed in 2021.1 release.
* @brief Sets a log callback that is used to track what is going on inside
*
* @param listener Logging listener
@@ -96,7 +98,8 @@ public:
void Release() noexcept override {}
/**
* @deprecated Use IExtension::getImplTypes to get implementation types for a particular node
* @deprecated Use IExtension::getImplTypes to get implementation types for a particular node.
* The method will removed in 2021.1 release.
* @brief Gets the array with types of layers which are included in the extension
*
* @param types Types array
@@ -112,7 +115,8 @@ public:
}
/**
* @deprecated Use IExtension::getImplementation to get a concrete implementation
* @deprecated Use IExtension::getImplementation to get a concrete implementation.
* The method will be removed in 2021.1 release.
* @brief Gets the factory with implementations for a given layer
*
* @param factory Factory with implementations
@@ -130,6 +134,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation
* The method will be removed in 2021.1 release.
* @brief Gets shape propagation implementation for the given string-type of CNNLayer
*
* @param impl the vector with implementations which is ordered by priority
@@ -146,6 +151,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation
* The method will be removed in 2021.1 release.
* @brief Gets the array with types of layers which are included in the extension
*
* @param types Types array
@@ -194,7 +200,7 @@ protected:
};
/**
* @deprecated Use a common Extension class
* @deprecated Use a common Extension class. The interface will be removed in 2021.1 release.
* @brief This class is a C++ helper to work with objects created using extensions.
*/
class INFERENCE_ENGINE_DEPRECATED("Use a common Extension interface") ShapeInferExtension :
@@ -205,7 +211,9 @@ public:
*
* @param name Full or relative path to extension library
*/
IE_SUPPRESS_DEPRECATED_START_WIN
explicit ShapeInferExtension(const file_name_t& name): actual(name) {}
IE_SUPPRESS_DEPRECATED_END_WIN
/**
* @brief Gets the extension version information
@@ -218,6 +226,7 @@ public:
/**
* @brief IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* The method will be removed in 2021.1 release.
* @brief Sets a log callback that is used to track what is going on inside
*
* @param listener Logging listener

View File

@@ -20,13 +20,13 @@
namespace InferenceEngine {
/**
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3
* @deprecated Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1
* @brief This class is the main interface to build and parse a network from a given IR
*
* All methods here do not throw exceptions and return a StatusCode and ResponseDesc object.
* Alternatively, to use methods that throw exceptions, refer to the CNNNetReader wrapper class.
*/
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2020.3")
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core::ReadNetwork methods. This API will be removed in 2021.1")
ICNNNetReader : public details::IRelease {
public:
/**

View File

@@ -54,14 +54,14 @@ public:
virtual std::shared_ptr<const ngraph::Function> getFunction() const noexcept = 0;
/**
* @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3
* @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1
* @brief Returns the main network operating precision.
*
* This may be MIXED if not homogeneous.
*
* @return A precision type
*/
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1")
virtual Precision getPrecision() const noexcept = 0;
/**
@@ -94,14 +94,14 @@ public:
virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;
/**
* @deprecated Use ICNNNetwork::getName() instead. The method will be removed in 2020.3
* @deprecated Use ICNNNetwork::getName() instead. The method will be removed in 2021.1
* @brief Gets the network name. The name is stored in the given pName string.
*
* @param pName - will receive actual network name, specified in IR file,
* pName should point to valid memory address before invoking this function
* @param len - size in bytes of pName buffer, actual name is trimmed by this size
*/
INFERENCE_ENGINE_DEPRECATED("Use ICNNNetwork::getName() instead. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Use ICNNNetwork::getName() instead. The method will be removed in 2021.1")
virtual void getName(char* pName, size_t len) const noexcept = 0;
/**
@@ -119,7 +119,7 @@ public:
virtual size_t layerCount() const noexcept = 0;
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Returns a smart pointer reference to a Data node given its name.
*
* If the Data node is missing, returns reference to a default initialized new empty data pointer with given name.
@@ -127,16 +127,16 @@ public:
* @param dname Name of the Data node
* @return Data node smart pointer
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
virtual DataPtr& getData(const char* dname) noexcept = 0;
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Insert a layer into the network. A user is responsible to connect it to other data elements.
*
* @param layer Const reference to a layer smart pointer
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
virtual void addLayer(const CNNLayerPtr& layer) noexcept = 0;
/**
@@ -151,7 +151,7 @@ public:
ResponseDesc* resp = nullptr) noexcept = 0;
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Gets network layer with the given name
*
* @param layerName Given name of the layer
@@ -159,7 +159,7 @@ public:
* @param resp Pointer to the response message that holds a description of an error if any occurred
* @return Status code of the operation. InferenceEngine::OK if succeeded
*/
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3")
INFERENCE_ENGINE_DEPRECATED("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1")
virtual StatusCode getLayerByName(const char* layerName, CNNLayerPtr& out, ResponseDesc* resp) const noexcept = 0;
/**

View File

@@ -86,10 +86,12 @@ public:
float mn = (std::numeric_limits<float>::max)();
float mx = (std::numeric_limits<float>::min)();
IE_SUPPRESS_DEPRECATED_START_WIN
for (int i = 0; i < statCount; i++) {
_minOutputs.push_back(mn);
_maxOutputs.push_back(mx);
}
IE_SUPPRESS_DEPRECATED_END_WIN
}
public:

View File

@@ -102,7 +102,8 @@ public:
virtual StatusCode Export(std::ostream& networkModel, ResponseDesc* resp) noexcept = 0;
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* The method will be removed in 2021.1 release.
* @brief Get the mapping of IR layer names to implemented kernels
*
* @param deployedTopology Map of PrimitiveInfo objects that represent the deployed topology

View File

@@ -147,6 +147,7 @@ public:
/**
* @deprecated Implement IExtension::getImplTypes and IExtension::getImplementation
* The interface will be removed in 2021.1 release.
* @interface ILayerImplFactory
* @brief This class provides interface for extension factories
*/
@@ -178,6 +179,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation.
* The interface will be removed in 2021.1 release.
* @class IShapeInferImpl
* @brief This class provides interface for the implementation with the custom execution code
*/
@@ -212,6 +214,7 @@ class IShapeInferExtension : public InferenceEngine::details::IRelease {
public:
/**
* @deprecated IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* The method will be removed in 2021.1 release.
* @brief Sets logging callback.
*
* Logging is used to track what is going on inside.
@@ -225,7 +228,6 @@ public:
/**
* @brief Gets extension version information and stores in versionInfo
*
* @param versionInfo Pointer to version info, will be set by plugin
*/
virtual void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept = 0;
@@ -237,6 +239,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation.
* The method will be removed in 2021.1 release.
* @brief Fills passed array with types of layers which shape infer implementations are included in the extension
*
* @param types Array to store the layer types
@@ -249,6 +252,7 @@ public:
/**
* @deprecated Implement ngraph::op::Op::validate_and_infer_types method in a custom ngraph operation.
* The method will be removed in 2021.1 release.
* @brief Gets shape propagation implementation for the given string-type of CNNLayer
*
* @param impl the vector with implementations which is ordered by priority
@@ -271,6 +275,7 @@ class INFERENCE_ENGINE_API_CLASS(IExtension) : public IShapeInferExtension {
public:
/**
* @deprecated Use IExtension::getImplementation to get a concrete implementation
* The method will be removed in 2021.1 release.
* @brief Provides a factory for a specified CNNLayer
* @param factory A factory returned from an extension plugin
* @param cnnLayer A CNNLayer object to provide factory for
@@ -281,12 +286,16 @@ public:
INFERENCE_ENGINE_DEPRECATED("Use IExtension::getImplementation to get a concrete implementation")
virtual StatusCode getFactoryFor(ILayerImplFactory*& factory, const CNNLayer* cnnLayer,
ResponseDesc* resp) noexcept {
(void)factory;
(void)cnnLayer;
(void)resp;
return NOT_IMPLEMENTED;
}
IE_SUPPRESS_DEPRECATED_END
/**
* @deprecated Use IExtension::getImplTypes to get implementation types for a particular node
* The method will be removed in 2021.1 release.
* @brief Fills passed array with types of layers which kernel implementations are included in the extension
*
* @param types Array to store the layer types
@@ -296,6 +305,9 @@ public:
*/
INFERENCE_ENGINE_DEPRECATED("Use IExtension::getImplTypes to get implementation types for a particular node")
virtual StatusCode getPrimitiveTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
(void)types;
(void)size;
(void)resp;
return NOT_IMPLEMENTED;
}
@@ -322,6 +334,7 @@ public:
* @return vector of strings
*/
virtual std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) {
(void)node;
return {};
}
@@ -332,6 +345,8 @@ public:
* @return shared pointer to implementation
*/
virtual ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
(void)node;
(void)implType;
return nullptr;
}
};
@@ -345,6 +360,7 @@ using IExtensionPtr = std::shared_ptr<IExtension>;
/**
* @deprecated Migrate to IR v10 and implement shape inference in the ngraph::op::Op::validate_and_infer_types method
* This API will be removed in 2021.1 release.
* @brief A shared pointer to a IShapeInferExtension interface
*/
using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
@@ -360,6 +376,7 @@ INFERENCE_EXTENSION_API(StatusCode) CreateExtension(IExtension*& ext, ResponseDe
/**
* @deprecated Migrate to IR v10 and implement shape inference in the ngraph::op::Op::validate_and_infer_types method
* This API will be removed in 2021.1 release.
* @brief Creates the default instance of the shape infer extension
*
* @param ext Shape Infer Extension interface

View File

@@ -32,7 +32,7 @@ class Node;
namespace InferenceEngine {
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This is an internal common Layer parameter parsing arguments
*/
struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
@@ -47,10 +47,8 @@ struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
std::string type;
/**
* deprecated Use precision of CNNLayer::outData and CNNLayer::insData
* @brief Layer precision
*/
INFERENCE_ENGINE_DEPRECATED("Use precision of CNNLayer::outData and CNNLayer::insData")
Precision precision;
/**
@@ -85,7 +83,7 @@ struct INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(LayerParams) {
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This is a base abstraction Layer - all DNN Layers inherit from this class
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CNNLayer) {
@@ -127,7 +125,9 @@ public:
/**
* @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
*/
IE_SUPPRESS_DEPRECATED_START_WIN
Ptr _fusedWith;
IE_SUPPRESS_DEPRECATED_END_WIN
/**
* @brief Convenience user values to store in this object as extra data
@@ -174,25 +174,18 @@ public:
*
* @param layer Reference to the layer to be fused with
*/
IE_SUPPRESS_DEPRECATED_START_WIN
void fuse(Ptr& layer) {
_fusedWith = layer;
}
IE_SUPPRESS_DEPRECATED_END_WIN
/**
* @brief Returns the first element of the input data for this layer
*
* @return A smart pointer to the input data element
*/
virtual const DataPtr input() const {
if (insData.empty()) {
THROW_IE_EXCEPTION << "Internal error: input data is empty";
}
auto lockedFirstInsData = insData[0].lock();
if (!lockedFirstInsData) {
THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
}
return lockedFirstInsData;
}
virtual const DataPtr input() const;
/**
* @brief Checks if the input data and layer data are legitimate
@@ -206,30 +199,13 @@ public:
* @return float value if parsing was successful
* @throws InferenceEngineException in case of parsing error
*/
static float ie_parse_float(const std::string& str) {
if (str == "-inf") {
return -std::numeric_limits<float>::infinity();
} else if (str == "inf") {
return std::numeric_limits<float>::infinity();
} else {
float res;
std::stringstream val_stream(str);
val_stream.imbue(std::locale("C"));
val_stream >> res;
if (!val_stream.eof()) THROW_IE_EXCEPTION;
return res;
}
}
static float ie_parse_float(const std::string& str);
/**
* @brief serialize float with c_locale formating
* used for default values serializing
*/
static std::string ie_serialize_float(float value) {
std::stringstream val_stream;
val_stream.imbue(std::locale("C"));
val_stream << value;
return val_stream.str();
}
static std::string ie_serialize_float(float value);
/**
* @brief Gets float value for the given parameter
@@ -238,15 +214,7 @@ public:
* @param def default value of the parameter if not found
* @return float value
*/
float GetParamAsFloat(const char* param, float def) const {
std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
try {
return ie_parse_float(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to float.";
}
}
float GetParamAsFloat(const char* param, float def) const;
/**
* @brief Returns a float value for the given layer parameter
@@ -254,15 +222,7 @@ public:
* @param param Name of the layer parameter
* @return A float value for the specified parameter
*/
float GetParamAsFloat(const char* param) const {
std::string val = GetParamAsString(param);
try {
return ie_parse_float(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to float.";
}
}
float GetParamAsFloat(const char* param) const;
/**
* @brief Returns a vector of float values for the given parameter or returns the default value
@@ -271,23 +231,7 @@ public:
* @param def Default value of the parameter if not found
* @return vector of float values
*/
std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<float> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
float val = ie_parse_float(str);
result.push_back(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to floats.";
}
}
return result;
}
std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const;
/**
* @brief Returns a vector of float values for the given parameter
@@ -295,22 +239,7 @@ public:
* @param param Name of the layer parameter
* @return vector of float values
*/
std::vector<float> GetParamAsFloats(const char* param) const {
std::string vals = GetParamAsString(param);
std::vector<float> result;
std::istringstream stream(vals);
std::string str;
while (getline(stream, str, ',')) {
try {
float val = ie_parse_float(str);
result.push_back(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to floats.";
}
}
return result;
}
std::vector<float> GetParamAsFloats(const char* param) const;
/**
* @brief Returns an integer value for the given parameter or returns the default value
@@ -319,15 +248,7 @@ public:
* @param def Default value of the parameter if not found
* @return An int value for the specified parameter
*/
int GetParamAsInt(const char* param, int def) const {
std::string val = GetParamAsString(param, std::to_string(def).c_str());
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to int.";
}
}
int GetParamAsInt(const char* param, int def) const;
/**
* @brief Returns an integer value for the given parameter
@@ -335,15 +256,7 @@ public:
* @param param Name of the layer parameter
* @return An int value for the specified parameter
*/
int GetParamAsInt(const char* param) const {
std::string val = GetParamAsString(param);
try {
return std::stoi(val);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
<< val << " cannot be casted to int.";
}
}
int GetParamAsInt(const char* param) const;
/**
* @brief Returns a vector of int values for the given parameter or returns the default value
@@ -352,22 +265,7 @@ public:
* @param def Default value of the parameter if not found
* @return vector of int values
*/
std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<int> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stoi(str));
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to int.";
}
}
return result;
}
std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const;
/**
* @brief Returns a vector of int values for the given parameter
@@ -375,21 +273,8 @@ public:
* @param param Name of the layer parameter
* @return vector of int values
*/
std::vector<int> GetParamAsInts(const char* param) const {
std::string vals = GetParamAsString(param);
std::vector<int> result;
std::istringstream stream(vals);
std::string str;
while (getline(stream, str, ',')) {
try {
result.push_back(std::stoi(str));
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
<< ". Value " << vals << " cannot be casted to int.";
}
}
return result;
}
std::vector<int> GetParamAsInts(const char* param) const;
/**
* @brief Returns an unsigned integer value for the given parameter or returns the default value
*
@@ -397,20 +282,7 @@ public:
* @param def Default value of the parameter if not found
* @return An unsigned integer value for the specified parameter
*/
unsigned int GetParamAsUInt(const char* param, unsigned int def) const {
std::string val = GetParamAsString(param, std::to_string(def).c_str());
std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
". Value " + val + " cannot be casted to int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
unsigned int GetParamAsUInt(const char* param, unsigned int def) const;
/**
* @brief Returns an unsigned integer value for the given parameter
@@ -418,20 +290,7 @@ public:
* @param param Name of the layer parameter
* @return An unsigned integer value for the specified parameter
*/
unsigned int GetParamAsUInt(const char* param) const {
std::string val = GetParamAsString(param);
std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
". Value " + val + " cannot be casted to unsigned int.";
try {
int value = std::stoi(val);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
return static_cast<unsigned int>(value);
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
unsigned int GetParamAsUInt(const char* param) const;
/**
* @brief Returns a vector of unsigned int values for the given parameter or returns the default value
@@ -440,27 +299,7 @@ public:
* @param def Default value of the parameter if not found
* @return vector of unsigned int values
*/
std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<unsigned int> result;
std::istringstream stream(vals);
std::string str;
std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
name + ". Value " + vals + " cannot be casted to unsigned int.";
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
int value = std::stoi(str);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
result.push_back(static_cast<unsigned int>(value));
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
return result;
}
std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const;
/**
* @brief Returns a vector of unsigned int values for the given parameter
@@ -468,26 +307,8 @@ public:
* @param param Name of the layer parameter
* @return vector of unsigned int values
*/
std::vector<unsigned int> GetParamAsUInts(const char* param) const {
std::string vals = GetParamAsString(param);
std::vector<unsigned int> result;
std::istringstream stream(vals);
std::string str;
std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
name + ". Value " + vals + " cannot be casted to int.";
while (getline(stream, str, ',')) {
try {
int value = std::stoi(str);
if (value < 0) {
THROW_IE_EXCEPTION << message;
}
result.push_back(static_cast<unsigned int>(value));
} catch (...) {
THROW_IE_EXCEPTION << message;
}
}
return result;
}
std::vector<unsigned int> GetParamAsUInts(const char* param) const;
/**
* @brief Returns a boolean value for the given parameter.
*
@@ -496,44 +317,15 @@ public:
* @param def Default value of the parameter if not found
* @return A bool value for the specified parameter
*/
bool GetParamAsBool(const char* param, bool def) const {
std::string val = GetParamAsString(param, std::to_string(def).c_str());
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return std::tolower(value);
});
bool GetParamAsBool(const char* param, bool def) const;
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param, def) != 0);
}
return result;
}
/**
* @brief Returns a boolean value for the given parameter
*
* @param param Name of the layer parameter
* @return A bool value for the specified parameter
*/
bool GetParamAsBool(const char* param) const {
std::string val = GetParamAsString(param);
std::string loweredCaseValue;
std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
return std::tolower(value);
});
bool result = false;
if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
// attempting parse using non alpha bool
return (GetParamAsInt(param) != 0);
}
return result;
}
bool GetParamAsBool(const char* param) const;
/**
* @brief Returns a string value for the given parameter or returns the default one
@@ -542,13 +334,7 @@ public:
* @param def Default value of the parameter if not found
* @return A string value
*/
std::string GetParamAsString(const char* param, const char* def) const {
auto it = params.find(param);
if (it == params.end() || it->second.empty()) {
return def;
}
return (*it).second;
}
std::string GetParamAsString(const char* param, const char* def) const;
/**
* @brief Checks the param presence in the layer
@@ -556,13 +342,7 @@ public:
* @param param Name of the layer parameter
* @return a bool depending param presence
*/
bool CheckParamPresence(const char* param) const {
auto it = params.find(param);
if (it == params.end()) {
return false;
}
return true;
}
bool CheckParamPresence(const char* param) const;
/**
* @brief Returns a string value for the given parameter.
@@ -571,13 +351,7 @@ public:
* @param param Name of the layer parameter
* @return A string value
*/
std::string GetParamAsString(const char* param) const {
auto it = params.find(param);
if (it == params.end()) {
THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
}
return (*it).second;
}
std::string GetParamAsString(const char* param) const;
/**
* @brief Gets the parameter as a std::vector<std::string>
@@ -585,21 +359,7 @@ public:
* @param def The default values if case of parameter is not found
* @return The parameter as strings.
*/
std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const {
std::string vals = GetParamAsString(param, "");
std::vector<std::string> result;
std::istringstream stream(vals);
std::string str;
if (vals.empty()) return def;
while (getline(stream, str, ',')) {
try {
result.push_back(str);
} catch (...) {
THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
}
}
return result;
}
std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const;
/**
* @brief Map of pairs: (parameter name, parameter value)
@@ -622,7 +382,7 @@ IE_SUPPRESS_DEPRECATED_END
IE_SUPPRESS_DEPRECATED_START_WIN
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(WeightableLayer): public CNNLayer {
@@ -665,7 +425,7 @@ public:
unsigned int& prop_name##_y = prop_name.at(Y_AXIS)
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard 3D Convolution Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ConvolutionLayer): public WeightableLayer {
@@ -745,7 +505,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard deconvolution layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeconvolutionLayer): public ConvolutionLayer {
@@ -757,7 +517,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard deformable convolution layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
@@ -774,7 +534,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard pooling layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PoolingLayer): public CNNLayer {
@@ -856,7 +616,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard binary convolution layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BinaryConvolutionLayer): public WeightableLayer {
@@ -961,7 +721,7 @@ public:
#undef DEFINE_PROP
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a fully connected layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(FullyConnectedLayer): public WeightableLayer {
@@ -980,7 +740,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents concatenation layer
*
* Takes as input several data elements and merges them to one using the supplied axis
@@ -1004,7 +764,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a layer that evenly splits the input into the supplied outputs
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SplitLayer): public CNNLayer {
@@ -1023,7 +783,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Linear Response Normalization (LRN) Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NormLayer): public CNNLayer {
@@ -1058,7 +818,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents standard softmax Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SoftMaxLayer): public CNNLayer {
@@ -1076,7 +836,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents standard GRN Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(GRNLayer): public CNNLayer {
@@ -1096,7 +856,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents standard MVN Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(MVNLayer): public CNNLayer {
@@ -1121,7 +881,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Rectified Linear activation layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReLULayer): public CNNLayer {
@@ -1140,7 +900,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Clamp activation layer
*
* Clamps all tensor elements into the range [min_value, max_value]
@@ -1165,7 +925,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a ReLU6 activation layer
*
* Clamps all tensor elements into the range [0, 6.0]
@@ -1186,7 +946,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents an element wise operation layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(EltwiseLayer): public CNNLayer {
@@ -1237,7 +997,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard crop layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(CropLayer): public CNNLayer {
@@ -1264,7 +1024,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard reshape layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ReshapeLayer): public CNNLayer {
@@ -1291,7 +1051,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Tile Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TileLayer): public CNNLayer {
@@ -1314,7 +1074,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Layer which performs Scale and Shift
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScaleShiftLayer): public WeightableLayer {
@@ -1334,7 +1094,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents TensorIterator layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(TensorIterator): public CNNLayer {
@@ -1372,7 +1132,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Base class for recurrent cell layers
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(RNNCellBase): public WeightableLayer {
@@ -1431,7 +1191,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief LSTM Cell layer
*
* G - number of gates (=4)
@@ -1477,7 +1237,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief GRU Cell layer
*
* G - number of gates (=3)
@@ -1519,7 +1279,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief RNN Cell layer
*
* G - number of gates (=1)
@@ -1556,7 +1316,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Sequence of recurrent cells
*
* N - batch size
@@ -1612,7 +1372,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Layer which performs Scale and Shift
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(PReLULayer): public WeightableLayer {
@@ -1635,7 +1395,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Power Layer
*
* Formula is: output = (offset + scale * input) ^ power
@@ -1664,7 +1424,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a Batch Normalization Layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BatchNormalizationLayer): public WeightableLayer {
@@ -1683,7 +1443,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a general matrix multiplication operation layer
*
* Formula is: dst := alpha*src1*src2 + beta*src3
@@ -1715,7 +1475,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Pad layer
*
* Adds paddings to input tensor
@@ -1753,7 +1513,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Gather layer
*
* Gather slices from Dictionary according to Indexes
@@ -1773,7 +1533,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Strided Slice layer
*
* Strided Slice picks from input tensor according parameters
@@ -1814,7 +1574,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Shuffle Channels layer
* Shuffle Channels picks from input tensor according parameters
*/
@@ -1839,7 +1599,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Depth To Space layer
* Depth To Space picks from input tensor according parameters
*/
@@ -1859,7 +1619,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Space To Depth layer
* Space To Depth picks from input tensor according parameters
*/
@@ -1879,7 +1639,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Space To Batch layer
*
* Space To Batch picks from input tensor according parameters
@@ -1909,7 +1669,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Batch To Space layer
*
* Batch To Space picks from input tensor according parameters
@@ -1942,7 +1702,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents SparseFillEmptyRows layer
*
* SparseFillEmptyRows fills empty rows in a sparse tensor
@@ -1958,7 +1718,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents SparseSegmentMean(SqrtN, Sum) layers
* SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
*/
@@ -1973,7 +1733,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents ExperimentalSparseWeightedReduce layer
* ExperimentalSparseWeightedReduce layer reduces data along sparse segments of a tensor.
*/
@@ -1988,7 +1748,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents SparseToDense layer
* SparseToDense layer converts a sparse tensor to a dense tensor.
*/
@@ -2003,7 +1763,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents Bucketize layer
* Bucketize layer bucketizes the input based on the boundaries.
*/
@@ -2023,7 +1783,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Reverse Sequence layer
*
* Reverse Sequence modifies input tensor according parameters
@@ -2049,7 +1809,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a OneHot layer
* Converts input into OneHot representation.
*/
@@ -2084,7 +1844,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard RangeLayer layer
*
* RangeLayer modifies input tensor dimensions according parameters
@@ -2100,7 +1860,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Fill layer
*
* RFill modifies input tensor according parameters
@@ -2116,7 +1876,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a SelectLayer layer
*
* SelectLayer layer takes elements from the second (“then”) or the third (“else”) input based on condition mask
@@ -2134,7 +1894,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Broadcast layer
*
* Broadcast modifies input tensor dimensions according parameters
@@ -2150,7 +1910,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a quantization operation layer
*
* Element-wise linear quantization of floating point input values into a descrete set of floating point values
@@ -2171,7 +1931,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Math layers
*
* Math modifies input tensor dimensions according parameters
@@ -2187,7 +1947,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Reduce layers
*
* Reduce modifies input tensor according parameters
@@ -2208,7 +1968,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard TopK layer
*
* TopK picks top K values from input tensor according parameters
@@ -2237,7 +1997,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents Unique layer.
*
* The Unique operation searches for unique elements in 1-D input
@@ -2266,7 +2026,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard NonMaxSuppression layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(NonMaxSuppressionLayer): public CNNLayer {
@@ -2289,7 +2049,7 @@ public:
};
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class represents a standard Scatter layer
*/
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScatterLayer): public CNNLayer {

View File

@@ -71,10 +71,10 @@ struct QueryNetworkResult {
};
/**
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2020.3
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2021.1
* @brief This class is a main plugin interface
*/
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core instead. Will be removed in 2020.3")
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core instead. Will be removed in 2021.1")
INFERENCE_ENGINE_API_CLASS(IInferencePlugin)
: public details::IRelease {
public:
@@ -87,6 +87,7 @@ public:
/**
* @deprecated IErrorListener is not used anymore. StatusCode is provided in case of unexpected situations
* This API will be removed in 2021.1 release.
* @brief Sets logging callback
*
* Logging is used to track what is going on inside

View File

@@ -18,11 +18,11 @@
namespace InferenceEngine {
/**
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2020.3
* @deprecated Use InferenceEngine::Core instead. Will be removed in 2021.1
* @brief This is a class to load a suitable plugin
*/
class INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::Core instead which dispatches plugin automatically."
"Will be removed in 2020.3") INFERENCE_ENGINE_API_CLASS(PluginDispatcher) {
"Will be removed in 2021.1") INFERENCE_ENGINE_API_CLASS(PluginDispatcher) {
public:
/**
* @brief A constructor

View File

@@ -21,7 +21,7 @@ namespace details {
IE_SUPPRESS_DEPRECATED_START
/**
* @deprecated Use InferenceEngine::Core instead.
* @deprecated Use InferenceEngine::Core instead. This API will be removed in 2021.1 release.
* @brief This class defines the name of the fabric for creating an IInferencePlugin object in DLL
*/
template <>

View File

@@ -21,6 +21,7 @@ namespace InferenceEngine {
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* This structure will be removed in 2021.1 release.
* @brief Structure with information about Primitive
*/
struct INFERENCE_ENGINE_DEPRECATED("Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph") PrimitiveInfo {

View File

@@ -20,6 +20,7 @@ namespace InferenceEngine {
/**
* @deprecated Use ExecutableNetwork::GetExecGraphInfo to get information about an internal graph.
* This API will be removed in 2021.1 release.
* @struct TensorInfo
* @brief This structure describes tensor information
*/

View File

@@ -35,7 +35,8 @@
namespace InferenceEngine {
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Gets the top n results from a tblob
*
* @param n Top n count
@@ -44,7 +45,7 @@ namespace InferenceEngine {
*/
template <class T>
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& output) {
SizeVector dims = input.getTensorDesc().getDims();
size_t input_rank = dims.size();
@@ -81,7 +82,8 @@ inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& o
}
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Gets the top n results from a blob
*
* @param n Top n count
@@ -89,7 +91,7 @@ inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& o
* @param output Vector of indexes for the top n places
*/
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& output) {
IE_SUPPRESS_DEPRECATED_START
switch (input.getTensorDesc().getPrecision()) {
@@ -112,7 +114,8 @@ inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& outpu
#undef TBLOB_TOP_RESULT
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Copies a 8-bit RGB image to the blob.
*
* Throws an exception in case of dimensions or input size mismatch
@@ -124,7 +127,7 @@ inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& outpu
*/
template <typename data_t>
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t>* blob) {
SizeVector dims = blob->getTensorDesc().getDims();
if (4 != dims.size())
@@ -162,7 +165,8 @@ void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t
}
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Splits the RGB channels to either I16 Blob or float blob.
*
* The image buffer is assumed to be packed with no support for strides.
@@ -172,7 +176,7 @@ void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t
* @param input Blob to contain the split image (to 3 channels)
*/
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, Blob& input) {
IE_SUPPRESS_DEPRECATED_START
TBlob<float>* float_input = dynamic_cast<TBlob<float>*>(&input);
@@ -187,7 +191,8 @@ inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSiz
}
/**
* @deprecated InferenceEngine utility functions are not a part of public API
* @deprecated InferenceEngine utility functions are not a part of public API.
* This method will be removed in 2020.4 release.
* @brief Copies data from a certain precision to float
*
* @param dst Pointer to an output float buffer, must be allocated before the call
@@ -195,7 +200,7 @@ inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSiz
*/
template <typename T>
INFERENCE_ENGINE_DEPRECATED(
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
"InferenceEngine utility functions are not a part of public API. Will be removed in 2020.4")
void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
if (!dst) {
return;

View File

@@ -82,6 +82,16 @@ else()
endif()
endif()
if(APPLE)
set(CMAKE_MACOSX_RPATH ON)
endif()
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
set(CMAKE_C_VISIBILITY_PRESET hidden)
set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
####################################
## to use C++11; can overwritten via cmake command line
if(NOT DEFINED CMAKE_CXX_STANDARD)

View File

@@ -156,7 +156,7 @@ int main(int argc, char *argv[]) {
if (FLAGS_d.find("CPU") != std::string::npos && !FLAGS_l.empty()) {
// CPU (MKLDNN) extensions is loaded as a shared library and passed as a pointer to base extension
const auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU (MKLDNN) extensions is loaded " << FLAGS_l << slog::endl;
}

0
inference-engine/samples/build_samples.sh Normal file → Executable file
View File

View File

@@ -77,7 +77,7 @@ int main(int argc, char *argv[]) {
if (!FLAGS_l.empty()) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
}
if (!FLAGS_c.empty()) {

View File

@@ -85,30 +85,28 @@ int main(int argc, char *argv[]) {
std::vector<std::string> availableDevices = ie.GetAvailableDevices();
// --------------------------- 3. Query and print supported metrics and config keys--------------------
std::set<std::string> printedDevices;
std::cout << "Available devices: " << std::endl;
for (auto && device : availableDevices) {
std::string deviceFamilyName = device.substr(0, device.find_first_of('.'));
if (printedDevices.find(deviceFamilyName) == printedDevices.end())
printedDevices.insert(deviceFamilyName);
else
continue;
std::cout << "\tDevice: " << deviceFamilyName << std::endl;
std::cout << "\tDevice: " << device << std::endl;
std::cout << "\tMetrics: " << std::endl;
std::vector<std::string> supportedMetrics = ie.GetMetric(deviceFamilyName, METRIC_KEY(SUPPORTED_METRICS));
std::vector<std::string> supportedMetrics = ie.GetMetric(device, METRIC_KEY(SUPPORTED_METRICS));
for (auto && metricName : supportedMetrics) {
std::cout << "\t\t" << metricName << " : " << std::flush;
printParameterValue(ie.GetMetric(device, metricName));
if (metricName != METRIC_KEY(AVAILABLE_DEVICES)) {
std::cout << "\t\t" << metricName << " : " << std::flush;
printParameterValue(ie.GetMetric(device, metricName));
}
}
std::cout << "\tDefault values for device configuration keys: " << std::endl;
std::vector<std::string> supportedConfigKeys = ie.GetMetric(deviceFamilyName, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
for (auto && configKey : supportedConfigKeys) {
std::cout << "\t\t" << configKey << " : " << std::flush;
printParameterValue(ie.GetConfig(deviceFamilyName, configKey));
if (std::find(supportedMetrics.begin(), supportedMetrics.end(),
METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != supportedMetrics.end()) {
std::cout << "\tDefault values for device configuration keys: " << std::endl;
std::vector<std::string> supportedConfigKeys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
for (auto && configKey : supportedConfigKeys) {
std::cout << "\t\t" << configKey << " : " << std::flush;
printParameterValue(ie.GetConfig(device, configKey));
}
}
std::cout << std::endl;

View File

@@ -37,7 +37,7 @@ int main(int argc, char* argv[]) {
if (device_name.find("CPU") != std::string::npos) {
inPlaceExtension = std::make_shared<InPlaceExtension>();
// register sample's custom kernel (CustomReLU)
ie.AddExtension(inPlaceExtension, "CPU");
ie.AddExtension(inPlaceExtension);
}
// -----------------------------------------------------------------------------------------------------

View File

@@ -86,7 +86,7 @@ int main(int argc, char *argv[]) {
if (!FLAGS_l.empty()) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
}

View File

@@ -69,7 +69,7 @@ int main(int argc, char *argv[]) {
if (!FLAGS_l.empty()) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
ie.AddExtension(extension_ptr, "CPU");
ie.AddExtension(extension_ptr);
slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
}
if (!FLAGS_c.empty()) {

View File

View File

@@ -24,6 +24,8 @@ endif()
add_subdirectory(hetero_plugin)
add_subdirectory(multi_device)
add_subdirectory(transformations)
add_subdirectory(inference_engine)

View File

@@ -42,9 +42,9 @@ add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
# install
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cldnn_global_custom_kernels
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT gpu)
install(FILES "${clDNN_SOURCE_DIR}/kernel_selector/core/cache/cache.json"
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT gpu)

View File

@@ -331,7 +331,9 @@ Parameter clDNNEngine::GetMetric(const std::string& name, const std::map<std::st
availableDevices.push_back(dev.first);
IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
} else if (name == METRIC_KEY(FULL_DEVICE_NAME)) {
IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, StringRightTrim(device_info.dev_name, "NEO", false));
auto deviceName = StringRightTrim(device_info.dev_name, "NEO", false);
deviceName += std::string(" (") + (device_info.dev_type == cldnn::device_type::discrete_gpu ? "dGPU" : "iGPU") + ")";
IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, deviceName);
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::vector<std::string> configKeys;
for (auto opt : _impl->m_config.key_config_map)

View File

@@ -250,6 +250,7 @@ void CLDNNInferRequest::copyInputData(std::shared_ptr<cldnn::network> network,
case Precision::BOOL: {
uint8_t* blob_ptr = const_cast<uint8_t*>(locked.as<const uint8_t*>()) + offset;
network->set_input_data(internalName, cldnn::memory::attach(inputLayout, blob_ptr, n));
break;
}
default:
THROW_IE_EXCEPTION << "The plugin does not support input " << inputBlob.getTensorDesc().getPrecision() << " precision";

View File

@@ -7,9 +7,9 @@
#include <cstdint>
typedef struct {
double slope;
double slope {};
uint64_t slope_scale = 0;
uint32_t slope_scale_index;
uint32_t slope_scale_index {};
} pwl_gna_slope_scale_t;
pwl_gna_slope_scale_t gna_slope(const double slope, const double in_scale, const double out_scale);

View File

@@ -65,6 +65,27 @@ HeteroInferRequest::HeteroInferRequest(InferenceEngine::InputsDataMap networkInp
}
}
void HeteroInferRequest::SetBlob(const char* name, const InferenceEngine::Blob::Ptr& data) {
InferenceEngine::InferRequestInternal::SetBlob(name, data);
assert(!_inferRequests.empty());
for (auto &&desc : _inferRequests) {
auto &r = desc._request;
assert(nullptr != r);
InputInfo::Ptr foundInput;
DataPtr foundOutput;
try {
// if `name` is input blob
if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) {
r->SetBlob(name, data, foundInput->getPreProcess());
}
} catch (const InferenceEngine::details::InferenceEngineException & ex) {
std::string message = ex.what();
if (message.find(NOT_FOUND_str) == std::string::npos)
throw ex;
}
}
}
void HeteroInferRequest::InferImpl() {
updateInOutIfNeeded();
size_t i = 0;

Some files were not shown because too many files have changed in this diff Show More