Compare commits

...

14 Commits

Author SHA1 Message Date
Alexey Suhov e18f88cc1a Fix license header in Movidius sources 2021-06-02 21:28:48 +03:00
Alexey Suhov 0b3773b740 Publishing 2020.3.2 LTS content (#5290) 2021-04-16 21:43:32 +03:00
    * Publishing 2020.3.2 LTS content
Alexey Suhov f26da46e3b Publishing 2020.3.1 LTS content (#3108) 2020-11-12 19:35:17 +03:00
Alexander Zhogov cd95d8d3bb Azure CI: Disable Ninja on Mac due to errors (#809) 2020-06-06 18:29:31 +03:00
azhogov 5c6a0cb922 Azure: Add Ninja 2020-06-06 16:29:54 +03:00
azhogov 2e634cafc9 Add CODEOWNERS and CONTRIBUTING.md 2020-06-06 16:15:24 +03:00
Alexander Zhogov 28f258e18d Enable public CI (#789) 2020-06-05 15:55:45 +03:00
    * Enable public CI
    * Exclude failed nGraph UT by *GPU*:*CPU*
    * Disable absent tests
    * Exclude failed nGraph UT constant.shared_data
Alexey Suhov 2fe9b15230 change repo name to openvino in readme files 2020-06-03 00:08:25 +03:00
Alexey Suhov 9221f41b01 fix permissions for shell scripts 2020-06-02 22:32:00 +03:00
Alexey Suhov 85de6ee857 Publishing 2020.3 content 2020-06-02 21:59:45 +03:00
Moshe David acad2e01e5 w (#394) 2020-05-26 00:28:09 +03:00
    Co-authored-by: modav <modav@microsoft.com>
Ian Hunter 94dd082199 Fix link to Linux Guide (#494) 2020-05-14 13:52:13 +03:00
Alexey Suhov 95a57795dc Publishing 2020.2 content 2020-04-13 21:17:23 +03:00
Alexey Suhov a347375d01 removed ie_rh_decoder.cmake from install target 2020-03-19 21:14:29 +03:00
3181 changed files with 355937 additions and 200133 deletions

.github/workflows/mo.yml (new file)

@@ -0,0 +1,55 @@
name: MO
on:
  push:
    paths:
      - 'model-optimizer/**'
  pull_request:
    paths:
      - 'model-optimizer/**'

jobs:
  Pylint-UT:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: 3.6
      - name: Cache pip
        uses: actions/cache@v1
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('model-optimizer/requirements*.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
            ${{ runner.os }}-
      # tensorflow 1.15 causes module import
      # errors, most likely due to https://github.com/PyCQA/pylint/issues/2603
      # for tensorflow.core.framework and tensorflow.contrib
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools
          # For Pylint
          pip install tensorflow==1.14.0 tensorboard==1.14.0 tensorflow-estimator==1.14.0
          # For UT
          pip install unittest-xml-reporting==3.0.2
          # MO requirements
          pip install -r requirements.txt
          pip install -r requirements_dev.txt
        working-directory: model-optimizer
      - name: Pylint
        run: pylint -d C,R,W mo/ mo.py extensions/
        working-directory: model-optimizer
      - name: UT
        run: |
          export PYTHONPATH=$PYTHONPATH:`pwd`
          export MO_ROOT=`pwd`
          env
          mkdir ../mo-ut-logs
          python3 -m xmlrunner discover -p *_test.py --output=../mo-ut-logs
        working-directory: model-optimizer

CMakeLists.txt

@@ -3,36 +3,41 @@
#
cmake_policy(SET CMP0054 NEW)
# TODO: for 'make install' / package we need to use the 3.13.3 version because
# it allows installing targets created outside of the current project
# See https://blog.kitware.com/cmake-3-13-0-available-for-download/
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
endif()
project(OpenVINO)
set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine)
set(CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
include(CTest)
include(features)
# include developer package
include(developer_package)
include(developer_package NO_POLICY_SCOPE)
# These options are shared with 3rdparty plugins
# by means of developer package
include(check_features)
include(dependencies)
# resolving dependencies for the project
message (STATUS "PROJECT ............................... " ${PROJECT_NAME})
message (STATUS "CMAKE_BINARY_DIR ...................... " ${CMAKE_BINARY_DIR})
message (STATUS "OpenVINO_MAIN_SOURCE_DIR .............. " ${OpenVINO_MAIN_SOURCE_DIR})
if (ENABLE_INFERENCE_ENGINE)
set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine)
message (STATUS "IE_MAIN_SOURCE_DIR .............. " ${IE_MAIN_SOURCE_DIR})
endif()
message (STATUS "IE_MAIN_SOURCE_DIR .............. " ${IE_MAIN_SOURCE_DIR})
message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})
message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID})
message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
@@ -42,57 +47,43 @@ file(REMOVE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
file(REMOVE "${CMAKE_BINARY_DIR}/targets.cmake")
function(build_ngraph)
if(NOT ENABLE_NGRAPH)
return()
endif()
function(ngraph_set option value)
if(NOT DEFINED ${option})
set(${option} ${value} CACHE BOOL "" FORCE)
endif()
endfunction()
add_definitions(-DENABLE_NGRAPH)
set(NGRAPH_BUILD_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} CACHE STRING "" FORCE)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${OpenVINO_MAIN_SOURCE_DIR}/ngraph/cmake/Modules/")
ngraph_set(NGRAPH_ADDRESS_SANITIZER FALSE)
if (ENABLE_SANITIZER)
ngraph_set(NGRAPH_ADDRESS_SANITIZER TRUE)
else ()
ngraph_set(NGRAPH_ADDRESS_SANITIZER FALSE)
endif ()
ngraph_set(NGRAPH_TOOLS_ENABLE FALSE)
ngraph_set(NGRAPH_CPU_ENABLE FALSE)
ngraph_set(NGRAPH_MLIR_ENABLE FALSE)
ngraph_set(NGRAPH_INTELGPU_ENABLE FALSE)
ngraph_set(NGRAPH_GPU_ENABLE FALSE)
ngraph_set(NGRAPH_INTERPRETER_ENABLE FALSE)
ngraph_set(NGRAPH_INTERPRETER_ENABLE TRUE)
ngraph_set(NGRAPH_NOP_ENABLE FALSE)
ngraph_set(NGRAPH_GPUH_ENABLE FALSE)
ngraph_set(NGRAPH_GENERIC_CPU_ENABLE FALSE)
ngraph_set(NGRAPH_DEBUG_ENABLE FALSE)
ngraph_set(NGRAPH_DEPRECATED_ENABLE FALSE)
ngraph_set(NGRAPH_DEX_ONLY FALSE)
ngraph_set(NGRAPH_ENABLE_CPU_CONV_AUTO FALSE)
ngraph_set(NGRAPH_CODE_COVERAGE_ENABLE FALSE)
ngraph_set(NGRAPH_LIB_VERSIONING_ENABLE FALSE)
if (ENABLE_PYTHON AND NOT WIN32)
ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE TRUE)
else()
ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE FALSE)
endif()
ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE FALSE)
ngraph_set(NGRAPH_PLAIDML_ENABLE FALSE)
ngraph_set(NGRAPH_DISTRIBUTED_ENABLE FALSE)
ngraph_set(NGRAPH_FAST_MATH_ENABLE FALSE)
ngraph_set(NGRAPH_JSON_ENABLE FALSE)
ngraph_set(NGRAPH_STATIC_LIB_ENABLE FALSE)
ngraph_set(NGRAPH_INTERPRETER_STATIC_LIB_ENABLE FALSE)
ngraph_set(NGRAPH_CPU_STATIC_LIB_ENABLE FALSE)
ngraph_set(NGRAPH_DYNAMIC_COMPONENTS_ENABLE FALSE)
ngraph_set(NGRAPH_NATIVE_ARCH_ENABLE FALSE)
if (NOT ANDROID)
ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE TRUE)
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE)
else()
ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE)
ngraph_set(NGRAPH_TEST_UTIL_ENABLE FALSE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE FALSE)
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE FALSE)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
@@ -108,11 +99,7 @@ function(build_ngraph)
if (UNIX)
ie_add_compiler_flags(-Wno-error=return-type -Wno-undef)
elseif(WIN32)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4308 /wd4146")
endif()
if(UNIX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4308 /wd4146 /wd4703 /wd4244")
endif()
if(ENABLE_LTO)
@@ -121,15 +108,58 @@ function(build_ngraph)
ie_cpack_add_component(ngraph)
set(SDL_cmake_included ON)
# set(NGRAPH_COMPONENT_PREFIX "deployment_tools/ngraph/")
add_subdirectory(ngraph)
endfunction()
build_ngraph()
if (ENABLE_INFERENCE_ENGINE)
add_subdirectory(inference-engine)
endif()
add_subdirectory(inference-engine)
add_subdirectory(docs)
# cpack
# install setupvars
ie_cpack_add_component(setupvars REQUIRED)
if(UNIX)
install(PROGRAMS scripts/setupvars/setupvars.sh
DESTINATION bin
COMPONENT setupvars)
elseif(WIN32)
install(PROGRAMS scripts/setupvars/setupvars.bat
DESTINATION bin
COMPONENT setupvars)
endif()
# install install_dependencies
if(UNIX)
ie_cpack_add_component(install_dependencies REQUIRED)
install(DIRECTORY scripts/install_dependencies/
DESTINATION install_dependencies
COMPONENT install_dependencies)
endif()
# install files for demo
ie_cpack_add_component(demo_scripts REQUIRED DEPENDS core)
if(UNIX)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.bat EXCLUDE)
elseif(WIN32)
install(DIRECTORY scripts/demo/
DESTINATION deployment_tools/demo
COMPONENT demo_scripts
USE_SOURCE_PERMISSIONS
PATTERN *.sh EXCLUDE)
endif()
ie_cpack(${IE_CPACK_COMPONENTS_ALL})

CODEOWNERS (new file)

@@ -0,0 +1,66 @@
# See help here: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
* @openvinotoolkit/openvino-maintainers
CODEOWNERS @openvinotoolkit/openvino-admins @openvinotoolkit/openvino-maintainers
# CI:
Jenkinsfile @openvinotoolkit/openvino-admins
azure-pipelines.yml @openvinotoolkit/openvino-admins
/.github/ @openvinotoolkit/openvino-admins
# QA Tests:
/tests/ @openvinotoolkit/openvino-tests-maintainers
# IE Core:
/inference-engine/ @openvinotoolkit/openvino-ie-maintainers
/inference-engine/src/transformations/ @GlebKazantaev @ichuraev
/inference-engine/src/legacy_api/ @openvinotoolkit/openvino-ngraph-maintainers
/inference-engine/src/readers/ @openvinotoolkit/openvino-ngraph-maintainers
# IE CPU:
/inference-engine/src/mkldnn_plugin/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/src/low_precision_transformations/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
/inference-engine/thirdparty/mkl-dnn/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
# IE GPU:
/inference-engine/src/cldnn_engine/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/include/cldnn/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
/inference-engine/thirdparty/clDNN/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
# IE VPU:
/inference-engine/src/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/include/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/thirdparty/movidius/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/tests_deprecated/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/functional/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/behavior/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/functional/plugin/myriad/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/unit/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/unit/engines/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tools/vpu/ @openvinotoolkit/openvino-ie-vpu-maintainers
/inference-engine/scripts/run_tests_myriad_multistick.sh @openvinotoolkit/openvino-ie-vpu-maintainers
# IE GNA:
/inference-engine/src/gna_plugin/ @openvinotoolkit/openvino-ie-gna-maintainers
/inference-engine/include/gna/ @openvinotoolkit/openvino-ie-gna-maintainers
# IE MULTI:
/inference-engine/src/multi_device/ @openvinotoolkit/openvino-ie-multi-maintainers
/inference-engine/include/multi-device/ @openvinotoolkit/openvino-ie-multi-maintainers
# IE Tests:
/inference-engine/tests/ @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests_deprecated/ @openvinotoolkit/openvino-ie-tests-maintainers
/inference-engine/tests/functional/inference_engine/ngraph_reader/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
/inference-engine/tests/functional/inference_engine/transformations/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
# MO:
/model-optimizer/ @openvinotoolkit/openvino-mo-maintainers
# nGraph:
/ngraph/ @openvinotoolkit/openvino-ngraph-maintainers
# Tools
/tools/ @openvinotoolkit/openvino-tools-maintainers

CONTRIBUTING.md (new file)

@@ -0,0 +1,18 @@
# How to Contribute
We welcome community contributions to the OpenVINO™ repository.
If you have an idea for improving the product, please share it
with us by following these steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide relevant unit tests and one or more samples
* Submit a pull request at https://github.com/openvinotoolkit/openvino/pulls
## OpenVINO™ Coding Style Guide
We generally follow the Google C++ style (https://google.github.io/styleguide/cppguide.html) with some exceptions:
* 4 spaces instead of 2 spaces for indentation
* A line-length limit of 160 characters
* C++ exceptions are allowed
* `using namespace` is allowed in .cpp files and prohibited in headers
* A leading underscore before members of classes/structures
* thisStyleForFunctions()
* theSameStyleForVariables
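
A short sketch illustrating these conventions; the class and member names are invented for illustration and do not come from the repository:

```cpp
#include <string>

class LayerDescriptor {
public:
    explicit LayerDescriptor(const std::string& layerName) : _layerName(layerName) {}

    // thisStyleForFunctions()
    std::string getLayerName() const {
        return _layerName;  // 4-space indentation
    }

private:
    std::string _layerName;  // leading underscore before a member
};
```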

README.md

@@ -1,5 +1,5 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2020.1-green.svg)](https://github.com/opencv/dldt/releases/tag/2020.1)
[![Stable release](https://img.shields.io/badge/version-2020.3-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2020.3.2)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
This toolkit allows developers to deploy pre-trained deep learning models
@@ -30,23 +30,13 @@ and release your contribution under these terms.
* [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
## How to Contribute
We welcome community contributions to the Deep Learning Deployment Toolkit
repository. If you have an idea for improving the product, please share it
with us by following these steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide relevant unit tests and one or more sample
* Submit a pull request at https://github.com/opencv/dldt/pulls
We will review your contribution and, if any additional fixes or modifications
are necessary, may give some feedback to guide you. Your pull request will be
merged into GitHub* repositories if accepted.
See [CONTRIBUTING](./CONTRIBUTING.md) for details. Thank you!
## Support
Please report questions, issues and suggestions using:
* The `openvino` [tag on StackOverflow]\*
* [GitHub* Issues](https://github.com/opencv/dldt/issues)
* [GitHub* Issues](https://github.com/openvinotoolkit/openvino/issues)
* [Forum](https://software.intel.com/en-us/forums/computer-vision)
---

azure-pipelines.yml (new file)

@@ -0,0 +1,345 @@
jobs:
- job: Lin
  # About 150% of total time
  timeoutInMinutes: 75
  pool:
    #vmImage: 'ubuntu-18.04'
    name: LIN_VMSS_VENV_F8S_WU2
  variables:
    BUILD_TYPE: Release
    BIN_DIR: ../bin/intel64/$(BUILD_TYPE)
  steps:
  - script: |
      whoami
      uname -a
      which python3
      gcc --version
      lsb_release
      env
      cat /proc/cpuinfo
      cat /proc/meminfo
      vmstat -s
      df
    displayName: 'System properties'
  - script: |
      sudo apt --assume-yes install libusb-1.0-0-dev
      python3 -m pip install -r ./inference-engine/ie_bridges/python/requirements.txt
      # For running Python API tests
      python3 -m pip install -r ./inference-engine/ie_bridges/python/src/requirements-dev.txt
    displayName: 'Install dependencies'
  - script: |
      wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
      unzip ninja-linux.zip
      sudo cp -v ninja /usr/local/bin/
    displayName: 'Install Ninja'
  - script: git submodule update --init --recursive --jobs 8
    displayName: 'Clone submodules'
  - script: |
      mkdir dldt-build
      cd dldt-build
    displayName: 'Create build directory'
  - task: CMake@1
    inputs:
      workingDirectory: dldt-build
      # CMake must get Python 3.x version by default
      cmakeArgs: .. -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON
  - script: ninja
    workingDirectory: dldt-build
    displayName: 'Build Lin'
  - script: ls -alR ../bin/
    workingDirectory: dldt-build
    displayName: 'List files'
  - script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*GPU*:*CPU*:constant.shared_data
    workingDirectory: dldt-build
    displayName: 'nGraph UT'
    continueOnError: false
  - script: $(BIN_DIR)/InferenceEngineUnitTests
    workingDirectory: dldt-build
    displayName: 'IE UT old'
    continueOnError: false
  - script: $(BIN_DIR)/ieUnitTests
    workingDirectory: dldt-build
    displayName: 'IE UT'
    continueOnError: false
  - script: $(BIN_DIR)/cpuUnitTests
    workingDirectory: dldt-build
    displayName: 'CPU UT'
    continueOnError: false
  - script: $(BIN_DIR)/gnaUnitTests
    workingDirectory: dldt-build
    displayName: 'GNA UT'
    continueOnError: false
  - script: $(BIN_DIR)/vpuUnitTests
    workingDirectory: dldt-build
    displayName: 'VPU UT'
    continueOnError: false
  - script: $(BIN_DIR)/ieFuncTests
    workingDirectory: dldt-build
    displayName: 'IE FuncTests'
    continueOnError: false
  - script: $(BIN_DIR)/cpuFuncTests
    workingDirectory: dldt-build
    displayName: 'CPU FuncTests'
    continueOnError: false
  - script: $(BIN_DIR)/MklDnnBehaviorTests
    workingDirectory: dldt-build
    displayName: 'MklDnnBehaviorTests'
    continueOnError: false
    enabled: false
  - script: git clone https://github.com/openvinotoolkit/testdata.git
    displayName: 'Clone testdata'
    enabled: false
  - script: |
      export DATA_PATH=`pwd`/../testdata
      export MODELS_PATH=`pwd`/../testdata
      $(BIN_DIR)/MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
    workingDirectory: dldt-build
    displayName: 'MklDnnFunctionalTests'
    continueOnError: false
    enabled: false
  - script: |
      export DATA_PATH=`pwd`/../testdata
      export MODELS_PATH=`pwd`/../testdata
      $(BIN_DIR)/InferenceEngineCAPITests
    workingDirectory: dldt-build
    displayName: 'IE CAPITests'
    continueOnError: false
    enabled: false
  - script: |
      export DATA_PATH=`pwd`/../testdata
      export MODELS_PATH=`pwd`/../testdata
      export LD_LIBRARY_PATH=`pwd`/$(BIN_DIR)/lib
      export PYTHONPATH=`pwd`/$(BIN_DIR)/lib/python_api/python3.6
      env
      cd ../inference-engine/ie_bridges/python/tests
      pytest
    workingDirectory: dldt-build
    displayName: 'Python API Tests'
    continueOnError: false
    enabled: false
- job: Mac
  # About 150% of total time
  timeoutInMinutes: 130
  pool:
    vmImage: 'macOS-10.15'
  variables:
    BUILD_TYPE: Release
    BIN_DIR: ../bin/intel64/$(BUILD_TYPE)
  steps:
  - task: UsePythonVersion@0
    inputs:
      versionSpec: '3.7'
  - script: |
      whoami
      uname -a
      which python3
      gcc --version
      xcrun --sdk macosx --show-sdk-version
      env
      sysctl -a
    displayName: 'System properties'
  - script: |
      brew install cython
      brew install automake
    displayName: 'Install dependencies'
  - script: brew install ninja
    displayName: 'Install Ninja'
  - script: git submodule update --init --recursive --jobs 8
    displayName: 'Clone submodules'
  - script: |
      mkdir dldt-build
      cd dldt-build
    displayName: 'Create build directory'
  - script: |
      export PATH="/usr/local/opt/cython/bin:$PATH"
      export CC=gcc
      export CXX=g++
      # Disable errors with Ninja
      #export CXXFLAGS="-Wno-error=unused-command-line-argument"
      #export CFLAGS="-Wno-error=unused-command-line-argument"
      cmake .. -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON
    workingDirectory: dldt-build
    displayName: 'CMake'
  - script: make -j3
    workingDirectory: dldt-build
    displayName: 'Build Mac'
  - script: ls -alR ../bin/
    workingDirectory: dldt-build
    displayName: 'List files'
  - script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*GPU*:*CPU*:constant.shared_data
    workingDirectory: dldt-build
    displayName: 'nGraph UT'
    continueOnError: false
  - script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_filter=-*MKLDNNGraph*
    workingDirectory: dldt-build
    displayName: 'IE UT old'
    continueOnError: false
  - script: $(BIN_DIR)/ieUnitTests
    workingDirectory: dldt-build
    displayName: 'IE UT'
    continueOnError: false
  - script: $(BIN_DIR)/cpuUnitTests
    workingDirectory: dldt-build
    displayName: 'CPU UT'
    continueOnError: false
  - script: $(BIN_DIR)/vpuUnitTests
    workingDirectory: dldt-build
    displayName: 'VPU UT'
    continueOnError: false
  - script: $(BIN_DIR)/ieFuncTests
    workingDirectory: dldt-build
    displayName: 'IE FuncTests'
    continueOnError: false
  - script: $(BIN_DIR)/cpuFuncTests
    workingDirectory: dldt-build
    displayName: 'CPU FuncTests'
    continueOnError: false
  - script: $(BIN_DIR)/MklDnnBehaviorTests
    workingDirectory: dldt-build
    displayName: 'MklDnnBehaviorTests'
    continueOnError: false
    enabled: false
  - script: git clone https://github.com/openvinotoolkit/testdata.git
    displayName: 'Clone testdata'
    enabled: false
  - script: |
      export DATA_PATH=`pwd`/../testdata
      export MODELS_PATH=`pwd`/../testdata
      $(BIN_DIR)/MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
    workingDirectory: dldt-build
    displayName: 'MklDnnFunctionalTests'
    continueOnError: false
    enabled: false
  - script: |
      export DATA_PATH=`pwd`/../testdata
      export MODELS_PATH=`pwd`/../testdata
      $(BIN_DIR)/InferenceEngineCAPITests
    workingDirectory: dldt-build
    displayName: 'IE CAPITests'
    continueOnError: false
    enabled: false
- job: Win
  # About 150% of total time
  timeoutInMinutes: 120
  pool:
    #vmImage: 'vs2017-win2016'
    name: WIN_VMSS_VENV_F8S_WU2
  variables:
    BUILD_TYPE: Release
    BUILD_DIR: D:\dldt-build
    BIN_DIR: ..\bin\intel64
    MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
    MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
  steps:
  - script: |
      where python3
      wmic computersystem get TotalPhysicalMemory
      wmic cpu list
      wmic logicaldisk get description,name
      wmic VOLUME list
      set
    displayName: 'System properties'
  - script: |
      certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip ninja-win.zip
      powershell -command "Expand-Archive -Force ninja-win.zip"
    displayName: Install Ninja
  - script: git submodule update --init --recursive --jobs 8
    displayName: 'Clone submodules'
  - script: |
      rd /Q /S $(BUILD_DIR)
      mkdir $(BUILD_DIR)\bin
      rd /Q /S dldt-build
      mkdir dldt-build
    displayName: 'Create build directory'
  - script: |
      set PATH=$(Build.Repository.LocalPath)\ninja-win;%PATH%
      call "$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(Build.Repository.LocalPath)
    workingDirectory: $(BUILD_DIR)
    displayName: 'CMake'
  - script: |
      set PATH=$(Build.Repository.LocalPath)\ninja-win;%PATH%
      call "$(MSVS_VARS_PATH)" && ninja
    workingDirectory: $(BUILD_DIR)
    displayName: 'Build Win'
  - script: dir ..\bin\ /s /b
    workingDirectory: dldt-build
    displayName: 'List files'
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*GPU*:*CPU*:constant.shared_data
    workingDirectory: dldt-build
    displayName: 'nGraph UT'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\InferenceEngineUnitTests
    workingDirectory: dldt-build
    displayName: 'IE UT old'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\ieUnitTests
    workingDirectory: dldt-build
    displayName: 'IE UT'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\cpuUnitTests
    workingDirectory: dldt-build
    displayName: 'CPU UT'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\gnaUnitTests
    workingDirectory: dldt-build
    displayName: 'GNA UT'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\vpuUnitTests
    workingDirectory: dldt-build
    displayName: 'VPU UT'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\ieFuncTests
    workingDirectory: dldt-build
    displayName: 'IE FuncTests'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\cpuFuncTests
    workingDirectory: dldt-build
    displayName: 'CPU FuncTests'
    continueOnError: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;%PATH%
      $(BIN_DIR)\MklDnnBehaviorTests
    workingDirectory: dldt-build
    displayName: 'MklDnnBehaviorTests'
    continueOnError: false
    enabled: false
  - script: git clone https://github.com/openvinotoolkit/testdata.git
    workingDirectory: $(BUILD_DIR)
    displayName: 'Clone testdata'
    enabled: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;$(Build.Repository.LocalPath)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
      set DATA_PATH=$(BUILD_DIR)\testdata
      set MODELS_PATH=$(BUILD_DIR)\testdata
      $(BIN_DIR)\MklDnnFunctionalTests --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric*
    workingDirectory: dldt-build
    displayName: 'MklDnnFunctionalTests'
    continueOnError: false
    enabled: false
  - script: |
      set PATH=$(Build.Repository.LocalPath)\inference-engine\temp\tbb\bin;$(Build.Repository.LocalPath)\inference-engine\temp\opencv_4.3.0\opencv\bin;%PATH%
      set DATA_PATH=$(BUILD_DIR)\testdata
      set MODELS_PATH=$(BUILD_DIR)\testdata
      $(BIN_DIR)\InferenceEngineCAPITests
    workingDirectory: dldt-build
    displayName: 'IE CAPITests'
    continueOnError: false
    enabled: false

build-instruction.md

@@ -28,7 +28,6 @@
- [Add Inference Engine to Your Project](#add-inference-engine-to-your-project)
- [(Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2](#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2)
- [For Linux, Raspbian Stretch* OS](#for-linux-raspbian-stretch-os)
- [For Windows](#for-windows-1)
- [Next Steps](#next-steps)
- [Additional Resources](#additional-resources)
@@ -57,15 +56,15 @@ The software was validated on:
- CentOS\* 7.4 (64-bit) with default GCC\* 4.8.5
### Software Requirements
- [CMake]\* 3.5 or higher
- [CMake]\* 3.11 or higher
- GCC\* 4.8 or higher to build the Inference Engine
- Python 2.7 or higher for Inference Engine Python API wrapper
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441].
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352].
### Build Steps
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -78,7 +77,7 @@ The software was validated on:
```
3. By default, the build enables the Inference Engine GPU plugin to infer models
on your Intel® Processor Graphics. This requires you to
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352]
before running the build. If you don't want to use the GPU plugin, use the
`-DENABLE_CLDNN=OFF` CMake build option and skip the installation of the
Intel® Graphics Compute Runtime for OpenCL™ Driver.
@@ -172,10 +171,10 @@ Native compilation of the Inference Engine is the most straightforward solution.
sudo apt-get install -y git cmake libusb-1.0-0-dev
```
2. Go to the cloned `dldt` repository:
2. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
3. Initialize submodules:
@@ -234,7 +233,6 @@ with the following content:
build-essential \
crossbuild-essential-armhf \
git \
cmake \
wget \
libusb-1.0-0-dev:armhf \
libgtk-3-dev:armhf \
@@ -245,6 +243,11 @@ with the following content:
libgstreamer-plugins-base1.0-dev:armhf \
libpython3-dev:armhf \
python3-pip
RUN wget https://www.cmake.org/files/v3.14/cmake-3.14.3.tar.gz && \
tar xf cmake-3.14.3.tar.gz && \
(cd cmake-3.14.3 && ./bootstrap --parallel=$(nproc --all) && make --jobs=$(nproc --all) && make install) && \
rm -rf cmake-3.14.3 cmake-3.14.3.tar.gz
```
It uses the Debian\* Stretch (Debian 9) OS for compilation because it is a base of the Raspbian\* Stretch.
@@ -258,15 +261,15 @@ with the following content:
5. Run Docker\* container with mounted source code folder from host:
```bash
docker run -it -v /absolute/path/to/dldt:/dldt ie_cross_armhf /bin/bash
docker run -it -v /absolute/path/to/openvino:/openvino ie_cross_armhf /bin/bash
```
6. While in the container:
1. Go to the cloned `dldt` repository:
1. Go to the cloned `openvino` repository:
```bash
cd dldt
cd openvino
```
2. Create a build folder:
@@ -287,8 +290,8 @@ with the following content:
```
7. Press **Ctrl+D** to exit from Docker. You can find the resulting binaries
in the `dldt/bin/armv7l/` directory and the OpenCV*
installation in the `dldt/inference-engine/temp`.
in the `openvino/bin/armv7l/` directory and the OpenCV*
installation in the `openvino/inference-engine/temp`.
>**NOTE**: Native applications that link to cross-compiled Inference Engine
library require an extra compilation flag `-march=armv7-a`.
@@ -331,7 +334,7 @@ The software was validated on:
Compiler 2018 Update 3
### Software Requirements
- [CMake]\*3.5 or higher
- [CMake]\*3.11 or higher
- Microsoft\* Visual Studio 2017, 2019 or [Intel® C++ Compiler] 18.0
- (Optional) Intel® Graphics Driver for Windows* (26.20) [driver package].
- Python 3.4 or higher for Inference Engine Python API wrapper
@@ -377,8 +380,8 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
6. Before running the samples, add paths to the TBB and OpenCV binaries used for
the build to the `%PATH%` environment variable. By default, TBB binaries are
downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/lib`
folder, OpenCV binaries to the `<dldt_repo>/inference-engine/temp/opencv_4.2.0/bin`
downloaded by the CMake-based script to the `<openvino_repo>/inference-engine/temp/tbb/bin`
folder, OpenCV binaries to the `<openvino_repo>/inference-engine/temp/opencv_4.3.0/opencv/bin`
folder.
### Additional Build Options
@@ -433,7 +436,7 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by openvino cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
@@ -449,7 +452,7 @@ The software was validated on:
### Software Requirements
- [CMake]\* 3.9 or higher
- [CMake]\* 3.11 or higher
- Clang\* compiler from Xcode\* 10.1 or higher
- Python\* 3.4 or higher for the Inference Engine Python API wrapper
@@ -457,7 +460,7 @@ The software was validated on:
1. Clone submodules:
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
@@ -525,9 +528,8 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
### Software Requirements
- [CMake]\* 3.5 or higher
- [CMake]\* 3.11 or higher
- Android NDK (this guide has been validated with r20 release)
- OpenCV for Android
### Build Steps
@@ -540,26 +542,18 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
mv android-ndk-r20 android-ndk
```
2. Download and unpack OpenCV
2. Clone submodules
```sh
cd ~/Downloads
wget https://github.com/opencv/opencv/releases/download/4.2.0/opencv-4.2.0-android-sdk.zip
unzip opencv-4.2.0-android-sdk.zip
```
3. Clone submodules
```sh
cd dldt
cd openvino
git submodule update --init --recursive
```
4. Create a build folder:
3. Create a build folder:
```sh
mkdir build
```
5. Change working directory to `build` and run `cmake` to create makefiles. Then run `make`.
4. Change working directory to `build` and run `cmake` to create makefiles. Then run `make`.
```sh
cd build
@@ -568,7 +562,7 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
-DANDROID_ABI=x86_64 \
-DANDROID_PLATFORM=21 \
-DANDROID_STL=c++_shared \
-DOpenCV_DIR=~/Downloads/OpenCV-android-sdk/sdk/native/jni/
-DENABLE_OPENCV=OFF
make --jobs=$(nproc --all)
```
@@ -580,7 +574,7 @@ This section describes how to build Inference Engine for Android x86 (64-bit) op
## Use Custom OpenCV Builds for Inference Engine
> **NOTE**: The recommended and tested version of OpenCV is 4.2. The minimum
> **NOTE**: The recommended and tested version of OpenCV is 4.3. The minimum
supported version is 3.4.0.
Required versions of OpenCV packages are downloaded automatically during the
@@ -615,7 +609,7 @@ before running the Inference Engine build:
For CMake projects, set the `InferenceEngine_DIR` environment variable:
```sh
export InferenceEngine_DIR=/path/to/dldt/build/
export InferenceEngine_DIR=/path/to/openvino/build/
```
Then you can find the Inference Engine via `find_package`:
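
A minimal consumer `CMakeLists.txt` might look like the following sketch; the project and target names are illustrative, not from this repository:

```cmake
cmake_minimum_required(VERSION 3.5)
project(ie_sample)  # illustrative project name

# InferenceEngine_DIR (set above) lets find_package locate the build tree
find_package(InferenceEngine REQUIRED)

add_executable(ie_sample main.cpp)  # illustrative target
target_link_libraries(ie_sample PRIVATE ${InferenceEngine_LIBRARIES})
```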
@@ -665,26 +659,12 @@ sudo ldconfig
rm 97-myriad-usbboot.rules
```
### For Windows
For Intel® Movidius™ Neural Compute Stick and Intel® Neural Compute Stick 2,
install the Movidius™ VSC driver:
1. Go to the `<DLDT_ROOT_DIR>/inference-engine/thirdparty/movidius/MovidiusDriver`
directory, where the `DLDT_ROOT_DIR` is the directory to which the DLDT
repository was cloned.
2. Right click on the `Movidius_VSC_Device.inf` file and choose **Install** from
the pop-up menu.
You have installed the driver for your Intel® Movidius™ Neural Compute Stick
or Intel® Neural Compute Stick 2.
## Next Steps
Congratulations, you have built the Inference Engine. To get started with
OpenVINO™, proceed to the Get Started guides:
* [Get Started with Deep Learning Deployment Toolkit on Linux*](../get-started-linux.md)
* [Get Started with Deep Learning Deployment Toolkit on Linux*](get-started-linux.md)
## Notice
@@ -711,7 +691,7 @@ This target collects all dependencies, prepares the nGraph package and copies it
[Intel® Distribution of OpenVINO™]:https://software.intel.com/en-us/openvino-toolkit
[CMake]:https://cmake.org/download/
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]:https://github.com/intel/compute-runtime/releases/tag/19.41.14441
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 20.13.16352]:https://github.com/intel/compute-runtime/releases/tag/20.13.16352
[MKL-DNN repository]:https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_lnx_2019.0.5.20190502.tgz
[MKL-DNN repository for Windows]:(https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_win_2019.0.5.20190502.zip)
[OpenBLAS]:https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download

(CMake toolchain file)

@@ -12,3 +12,62 @@ set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
macro(__cmake_find_root_save_and_reset)
foreach(v
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
)
set(__save_${v} ${${v}})
set(${v} NEVER)
endforeach()
endmacro()
macro(__cmake_find_root_restore)
foreach(v
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
)
set(${v} ${__save_${v}})
unset(__save_${v})
endforeach()
endmacro()
# macro to find programs on the host OS
macro(find_host_program)
__cmake_find_root_save_and_reset()
if(CMAKE_HOST_WIN32)
SET(WIN32 1)
SET(UNIX)
elseif(CMAKE_HOST_APPLE)
SET(APPLE 1)
SET(UNIX)
endif()
find_program(${ARGN})
SET(WIN32)
SET(APPLE)
SET(UNIX 1)
__cmake_find_root_restore()
endmacro()
# macro to find packages on the host OS
macro(find_host_package)
__cmake_find_root_save_and_reset()
if(CMAKE_HOST_WIN32)
SET(WIN32 1)
SET(UNIX)
elseif(CMAKE_HOST_APPLE)
SET(APPLE 1)
SET(UNIX)
endif()
find_package(${ARGN})
SET(WIN32)
SET(APPLE)
SET(UNIX 1)
__cmake_find_root_restore()
endmacro()
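
A sketch of how a cross-compiling build might use the macro above; the tool and variable names are illustrative:

```cmake
# Look for a tool on the build host rather than in the target sysroot,
# so that it can be executed during the cross-build.
find_host_program(HOST_PROTOC protoc)  # illustrative names
if(NOT HOST_PROTOC)
    message(FATAL_ERROR "protoc was not found on the build host")
endif()
```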

(A duplicate of the toolchain file diff above.)

cmake/check_features.cmake

@@ -3,13 +3,7 @@
#
if (VERBOSE_BUILD)
set(CMAKE_VERBOSE_MAKEFILE ON)
endif()
# FIXME: there are compiler failures with LTO and Cross-Compile toolchains. Disabling for now, but
# this must be addressed in a proper way
if(CMAKE_CROSSCOMPILING OR NOT (LINUX OR WIN32))
set(ENABLE_LTO OFF)
set(CMAKE_VERBOSE_MAKEFILE ON CACHE BOOL "" FORCE)
endif()
#64 bits platform
@@ -21,35 +15,21 @@ else()
SET(ARCH_64 OFF)
endif()
# 32 bits
if(NOT ARCH_64)
if(UNIX)
set(ENABLE_CLDNN OFF)
endif()
set(ENABLE_MKL_DNN OFF)
endif()
# Apple specific
if (APPLE)
set(ENABLE_CLDNN OFF)
endif()
# ARM specific
if (ARM OR AARCH64)
# disable all base plugins but Myriad
set(ENABLE_CLDNN OFF)
set(ENABLE_MKL_DNN OFF)
endif()
# MinGW specific - under Wine there is no support for downloading files and applying them using git
if (WIN32)
if (MINGW)
SET(ENABLE_CLDNN OFF) # don't have a MinGW DLL for linking
endif()
endif()
if (NOT ENABLE_MKL_DNN)
set(ENABLE_MKL OFF)
endif()
if(ENABLE_AVX512F)
if ((CMAKE_CXX_COMPILER_ID MATCHES MSVC) AND (MSVC_VERSION VERSION_LESS 1920))
# 1920 is the version of MSVC 2019. In MSVC 2017, AVX512F does not work
set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE)
endif()
if (CMAKE_CXX_COMPILER_ID MATCHES Clang)
set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE)
endif()
if ((CMAKE_CXX_COMPILER_ID STREQUAL GNU) AND (NOT (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9)))
set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE)
endif()
endif()
print_enabled_features()

cmake/coverage/coverage.cmake

@@ -0,0 +1,194 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(NOT TARGET ie_coverage_clean)
add_custom_target(ie_coverage_clean)
endif()
if(NOT TARGET ie_coverage_init)
add_custom_target(ie_coverage_init)
endif()
if(NOT TARGET ie_coverage)
add_custom_target(ie_coverage)
endif()
set(IE_COVERAGE_REPORTS "${CMAKE_BINARY_DIR}/coverage")
set(IE_COVERAGE_SCRIPT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/cmake/coverage")
include(CMakeParseArguments)
#
# ie_coverage_clean(REPOSITORY <repo> DIRECTORY <dir>)
#
function(ie_coverage_clean)
cmake_parse_arguments(IE_COVERAGE "" "REPOSITORY;DIRECTORY" "" ${ARGN})
add_custom_target(ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
COMMAND lcov --zerocounters --quiet
--directory "${IE_COVERAGE_DIRECTORY}"
COMMENT "Add zero counters for coverage for ${IE_COVERAGE_REPOSITORY}"
VERBATIM)
add_custom_target(ie_coverage_clean_${IE_COVERAGE_REPOSITORY}
COMMAND ${CMAKE_COMMAND}
-D "IE_COVERAGE_REPORTS=${IE_COVERAGE_REPORTS}"
-D "IE_COVERAGE_DIRECTORY=${IE_COVERAGE_DIRECTORY}"
-D "CMAKE_BINARY_DIRECTORY=${CMAKE_BINARY_DIR}"
-D "CMAKE_SOURCE_DIRECTORY=${CMAKE_SOURCE_DIR}"
-P "${IE_COVERAGE_SCRIPT_DIR}/coverage_clean.cmake"
COMMENT "Clean previously created HTML report files for ${IE_COVERAGE_REPOSITORY}"
DEPENDS "${IE_COVERAGE_SCRIPT_DIR}/coverage_clean.cmake"
VERBATIM)
add_dependencies(ie_coverage_clean ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
ie_coverage_clean_${IE_COVERAGE_REPOSITORY})
endfunction()
#
# ie_coverage_capture(INFO_FILE <info_file>
# BASE_DIRECTORY <base dir>
# DIRECTORY <gcda dir>)
#
function(ie_coverage_capture)
cmake_parse_arguments(IE_COVERAGE "" "INFO_FILE;BASE_DIRECTORY;DIRECTORY" "" ${ARGN})
set(output_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_INFO_FILE}.info")
set(output_base_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_INFO_FILE}_base.info")
set(output_tests_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_INFO_FILE}_tests.info")
add_custom_command(OUTPUT ${output_base_file}
COMMAND ${CMAKE_COMMAND} -E make_directory "${IE_COVERAGE_REPORTS}"
COMMAND lcov --no-external --capture --initial --quiet
--directory "${IE_COVERAGE_DIRECTORY}"
--base-directory "${IE_COVERAGE_BASE_DIRECTORY}"
--output-file ${output_base_file}
COMMENT "Capture initial coverage data ${IE_COVERAGE_INFO_FILE}"
VERBATIM)
add_custom_command(OUTPUT ${output_tests_file}
COMMAND ${CMAKE_COMMAND} -E make_directory "${IE_COVERAGE_REPORTS}"
COMMAND lcov --no-external --capture --quiet
--directory "${IE_COVERAGE_DIRECTORY}"
--base-directory "${IE_COVERAGE_BASE_DIRECTORY}"
--output-file ${output_tests_file}
COMMENT "Capture test coverage data ${IE_COVERAGE_INFO_FILE}"
VERBATIM)
add_custom_command(OUTPUT ${output_file}
COMMAND ${CMAKE_COMMAND}
-D "IE_COVERAGE_OUTPUT_FILE=${output_file}"
-D "IE_COVERAGE_INPUT_FILES=${output_base_file};${output_tests_file}"
-P "${IE_COVERAGE_SCRIPT_DIR}/coverage_merge.cmake"
COMMENT "Generate total coverage data ${IE_COVERAGE_INFO_FILE}"
DEPENDS ${output_base_file} ${output_tests_file}
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_INFO_FILE}_info
DEPENDS ${output_file})
endfunction()
#
# ie_coverage_extract(INPUT <info_file> OUTPUT <output_file> PATTERNS <patterns ...>)
#
function(ie_coverage_extract)
cmake_parse_arguments(IE_COVERAGE "" "INPUT;OUTPUT" "PATTERNS" ${ARGN})
set(input_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_INPUT}.info")
set(output_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_OUTPUT}.info")
set(commands lcov --quiet)
foreach(pattern IN LISTS IE_COVERAGE_PATTERNS)
list(APPEND commands --extract ${input_file} ${pattern})
endforeach()
list(APPEND commands --output-file ${output_file})
add_custom_command(OUTPUT ${output_file}
COMMAND ${commands}
COMMENT "Generate coverage data ${IE_COVERAGE_OUTPUT}"
DEPENDS ${input_file}
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
DEPENDS ${output_file})
add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ie_coverage_${IE_COVERAGE_INPUT}_info)
endfunction()
#
# ie_coverage_remove(INPUT <info_file> OUTPUT <output_file> PATTERNS <patterns ...>)
#
function(ie_coverage_remove)
cmake_parse_arguments(IE_COVERAGE "" "INPUT;OUTPUT" "PATTERNS" ${ARGN})
set(input_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_INPUT}.info")
set(output_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_OUTPUT}.info")
set(commands lcov --quiet)
foreach(pattern IN LISTS IE_COVERAGE_PATTERNS)
list(APPEND commands --remove ${input_file} ${pattern})
endforeach()
list(APPEND commands --output-file ${output_file})
add_custom_command(OUTPUT ${output_file}
COMMAND ${commands}
COMMENT "Generate coverage data ${IE_COVERAGE_OUTPUT}"
DEPENDS ${input_file}
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
DEPENDS ${output_file})
add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ie_coverage_${IE_COVERAGE_INPUT}_info)
endfunction()
#
# ie_coverage_merge(OUTPUT <output file> INPUTS <input files ...>)
#
function(ie_coverage_merge)
cmake_parse_arguments(IE_COVERAGE "" "OUTPUT" "INPUTS" ${ARGN})
set(output_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_OUTPUT}.info")
foreach(input_info_file IN LISTS IE_COVERAGE_INPUTS)
set(input_file ${IE_COVERAGE_REPORTS}/${input_info_file}.info)
list(APPEND dependencies ie_coverage_${input_info_file}_info)
list(APPEND input_files ${input_file})
endforeach()
add_custom_command(OUTPUT ${output_file}
COMMAND ${CMAKE_COMMAND}
-D "IE_COVERAGE_OUTPUT_FILE=${output_file}"
-D "IE_COVERAGE_INPUT_FILES=${input_files}"
-P "${IE_COVERAGE_SCRIPT_DIR}/coverage_merge.cmake"
COMMENT "Generate coverage data ${IE_COVERAGE_OUTPUT}"
DEPENDS ${input_files}
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
DEPENDS ${output_file})
add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ${dependencies})
endfunction()
#
# ie_coverage_genhtml(INFO_FILE <info_file> PREFIX <prefix>)
#
function(ie_coverage_genhtml)
cmake_parse_arguments(IE_COVERAGE "" "INFO_FILE;PREFIX" "" ${ARGN})
set(input_file "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_INFO_FILE}.info")
set(output_directory "${IE_COVERAGE_REPORTS}/${IE_COVERAGE_INFO_FILE}")
add_custom_command(OUTPUT "${output_directory}/index.html"
COMMAND genhtml ${input_file} --title "${IE_COVERAGE_INFO_FILE}" --legend
--no-branch-coverage --demangle-cpp
--output-directory "${output_directory}"
--num-spaces 4 --quiet
--prefix "${IE_COVERAGE_PREFIX}"
DEPENDS ${input_file}
COMMENT "Generate HTML report for ${IE_COVERAGE_INFO_FILE}"
VERBATIM)
add_custom_target(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml
DEPENDS "${output_directory}/index.html")
add_dependencies(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml ie_coverage_${IE_COVERAGE_INFO_FILE}_info)
add_dependencies(ie_coverage ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml)
endfunction()
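
As a sketch, a component could wire these functions together as follows; the repository, directory, and pattern names are illustrative:

```cmake
# Sketch: coverage pipeline for one component (illustrative names)
ie_coverage_clean(REPOSITORY "my_component"
                  DIRECTORY "${CMAKE_BINARY_DIR}/my_component")
ie_coverage_capture(INFO_FILE "my_component"
                    BASE_DIRECTORY "${CMAKE_SOURCE_DIR}/my_component"
                    DIRECTORY "${CMAKE_BINARY_DIR}/my_component")
ie_coverage_extract(INPUT "my_component" OUTPUT "my_component_src"
                    PATTERNS "${CMAKE_SOURCE_DIR}/my_component/src/*")
ie_coverage_genhtml(INFO_FILE "my_component_src"
                    PREFIX "${CMAKE_SOURCE_DIR}")
```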

cmake/coverage/coverage_clean.cmake

@@ -0,0 +1,30 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(NOT DEFINED IE_COVERAGE_REPORTS)
message(FATAL_ERROR "IE_COVERAGE_REPORTS variable is not defined")
return()
endif()
file(REMOVE_RECURSE "${IE_COVERAGE_REPORTS}")
if(NOT DEFINED IE_COVERAGE_DIRECTORY)
message(FATAL_ERROR "IE_COVERAGE_DIRECTORY variable is not defined")
return()
endif()
# remove .gcno files which are kept from the previous build
file(GLOB_RECURSE gcno_files "${IE_COVERAGE_DIRECTORY}/*.gcno")
foreach(file IN LISTS gcno_files)
string(REPLACE ".gcno" "" temp_file "${file}")
string(REGEX REPLACE "CMakeFiles/.+dir/" "" temp_file "${temp_file}")
string(REPLACE "${CMAKE_BINARY_DIRECTORY}" "${CMAKE_SOURCE_DIRECTORY}" source_file "${temp_file}")
if(NOT EXISTS "${source_file}")
file(REMOVE "${file}")
string(REPLACE "${CMAKE_BINARY_DIRECTORY}/" "" file "${file}")
message("Removing ${file}")
endif()
endforeach()

cmake/coverage/coverage_merge.cmake

@@ -0,0 +1,22 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(NOT DEFINED IE_COVERAGE_OUTPUT_FILE)
message(FATAL_ERROR "IE_COVERAGE_OUTPUT_FILE is not defined")
endif()
if(NOT DEFINED IE_COVERAGE_INPUT_FILES)
message(FATAL_ERROR "IE_COVERAGE_INPUT_FILES is not defined")
endif()
set(command lcov --quiet)
foreach(input_info_file IN LISTS IE_COVERAGE_INPUT_FILES)
file(SIZE ${input_info_file} size)
if(NOT size EQUAL 0)
list(APPEND command --add-tracefile "${input_info_file}")
endif()
endforeach()
list(APPEND command --output-file ${IE_COVERAGE_OUTPUT_FILE})
execute_process(COMMAND ${command})

cmake/dependencies.cmake (new file)

@@ -0,0 +1,37 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set_temp_directory(TEMP "${IE_MAIN_SOURCE_DIR}")
include(dependency_solver)
if(CMAKE_CROSSCOMPILING)
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
set(HOST_X86_64 ON)
endif()
set(protoc_version "3.7.1")
if(CMAKE_HOST_SYSTEM_NAME MATCHES Linux)
RESOLVE_DEPENDENCY(SYSTEM_PROTOC_ROOT
ARCHIVE_LIN "protoc-${protoc_version}-linux-x86_64.tar.gz"
TARGET_PATH "${TEMP}/protoc-${protoc_version}-linux-x86_64")
debug_message(STATUS "host protoc-${protoc_version} root path = " ${SYSTEM_PROTOC_ROOT})
else()
message(FATAL_ERROR "Unsupported host system (${CMAKE_HOST_SYSTEM_NAME}) and arch (${CMAKE_HOST_SYSTEM_PROCESSOR}) for cross-compilation")
endif()
reset_deps_cache(SYSTEM_PROTOC)
message("${SYSTEM_PROTOC_ROOT}/bin")
find_program(
SYSTEM_PROTOC
NAMES protoc
PATHS "${SYSTEM_PROTOC_ROOT}/bin"
NO_DEFAULT_PATH)
if(NOT SYSTEM_PROTOC)
message(FATAL_ERROR "[ONNX IMPORTER] Missing host protoc binary")
endif()
update_deps_cache(SYSTEM_PROTOC "${SYSTEM_PROTOC}" "Path to host protoc for ONNX Importer")
endif()

cmake/developer_package.cmake

@@ -2,11 +2,31 @@
# SPDX-License-Identifier: Apache-2.0
#
set(CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake/download" ${CMAKE_MODULE_PATH})
include(CPackComponent)
unset(IE_CPACK_COMPONENTS_ALL CACHE)
set(IE_CPACK_IE_DIR deployment_tools/inference_engine)
# Search packages for the host system instead of packages for the target system
# in case of cross compilation these macros should be defined by the toolchain file
if(NOT COMMAND find_host_package)
macro(find_host_package)
find_package(${ARGN})
endmacro()
endif()
if(NOT COMMAND find_host_program)
macro(find_host_program)
find_program(${ARGN})
endmacro()
endif()
#
# ie_cpack_set_library_dir()
#
# Set library directory for cpack
#
function(ie_cpack_set_library_dir)
string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH)
if(ARCH STREQUAL "x86_64" OR ARCH STREQUAL "amd64") # Windows detects Intel's 64-bit CPU as AMD64
@@ -16,9 +36,13 @@ function(ie_cpack_set_library_dir)
endif()
if(WIN32)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/$<CONFIG>/${ARCH} PARENT_SCOPE)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
else()
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
endif()
endfunction()
@@ -38,14 +62,17 @@ endmacro()
macro(ie_cpack)
set(CPACK_GENERATOR "TGZ")
if(WIN32)
set(CPACK_PACKAGE_NAME inference-engine_$<CONFIG>)
set(CPACK_PACKAGE_NAME inference-engine_${CMAKE_BUILD_TYPE})
string(REPLACE "\\" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
else()
set(CPACK_PACKAGE_NAME inference-engine)
string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
endif()
set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF)
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
set(CPACK_PACKAGE_VENDOR "Intel")
set(CPACK_COMPONENTS_ALL ${ARGN})
set(CPACK_STRIP_FILES ON)
if(OS_FOLDER)
set(CPACK_SYSTEM_NAME "${OS_FOLDER}")
@@ -54,6 +81,32 @@ macro(ie_cpack)
include(CPack)
endmacro()
# prepare temporary folder
function(set_temp_directory temp_variable source_tree_dir)
if (DEFINED ENV{DL_SDK_TEMP} AND NOT $ENV{DL_SDK_TEMP} STREQUAL "")
message(STATUS "DL_SDK_TEMP environment is set : $ENV{DL_SDK_TEMP}")
if (WIN32)
string(REPLACE "\\" "\\\\" temp $ENV{DL_SDK_TEMP})
else()
set(temp $ENV{DL_SDK_TEMP})
endif()
if (ENABLE_ALTERNATIVE_TEMP)
set(ALTERNATIVE_PATH ${source_tree_dir}/temp)
endif()
else ()
set(temp ${source_tree_dir}/temp)
endif()
set("${temp_variable}" "${temp}" CACHE PATH "Path to temp directory")
if(ALTERNATIVE_PATH)
set(ALTERNATIVE_PATH "${ALTERNATIVE_PATH}" PARENT_SCOPE)
endif()
endfunction()
include(coverage/coverage)
# External dependencies
find_package(Threads)
@@ -63,9 +116,8 @@ include(target_flags)
# printing debug messages
include(debug)
if(UNIX AND NOT APPLE)
set(LINUX ON)
endif()
# linking libraries without discarding symbols
include(whole_archive)
string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH_FOLDER)
if(ARCH_FOLDER STREQUAL "x86_64" OR ARCH_FOLDER STREQUAL "amd64") # Windows detects Intel's 64-bit CPU as AMD64
@@ -91,14 +143,6 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "")
set(CMAKE_BUILD_TYPE "Release")
endif()
if(COVERAGE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -O0")
endif()
if(UNIX)
SET(LIB_DL ${CMAKE_DL_LIBS})
endif()
set(OUTPUT_ROOT ${OpenVINO_MAIN_SOURCE_DIR})
# Enable postfixes for Debug/Release builds
@@ -156,14 +200,14 @@ else()
endif()
if(APPLE)
set(CMAKE_MACOSX_RPATH 1)
set(CMAKE_MACOSX_RPATH ON)
endif(APPLE)
# Use solution folders
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
include(sdl)
include(os_flags)
include(os_flags NO_POLICY_SCOPE)
include(sanitizer)
function(set_ci_build_number)

cmake/dependency_solver.cmake

@@ -138,6 +138,14 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
endfunction(RESOLVE_DEPENDENCY)
function (resolve_model_dependency network archive network_model_path)
RESOLVE_DEPENDENCY(${network_model_path}
ARCHIVE "models_archives/${archive}"
TARGET_PATH "${MODELS_PATH}/${network}")
string (REPLACE ${MODELS_PATH} "" relative_path ${${network_model_path}})
set(${network_model_path} ".${relative_path}" PARENT_SCOPE)
endfunction()
function(reset_deps_cache)
#
# Reset the dependencies cache if it was set by dependency solver

cmake/download/download_and_extract.cmake

@@ -154,7 +154,7 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
if(DEFINED ENV{IE_PATH_TO_DEPS})
set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
else()
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.1/inference_engine/${RELATIVE_URL}")
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.3/inference_engine/${RELATIVE_URL}")
endif()
#no message on recursive calls

cmake/features.cmake

@@ -7,20 +7,34 @@ include (options)
# these options are aimed at optimizing build time on a development system
ie_option (ENABLE_NGRAPH "Enable nGraph build" ON)
if(X86_64)
set(ENABLE_MKL_DNN_DEFAULT ON)
else()
set(ENABLE_MKL_DNN_DEFAULT OFF)
endif()
ie_option (ENABLE_MKL_DNN "MKL-DNN plugin for inference engine" ${ENABLE_MKL_DNN_DEFAULT})
ie_option (ENABLE_INFERENCE_ENGINE "Enable Inference Engine build" ON)
ie_dependent_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON "WIN32 OR X86_64;NOT APPLE;NOT MINGW" OFF)
ie_option (ENABLE_MKL_DNN "MKL-DNN plugin for inference engine" ON)
ie_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON)
ie_option (ENABLE_LTO "Enable Link Time Optimization" OFF)
# FIXME: there are compiler failures with LTO and Cross-Compile toolchains. Disabling for now, but
# this must be addressed in a proper way
ie_dependent_option (ENABLE_LTO "Enable Link Time Optimization" OFF "LINUX OR WIN32;NOT CMAKE_CROSSCOMPILING" OFF)
ie_option (OS_FOLDER "create OS dedicated folder in output" OFF)
ie_option (TREAT_WARNING_AS_ERROR "Treat build warnings as errors" OFF)
# FIXME: ARM cross-compiler generates several "false positive" warnings regarding __builtin_memcpy buffer overflow
ie_dependent_option (TREAT_WARNING_AS_ERROR "Treat build warnings as errors" ON "X86 OR X86_64" OFF)
ie_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF)
ie_option (ENABLE_THREAD_SANITIZER "enable checking data races via ThreadSanitizer" OFF)
ie_dependent_option (COVERAGE "enable code coverage" OFF "CMAKE_CXX_COMPILER_ID STREQUAL GNU" OFF)
# Define CPU capabilities
ie_dependent_option (ENABLE_SSE42 "Enable SSE4.2 optimizations" ON "X86_64 OR X86" OFF)
ie_dependent_option (ENABLE_AVX2 "Enable AVX2 optimizations" ON "X86_64 OR X86" OFF)
ie_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR X86" OFF)

cmake/options.cmake

@@ -3,25 +3,25 @@
#
# Usage: ie_option(<option_variable> "description" <initial value or boolean expression> [IF <condition>])
function (ie_option variable description value)
list(FIND IE_OPTIONS "${variable}" result)
include (CMakeDependentOption)
include (version)
if(${result} EQUAL -1)
option(${variable} "${description}" ${value})
list (APPEND IE_OPTIONS "${variable}")
macro (ie_option variable description value)
option(${variable} "${description}" ${value})
list(APPEND IE_OPTIONS ${variable})
endmacro()
set (IE_OPTIONS "${IE_OPTIONS}" PARENT_SCOPE)
endif()
endfunction()
include(version)
macro (ie_dependent_option variable description def_value condition fallback_value)
cmake_dependent_option(${variable} "${description}" ${def_value} "${condition}" ${fallback_value})
list(APPEND IE_OPTIONS ${variable})
endmacro()
function (print_enabled_features)
message(STATUS "Inference Engine enabled features: ")
message("")
message(" CI_BUILD_NUMBER: ${CI_BUILD_NUMBER}")
message(STATUS "Inference Engine enabled features: ")
message(STATUS "")
message(STATUS " CI_BUILD_NUMBER: ${CI_BUILD_NUMBER}")
foreach(_var ${IE_OPTIONS})
message(" ${_var} = ${${_var}}")
message(STATUS " ${_var} = ${${_var}}")
endforeach()
message("")
message(STATUS "")
endfunction()

@@ -14,7 +14,11 @@ macro(disable_deprecated_warnings)
set(ie_c_cxx_deprecated "/wd4996")
endif()
else()
set(ie_c_cxx_deprecated "-Wno-deprecated-declarations")
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
set(ie_c_cxx_deprecated "-diag-disable=1478,1786")
else()
set(ie_c_cxx_deprecated "-Wno-deprecated-declarations")
endif()
endif()
if(NOT ie_c_cxx_deprecated)
@@ -25,6 +29,98 @@ macro(disable_deprecated_warnings)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ie_c_cxx_deprecated}")
endmacro()
#
# Don't treat deprecated warnings as errors
# Defines the ie_c_cxx_deprecated_no_errors variable which contains C / C++ compiler flags
#
macro(ie_deprecated_no_errors)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
set(ie_c_cxx_deprecated_no_errors "/Qdiag-warning:1478,1786")
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(ie_c_cxx_deprecated_no_errors "/wd4996")
endif()
else()
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
set(ie_c_cxx_deprecated_no_errors "-diag-warning=1478,1786")
else()
set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations")
endif()
if(NOT ie_c_cxx_deprecated_no_errors)
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ie_c_cxx_deprecated_no_errors}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ie_c_cxx_deprecated_no_errors}")
endmacro()
#
# Provides SSE4.2 compilation flags depending on an OS and a compiler
#
function(ie_sse42_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
# No such option for MSVC 2019
elseif(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
set(${flags} "/arch:SSE4.2 /QxSSE4.2" PARENT_SCOPE)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
set(${flags} "-msse4.2 -xSSE4.2" PARENT_SCOPE)
else()
set(${flags} "-msse4.2" PARENT_SCOPE)
endif()
endif()
endfunction()
#
# Provides AVX2 compilation flags depending on an OS and a compiler
#
function(ie_avx2_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
set(${flags} "/QxCORE-AVX2" PARENT_SCOPE)
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(${flags} "/arch:AVX2" PARENT_SCOPE)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
set(${flags} "-march=core-avx2 -xCORE-AVX2 -mtune=core-avx2" PARENT_SCOPE)
else()
set(${flags} "-mavx2 -mfma" PARENT_SCOPE)
endif()
endif()
endfunction()
#
# Provides common AVX512 compilation flags for AVX512F instruction set support
# depending on an OS and a compiler
#
function(ie_avx512_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
set(${flags} "/QxCOMMON-AVX512" PARENT_SCOPE)
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(${flags} "/arch:AVX512" PARENT_SCOPE)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL Intel)
set(${flags} "-xCOMMON-AVX512" PARENT_SCOPE)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL GNU)
set(${flags} "-mavx512f -mfma" PARENT_SCOPE)
endif()
endif()
endfunction()
#
# Enables Link Time Optimization compilation
#
@@ -47,6 +143,7 @@ macro(ie_enable_lto)
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /GL")
# set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LTCG:STATUS")
# set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LTCG:STATUS")
# set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /LTCG:STATUS")
endif()
endif()
endmacro()
@@ -68,14 +165,32 @@ endmacro()
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(THREADS_PREFER_PTHREAD_FLAG ON)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# allow overriding CMAKE_CXX_STANDARD from the command line
if(NOT DEFINED CMAKE_CXX_STANDARD)
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(CMAKE_CXX_STANDARD 14)
else()
set(CMAKE_CXX_STANDARD 11)
endif()
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
endif()
if(COVERAGE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --coverage")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --coverage")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage")
endif()
if(NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsigned-char")
endif()
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
set(CMAKE_C_VISIBILITY_PRESET hidden)
set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
if(WIN32)
ie_add_compiler_flags(-D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS)
ie_add_compiler_flags(/EHsc) # no asynchronous structured exception handling
@@ -94,9 +209,6 @@ if(WIN32)
# Compiler specific flags
ie_add_compiler_flags(/bigobj)
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
ie_add_compiler_flags(/MP /std:c++14)
endif()
# Disable noisy warnings
@@ -110,12 +222,14 @@ if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
# 161 unrecognized pragma
# 177 variable was declared but never referenced
# 556 not matched type of assigned function pointer
# 1744: field of class type without a DLL interface used in a class with a DLL interface
# 2586 decorated name length exceeded, name was truncated
# 2651: attribute does not apply to any entity
# 3180 unrecognized OpenMP pragma
# 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
# 15335 was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override
ie_add_compiler_flags(/Qdiag-disable:161,177,2586,2651,3180,11075,15335)
ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,2586,2651,3180,11075,15335)
endif()
# Debug information flags
@@ -139,12 +253,13 @@ if(WIN32)
else()
# TODO: enable for C sources as well
# ie_add_compiler_flags(-Werror)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
if(TREAT_WARNING_AS_ERROR)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
endif()
ie_add_compiler_flags(-ffunction-sections -fdata-sections)
ie_add_compiler_flags(-fvisibility=hidden)
ie_add_compiler_flags(-fdiagnostics-show-option)
ie_add_compiler_flags(-Wundef)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden")
# Disable noisy warnings
@@ -167,8 +282,10 @@ else()
if(APPLE)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-dead_strip")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,-dead_strip")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-dead_strip")
elseif(LINUX)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
endif()
endif()

@@ -5,30 +5,32 @@
include(CheckCXXCompilerFlag)
if (ENABLE_SANITIZER)
set(SANITIZER_COMPILER_FLAGS "-fsanitize=address -fno-omit-frame-pointer")
set(SANITIZER_COMPILER_FLAGS "-g -fsanitize=address -fno-omit-frame-pointer")
CHECK_CXX_COMPILER_FLAG("-fsanitize-recover=address" SANITIZE_RECOVER_SUPPORTED)
if (SANITIZE_RECOVER_SUPPORTED)
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=address")
endif()
set(SANITIZER_LINKER_FLAGS "-fsanitize=address")
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
endif()
if (ENABLE_THREAD_SANITIZER)
set(SANITIZER_COMPILER_FLAGS "-fsanitize=thread")
set(SANITIZER_COMPILER_FLAGS "-g -fsanitize=thread")
set(SANITIZER_LINKER_FLAGS "-fsanitize=thread")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
endif()

@@ -4,25 +4,36 @@
if (CMAKE_BUILD_TYPE STREQUAL "Release")
if(UNIX)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -Wformat -Wformat-security -D_FORTIFY_SOURCE=2")
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -Wformat -Wformat-security")
if (NOT ENABLE_SANITIZER)
# ASan does not support fortification https://github.com/google/sanitizers/issues/247
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -D_FORTIFY_SOURCE=2")
endif()
if(NOT APPLE)
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -pie")
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
else()
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-strong")
endif()
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -s")
if (NOT ENABLE_SANITIZER)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -s")
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-strong -Wl,--strip-all")
if (NOT ENABLE_SANITIZER)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -Wl,--strip-all")
endif()
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-strong")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)

@@ -29,3 +29,7 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
set(AARCH64 ON)
endif()
if(UNIX AND NOT APPLE)
set(LINUX ON)
endif()

cmake/whole_archive.cmake Normal file
@@ -0,0 +1,53 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#[[
Links a static library without removing any symbols from it.
ieTargetLinkWholeArchive(<target name> <lib1> [<lib2> ...])
Example:
ieTargetLinkWholeArchive("MyriadFunctionalTests" "CommonLib" "AnotherLib")
#]]
function(ieTargetLinkWholeArchive targetName)
set(libs)
foreach(staticLib ${ARGN})
if (MSVC)
# CMake does not support generator expressions in LINK_FLAGS, so we work around it a little:
# pass the same static library as a normal link (to get build deps and includes working), then use the WHOLEARCHIVE option.
# It is important not to use a slash '/' for the option!
if (CMAKE_GENERATOR MATCHES "Visual Studio")
# MSBuild fails to parse double quotes in combination with the WHOLEARCHIVE flag.
# Remove quotes from the path - build paths with spaces are not supported, but it's better than nothing.
list(APPEND libs ${staticLib}
"-WHOLEARCHIVE:$<TARGET_FILE:${staticLib}>"
)
if (CMAKE_CURRENT_BINARY_DIR MATCHES " ")
message(WARNING "Visual Studio CMake generator may cause problems if your build directory contains spaces. "
"Remove spaces from path or select different generator.")
endif()
else()
list(APPEND libs ${staticLib}
"-WHOLEARCHIVE:\"$<TARGET_FILE:${staticLib}>\""
)
endif()
elseif(APPLE)
list(APPEND libs
"-Wl,-all_load"
${staticLib}
"-Wl,-noall_load"
)
else()
list(APPEND libs
"-Wl,--whole-archive"
${staticLib}
"-Wl,--no-whole-archive"
)
endif()
endforeach()
if (libs)
target_link_libraries(${targetName} PRIVATE ${libs})
endif()
endfunction()

docs/CMakeLists.txt Normal file
@@ -0,0 +1,47 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(NOT ENABLE_DOCKER)
add_subdirectory(examples)
# Detect nGraph
find_package(ngraph QUIET)
if(NOT ngraph_FOUND)
set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
endif()
# Detect InferenceEngine
find_package(InferenceEngine QUIET)
if(NOT InferenceEngine_FOUND)
set(InferenceEngine_DIR ${CMAKE_BINARY_DIR})
endif()
add_subdirectory(template_extension)
endif()
# OpenVINO docs
set(OPENVINO_DOCS_PATH "" CACHE PATH "Path to openvino-documentation local repository")
set(args "")
if(OPENVINO_DOCS_PATH)
set(args "${args} ovinodoc_path:${OPENVINO_DOCS_PATH}")
endif()
file(GLOB_RECURSE docs_files "${OpenVINO_MAIN_SOURCE_DIR}/docs")
file(GLOB_RECURSE include_files "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/include")
file(GLOB_RECURSE ovino_files "${OPENVINO_DOCS_PATH}")
add_custom_target(ie_docs
COMMAND ./build_docs.sh ${args}
WORKING_DIRECTORY "${OpenVINO_MAIN_SOURCE_DIR}/docs/build_documentation"
COMMENT "Generating OpenVINO documentation"
SOURCES ${docs_files} ${include_files} ${ovino_files}
VERBATIM)
find_program(browser NAMES xdg-open)
if(browser)
add_custom_target(ie_docs_open
COMMAND ${browser} "${OpenVINO_MAIN_SOURCE_DIR}/doc/html/index.html"
DEPENDS ie_docs
COMMENT "Open OpenVINO documentation"
VERBATIM)
endif()

@@ -0,0 +1,14 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME ie_docs_examples)
file(GLOB SOURCES *.cpp)
add_library(ie_docs_examples STATIC ${SOURCES})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api)
#add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
#add_clang_format_target(clang_format_${TARGET_NAME} FOR_TARGETS ${TARGET_NAME})

@@ -0,0 +1,68 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <threading/ie_itask_executor.hpp>
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include <memory>
using namespace InferenceEngine;
class AcceleratorSyncRequest : public InferRequestInternal {
public:
using Ptr = std::shared_ptr<AcceleratorSyncRequest>;
void Preprocess();
void WriteToDevice();
void RunOnDevice();
void ReadFromDevice();
void PostProcess();
};
// ! [async_infer_request:define_pipeline]
// Inherits from AsyncInferRequestThreadSafeDefault
class AcceleratorAsyncInferRequest : public AsyncInferRequestThreadSafeDefault {
// Store the pointer to the synchronous request and five executors
AcceleratorAsyncInferRequest(const AcceleratorSyncRequest::Ptr& syncRequest,
const ITaskExecutor::Ptr& preprocessExecutor,
const ITaskExecutor::Ptr& writeToDeviceExecutor,
const ITaskExecutor::Ptr& runOnDeviceExecutor,
const ITaskExecutor::Ptr& readFromDeviceExecutor,
const ITaskExecutor::Ptr& postProcessExecutor) :
AsyncInferRequestThreadSafeDefault(syncRequest, nullptr, nullptr),
_accSyncRequest{syncRequest},
_preprocessExecutor{preprocessExecutor},
_writeToDeviceExecutor{writeToDeviceExecutor},
_runOnDeviceExecutor{runOnDeviceExecutor},
_readFromDeviceExecutor{readFromDeviceExecutor},
_postProcessExecutor{postProcessExecutor}
{
// Five pipeline stages of synchronous infer request are run by different executors
_pipeline = {
{ _preprocessExecutor , [this] {
_accSyncRequest->Preprocess();
}},
{ _writeToDeviceExecutor , [this] {
_accSyncRequest->WriteToDevice();
}},
{ _runOnDeviceExecutor , [this] {
_accSyncRequest->RunOnDevice();
}},
{ _readFromDeviceExecutor , [this] {
_accSyncRequest->ReadFromDevice();
}},
{ _postProcessExecutor , [this] {
_accSyncRequest->PostProcess();
}},
};
}
// Because all stages use the _accSyncRequest member, we must wait for all stage tasks to finish before the destructor destroys this member.
~AcceleratorAsyncInferRequest() {
StopAndWait();
}
AcceleratorSyncRequest::Ptr _accSyncRequest;
ITaskExecutor::Ptr _preprocessExecutor, _writeToDeviceExecutor, _runOnDeviceExecutor, _readFromDeviceExecutor, _postProcessExecutor;
};
// ! [async_infer_request:define_pipeline]

@@ -0,0 +1,53 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <threading/ie_cpu_streams_executor.hpp>
#include <memory>
#include <future>
#include <iostream>
void example1() {
// ! [itask_executor:define_pipeline]
// std::promise is a move-only object, so to satisfy the copyable-callable constraint we wrap it in std::shared_ptr
auto promise = std::make_shared<std::promise<void>>();
// Once the promise is created, we can get a std::future to wait for the result
auto future = promise->get_future();
// Rather simple task
InferenceEngine::Task task = [] {std::cout << "Some Output" << std::endl; };
// Create an executor
InferenceEngine::ITaskExecutor::Ptr taskExecutor = std::make_shared<InferenceEngine::CPUStreamsExecutor>();
if (taskExecutor == nullptr) {
// ProcessError(e);
return;
}
// We capture the task and the promise. When the task is executed in the task executor context,
// we manually call the std::promise::set_value() method
taskExecutor->run([task, promise] {
std::exception_ptr currentException;
try {
task();
} catch(...) {
// If there is an exception, store a pointer to the current exception
currentException = std::current_exception();
}
if (nullptr == currentException) {
promise->set_value(); // <-- If there are no problems, just call std::promise::set_value()
} else {
promise->set_exception(currentException); // <-- If there is an exception, forward it to the std::future object
}
});
// To wait for task completion, we call the std::future::wait method
future.wait(); // The current thread blocks here until std::promise::set_value()
// or std::promise::set_exception() is called.
// If the future stores an exception, it is rethrown by the std::future::get method
try {
future.get();
} catch(std::exception& /*e*/) {
// ProcessError(e);
}
// ! [itask_executor:define_pipeline]
}

@@ -0,0 +1,18 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:extension]
set(TARGET_NAME "template_extension")
find_package(ngraph REQUIRED)
find_package(InferenceEngine REQUIRED)
file(GLOB_RECURSE SRC *.cpp)
add_library(${TARGET_NAME} SHARED ${SRC})
target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_API)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES}
${NGRAPH_LIBRARIES})
# [cmake:extension]

@@ -0,0 +1,124 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <details/ie_exception.hpp>
#include <ie_layouts.h>
using namespace TemplateExtension;
//! [cpu_implementation:ctor]
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
try {
auto castedNode = std::dynamic_pointer_cast<Operation>(node);
if (!castedNode)
THROW_IE_EXCEPTION << "Cannot create implementation for unknown operation!";
if (castedNode->inputs().size() != 1 || castedNode->outputs().size() != 1)
THROW_IE_EXCEPTION << "Cannot create implementation for operation with incorrect number of inputs or outputs!";
if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic())
THROW_IE_EXCEPTION << "Cannot create implementation for op with dynamic shapes!";
if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4)
THROW_IE_EXCEPTION << "Operation supports only 4d tensors for input and output.";
if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32)
THROW_IE_EXCEPTION << "Operation supports only FP32 tensors.";
add = castedNode->getAddAttr();
} catch (InferenceEngine::details::InferenceEngineException& ex) {
error = ex.what();
}
}
//! [cpu_implementation:ctor]
//! [cpu_implementation:getSupportedConfigurations]
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept {
auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
InferenceEngine::DataConfig inData;
InferenceEngine::DataConfig outData;
InferenceEngine::SizeVector order = {0, 1, 2, 3};
// Allow any offset before data
size_t offset((std::numeric_limits<size_t>::max)());
if (planar) {
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset});
config.outConfs.push_back(outData);
} else {
// Add blocked (nChw8c) format
auto div_up = [](const int a, const int b) -> int {
if (!b)
return 0;
return (a + b - 1) / b;
};
order.push_back(1);
InferenceEngine::SizeVector inBlkDims = inShape;
inBlkDims[1] = div_up(inBlkDims[1], 8);
inBlkDims.push_back(8);
InferenceEngine::SizeVector outBlkDims = outShape;
outBlkDims[1] = div_up(outBlkDims[1], 8);
outBlkDims.push_back(8);
inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset});
config.inConfs.push_back(inData);
outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset});
config.outConfs.push_back(outData);
}
return config;
};
if (!error.empty()) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
// Add planar format
conf.emplace_back(createConfig(inShape, outShape, true));
// Add blocked format nChw8c
conf.emplace_back(createConfig(inShape, outShape, false));
return InferenceEngine::OK;
}
//! [cpu_implementation:getSupportedConfigurations]
//! [cpu_implementation:init]
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
THROW_IE_EXCEPTION << "Operation cannot be initialized with incorrect number of inputs/outputs!";
}
if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
THROW_IE_EXCEPTION << "Operation can be initialized only with 4d input/output tensors!";
}
if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
THROW_IE_EXCEPTION << "Operation supports only FP32 precisions!";
}
} catch (InferenceEngine::details::InferenceEngineException& ex) {
if (resp) {
strncpy(resp->msg, ex.what(), sizeof(resp->msg) - 1);  // report the caught exception, not the stale constructor error
resp->msg[sizeof(resp->msg)-1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
return InferenceEngine::OK;
}
//! [cpu_implementation:init]
//! [cpu_implementation:execute]
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept {
const float* src_data = inputs[0]->cbuffer().as<const float *>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float *dst_data = outputs[0]->buffer().as<float *>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
for (size_t i = 0; i < inputs[0]->size(); i++) {
dst_data[i] = src_data[i] + add;
}
return InferenceEngine::OK;
}
//! [cpu_implementation:execute]

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <ngraph/ngraph.hpp>
namespace TemplateExtension {
//! [cpu_implementation:header]
class OpImplementation : public InferenceEngine::ILayerExecImpl {
public:
explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept override;
private:
int64_t add;
ngraph::Shape inShape;
ngraph::Shape outShape;
std::string error;
};
//! [cpu_implementation:header]
} // namespace TemplateExtension

@@ -0,0 +1,73 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "extension.hpp"
#include "cpu_kernel.hpp"
#include "op.hpp"
#include <ngraph/factory.hpp>
#include <ngraph/opsets/opset.hpp>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
using namespace TemplateExtension;
//! [extension:GetVersion]
void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept {
static InferenceEngine::Version ExtensionDescription = {
{1, 0}, // extension API version
"1.0",
"template_ext" // extension description message
};
versionInfo = &ExtensionDescription;
}
//! [extension:GetVersion]
//! [extension:getOpSets]
std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
std::map<std::string, ngraph::OpSet> opsets;
ngraph::OpSet opset;
opset.insert<Operation>();
opsets["custom_opset"] = opset;
return opsets;
}
//! [extension:getOpSets]
//! [extension:getImplTypes]
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) {
if (std::dynamic_pointer_cast<Operation>(node)) {
return {"CPU"};
}
return {};
}
//! [extension:getImplTypes]
//! [extension:getImplementation]
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) {
if (std::dynamic_pointer_cast<Operation>(node) && implType == "CPU") {
return std::make_shared<OpImplementation>(node);
}
return nullptr;
}
//! [extension:getImplementation]
//! [extension:CreateExtension]
// Exported function
INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
InferenceEngine::ResponseDesc *resp) noexcept {
try {
ext = new Extension();
return OK;
} catch (std::exception &ex) {
if (resp) {
std::string err = ((std::string) "Couldn't create extension: ") + ex.what();
err.copy(resp->msg, 255);
}
return InferenceEngine::GENERAL_ERROR;
}
}
//! [extension:CreateExtension]
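
A minimal sketch of loading the compiled extension through the Inference Engine Core API; the shared library name, device name, and model path are illustrative assumptions:

```cpp
#include <ie_core.hpp>
#include <ie_extension.h>

int main() {
    InferenceEngine::Core core;
    // Create an IExtension from the shared library built by the [cmake:extension]
    // target above (the file name is platform-dependent and assumed here)
    auto extension =
        InferenceEngine::make_so_pointer<InferenceEngine::IExtension>("libtemplate_extension.so");
    core.AddExtension(extension, "CPU");
    // Networks using the custom "Template" operation from "custom_opset"
    // can now be read and compiled
    auto network = core.ReadNetwork("model.xml");
    return 0;
}
```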

@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_iextension.h>
#include <ie_api.h>
#include <ngraph/ngraph.hpp>
#include <memory>
#include <vector>
#include <string>
#include <map>
//! [extension:header]
namespace TemplateExtension {
class Extension : public InferenceEngine::IExtension {
public:
Extension() = default;
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override;
void Unload() noexcept override {}
void Release() noexcept override { delete this; }
std::map<std::string, ngraph::OpSet> getOpSets() override;
std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override;
InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override;
};
} // namespace TemplateExtension
//! [extension:header]

@@ -0,0 +1,38 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op.hpp"
using namespace TemplateExtension;
constexpr ngraph::NodeTypeInfo Operation::type_info;
//! [op:ctor]
Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) {
constructor_validate_and_infer_types();
}
//! [op:ctor]
//! [op:validate]
void Operation::validate_and_infer_types() {
// The operation doesn't change the shape or the element type
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}
//! [op:validate]
//! [op:copy]
std::shared_ptr<ngraph::Node> Operation::copy_with_new_args(const ngraph::NodeVector &new_args) const {
if (new_args.size() != 1) {
throw ngraph::ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Operation>(new_args.at(0), add);
}
//! [op:copy]
//! [op:visit_attributes]
bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) {
visitor.on_attribute("add", add);
return true;
}
//! [op:visit_attributes]
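
As a usage sketch, the snippet below builds an ngraph::Function containing the custom operation; the input shape and the "add" attribute value are arbitrary examples:

```cpp
#include <memory>
#include <ngraph/ngraph.hpp>
#include "op.hpp"

// Shape and the "add" attribute value are arbitrary examples
std::shared_ptr<ngraph::Function> makeFunctionWithTemplateOp() {
    auto arg = std::make_shared<ngraph::op::Parameter>(
        ngraph::element::f32, ngraph::Shape{1, 3, 224, 224});
    auto op = std::make_shared<TemplateExtension::Operation>(arg, /*add=*/42);
    return std::make_shared<ngraph::Function>(ngraph::NodeVector{op},
                                              ngraph::ParameterVector{arg});
}
```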

@@ -0,0 +1,29 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ngraph/ngraph.hpp>
//! [op:header]
namespace TemplateExtension {
class Operation : public ngraph::op::Op {
public:
static constexpr ngraph::NodeTypeInfo type_info{"Template", 0};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
Operation() = default;
Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add);
void validate_and_infer_types() override;
std::shared_ptr<ngraph::Node> copy_with_new_args(const ngraph::NodeVector& new_args) const override;
bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
int64_t getAddAttr() { return add; }
private:
int64_t add;
};
//! [op:header]
} // namespace TemplateExtension

View File

@@ -0,0 +1,31 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:main]
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
endif()
project(InferenceEngineTemplatePlugin)
set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DIR})
find_package(InferenceEngineDeveloperPackage REQUIRED)
add_subdirectory(src)
if(ENABLE_TESTS)
include(CTest)
enable_testing()
endif()
# [cmake:main]
# install
# ATTENTION: uncomment to install component
# ie_cpack(template)

@@ -0,0 +1,18 @@
# template-plugin
A Template Plugin for the Inference Engine that demonstrates the basics of how an Inference Engine plugin can be built and implemented on top of the Inference Engine Developer Package and Plugin API.
## How to build
```bash
$ cd $DLDT_HOME
$ mkdir $DLDT_HOME/build
$ cd $DLDT_HOME/build
$ cmake -DENABLE_TESTS=ON -DENABLE_BEH_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON ..
$ make -j8
$ cd $TEMPLATE_PLUGIN_HOME
$ mkdir $TEMPLATE_PLUGIN_HOME/build
$ cd $TEMPLATE_PLUGIN_HOME/build
$ cmake -DInferenceEngineDeveloperPackage_DIR=$DLDT_HOME/build ..
$ make -j8
```

@@ -0,0 +1,59 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header that defines advanced configuration properties for the Template plugin.
* These properties should be used in the SetConfig() and LoadNetwork() methods of plugins.
*
* @file template_config.hpp
*/
#pragma once
#include <string>
#include "ie_plugin_config.hpp"
namespace InferenceEngine {
namespace TemplateMetrics {
/**
* @def TEMPLATE_METRIC_VALUE(name)
* @brief Shortcut for defining Template metric values
*/
#define TEMPLATE_METRIC_VALUE(name) InferenceEngine::TemplateMetrics::name
#define DECLARE_TEMPLATE_METRIC_VALUE(name) static constexpr auto name = #name
// ! [public_header:metrics]
/**
* @brief Defines whether current Template device instance supports hardware blocks for fast convolution computations.
*/
DECLARE_TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION);
// ! [public_header:metrics]
} // namespace TemplateMetrics
namespace TemplateConfigParams {
/**
* @def TEMPLATE_CONFIG_KEY(name)
* @brief Shortcut for defining Template device configuration keys
*/
#define TEMPLATE_CONFIG_KEY(name) InferenceEngine::TemplateConfigParams::_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_KEY(name) DECLARE_CONFIG_KEY(TEMPLATE_##name)
#define DECLARE_TEMPLATE_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(TEMPLATE_##name)
/**
* @brief The key to define the type of transformations for TEMPLATE inputs and outputs.
* TEMPLATE uses a custom data layout for input and output blobs. The IE TEMPLATE plugin provides
* custom optimized versions of the transformation functions that do not use OpenMP and are much
* faster than the native TEMPLATE functions. Values: "NO" - optimized plugin transformations
* are used, "YES" - native TEMPLATE transformations are used.
*/
DECLARE_TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY);
} // namespace TemplateConfigParams
} // namespace InferenceEngine
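
A minimal sketch of passing the key declared above through the public API, assuming a plugin registered under the "TEMPLATE" device name whose Configuration accepts the key:

```cpp
#include <ie_core.hpp>
#include <template/template_config.hpp>

int main() {
    InferenceEngine::Core core;
    // TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY) expands to the "TEMPLATE_ANY_CONFIG_KEY"
    // string declared above; "NO" selects the optimized plugin transformations.
    // Whether the plugin accepts the key depends on its Configuration parser.
    core.SetConfig({{TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY), CONFIG_VALUE(NO)}}, "TEMPLATE");
    return 0;
}
```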

@@ -0,0 +1,43 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# [cmake:plugin]
set(TARGET_NAME "templatePlugin")
if(ENABLE_LTO)
ie_enable_lto()
endif()
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
# adds a shared library with plugin
ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "TEMPLATE"
SOURCES ${SOURCES} ${HEADERS}
SKIP_INSTALL # ATTENTION: uncomment to install component
VERSION_DEFINES_FOR template_plugin.cpp)
target_include_directories(${TARGET_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"
"${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include")
target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine IE::inference_engine_transformations ${NGRAPH_LIBRARIES} ${INTEL_ITT_LIBS})
# ATTENTION: uncomment to register a plugin in the plugins.xml file
# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
# POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]
# ATTENTION: uncomment to install component
# install
# set(component_name template)
# ie_cpack_add_component(${component_name} REQUIRED)
# install(TARGETS ${TARGET_NAME}
# RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH}
# ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH}
# LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
# COMPONENT ${component_name})

@@ -0,0 +1,44 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <ie_profiling.hpp>
#include "template_async_infer_request.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [async_infer_request:ctor]
TemplateAsyncInferRequest::TemplateAsyncInferRequest(
const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest), _waitExecutor(waitExecutor) {
_pipeline = {
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(PreprocessingAndStartPipeline)
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(WaitPipeline)
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
IE_PROFILING_AUTO_SCOPE(Postprocessing)
_inferRequest->inferPostprocess();
}}
};
}
// ! [async_infer_request:ctor]
// ! [async_infer_request:dtor]
TemplateAsyncInferRequest::~TemplateAsyncInferRequest() {
InferenceEngine::AsyncInferRequestThreadSafeDefault::StopAndWait();
}
// ! [async_infer_request:dtor]

@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp>
#include "template_infer_request.hpp"
namespace TemplatePlugin {
// ! [async_infer_request:header]
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~TemplateAsyncInferRequest() override;
private:
TemplateInferRequest::Ptr _inferRequest;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [async_infer_request:header]
} // namespace TemplatePlugin

@@ -0,0 +1,45 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include <vector>
#include <algorithm>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <file_utils.h>
#include <cpp_interfaces/exception2status.hpp>
#include "template_config.hpp"
using namespace TemplatePlugin;
Configuration::Configuration() { }
Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) {
*this = defaultCfg;
for (auto&& c : config) {
const auto& key = c.first;
const auto& value = c.second;
if (CONFIG_KEY(DEVICE_ID) == key) {
deviceId = std::stoi(value);
} else if (CONFIG_KEY(PERF_COUNT) == key) {
perfCount = (CONFIG_VALUE(YES) == value);
} else if (throwOnUnsupported) {
THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << key;
}
}
}
InferenceEngine::Parameter Configuration::Get(const std::string& name) const {
if (name == CONFIG_KEY(DEVICE_ID)) {
return {std::to_string(deviceId)};
} else if (name == CONFIG_KEY(PERF_COUNT)) {
return {perfCount};
} else {
THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << name;
}
}
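
A small usage sketch for the parser above (values are illustrative): Get() returns an InferenceEngine::Parameter that converts back with as<T>():

```cpp
#include <string>
#include <ie_plugin_config.hpp>
#include "template_config.hpp"

void configurationSketch() {
    // Values are illustrative; unknown keys would throw NOT_FOUND
    TemplatePlugin::ConfigMap userConfig = {
        {CONFIG_KEY(DEVICE_ID), "0"},
        {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
    };
    TemplatePlugin::Configuration cfg{userConfig};
    bool perfCount = cfg.Get(CONFIG_KEY(PERF_COUNT)).as<bool>();              // true
    std::string deviceId = cfg.Get(CONFIG_KEY(DEVICE_ID)).as<std::string>();  // "0"
    (void)perfCount;
    (void)deviceId;
}
```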

@@ -0,0 +1,40 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <vector>
#include <string>
#include <map>
#include <unordered_map>
#include <ie_parameter.hpp>
namespace TemplatePlugin {
template<typename T>
using IOMap = std::unordered_map<std::string, T>;
// ! [configuration:header]
using ConfigMap = std::map<std::string, std::string>;
struct Configuration {
Configuration();
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true);
InferenceEngine::Parameter Get(const std::string& name) const;
// Plugin configuration parameters
int deviceId = 0;
bool perfCount = true;
};
// ! [configuration:header]
} // namespace TemplatePlugin

@@ -0,0 +1,167 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <atomic>
#include <set>
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include <ie_metric_helpers.hpp>
#include <ie_util_internal.hpp>
#include <ie_plugin_config.hpp>
#include <network_serializer.h>
#include <threading/ie_executor_manager.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ngraph/specialize_function.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <transformations/convert_divide.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
const Configuration& cfg):
_name(network.getName()),
_cfg(cfg),
_waitExecutor(InferenceEngine::ExecutorManager::getInstance()->getExecutor("Template")) {
// TODO: if your plugin supports device IDs (more than a single instance of the device can be present on the host machine),
// you should select the proper device based on KEY_DEVICE_ID or automatic behavior.
// In this case, _waitExecutor should also be created per device.
try {
if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
CompileGraph(ngraphFunction);
} else {
THROW_IE_EXCEPTION << "TEMPLATE plugin can compile only IR v10 networks";
}
}
catch (const InferenceEngineException & e) {
throw e;
}
catch (const std::exception & e) {
THROW_IE_EXCEPTION << "Standard exception from compilation library: " << e.what();
}
catch (...) {
THROW_IE_EXCEPTION << "Generic exception is thrown";
}
}
// ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
const Configuration& cfg) :
_cfg(cfg) {
// TODO: since Import network is not a mandatory functionality, this ctor can just be removed
}
// ! [executable_network:ctor_import_stream]
// ! [executable_network:compile_graph]
void TemplatePlugin::ExecutableNetwork::CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction) {
// TODO: perform actual graph compilation taking `_cfg` into account
// 1. Copy the ngraph::Function first to apply some transformations later in
// ExecutableNetwork::CompileGraph, which modify the original ngraph::Function
const bool shareConsts = false, constFolding = false;
std::vector<::ngraph::element::Type> new_types;
std::vector<::ngraph::PartialShape> new_shapes;
for (const auto &parameter : ngraphFunction->get_parameters()) {
new_shapes.emplace_back(parameter->get_partial_shape());
new_types.emplace_back(parameter->get_element_type());
}
auto copyFunction = ngraph::specialize_function(std::const_pointer_cast<ngraph::Function>(ngraphFunction),
new_types, new_shapes, std::vector<void *>(new_types.size(), nullptr), constFolding, shareConsts);
// 2. Perform common and device-specific transformations
ngraph::pass::Manager passManager;
// Example: register standard ngraph transformation from ngraph::ngraph
passManager.register_pass<ngraph::pass::ConstantFolding>();
// Example: register inference engine optimization transformation for IE::inference_engine_transformations
passManager.register_pass<ngraph::pass::ConvertDivide>();
// Register any other transformations
// ..
// After `run_passes`, we have the transformed function, where operations match device operations,
// and we can create device hardware-dependent graph
passManager.run_passes(copyFunction);
// 3. Iterate over operations and create hardware-specific ngraph
for (const auto& op : copyFunction->get_ordered_ops()) {
// TODO: map ngraph `op` to device operation
}
// 4. Perform any other steps like allocation and filling device buffers, and so on
}
// ! [executable_network:compile_graph]
// ! [executable_network:create_infer_request_impl]
InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
}
// ! [executable_network:create_infer_request_impl]
// ! [executable_network:create_infer_request]
void TemplatePlugin::ExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) {
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
auto asyncThreadSafeImpl = std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor, _waitExecutor, _callbackExecutor);
asyncRequest.reset(new InferenceEngine::InferRequestBase<TemplateAsyncInferRequest>(asyncThreadSafeImpl),
[](InferenceEngine::IInferRequest *p) { p->Release(); });
asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
}
// ! [executable_network:create_infer_request]
// ! [executable_network:get_config]
void TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const {
// TODO: return more supported values for config keys
if (name == CONFIG_KEY(DEVICE_ID) ||
name == CONFIG_KEY(PERF_COUNT)) {
result = _cfg.Get(name);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
}
}
// ! [executable_network:get_config]
// ! [executable_network:get_metric]
void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
// TODO: return more supported values for metrics
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector<std::string>{
METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT)});
} else if (METRIC_KEY(NETWORK_NAME) == name) {
result = IE_SET_METRIC(NETWORK_NAME, _name);
} else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
// TODO: fill with actual number
unsigned int value = 1;
result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
}
}
// ! [executable_network:get_metric]
// ! [executable_network:export_impl]
void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& dlaModel) {
// TODO: Code which exports graph from std::ostream
}
// ! [executable_network:export_impl]
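
For context, a sketch of how the metrics and config keys implemented above surface through the public API; the model path and the registered "TEMPLATE" device name are assumptions:

```cpp
#include <ie_core.hpp>
#include <ie_plugin_config.hpp>

void queryExecNetworkSketch() {
    InferenceEngine::Core core;
    auto execNetwork = core.LoadNetwork(core.ReadNetwork("model.xml"), "TEMPLATE");
    // Served by ExecutableNetwork::GetMetric above
    auto nireq = execNetwork.GetMetric(
        METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
    // Served by ExecutableNetwork::GetConfig above
    auto perfCount = execNetwork.GetConfig(CONFIG_KEY(PERF_COUNT)).as<bool>();
    (void)nireq;
    (void)perfCount;
}
```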

@@ -0,0 +1,68 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <utility>
#include <tuple>
#include <memory>
#include <string>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <ie_common.h>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <cnn_network_impl.hpp>
#include <threading/ie_itask_executor.hpp>
#include <ngraph/function.hpp>
#include "template_config.hpp"
#include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"
namespace TemplatePlugin {
class Engine;
/**
* @class ExecutableNetwork
* @brief Interface of executable network
*/
// ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
ExecutableNetwork(InferenceEngine::ICNNNetwork& network,
const Configuration& cfg);
ExecutableNetwork(std::istream & model,
const Configuration& cfg);
~ExecutableNetwork() override = default;
// Methods from a base class ExecutableNetworkThreadSafeDefault
void ExportImpl(std::ostream& model) override;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
std::atomic<std::size_t> _requestId = {0};
std::string _name;
Configuration _cfg;
private:
void CompileGraph(const std::shared_ptr<const ngraph::Function> & ngraphFunction);
std::shared_ptr<Engine> _plugin;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [executable_network:header]
} // namespace TemplatePlugin

@@ -0,0 +1,224 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>
#include <ie_blob.h>
#include <ie_plugin.hpp>
#include <description_buffer.hpp>
#include <debug.h>
#include <ie_layouts.h>
#include <threading/ie_executor_manager.hpp>
#include <blob_transform.hpp>
#include <ie_parallel.hpp>
#include <ie_memcpy.h>
#include <precision_utils.h>
#include <template/template_config.hpp>
#include "template_infer_request.hpp"
#include "template_executable_network.hpp"
#include "template_plugin.hpp"
using namespace TemplatePlugin;
using Time = std::chrono::high_resolution_clock;
using ns = std::chrono::nanoseconds;
using fsec = std::chrono::duration<float>;
// ! [infer_request:ctor]
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
InferRequestInternal(networkInputs, networkOutputs),
_executableNetwork(executableNetwork) {
// TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
auto requestID = std::to_string(_executableNetwork->_requestId);
_executableNetwork->_requestId++;
std::string name = _executableNetwork->_name + "_Req" + requestID;
_profilingTask = { {
{ ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Preprocess") },
{ ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_Postprocess") },
{ ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_StartPipline") },
{ ProfilingTask("Template" + std::to_string(_executableNetwork->_cfg.deviceId) + "_" + name + "_WaitPipline") },
} };
allocateDeviceBuffers();
allocateInputBlobs();
allocateOutputBlobs();
}
// ! [infer_request:ctor]
// ! [infer_request:dtor]
TemplateInferRequest::~TemplateInferRequest() {
_executableNetwork->_requestId--;
}
// ! [infer_request:dtor]
void TemplateInferRequest::allocateDeviceBuffers() {
// TODO: allocate device buffers if Template device is a remote one
}
void TemplateInferRequest::allocateInputBlobs() {
for (auto &networkInput : _networkInputs) {
SizeVector dims = networkInput.second->getTensorDesc().getDims();
Precision precision = networkInput.second->getTensorDesc().getPrecision();
Layout input_layout = networkInput.second->getInputData()->getLayout();
Blob::Ptr inputBlob;
Blob::Ptr inputBlobNCHW;
switch (precision) {
case Precision::FP32 :
inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, input_layout });
if (input_layout == Layout::NHWC) {
inputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
}
break;
case Precision::FP16 :
case Precision::I16 :
inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, input_layout });
if (input_layout == Layout::NHWC) {
inputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
}
break;
case Precision::U8 :
inputBlobNCHW = inputBlob = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, input_layout });
if (input_layout == Layout::NHWC) {
inputBlobNCHW = InferenceEngine::make_shared_blob<uint8_t>({ precision, dims, Layout::NCHW });
}
break;
default:
THROW_IE_EXCEPTION << "Unsupported network precision: " << precision
<< precision << "! Supported precisions are: FP32, FP16, I16, U8";
}
// allocate the input blob
inputBlob->allocate();
_inputs[networkInput.first] = inputBlob;
if (inputBlobNCHW != inputBlob) {
inputBlobNCHW->allocate();
}
_inputsNCHW[networkInput.first] = inputBlobNCHW;
}
}
void TemplateInferRequest::allocateOutputBlobs() {
for (auto &networkOutput : _networkOutputs) {
SizeVector dims = networkOutput.second->getTensorDesc().getDims();
Precision precision = networkOutput.second->getPrecision();
Blob::Ptr outputBlob;
// allocate the output blob
Blob::Ptr outputBlobNCHW;
switch (precision) {
case Precision::FP32 :
outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<float>({ precision, dims, networkOutput.second->getLayout() });
if (networkOutput.second->getLayout() == Layout::NHWC) {
outputBlobNCHW = InferenceEngine::make_shared_blob<float>({ precision, dims, Layout::NCHW });
}
break;
case Precision::FP16 :
outputBlobNCHW = outputBlob = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, networkOutput.second->getLayout() });
if (networkOutput.second->getLayout() == Layout::NHWC) {
outputBlobNCHW = InferenceEngine::make_shared_blob<int16_t>({ precision, dims, Layout::NCHW });
}
break;
default:
THROW_IE_EXCEPTION << PARAMETER_MISMATCH_str << "Unsupported output precision: "
<< precision << "! Supported precisions are: FP32, FP16";
}
// allocate the output blob
outputBlob->allocate();
_outputs[networkOutput.first] = outputBlob;
if (outputBlobNCHW != outputBlob) {
outputBlobNCHW->allocate();
}
_outputsNCHW[networkOutput.first] = outputBlobNCHW;
}
if (_networkOutputs.empty() || _networkInputs.empty()) {
THROW_IE_EXCEPTION << "Internal error: no information about network's output/input";
}
}
// ! [infer_request:infer_impl]
void TemplateInferRequest::InferImpl() {
// TODO: fill with the actual list of pipeline stages, which are executed synchronously for sync infer requests
inferPreprocess();
startPipeline();
waitPipeline();
inferPostprocess();
}
// ! [infer_request:infer_impl]
// ! [infer_request:infer_preprocess]
void TemplateInferRequest::inferPreprocess() {
auto prev = Time::now();
// execute input pre-processing.
InferRequestInternal::execDataPreprocessing(_inputs);
for (auto &input : InferRequestInternal::_inputs) {
auto& src = input.second;
auto& dst = _inputsNCHW[input.first];
if (src != dst) {
if (src->getTensorDesc().getPrecision() == dst->getTensorDesc().getPrecision()
&& src->getTensorDesc().getDims() == dst->getTensorDesc().getDims()
&& src->getTensorDesc().getLayout() == dst->getTensorDesc().getLayout()) {
_inputsNCHW[input.first] = input.second;
} else { // Convert Layout to NCHW
InferenceEngine::blob_copy(src, dst);
}
}
}
// TODO: Preprocessing on inputs if needed: work _inputsNCHW
_inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:infer_preprocess]
void TemplateInferRequest::startPipeline() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[StartPipeline])
// TODO: Start pipeline and fill _inputTransferTime, _executeTime, _outputTransferTime
}
void TemplateInferRequest::waitPipeline() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[WaitPipeline])
auto prev = Time::now();
// TODO: Wait for the pipeline using a driver API or other synchronization methods
_inputPreprocessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
void TemplateInferRequest::inferPostprocess() {
IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[Postprocess])
auto prev = Time::now();
// TODO: perform post-processing and convert to NHWC layout
_outputPostProcessTime = static_cast<double>(std::chrono::duration_cast<ns>(Time::now() - prev).count());
}
// ! [infer_request:get_performance_counts]
void TemplateInferRequest::GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap) const {
InferenceEngineProfileInfo info;
info.execution_index = 0;
info.status = InferenceEngineProfileInfo::EXECUTED;
info.cpu_uSec = info.realTime_uSec = _inputPreprocessTime / 1000;
perfMap["1. input preprocessing"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _inputTransferTime / 1000;
perfMap["2. input transfer to a device"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _executeTime / 1000;
perfMap["3. execution time"] = info;
info.cpu_uSec = 0;
info.realTime_uSec = _outputTransferTime / 1000;
perfMap["4. output transfer from a device"] = info;
info.cpu_uSec = info.realTime_uSec = _outputPostProcessTime / 1000;
perfMap["5. output postprocessing"] = info;
}
// ! [infer_request:get_performance_counts]
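
A sketch of reading these counters back through the public InferRequest API (model path and device name assumed as above); the printed keys match the map filled in GetPerformanceCounts:

```cpp
#include <iostream>
#include <ie_core.hpp>

void printPerfCountersSketch() {
    InferenceEngine::Core core;
    auto execNetwork = core.LoadNetwork(core.ReadNetwork("model.xml"), "TEMPLATE");
    auto request = execNetwork.CreateInferRequest();
    request.Infer();
    // Entries "1. input preprocessing" ... "5. output postprocessing" come from
    // TemplateInferRequest::GetPerformanceCounts above
    for (const auto& entry : request.GetPerformanceCounts()) {
        std::cout << entry.first << ": " << entry.second.realTime_uSec << " us" << std::endl;
    }
}
```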

@@ -0,0 +1,74 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include <memory>
#include <unordered_map>
#include <ie_common.h>
#include <ie_profiling.hpp>
#include <cpp_interfaces/impl/ie_infer_request_internal.hpp>
#include <cpp_interfaces/impl/ie_executable_network_internal.hpp>
#include <threading/ie_itask_executor.hpp>
#include "template_config.hpp"
namespace TemplatePlugin {
class ExecutableNetwork;
// ! [infer_request:header]
class TemplateInferRequest : public InferenceEngine::InferRequestInternal {
public:
typedef std::shared_ptr<TemplateInferRequest> Ptr;
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<ExecutableNetwork>& executableNetwork);
~TemplateInferRequest() override;
void InferImpl() override;
void GetPerformanceCounts(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& perfMap) const override;
// Pipeline stage methods used in the async infer request implementation; each stage is assigned to a particular executor
void inferPreprocess();
void startPipeline();
void waitPipeline();
void inferPostprocess();
std::shared_ptr<ExecutableNetwork> _executableNetwork;
private:
void allocateDeviceBuffers();
void allocateInputBlobs();
void allocateOutputBlobs();
enum {
Preprocess,
Postprocess,
StartPipeline,
WaitPipeline,
numOfStages
};
std::array<InferenceEngine::ProfilingTask, numOfStages> _profilingTask;
InferenceEngine::BlobMap _inputsNCHW;
InferenceEngine::BlobMap _outputsNCHW;
// for performance counts
double _inputPreprocessTime = 0.0;
double _inputTransferTime = 0.0;
double _executeTime = 0.0;
double _outputTransferTime = 0.0;
double _outputPostProcessTime = 0.0;
};
// ! [infer_request:header]
} // namespace TemplatePlugin
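// A hedged sketch (the wrapper function and its wiring are assumptions, not the actual
// async machinery) of how the four pipeline stages declared above are meant to be chained:
void runPipelineOnce(const TemplatePlugin::TemplateInferRequest::Ptr& request) {
    request->inferPreprocess();   // convert user blobs to device layout
    request->startPipeline();     // submit work to the device
    request->waitPipeline();      // block until the device finishes
    request->inferPostprocess();  // convert results back to user layout
}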


@@ -0,0 +1,194 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <utility>
#include <memory>
#include <vector>
#include <sstream>
#include <regex>
#include <string>
#include <map>
#include <ie_metric_helpers.hpp>
#include <details/ie_cnn_network_tools.h>
#include <ie_plugin_config.hpp>
#include <ie_util_internal.hpp>
#include <inference_engine.hpp>
#include <file_utils.h>
#include <cpp_interfaces/base/ie_plugin_base.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include <graph_tools.hpp>
#include <ie_input_info.hpp>
#include <ie_layouts.h>
#include <hetero/hetero_plugin_config.hpp>
#include <template/template_config.hpp>
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_infer_request.hpp"
using namespace TemplatePlugin;
// ! [plugin:ctor]
Plugin::Plugin() {
// TODO: fill with actual device name
_pluginName = "TEMPLATE";
}
// ! [plugin:ctor]
// ! [plugin:load_exe_network_impl]
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::ICore * core,
const InferenceEngine::ICNNNetwork & network,
const ConfigMap &config) {
auto cfg = Configuration{ config, _cfg };
InferenceEngine::InputsDataMap networkInputs;
InferenceEngine::OutputsDataMap networkOutputs;
network.getInputsInfo(networkInputs);
network.getOutputsInfo(networkOutputs);
// TODO: check with precisions supported by Template device
for (auto networkOutput : networkOutputs) {
auto output_precision = networkOutput.second->getPrecision();
if (output_precision != Precision::FP32 &&
output_precision != Precision::FP16) {
THROW_IE_EXCEPTION << "Template device supports only FP16 and FP32 output precision.";
}
}
for (auto networkInput : networkInputs) {
auto input_precision = networkInput.second->getTensorDesc().getPrecision();
if (input_precision != InferenceEngine::Precision::FP32 &&
input_precision != InferenceEngine::Precision::FP16 &&
input_precision != InferenceEngine::Precision::I16 &&
input_precision != InferenceEngine::Precision::U8) {
THROW_IE_EXCEPTION << "Input image format " << input_precision << " is not supported yet.\n"
<< "Supported formats are: FP32, FP16, I16 and U8.";
}
}
auto clonedNetwork = cloneNet(network);
ConstTransformer transformator(clonedNetwork.get());
transformator.fullTrim();
return std::make_shared<ExecutableNetwork>(*clonedNetwork, cfg);
}
// ! [plugin:load_exe_network_impl]
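// Illustrative caller side (an assumption, not part of the sources): Core::LoadNetwork
// dispatches to LoadExeNetworkImpl above, and an unsupported precision surfaces as an exception.
static void exampleLoad(InferenceEngine::Core& ie, InferenceEngine::CNNNetwork& network) {
    try {
        auto executable = ie.LoadNetwork(network, "TEMPLATE");
        (void)executable;
    } catch (const std::exception& ex) {
        (void)ex;  // e.g. "Input image format I8 is not supported yet."
    }
}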
// ! [plugin:import_network_impl]
InferenceEngine::ExecutableNetwork Plugin::ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) {
// TODO: Import network from stream is not mandatory functionality;
// Can just throw an exception and remove the code below
Configuration exportedCfg;
// some code below which reads exportedCfg from `model` stream
// ..
auto cfg = Configuration(config, exportedCfg);
IExecutableNetwork::Ptr executableNetwork;
auto exec_network_impl = std::make_shared<ExecutableNetwork>(model, cfg);
executableNetwork.reset(new ExecutableNetworkBase<ExecutableNetworkInternal>(exec_network_impl),
[](InferenceEngine::details::IRelease *p) {p->Release(); });
return InferenceEngine::ExecutableNetwork{ executableNetwork };
}
// ! [plugin:import_network_impl]
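// Illustrative round trip (an assumption, not part of the sources): exporting a compiled
// network and importing it again, which is routed to ImportNetworkImpl above.
static void exampleExportImport(InferenceEngine::Core& ie, InferenceEngine::ExecutableNetwork& executable) {
    std::stringstream blob;
    executable.Export(blob);                                 // serialize the compiled network
    auto restored = ie.ImportNetwork(blob, "TEMPLATE", {});  // deserialize via ImportNetworkImpl
    (void)restored;
}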
// ! [plugin:query_network]
void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, QueryNetworkResult &res) const {
Configuration cfg{config, _cfg, false};
res.rc = StatusCode::OK;
if (std::shared_ptr<const ngraph::Function> ngraphFunction = network.getFunction()) {
auto ops = ngraphFunction->get_ordered_ops();
for (auto&& op : ops) {
// TODO: investigate if an op is actually supported by Template device
bool supported = true;
if (supported) {
res.supportedLayersMap.insert({ op->get_friendly_name(), GetName() });
}
}
} else {
THROW_IE_EXCEPTION << "TEMPLATE plugin can query only IR v10 networks";
}
}
// ! [plugin:query_network]
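// Illustrative usage (an assumption): collecting the operations the device reports as
// supported through Core::QueryNetwork, which calls the method above.
static std::vector<std::string> exampleSupportedOps(InferenceEngine::Core& ie, InferenceEngine::CNNNetwork& network) {
    std::vector<std::string> supported;
    for (auto&& entry : ie.QueryNetwork(network, "TEMPLATE").supportedLayersMap)
        supported.push_back(entry.first);  // friendly names of supported operations
    return supported;
}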
// ! [plugin:add_extension]
void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
// TODO: add extensions if plugin supports extensions
}
// ! [plugin:add_extension]
// ! [plugin:set_config]
void Plugin::SetConfig(const ConfigMap &config) {
_cfg = Configuration{config, _cfg};
}
// ! [plugin:set_config]
// ! [plugin:get_config]
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const {
return _cfg.Get(name);
}
// ! [plugin:get_config]
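// Illustrative usage (an assumption): toggling performance counting through the Core API;
// these calls end up in the SetConfig/GetConfig methods above.
static void exampleConfigure(InferenceEngine::Core& ie) {
    ie.SetConfig({{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}, "TEMPLATE");
    auto value = ie.GetConfig("TEMPLATE", CONFIG_KEY(PERF_COUNT)).as<std::string>();  // "YES"
    (void)value;
}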
// ! [plugin:get_metric]
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const {
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
std::vector<std::string> supportedMetrics = {
METRIC_KEY(AVAILABLE_DEVICES),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(FULL_DEVICE_NAME),
METRIC_KEY(OPTIMIZATION_CAPABILITIES),
METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = {
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT) };
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
// TODO: fill list of available devices
std::vector<std::string> availableDevices = { "" };
IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
} else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
std::string deviceFullName = "Template Device Full Name";
IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, deviceFullName);
} else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
// TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
std::vector<std::string> capabilities = { METRIC_VALUE(FP32), TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION) };
IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
} else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
// TODO: fill with actual values
using uint = unsigned int;
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
} else {
THROW_IE_EXCEPTION << "Unsupported device metric: " << name;
}
}
// ! [plugin:get_metric]
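// Illustrative usage (an assumption): reading the metrics implemented above via the Core API.
static void exampleMetrics(InferenceEngine::Core& ie) {
    auto fullName = ie.GetMetric("TEMPLATE", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
    auto configKeys = ie.GetMetric("TEMPLATE", METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
    (void)fullName;
    (void)configKeys;
}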
IE_SUPPRESS_DEPRECATED_START
// ! [plugin:create_plugin_engine]
INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, ResponseDesc *resp) noexcept {
try {
plugin = make_ie_compatible_plugin({2, 1, CI_BUILD_NUMBER, "templatePlugin"},
std::make_shared<Plugin>());
return OK;
}
catch (std::exception &ex) {
return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
}
}
// ! [plugin:create_plugin_engine]
IE_SUPPRESS_DEPRECATED_END


@@ -0,0 +1,48 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <inference_engine.hpp>
#include <description_buffer.hpp>
#include <cpp_interfaces/impl/ie_plugin_internal.hpp>
#include <ie_error.hpp>
#include <memory>
#include <string>
#include <map>
#include <unordered_map>
#include <vector>
#include "template_executable_network.hpp"
#include "template_config.hpp"
//! [plugin:header]
namespace TemplatePlugin {
class Plugin : public InferenceEngine::InferencePluginInternal {
public:
using Ptr = std::shared_ptr<Plugin>;
Plugin();
~Plugin() override = default;
void SetConfig(const std::map<std::string, std::string> &config) override;
void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string>& config,
InferenceEngine::QueryNetworkResult &res) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr
LoadExeNetworkImpl(const InferenceEngine::ICore * core, const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string> &config) override;
void AddExtension(InferenceEngine::IExtensionPtr extension) override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::ExecutableNetwork ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;
private:
Configuration _cfg;
};
} // namespace TemplatePlugin
//! [plugin:header]


@@ -1,7 +1,7 @@
# Get Started with OpenVINO™ Deep Learning Deployment Toolkit (DLDT) on Linux*
This guide provides you with the information that will help you to start using
the DLDT on Linux\*. With this guide, you will learn how to:
OpenVINO on Linux\*. With this guide, you will learn how to:
1. [Configure the Model Optimizer](#configure-the-model-optimizer)
2. [Prepare a model for sample inference](#prepare-a-model-for-sample-inference)
@@ -10,13 +10,13 @@ the DLDT on Linux\*. With this guide, you will learn how to:
3. [Run the Image Classification Sample Application with the model](#run-the-image-classification-sample-application)
## Prerequisites
1. This guide assumes that you have already cloned the `dldt` repo and
1. This guide assumes that you have already cloned the `openvino` repo and
successfully built the Inference Engine and Samples using the
[build instructions](inference-engine/README.md).
2. The original structure of the repository directories remains unchanged.
> **NOTE**: Below, the directory to which the `dldt` repository is cloned is
referred to as `<DLDT_DIR>`.
> **NOTE**: Below, the directory to which the `openvino` repository is cloned is
referred to as `<OPENVINO_DIR>`.
## Configure the Model Optimizer
@@ -53,7 +53,7 @@ If you see error messages, check for any missing dependencies.
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script to configure the Model Optimizer for Caffe,
TensorFlow, MXNet, Kaldi\*, and ONNX:
@@ -68,7 +68,7 @@ Configure individual frameworks separately **ONLY** if you did not select
1. Go to the Model Optimizer prerequisites directory:
```sh
cd <DLDT_DIR>/model_optimizer/install_prerequisites
cd <OPENVINO_DIR>/model_optimizer/install_prerequisites
```
2. Run the script for your model framework. You can run more than one script:
@@ -162,20 +162,20 @@ as `<models_dir>` below) with the Model Downloader:
**For CPU (FP32):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP32 --output_dir <ir_dir>
```
**For GPU and MYRIAD (FP16):**
```sh
python3 <DLDT_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
python3 <OPENVINO_DIR>/model_optimizer/mo.py --input_model <models_dir>/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir <ir_dir>
```
After the Model Optimizer script completes, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `<ir_dir>` directory.
3. Copy the `squeezenet1.1.labels` file from the `<DLDT_DIR>/inference-engine/samples/sample_data/`
3. Copy the `squeezenet1.1.labels` file from the `<OPENVINO_DIR>/scripts/demo/`
folder to the model IR directory. This file contains the classes that ImageNet
uses so that the inference results show text instead of classification numbers:
```sh
cp <DLDT_DIR>/inference-engine/samples/sample_data/squeezenet1.1.labels <ir_dir>
cp <OPENVINO_DIR>/scripts/demo/squeezenet1.1.labels <ir_dir>
```
Now you are ready to run the Image Classification Sample Application.
@@ -184,28 +184,28 @@ Now you are ready to run the Image Classification Sample Application.
The Inference Engine sample applications are automatically compiled when you
build the Inference Engine using the [build instructions](inference-engine/README.md).
The binary files are located in the `<DLDT_DIR>/inference-engine/bin/intel64/Release`
The binary files are located in the `<OPENVINO_DIR>/inference-engine/bin/intel64/Release`
directory.
To run the Image Classification sample application with an input image on the prepared IR:
1. Go to the samples build directory:
```sh
cd <DLDT_DIR>/inference-engine/bin/intel64/Release
cd <OPENVINO_DIR>/inference-engine/bin/intel64/Release
2. Run the sample executable, specifying the `car.png` file from the
`<DLDT_DIR>/inference-engine/samples/sample_data/` directory as an input
`<OPENVINO_DIR>/scripts/demo/` directory as an input
image, the IR of your model and a plugin for a hardware device to perform
inference on:
**For CPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d CPU
```
**For GPU:**
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d GPU
```
**For MYRIAD:**
@@ -214,14 +214,14 @@ To run the Image Classification sample application with an input image on the pr
Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires
performing [additional hardware configuration steps](inference-engine/README.md#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2).
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
./classification_sample -i <OPENVINO_DIR>/scripts/demo/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
```
When the Sample Application completes, you will have the label and confidence for the top-10 categories printed on the screen. Below is a sample output with inference results on CPU:
```sh
Top 10 results:
Image /home/user/dldt/inference-engine/samples/sample_data/car.png
Image /home/user/openvino/scripts/demo/car.png
classid probability label
------- ----------- -----


@@ -17,15 +17,19 @@ include(check_features_ie)
# resolving dependencies for the project
include(dependencies)
# Fuzz tests are also built without ENABLE_FUZZING
include(fuzzing)
if (ENABLE_FUZZING)
include(fuzzing)
enable_fuzzing()
endif()
if(ENABLE_NGRAPH)
find_package(ngraph QUIET)
if(NOT ngraph_FOUND)
set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
find_package(ngraph REQUIRED)
endif()
find_package(ngraph REQUIRED)
find_package(Threads REQUIRED)
@@ -51,11 +55,20 @@ function(ie_developer_export_targets)
"Paths to extra Inference Engine plugins" FORCE)
endfunction()
function(ie_developer_export)
export(TARGETS ${IEDeveloperPackageTargets} NAMESPACE IE::
APPEND FILE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
# Custom target to build only Inference Engine Developer Package targets
add_custom_target(ie_dev_targets ALL DEPENDS ${IEDeveloperPackageTargets})
endfunction()
add_subdirectory(thirdparty)
add_subdirectory(src)
if(ENABLE_TESTS)
add_subdirectory(tests_deprecated)
add_subdirectory(tests)
endif()
@@ -80,6 +93,10 @@ endif()
add_cpplint_report_target()
#
# Install
#
# install C++ samples
ie_cpack_add_component(cpp_samples REQUIRED DEPENDS core)
@@ -89,13 +106,15 @@ if(UNIX)
DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
COMPONENT cpp_samples
USE_SOURCE_PERMISSIONS
PATTERN *.bat EXCLUDE)
PATTERN *.bat EXCLUDE
PATTERN speech_libs_and_demos EXCLUDE)
elseif(WIN32)
install(DIRECTORY samples
install(DIRECTORY samples/
DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
COMPONENT cpp_samples
USE_SOURCE_PERMISSIONS
PATTERN *.sh EXCLUDE)
PATTERN *.sh EXCLUDE
PATTERN speech_libs_and_demos EXCLUDE)
endif()
# install C samples
@@ -123,25 +142,22 @@ install(FILES samples/CMakeLists.txt
# install Python samples
ie_cpack_add_component(python_samples REQUIRED DEPENDS core)
if(ENABLE_PYTHON)
ie_cpack_add_component(python_samples REQUIRED DEPENDS core)
install(DIRECTORY ${ie_python_api_SOURCE_DIR}/sample/
DESTINATION ${IE_CPACK_IE_DIR}/samples/python
COMPONENT python_samples)
# Custom target to build only Inference Engine Developer Package targets
add_custom_target(ie_dev_targets ALL DEPENDS inference_engine HeteroPlugin)
# Developer package
ie_developer_export_targets(format_reader)
if (ENABLE_NGRAPH)
ie_developer_export_targets(${NGRAPH_LIBRARIES})
install(DIRECTORY ${ie_python_api_SOURCE_DIR}/sample/
DESTINATION ${IE_CPACK_IE_DIR}/samples/python
COMPONENT python_samples)
endif()
export(TARGETS ${IEDeveloperPackageTargets} NAMESPACE IE::
APPEND FILE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
#
# Developer package
#
ie_developer_export_targets(format_reader)
ie_developer_export_targets(${NGRAPH_LIBRARIES})
ie_developer_export()
configure_file(
"${IE_MAIN_SOURCE_DIR}/cmake/developer_package_config.cmake.in"
@@ -153,7 +169,17 @@ configure_file(
"${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake"
COPYONLY)
#
# Coverage
#
if(COVERAGE)
include(coverage_ie)
endif()
#
# Add plugins
#
function(register_extra_plugins)
set(InferenceEngineDeveloperPackage_DIR "${CMAKE_CURRENT_BINARY_DIR}/build-plugins")
@@ -161,6 +187,7 @@ function(register_extra_plugins)
file(REMOVE "${iedevconfig_file}")
file(WRITE "${iedevconfig_file}" "\# !! AUTOGENERATED: DON'T EDIT !!\n\n")
file(APPEND "${iedevconfig_file}" "ie_deprecated_no_errors()\n")
foreach(target IN LISTS IEDeveloperPackageTargets)
if(target)
@@ -170,6 +197,7 @@ function(register_extra_plugins)
# automatically import plugins from the 'plugins' folder
file(GLOB local_extra_plugins "plugins/*")
list(APPEND local_extra_plugins "${OpenVINO_MAIN_SOURCE_DIR}/docs/template_plugin")
foreach(plugin_path IN LISTS IE_EXTRA_PLUGINS local_extra_plugins)
get_filename_component(plugin_dir "${plugin_path}" NAME)


@@ -0,0 +1,32 @@
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
########################################################################
#
# Perform search of TBB package corresponding with specified search order.
#
# The TBBROOT variable is set to an external package path or defaults to the
# IE-bundled version of TBB. The search order is:
# 1) ${TBBROOT}/cmake
# 2) ${TBBROOT} with the IE-bundled version of TBBConfig.cmake (for TBB < 2017.7 shipped without a CMake config)
#
## Path to the IE-bundled TBBConfig.cmake for old TBB versions without a CMake config.
if(APPLE)
set(IE_OWN_TBB_CONFIG tbb/mac)
elseif(UNIX)
set(IE_OWN_TBB_CONFIG tbb/lnx)
elseif(WIN32)
set(IE_OWN_TBB_CONFIG tbb/win)
else()
unset(IE_OWN_TBB_CONFIG)
endif()
find_package(TBB
CONFIG
NO_DEFAULT_PATH
PATHS ${TBBROOT}/cmake
${CMAKE_CURRENT_LIST_DIR}/${IE_OWN_TBB_CONFIG}
)
find_package_handle_standard_args(TBB CONFIG_MODE)


@@ -7,31 +7,50 @@ function to create CMake target and setup its options in a declarative style.
Example:
addIeTarget(
NAME core_lib
TYPE shared
ADD_CPPLINT
DEVELOPER_PACKAGE
TYPE SHARED
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
ADDITIONAL_SOURCE_DIRS
/some/additional/sources
EXCLUDED_SOURCE_DIRS
${CMAKE_CURRENT_SOURCE_DIR}/unnecessary_sources/
INCLUDES
${SDL_INCLUDES}
/some/specific/path
LINK_LIBRARIES
ie::important_plugin
EXPORT_DEPENDENCIES
dependency_lib_to_export
DEPENDENCIES
dependencies
OBJECT_FILES
object libraries
)
#]]
function(addIeTarget)
set(options
ADD_CPPLINT # Enables code style checks for the target
DEVELOPER_PACKAGE # Enables exporting of the target through the developer package
)
set(oneValueRequiredArgs
TYPE # type of target, shared|static|executable. shared and static correspond to add_library, executable to add_executable.
TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable
NAME # name of target
ROOT # directory will used for source files globbing root.
ROOT # root directory to be used for recursive search of source files
)
set(oneValueOptionalArgs
)
set(multiValueArgs
INCLUDES # Extra include directories.
LINK_LIBRARIES # Link libraries (in form of target name or file name)
DEPENDENCIES # compile order dependencies (no link implied)
DEFINES # extra preprocessor definitions
ADDITIONAL_SOURCE_DIRS # list of directories, which will be used to search for source files in addition to ROOT.
INCLUDES # Extra include directories
LINK_LIBRARIES # Link libraries (in form of target name or file name)
DEPENDENCIES # compile order dependencies (no link implied)
DEFINES # extra preprocessor definitions
ADDITIONAL_SOURCE_DIRS # list of directories which will be used to recursive search of source files in addition to ROOT
OBJECT_FILES # list of object files to be additionally built into the target
EXCLUDED_SOURCE_DIRS # list of directories excluded from the global recursive search of source files
LINK_LIBRARIES_WHOLE_ARCHIVE # list of static libraries to link, each object file should be used and not discarded
LINK_FLAGS # list of extra commands to linker
EXPORT_DEPENDENCIES # list of the dependencies to be exported with the target through the developer package
)
cmake_parse_arguments(ARG "${options}" "${oneValueRequiredArgs};${oneValueOptionalArgs}" "${multiValueArgs}" ${ARGN} )
@@ -56,22 +75,29 @@ function(addIeTarget)
file(GLOB_RECURSE includes ${includeSearch})
file(GLOB_RECURSE sources ${sourceSearch})
# remove unnecessary directories
if (ARG_EXCLUDED_SOURCE_DIRS)
list(FILTER includes EXCLUDE REGEX "${ARG_EXCLUDED_SOURCE_DIRS}/*")
list(FILTER sources EXCLUDE REGEX "${ARG_EXCLUDED_SOURCE_DIRS}/*")
endif()
source_group("include" FILES ${includes})
source_group("src" FILES ${sources})
set(all_sources)
list(APPEND all_sources ${sources} ${includes} ${ARG_OBJECT_FILES})
# defining a target
if (ARG_TYPE STREQUAL executable)
add_executable(${ARG_NAME} ${sources} ${includes})
elseif(ARG_TYPE STREQUAL static OR ARG_TYPE STREQUAL shared)
string(TOUPPER ${ARG_TYPE} type)
add_library(${ARG_NAME} ${type} ${sources} ${includes})
if (ARG_TYPE STREQUAL EXECUTABLE)
add_executable(${ARG_NAME} ${all_sources})
elseif(ARG_TYPE STREQUAL STATIC OR ARG_TYPE STREQUAL SHARED)
add_library(${ARG_NAME} ${ARG_TYPE} ${all_sources})
else()
message(SEND_ERROR "Invalid target type: ${ARG_TYPE}")
message(SEND_ERROR "Invalid target type ${ARG_TYPE} specified for target name ${ARG_NAME}")
endif()
# filling target properties
set_property(TARGET ${ARG_NAME} PROPERTY CXX_STANDARD 11)
set_property(TARGET ${ARG_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
ieTargetLinkWholeArchive(${ARG_NAME} ${ARG_LINK_LIBRARIES_WHOLE_ARCHIVE})
if (ARG_DEFINES)
target_compile_definitions(${ARG_NAME} PRIVATE ${ARG_DEFINES})
endif()
@@ -84,6 +110,23 @@ function(addIeTarget)
if (ARG_DEPENDENCIES)
add_dependencies(${ARG_NAME} ${ARG_DEPENDENCIES})
endif()
if (ARG_LINK_FLAGS)
get_target_property(oldLinkFlags ${ARG_NAME} LINK_FLAGS)
string(REPLACE ";" " " ARG_LINK_FLAGS "${ARG_LINK_FLAGS}")
set_target_properties(${ARG_NAME} PROPERTIES LINK_FLAGS "${oldLinkFlags} ${ARG_LINK_FLAGS}")
endif()
if (ARG_ADD_CPPLINT)
# code style
add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME})
add_clang_format_target(${ARG_NAME}_clang_format FOR_TARGETS ${ARG_NAME})
endif()
if (ARG_DEVELOPER_PACKAGE)
# developer package
ie_developer_export_targets(${ARG_NAME})
if (ARG_EXPORT_DEPENDENCIES)
ie_developer_export_targets(${ARG_NAME} ${ARG_EXPORT_DEPENDENCIES})
endif()
endif()
endfunction()
#[[
@@ -106,7 +149,7 @@ function(addIeTargetTest)
)
cmake_parse_arguments(ARG "${options}" "${oneValueRequiredArgs};${oneValueOptionalArgs}" "${multiValueArgs}" ${ARGN} )
addIeTarget(TYPE executable NAME ${ARG_NAME} ${ARG_UNPARSED_ARGUMENTS})
addIeTarget(TYPE EXECUTABLE NAME ${ARG_NAME} ${ARG_UNPARSED_ARGUMENTS})
add_test(NAME ${ARG_NAME} COMMAND ${ARG_NAME})
set_property(TEST ${ARG_NAME} PROPERTY LABELS ${ARG_LABELS})


@@ -2,44 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
#
# Apple specific
if (APPLE)
set(ENABLE_GNA OFF)
endif()
# Android specific
if(ANDROID)
set(ENABLE_GNA OFF)
endif()
# ARM specific
if (ARM OR AARCH64)
# disable all base plugins but Myriad
set(ENABLE_GNA OFF)
set(ENABLE_HDDL OFF)
endif()
# disable SSE
if(NOT(X86_64 OR X86))
set(ENABLE_SSE42 OFF)
endif()
# MinGW specific - under Wine there is no support for downloading files and applying them using git
if (WIN32)
if (MINGW)
set(ENABLE_SAMPLES OFF)
endif()
endif()
if (NOT ENABLE_VPU OR NOT ENABLE_NGRAPH)
set(ENABLE_MYRIAD OFF)
endif()
if(CMAKE_CROSSCOMPILING)
set(ENABLE_PROFILING_ITT OFF)
endif()
# the next section sets defines to be accessible in C/C++ code for certain features
if (ENABLE_PROFILING_RAW)
add_definitions(-DENABLE_PROFILING_RAW=1)
@@ -53,11 +15,6 @@ if (ENABLE_MYRIAD_NO_BOOT AND ENABLE_MYRIAD )
add_definitions(-DENABLE_MYRIAD_NO_BOOT=1)
endif()
if (NOT ENABLE_TESTS)
SET(ENABLE_BEH_TESTS OFF)
SET(ENABLE_FUNCTIONAL_TESTS OFF)
endif()
if (ENABLE_CLDNN)
add_definitions(-DENABLE_CLDNN=1)
endif()
@@ -69,40 +26,14 @@ endif()
if (ENABLE_GNA)
add_definitions(-DENABLE_GNA)
set (DEFAULT_GNA_LIB GNA1_1401)
# "GNA library version: GNA1|GNA1_1401|GNA2" - default is 1401
if (NOT GNA_LIBRARY_VERSION STREQUAL "GNA1"
AND NOT GNA_LIBRARY_VERSION STREQUAL "GNA1_1401"
AND NOT GNA_LIBRARY_VERSION STREQUAL "GNA2")
set (GNA_LIBRARY_VERSION ${DEFAULT_GNA_LIB})
message(STATUS "GNA_LIBRARY_VERSION not set. Can be GNA1, GNA1_1401 or GNA2. Default is ${GNA_LIBRARY_VERSION}")
endif()
if (UNIX AND NOT APPLE AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.4)
message(WARNING "${GNA_LIBRARY_VERSION} no supported on GCC version ${CMAKE_CXX_COMPILER_VERSION}. Fallback to GNA1")
message(WARNING "${GNA_LIBRARY_VERSION} is not supported on GCC version ${CMAKE_CXX_COMPILER_VERSION}. Fallback to GNA1")
set(GNA_LIBRARY_VERSION GNA1)
endif()
set(GNA_LIBRARY_VERSION "${GNA_LIBRARY_VERSION}" CACHE STRING "GNAVersion" FORCE)
list (APPEND IE_OPTIONS GNA_LIBRARY_VERSION)
endif()
if(ENABLE_DUMP)
add_definitions(-DDEBUG_DUMP)
endif()
if (LINUX AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.3)
set(ENABLE_UNICODE_PATH_SUPPORT OFF)
endif()
if (ENABLE_UNICODE_PATH_SUPPORT)
add_definitions(-DENABLE_UNICODE_PATH_SUPPORT=1)
endif()
# functional tests require FormatParser, which is disabled by this option
if(NOT ENABLE_IR_READER)
set(ENABLE_FUNCTIONAL_TESTS OFF)
if (ENABLE_SPEECH_DEMO)
add_definitions(-DENABLE_SPEECH_DEMO)
endif()
print_enabled_features()


@@ -10,7 +10,7 @@ if (ENABLE_CLANG_FORMAT)
message(WARNING "Supported clang-format version is 9!")
set(ENABLE_CLANG_FORMAT OFF)
else()
string(REGEX REPLACE ".*([0-9]+)\\.[0-9]+\\.[0-9]+.*" "\\1" CLANG_FORMAT_MAJOR_VERSION ${CLANG_VERSION})
string(REGEX REPLACE "[^0-9]+([0-9]+)\\..*" "\\1" CLANG_FORMAT_MAJOR_VERSION ${CLANG_VERSION})
if (NOT ${CLANG_FORMAT_MAJOR_VERSION} EQUAL "9")
message(WARNING "Supported clang-format version is 9!")
set(ENABLE_CLANG_FORMAT OFF)


@@ -3,7 +3,8 @@
#
if(DEFINED IE_MAIN_SOURCE_DIR AND TARGET inference_engine)
set(InferenceEngine_LIBRARIES inference_engine_c_api inference_engine_nn_builder inference_engine)
set(InferenceEngine_LIBRARIES inference_engine_legacy inference_engine
inference_engine_c_api inference_engine_nn_builder)
else()
include("${CMAKE_CURRENT_LIST_DIR}/targets.cmake")
if(NOT WIN32)
@@ -28,5 +29,6 @@ else()
endif()
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine_c_api IE::inference_engine_nn_builder IE::inference_engine)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
IE::inference_engine_c_api IE::inference_engine_nn_builder)
endif()


@@ -0,0 +1,73 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(DLDT_COVERAGE_GCDA_DATA_DIRECTORY "${CMAKE_BINARY_DIR}/inference-engine/src")
set(DLDT_COVERAGE_BASE_DIRECTORY "${IE_MAIN_SOURCE_DIR}/src")
ie_coverage_clean(REPOSITORY "dldt"
DIRECTORY "${DLDT_COVERAGE_GCDA_DATA_DIRECTORY}")
ie_coverage_capture(INFO_FILE "dldt"
BASE_DIRECTORY "${DLDT_COVERAGE_BASE_DIRECTORY}"
DIRECTORY "${DLDT_COVERAGE_GCDA_DATA_DIRECTORY}")
# Generate reports
ie_coverage_extract(INPUT "dldt" OUTPUT "inference_engine_with_builders"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/inference_engine/*"
"${DLDT_COVERAGE_BASE_DIRECTORY}/plugin_api/*")
ie_coverage_remove(INPUT "inference_engine_with_builders" OUTPUT "inference_engine"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/inference_engine/builders/*")
ie_coverage_genhtml(INFO_FILE "inference_engine"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "inference_engine_legacy"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/legacy_api/*")
ie_coverage_genhtml(INFO_FILE "inference_engine_legacy"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "hetero_plugin"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/hetero_plugin/*")
ie_coverage_genhtml(INFO_FILE "hetero_plugin"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "multi_device"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/multi_device/*")
ie_coverage_genhtml(INFO_FILE "multi_device"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "preprocessing"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/preprocessing/*")
ie_coverage_genhtml(INFO_FILE "preprocessing"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "inference_engine_transformations"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/inference_engine_transformations/*")
ie_coverage_genhtml(INFO_FILE "inference_engine_transformations"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
ie_coverage_extract(INPUT "dldt" OUTPUT "low_precision_transformations"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/low_precision_transformations/*")
ie_coverage_genhtml(INFO_FILE "low_precision_transformations"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
if(ENABLE_MKL_DNN)
ie_coverage_extract(INPUT "dldt" OUTPUT "mkldnn_plugin"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/mkldnn_plugin/*")
ie_coverage_genhtml(INFO_FILE "mkldnn_plugin"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
endif()
if(ENABLE_CLDNN)
ie_coverage_extract(INPUT "dldt" OUTPUT "cldnn_engine"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/cldnn_engine/*")
ie_coverage_genhtml(INFO_FILE "cldnn_engine"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
endif()
if(ENABLE_GNA)
ie_coverage_extract(INPUT "dldt" OUTPUT "gna_plugin"
PATTERNS "${DLDT_COVERAGE_BASE_DIRECTORY}/gna_plugin/*")
ie_coverage_genhtml(INFO_FILE "gna_plugin"
PREFIX "${DLDT_COVERAGE_BASE_DIRECTORY}")
endif()


@@ -1,28 +0,0 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(ENABLE_CPPCHECK)
find_program(CPPCHECK_EXECUTABLE cppcheck)
if(NOT CPPCHECK_EXECUTABLE)
message(WARNING "cppcheck was not found : disable static analysis")
set(ENABLE_CPPCHECK OFF)
endif()
endif()
function(add_cppcheck)
if(NOT ENABLE_CPPCHECK)
return()
endif()
set_property(
TARGET ${ARGN}
PROPERTY CXX_CPPCHECK
${CPPCHECK_EXECUTABLE}
"--suppress=*:*/temp/*"
"--suppress=*:*/thirdparty/*"
"--error-exitcode=1"
"--template={file}:{line}: error: [cppcheck:{severity}] {message}"
"--quiet")
endfunction()


@@ -3,10 +3,10 @@
#
if(ENABLE_CPPLINT)
find_package(PythonInterp 2.7 EXACT)
find_host_package(PythonInterp)
if(NOT PYTHONINTERP_FOUND OR NOT PYTHON_VERSION_MAJOR EQUAL 2)
message(WARNING "Python 2.7 was not found (required for cpplint check)")
if(NOT PYTHONINTERP_FOUND)
message(WARNING "Python interpreter was not found (required for cpplint check)")
set(ENABLE_CPPLINT OFF)
endif()
endif()
@@ -23,7 +23,7 @@ function(add_cpplint_target TARGET_NAME)
set(options "")
set(oneValueArgs "")
set(multiValueArgs "FOR_TARGETS" "FOR_SOURCES" "EXCLUDE_PATTERNS")
set(multiValueArgs FOR_TARGETS FOR_SOURCES EXCLUDE_PATTERNS CUSTOM_FILTERS)
cmake_parse_arguments(CPPLINT "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
foreach(target IN LISTS CPPLINT_FOR_TARGETS)
@@ -32,6 +32,11 @@ function(add_cpplint_target TARGET_NAME)
endforeach()
list(REMOVE_DUPLICATES CPPLINT_FOR_SOURCES)
set(custom_filter "")
foreach(filter IN LISTS CPPLINT_CUSTOM_FILTERS)
string(CONCAT custom_filter "${custom_filter}" "," "${filter}")
endforeach()
set(all_output_files "")
foreach(source_file IN LISTS CPPLINT_FOR_SOURCES)
set(exclude FALSE)
@@ -62,12 +67,12 @@ function(add_cpplint_target TARGET_NAME)
"${output_file}"
COMMAND
"${CMAKE_COMMAND}"
-D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
-D "CPPLINT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py"
-D "INPUT_FILE=${source_file}"
-D "OUTPUT_FILE=${output_file}"
-D "WORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}"
-D "SKIP_RETURN_CODE=${ENABLE_CPPLINT_REPORT}"
-D "CUSTOM_FILTER=${custom_filter}"
-P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake"
DEPENDS
"${source_file}"


@@ -4,33 +4,52 @@
file(REMOVE "${OUTPUT_FILE}")
set(DEFAULT_FILTER "
-build/header_guard,\
-build/include,\
-build/include_order,\
-build/include_subdir,\
-build/include_what_you_use,\
-build/namespaces,\
-build/c++11,\
-whitespace/indent,\
-whitespace/comments,\
-whitespace/ending_newline,\
-runtime/references,\
-runtime/int,\
-runtime/explicit,\
-readability/todo,\
-readability/fn_size,\
")
set(FILTER "${DEFAULT_FILTER}${CUSTOM_FILTER}")
execute_process(
COMMAND
"${PYTHON_EXECUTABLE}"
python3
"${CPPLINT_SCRIPT}"
"--linelength=160"
"--counting=detailed"
"--filter=-readability/fn_size"
"--quiet"
"--filter=${FILTER}"
"${INPUT_FILE}"
WORKING_DIRECTORY "${WORKING_DIRECTORY}"
RESULT_VARIABLE result
OUTPUT_VARIABLE output
ERROR_VARIABLE output)
# Display the cpplint output to console (to parse it from an IDE)
message("${output}")
# Store cpplint output to file (replace problematic symbols)
string(REPLACE "\"" "&quot\;" output "${output}")
string(REPLACE "<" "&lt\;" output "${output}")
string(REPLACE ">" "&gt\;" output "${output}")
string(REPLACE "'" "&apos\;" output "${output}")
string(REPLACE "&" "&amp\;" output "${output}")
file(WRITE "${OUTPUT_FILE}" "${output}")
string(REPLACE "\"" "&quot\;" formatted_output "${output}")
string(REPLACE "<" "&lt\;" formatted_output "${formatted_output}")
string(REPLACE ">" "&gt\;" formatted_output "${formatted_output}")
string(REPLACE "'" "&apos\;" formatted_output "${formatted_output}")
string(REPLACE "&" "&amp\;" formatted_output "${formatted_output}")
file(WRITE "${OUTPUT_FILE}" "${formatted_output}")
if(NOT SKIP_RETURN_CODE)
# Pass through the cpplint return code
if(NOT result EQUAL 0)
# Display the cpplint output to console (to parse it from an IDE)
message("${output}")
message(FATAL_ERROR "[cpplint] Code style check failed for : ${INPUT_FILE}")
endif()
endif()


@@ -7,7 +7,6 @@ cmake_policy(SET CMP0054 NEW)
# we have a number of dependencies stored on FTP
include(dependency_solver)
set_temp_directory(TEMP "${IE_MAIN_SOURCE_DIR}")
if (CMAKE_CROSSCOMPILING)
set(CMAKE_STAGING_PREFIX "${TEMP}")
endif()
@@ -49,58 +48,60 @@ endif ()
## Intel OMP package
if (THREADING STREQUAL "OMP")
if (WIN32)
reset_deps_cache(OMP)
if (WIN32 AND X86_64)
RESOLVE_DEPENDENCY(OMP
ARCHIVE_WIN "iomp.zip"
TARGET_PATH "${TEMP}/omp"
ENVIRONMENT "OMP"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX)
elseif(LINUX AND X86_64)
RESOLVE_DEPENDENCY(OMP
ARCHIVE_LIN "iomp.tgz"
TARGET_PATH "${TEMP}/omp"
ENVIRONMENT "OMP"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
else(APPLE)
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(OMP
ARCHIVE_MAC "iomp_20190130_mac.tgz"
TARGET_PATH "${TEMP}/omp"
ENVIRONMENT "OMP"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
else()
message(FATAL_ERROR "Intel OMP is not available on current platform")
endif()
update_deps_cache(OMP "${OMP}" "Path to OMP root folder")
log_rpath_from_dir(OMP "${OMP}/lib")
debug_message(STATUS "intel_omp=" ${OMP})
endif ()
## TBB package
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
reset_deps_cache(TBBROOT TBB_DIR)
reset_deps_cache(TBBROOT)
if(NOT DEFINED TBB_DIR AND NOT DEFINED ENV{TBB_DIR})
if (WIN32)
if (WIN32 AND X86_64)
#TODO: add target_path to be platform specific as well, to avoid following if
RESOLVE_DEPENDENCY(TBB
ARCHIVE_WIN "tbb2020_20191023_win_tbbbind_patched.zip"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(ANDROID) # Should be before LINUX due LINUX is detected as well
RESOLVE_DEPENDENCY(TBB
ARCHIVE_ANDROID "tbb2020_20191023_android.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2020_20191023_lin_tbbbind_patched.tgz"
ARCHIVE_WIN "tbb2020_20200415_win.zip"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
else(APPLE)
elseif(ANDROID) # Should be before LINUX due LINUX is detected as well
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2020_20191023_mac.tgz"
ARCHIVE_ANDROID "tbb2020_20200404_android.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
ENVIRONMENT "TBBROOT")
elseif(LINUX AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2020_20200415_lin_strip.tgz"
TARGET_PATH "${TEMP}/tbb")
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2020_20200404_mac.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
else()
message(FATAL_ERROR "TBB is not available on current platform")
endif()
else()
if(DEFINED TBB_DIR)
@@ -111,12 +112,11 @@ if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
endif()
update_deps_cache(TBBROOT "${TBB}" "Path to TBB root folder")
update_deps_cache(TBB_DIR "${TBBROOT}/cmake" "Path to TBB package folder")
if (WIN32)
log_rpath_from_dir(TBB "${TBB_DIR}/../bin")
log_rpath_from_dir(TBB "${TBB}/bin")
else ()
log_rpath_from_dir(TBB "${TBB_DIR}/../lib")
log_rpath_from_dir(TBB "${TBB}/lib")
endif ()
debug_message(STATUS "tbb=" ${TBB})
endif ()
@@ -124,15 +124,15 @@ endif ()
if (ENABLE_OPENCV)
reset_deps_cache(OpenCV_DIR)
set(OPENCV_VERSION "4.2.0")
set(OPENCV_BUILD "082")
if (WIN32)
set(OPENCV_VERSION "4.3.0")
set(OPENCV_BUILD "060")
if (WIN32 AND X86_64)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_WIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
elseif(APPLE)
elseif(APPLE AND X86_64)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_MAC "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_osx.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_osx/opencv"
@@ -147,6 +147,8 @@ if (ENABLE_OPENCV)
set(OPENCV_SUFFIX "ubuntu16")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 18.04")
set(OPENCV_SUFFIX "ubuntu18")
else()
message(FATAL_ERROR "OpenCV is not available on current platform")
endif()
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_${OPENCV_SUFFIX}.txz"
@@ -177,6 +179,7 @@ include(ie_parallel)
if (ENABLE_GNA)
reset_deps_cache(
GNA
GNA_PLATFORM_DIR
GNA_KERNEL_LIB_NAME
GNA_LIBS_LIST
@@ -192,13 +195,14 @@ if (ENABLE_GNA)
set(GNA_VERSION "01.00.00.1401")
endif()
if(GNA_LIBRARY_VERSION STREQUAL "GNA2")
set(GNA_VERSION "02.00.00.0587")
set(GNA_VERSION "02.00.00.0654")
endif()
RESOLVE_DEPENDENCY(GNA
ARCHIVE_UNIFIED "GNA_${GNA_VERSION}.zip"
TARGET_PATH "${TEMP}/gna_${GNA_VERSION}"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*")
endif()
update_deps_cache(GNA "${GNA}" "Path to GNA root folder")
debug_message(STATUS "gna=" ${GNA})
endif()


@@ -29,7 +29,7 @@ include("${CMAKE_CURRENT_LIST_DIR}/targets_developer.cmake")
set_property(TARGET IE::inference_engine PROPERTY IMPORTED_GLOBAL TRUE)
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine IE::inference_engine_nn_builder)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine IE::inference_engine_nn_builder)
#
# Common cmake includes
@@ -39,9 +39,12 @@ list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake")
list(APPEND CMAKE_MODULE_PATH "${IE_MAIN_SOURCE_DIR}/cmake")
# generic stuff from developer package
include(developer_package)
include(developer_package NO_POLICY_SCOPE)
include(developer_package_ie)
# Don't threat deprecated API warnings as errors in 3rd party apps
ie_deprecated_no_errors()
# inherit OpenCV from main IE project if enabled
if (ENABLE_OPENCV)
load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR)


@@ -2,36 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
#
# environment variables used
# name of the environment variable that stores the path to the temp directory
set(DL_SDK_TEMP "DL_SDK_TEMP")
# prepare temporary folder
function(set_temp_directory temp_variable source_tree_dir)
if (DEFINED ENV{${DL_SDK_TEMP}} AND NOT $ENV{${DL_SDK_TEMP}} STREQUAL "")
if (WIN32)
string(REPLACE "\\" "\\\\" temp $ENV{${DL_SDK_TEMP}})
else(WIN32)
set(temp $ENV{${DL_SDK_TEMP}})
endif(WIN32)
if (ENABLE_ALTERNATIVE_TEMP)
set(ALTERNATIVE_PATH ${source_tree_dir}/temp)
endif()
else ()
message(STATUS "DL_SDK_TEMP envionment not set")
set(temp ${source_tree_dir}/temp)
endif()
set("${temp_variable}" "${temp}" PARENT_SCOPE)
if(ALTERNATIVE_PATH)
set(ALTERNATIVE_PATH "${ALTERNATIVE_PATH}" PARENT_SCOPE)
endif()
endfunction()
include(cpplint)
include(clang_format)
include(cppcheck)
if(ENABLE_PROFILING_ITT)
find_package(ITT REQUIRED)
@@ -40,3 +12,4 @@ endif()
set(TBB_FIND_RELEASE_ONLY ${ENABLE_TBB_RELEASE_ONLY})
include(plugins/plugins)
include(add_ie_target)


@@ -7,75 +7,93 @@ include (options)
#these options are aimed to optimize build time on development system
ie_option (ENABLE_GNA "GNA support for inference engine" ON)
ie_dependent_option (ENABLE_GNA "GNA support for inference engine" ON "NOT APPLE;NOT ANDROID;X86 OR X86_64" OFF)
ie_option (ENABLE_CLDNN_TESTS "Enable clDNN unit tests" OFF)
ie_dependent_option (ENABLE_CLDNN_TESTS "Enable clDNN unit tests" OFF "ENABLE_CLDNN" OFF)
ie_option (ENABLE_PROFILING_ITT "ITT tracing of IE and plugins internals" ON)
ie_option (ENABLE_PROFILING_RAW "Raw counters profiling (just values, no start/stop time or timeline)" OFF)
ie_dependent_option (ENABLE_PROFILING_ITT "ITT tracing of IE and plugins internals" ON "NOT CMAKE_CROSSCOMPILING" OFF)
# "MKL-DNN library might use MKL-ML or OpenBLAS for gemm tasks: MKL|OPENBLAS|JIT"
if (NOT GEMM STREQUAL "MKL"
AND NOT GEMM STREQUAL "OPENBLAS"
AND NOT GEMM STREQUAL "JIT")
if(ANDROID)
set(GEMM "JIT")
if (ENABLE_MKL_DNN)
if(AARCH64)
set(GEMM_DEFAULT "OPENBLAS")
else()
set(GEMM "JIT")
set(GEMM_DEFAULT "JIT")
endif()
set(GEMM "${GEMM_DEFAULT}" CACHE STRING "GEMM implementation")
set_property(CACHE GEMM PROPERTY STRINGS "MKL" "OPENBLAS" "JIT")
list (APPEND IE_OPTIONS GEMM)
if (NOT GEMM STREQUAL "MKL" AND
NOT GEMM STREQUAL "OPENBLAS" AND
NOT GEMM STREQUAL "JIT")
message(FATAL_ERROR "GEMM should be set to MKL, OPENBLAS or JIT. Default option is ${GEMM_DEFAULT}")
endif()
message(STATUS "GEMM should be set to MKL, OPENBLAS or JIT. Default option is " ${GEMM})
endif()
set(GEMM "${GEMM}" CACHE STRING "Gemm implementation" FORCE)
list (APPEND IE_OPTIONS GEMM)
# "MKL-DNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ"
if (NOT THREADING STREQUAL "TBB"
AND NOT THREADING STREQUAL "TBB_AUTO"
AND NOT THREADING STREQUAL "OMP"
AND NOT THREADING STREQUAL "SEQ")
if (ARM OR AARCH64)
set (THREADING "SEQ")
else()
set (THREADING "TBB")
endif()
message(STATUS "THREADING should be set to TBB, TBB_AUTO, OMP or SEQ. Default option is " ${THREADING})
if(ARM)
set(THREADING_DEFAULT "SEQ")
else()
set(THREADING_DEFAULT "TBB")
endif()
set(THREADING "${THREADING}" CACHE STRING "Threading" FORCE)
set(THREADING "${THREADING_DEFAULT}" CACHE STRING "Threading")
set_property(CACHE THREADING PROPERTY STRINGS "TBB" "TBB_AUTO" "OMP" "SEQ")
list (APPEND IE_OPTIONS THREADING)
if (NOT THREADING STREQUAL "TBB" AND
NOT THREADING STREQUAL "TBB_AUTO" AND
NOT THREADING STREQUAL "OMP" AND
NOT THREADING STREQUAL "SEQ")
message(FATAL_ERROR "THREADING should be set to TBB, TBB_AUTO, OMP or SEQ. Default option is ${THREADING_DEFAULT}")
endif()
if (ENABLE_GNA)
if (UNIX AND NOT APPLE AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.4)
set (DEFAULT_GNA_LIB GNA1)
else()
set (DEFAULT_GNA_LIB GNA1_1401)
endif()
set(GNA_LIBRARY_VERSION "${DEFAULT_GNA_LIB}" CACHE STRING "GNAVersion")
set_property(CACHE GNA_LIBRARY_VERSION PROPERTY STRINGS "GNA1" "GNA1_1401" "GNA2")
list (APPEND IE_OPTIONS GNA_LIBRARY_VERSION)
if (NOT GNA_LIBRARY_VERSION STREQUAL "GNA1" AND
NOT GNA_LIBRARY_VERSION STREQUAL "GNA1_1401" AND
NOT GNA_LIBRARY_VERSION STREQUAL "GNA2")
message(FATAL_ERROR "GNA_LIBRARY_VERSION should be set to GNA1, GNA1_1401 or GNA2. Default option is ${DEFAULT_GNA_LIB}")
endif()
endif()
ie_option (ENABLE_IR_READER "Compile with IR readers / parsers" ON)
ie_option (ENABLE_VPU "vpu targeted plugins for inference engine" ON)
ie_option (ENABLE_MYRIAD "myriad targeted plugin for inference engine" ON)
ie_dependent_option (ENABLE_MYRIAD "myriad targeted plugin for inference engine" ON "ENABLE_VPU" OFF)
ie_option (ENABLE_MYRIAD_NO_BOOT "myriad plugin will skip device boot" OFF)
ie_dependent_option (ENABLE_MYRIAD_NO_BOOT "myriad plugin will skip device boot" OFF "ENABLE_MYRIAD" OFF)
ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF)
ie_option (ENABLE_GAPI_TESTS "tests for GAPI kernels" OFF)
ie_dependent_option (ENABLE_GAPI_TESTS "tests for GAPI kernels" OFF "ENABLE_TESTS" OFF)
ie_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF)
ie_dependent_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF "ENABLE_GAPI_TESTS" OFF)
ie_option (ENABLE_MYRIAD_MVNC_TESTS "functional and behavior tests for mvnc api" OFF)
ie_dependent_option (ENABLE_MYRIAD_MVNC_TESTS "functional and behavior tests for mvnc api" OFF "ENABLE_TESTS;ENABLE_MYRIAD" OFF)
ie_option (ENABLE_BEH_TESTS "tests oriented to check inference engine API correctness" ON)
ie_dependent_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON "NOT MINGW" OFF)
ie_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON)
ie_dependent_option (ENABLE_BEH_TESTS "tests oriented to check inference engine API correctness" ON "ENABLE_TESTS" OFF)
ie_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON)
ie_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS;ENABLE_IR_READER" OFF)
ie_dependent_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON "NOT MINGW" OFF)
ie_option (ENABLE_FUZZING "instrument build for fuzzing" OFF)
ie_option (COVERAGE "enable code coverage" OFF)
ie_option (VERBOSE_BUILD "shows extra information about build" OFF)
ie_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF)
ie_option (ENABLE_ALTERNATIVE_TEMP "in case of dependency conflict, to avoid modification in master, use local copy of dependency" ON)
ie_option (ENABLE_DUMP "enables mode for dumping per layer information" OFF)
ie_option (ENABLE_OPENCV "enables OpenCV" ON)
ie_option (ENABLE_DEBUG_SYMBOLS "generates symbols for debugging" OFF)
@@ -84,22 +102,13 @@ ie_option (ENABLE_PYTHON "enables ie python bridge build" OFF)
ie_option (ENABLE_CPP_CCT "enables C++ version of Cross Check Tool" OFF)
ie_option (ENABLE_UNICODE_PATH_SUPPORT "Enable loading models from Unicode paths" ON)
ie_option (ENABLE_IR_READER "Compile with IR readers / parsers" ON)
ie_option (ENABLE_C "enables ie c bridge build" ON)
ie_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF)
ie_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF)
ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF "OFF;UNIX;NOT APPLE;NOT ANDROID" OFF)
ie_dependent_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF)
ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" OFF)
ie_option(ENABLE_CPPCHECK "Enable cppcheck during the build" OFF)
set(IE_EXTRA_PLUGINS "" CACHE STRING "Extra paths for plugins to include into DLDT build tree")
if (LINUX)
ie_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the Inference Engine binaries" ON)
endif()
ie_dependent_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the Inference Engine binaries" ON "THREADING MATCHES TBB;LINUX" OFF)


@@ -2,10 +2,30 @@
# SPDX-License-Identifier: Apache-2.0
#
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
find_package(TBB COMPONENTS tbb tbbmalloc)
if (TBB_FOUND)
if (${TBB_VERSION} VERSION_LESS 2020)
ext_message(WARNING "TBB version is less than OpenVINO recommends to use.\
Some TBB related features like NUMA-aware tbb::task_arena\
execution will be disabled.")
endif()
else ()
ext_message(WARNING "TBB was not found by the configured TBB_DIR/TBBROOT path. \
SEQ method will be used.")
endif ()
endif()
function(set_ie_threading_interface_for TARGET_NAME)
get_target_property(target_type ${TARGET_NAME} TYPE)
if(target_type STREQUAL "INTERFACE_LIBRARY")
set(LINK_TYPE "INTERFACE")
else()
set(LINK_TYPE "PUBLIC")
endif()
function(ie_target_link_libraries TARGET_NAME LINK_TYPE)
if(CMAKE_VERSION VERSION_LESS "3.12.0")
get_target_property(target_type ${TARGET_NAME} TYPE)
if(NOT target_type STREQUAL "OBJECT_LIBRARY")
target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${ARGN})
else()
@@ -42,10 +62,9 @@ function(set_ie_threading_interface_for TARGET_NAME)
set(IE_THREAD_DEFINE "IE_THREAD_SEQ")
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
find_package(TBB COMPONENTS tbb tbbmalloc)
if (TBB_FOUND)
set(IE_THREAD_DEFINE "IE_THREAD_TBB")
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_IMPORTED_TARGETS})
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${TBB_IMPORTED_TARGETS})
else ()
ext_message(WARNING "TBB was not found by the configured TBB_DIR path. \
SEQ method will be used for ${TARGET_NAME}")
@@ -89,36 +108,36 @@ function(set_ie_threading_interface_for TARGET_NAME)
set(IE_THREAD_DEFINE "IE_THREAD_OMP")
if (WIN32)
target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} /openmp)
target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} /Qopenmp)
ie_target_link_libraries(${TARGET_NAME} PUBLIC "-nodefaultlib:vcomp")
target_compile_options(${TARGET_NAME} ${LINK_TYPE} ${OpenMP_CXX_FLAGS} /openmp)
target_compile_options(${TARGET_NAME} ${LINK_TYPE} ${OpenMP_CXX_FLAGS} /Qopenmp)
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} "-nodefaultlib:vcomp")
else()
target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} -fopenmp)
target_compile_options(${TARGET_NAME} ${LINK_TYPE} ${OpenMP_CXX_FLAGS} -fopenmp)
endif ()
# Debug binaries are optional.
if (OMP_LIBRARIES_DEBUG AND NOT LINUX)
if (WIN32)
ie_target_link_libraries(${TARGET_NAME} PUBLIC "$<$<CONFIG:DEBUG>:${OMP_LIBRARIES_DEBUG}>;$<$<NOT:$<CONFIG:DEBUG>>:${OMP_LIBRARIES_RELEASE}>")
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} "$<$<CONFIG:DEBUG>:${OMP_LIBRARIES_DEBUG}>;$<$<NOT:$<CONFIG:DEBUG>>:${OMP_LIBRARIES_RELEASE}>")
else()
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_DEBUG})
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_DEBUG})
else()
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_RELEASE})
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_RELEASE})
endif ()
endif ()
else ()
# Link Release library to all configurations.
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_RELEASE})
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${OMP_LIBRARIES_RELEASE})
endif ()
endif ()
endif ()
target_compile_definitions(${TARGET_NAME} PUBLIC -DIE_THREAD=${IE_THREAD_DEFINE})
target_compile_definitions(${TARGET_NAME} ${LINK_TYPE} -DIE_THREAD=${IE_THREAD_DEFINE})
if (NOT THREADING STREQUAL "SEQ")
find_package(Threads REQUIRED)
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${CMAKE_THREAD_LIBS_INIT})
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${CMAKE_THREAD_LIBS_INIT})
endif()
endfunction(set_ie_threading_interface_for)
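On the C++ side, the IE_THREAD definition that set_ie_threading_interface_for() adds to a target is typically consumed through ie_parallel.hpp. A minimal sketch, assuming a hypothetical fill_zeros() helper (not from the repository):

#include <ie_parallel.hpp>  // defines the IE_THREAD_TBB / IE_THREAD_OMP / IE_THREAD_SEQ constants
#include <vector>

#if IE_THREAD == IE_THREAD_TBB
#include <tbb/parallel_for.h>
void fill_zeros(std::vector<float>& v) {
    tbb::parallel_for(size_t(0), v.size(), [&](size_t i) { v[i] = 0.f; });
}
#else
void fill_zeros(std::vector<float>& v) {  // fallback for OMP/SEQ kept sequential here for brevity
    for (size_t i = 0; i < v.size(); ++i) v[i] = 0.f;
}
#endif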


@@ -23,12 +23,13 @@ endif()
# SOURCES <sources>
# OBJECT_LIBRARIES <object_libs>
# VERSION_DEFINES_FOR <source>
# SKIP_INSTALL
# )
#
function(ie_add_plugin)
set(options)
set(options SKIP_INSTALL)
set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR)
set(multiValueArgs SOURCES OBJECT_LIBRARIES)
set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS)
cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if(NOT IE_PLUGIN_NAME)
@@ -54,19 +55,22 @@ function(ie_add_plugin)
add_library(${IE_PLUGIN_NAME} SHARED ${input_files})
target_compile_definitions(${IE_PLUGIN_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN)
if(TARGET inference_engine_preproc)
target_include_directories(${IE_PLUGIN_NAME} PRIVATE $<TARGET_PROPERTY:inference_engine_preproc,INTERFACE_INCLUDE_DIRECTORIES>)
target_link_libraries(${IE_PLUGIN_NAME} PRIVATE inference_engine_plugin_api)
else()
target_include_directories(${IE_PLUGIN_NAME} PRIVATE $<TARGET_PROPERTY:IE::inference_engine_preproc,INTERFACE_INCLUDE_DIRECTORIES>)
if(TARGET IE::inference_engine_plugin_api)
target_link_libraries(${IE_PLUGIN_NAME} PRIVATE IE::inference_engine_plugin_api)
else()
target_link_libraries(${IE_PLUGIN_NAME} PRIVATE inference_engine_plugin_api)
endif()
if(WIN32)
set_target_properties(${IE_PLUGIN_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME})
endif()
add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME})
set(custom_filter "")
foreach(filter IN LISTS IE_PLUGIN_CPPLINT_FILTERS)
string(CONCAT custom_filter "${custom_filter}" "," "${filter}")
endforeach()
add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
# append plugin to the list to register
@@ -75,17 +79,21 @@ function(ie_add_plugin)
set(PLUGIN_FILES "${PLUGIN_FILES}" CACHE INTERNAL "" FORCE)
add_dependencies(ie_plugins ${IE_PLUGIN_NAME})
if(TARGET inference_engine_preproc)
add_dependencies(${IE_PLUGIN_NAME} inference_engine_preproc)
endif()
# install rules
string(TOLOWER "${IE_PLUGIN_DEVICE_NAME}" install_component)
ie_cpack_add_component(${install_component} REQUIRED DEPENDS core)
if(NOT IE_PLUGIN_SKIP_INSTALL)
string(TOLOWER "${IE_PLUGIN_DEVICE_NAME}" install_component)
ie_cpack_add_component(${install_component} REQUIRED DEPENDS core)
install(TARGETS ${IE_PLUGIN_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH}
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
COMPONENT ${install_component})
install(TARGETS ${IE_PLUGIN_NAME}
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component}
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ${install_component}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component})
endif()
endfunction()
#


@@ -16,6 +16,7 @@
# and the following imported targets:
#
# IE::inference_engine - The Inference Engine library
# IE::inference_engine_legacy - The Inference Engine library with legacy API for IR v7 and older.
# IE::inference_engine_c_api - The Inference Engine C API library
# IE::inference_engine_nn_builder - The Inference Engine NN Builder library
#
@@ -38,7 +39,8 @@ set(InferenceEngine_FOUND FALSE)
if(TARGET IE::inference_engine)
set(InferenceEngine_FOUND TRUE)
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine IE::inference_engine_c_api IE::inference_engine_nn_builder)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
IE::inference_engine_c_api IE::inference_engine_nn_builder)
else()
if (WIN32)
set(_ARCH intel64)
@@ -84,28 +86,31 @@ else()
if(WIN32)
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
elseif(APPLE)
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
else()
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
endif()
find_package_handle_standard_args( InferenceEngine
FOUND_VAR INFERENCEENGINE_FOUND
REQUIRED_VARS IE_RELEASE_LIBRARY IE_C_API_RELEASE_LIBRARY IE_NN_BUILDER_RELEASE_LIBRARY IE_INCLUDE_DIR
REQUIRED_VARS IE_RELEASE_LIBRARY IE_LEGACY_RELEASE_LIBRARY IE_C_API_RELEASE_LIBRARY IE_NN_BUILDER_RELEASE_LIBRARY IE_INCLUDE_DIR
FAIL_MESSAGE "Some mandatory Inference Engine components were not found. Please consult the InferenceEngineConfig.cmake module's help page.")
if(INFERENCEENGINE_FOUND)
# keep this line for successful execution in CMake 2.8
set(InferenceEngine_FOUND TRUE)
foreach(ie_library_suffix "" "_c_api" "_nn_builder")
foreach(ie_library_suffix "" "_legacy" "_c_api" "_nn_builder")
string(TOUPPER "${ie_library_suffix}" ie_library_usuffix)
add_library(IE::inference_engine${ie_library_suffix} SHARED IMPORTED GLOBAL)
@@ -161,7 +166,8 @@ else()
endforeach()
set(InferenceEngine_INCLUDE_DIRS ${IE_INCLUDE_DIR})
set(InferenceEngine_LIBRARIES IE::inference_engine_c_api IE::inference_engine_nn_builder IE::inference_engine)
set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
IE::inference_engine_c_api IE::inference_engine_nn_builder)
set(IE_EXTERNAL_DIR "${IE_ROOT_DIR}/external")
include("${IE_ROOT_DIR}/share/ie_parallel.cmake")


@@ -0,0 +1,196 @@
#===============================================================================
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# TBB_FOUND should not be set explicitly. It is defined automatically by CMake.
# Handling of TBB_VERSION is in TBBConfigVersion.cmake.
if (NOT TBB_FIND_COMPONENTS)
set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_FIND_REQUIRED_${_tbb_component} 1)
endforeach()
endif()
# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
if (NOT _tbbmalloc_proxy_ix EQUAL -1)
list(FIND TBB_FIND_COMPONENTS tbbmalloc _tbbmalloc_ix)
if (_tbbmalloc_ix EQUAL -1)
list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
endif()
endif()
if (NOT TBBROOT)
if(DEFINED ENV{TBBROOT})
set (TBBROOT $ENV{TBBROOT})
endif()
endif()
set(_tbb_root ${TBBROOT})
set(_tbb_x32_subdir ia32)
set(_tbb_x64_subdir intel64)
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(_tbb_arch_subdir ${_tbb_x64_subdir})
else()
set(_tbb_arch_subdir ${_tbb_x32_subdir})
endif()
if (CMAKE_CXX_COMPILER_LOADED)
set(_tbb_compiler_id ${CMAKE_CXX_COMPILER_ID})
set(_tbb_compiler_ver ${CMAKE_CXX_COMPILER_VERSION})
elseif (CMAKE_C_COMPILER_LOADED)
set(_tbb_compiler_id ${CMAKE_C_COMPILER_ID})
set(_tbb_compiler_ver ${CMAKE_C_COMPILER_VERSION})
endif()
# For non-GCC compilers try to find version of system GCC to choose right compiler subdirectory.
if (NOT _tbb_compiler_id STREQUAL "GNU")
execute_process(COMMAND gcc --version OUTPUT_VARIABLE _tbb_gcc_ver_output ERROR_QUIET)
string(REGEX REPLACE ".*gcc.*([0-9]+\\.[0-9]+)\\.[0-9]+.*" "\\1" _tbb_compiler_ver "${_tbb_gcc_ver_output}")
if (NOT _tbb_compiler_ver)
message(FATAL_ERROR "This Intel TBB package is intended to be used only environment with available 'gcc'")
endif()
unset(_tbb_gcc_ver_output)
endif()
if (EXISTS "${_tbb_root}/lib/${_tbb_arch_subdir}")
set(_tbb_lib ${_tbb_root}/lib/${_tbb_arch_subdir})
set(_tbb_inc ${_tbb_root}/include)
file(GLOB _tbb_gcc_versions_available RELATIVE ${_tbb_lib} ${_tbb_lib}/*)
# shall we check _tbb_gcc_versions_available is not empty?
foreach (_tbb_gcc_version ${_tbb_gcc_versions_available})
string(SUBSTRING ${_tbb_gcc_version} 3 -1 _tbb_gcc_version_number)
if (NOT _tbb_compiler_ver VERSION_LESS _tbb_gcc_version_number)
set(_tbb_compiler_subdir ${_tbb_gcc_version})
endif()
endforeach()
else()
if (TBBROOT)
set(__tbb_hint_path "${TBBROOT}")
else()
set(__tbb_hint_path "/non/existing/path")
endif()
# try to find TBB in the system
find_library(_tbb_lib NAMES tbb
HINTS "${__tbb_hint_path}"
PATH_SUFFIXES lib lib64)
find_path(_tbb_inc NAMES tbb.h
HINTS "${__tbb_hint_path}"
PATH_SUFFIXES include tbb include/tbb)
unset(__tbb_hint_path)
if (NOT _tbb_lib OR NOT _tbb_inc)
message("FATAL_ERROR" "Cannot find TBB")
endif()
get_filename_component(_tbb_lib "${_tbb_lib}" PATH)
get_filename_component(_tbb_inc "${_tbb_inc}" PATH)
set(_tbb_arch_subdir "")
set(_tbb_compiler_subdir "")
endif()
unset(_tbb_gcc_version_number)
unset(_tbb_compiler_id)
unset(_tbb_compiler_ver)
# Now we check that all the needed component are present
get_filename_component(_tbb_lib_path "${_tbb_lib}/${_tbb_compiler_subdir}" ABSOLUTE)
if (TBB_FOUND)
return()
endif()
# detect version
find_file(_tbb_def_header tbb_stddef.h HINTS "${_tbb_root}/include/tbb")
if (_tbb_def_header)
file(READ "${_tbb_def_header}" _tbb_def_content)
string(REGEX MATCH "TBB_VERSION_MAJOR[ ]*[0-9]*" _tbb_version_major ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_major ${_tbb_version_major})
string(REGEX MATCH "TBB_VERSION_MINOR[ ]*[0-9]" _tbb_version_minor ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_minor ${_tbb_version_minor})
set(TBB_VERSION "${_tbb_version_major}.${_tbb_version_minor}")
else()
set(TBB_VERSION "")
endif()
foreach (_tbb_soversion 2 12)
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(_tbb_release_lib
"${_tbb_lib_path}/lib${_tbb_component}.so.${_tbb_soversion}")
set(_tbb_debug_lib
"${_tbb_lib_path}/lib${_tbb_component}_debug.so.${_tbb_soversion}")
# oneDNN change: check library existence (BUILD_MODE related only, not both)
string(TOUPPER "${CMAKE_BUILD_TYPE}" UPPERCASE_CMAKE_BUILD_TYPE)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL "DEBUG")
if (EXISTS "${_tbb_debug_lib}")
set(_lib_exists TRUE)
elseif (EXISTS "${_tbb_release_lib}")
message(FATAL_ERROR
"Intel TBB release library is found here: ${_tbb_release_lib}. "
"But the debug library
(lib${_tbb_component}_debug.so.${_tbb_soversion}) is missing.")
endif()
else()
if (EXISTS "${_tbb_release_lib}")
set(_lib_exists TRUE)
endif()
endif()
if (_lib_exists)
if (NOT TARGET TBB::${_tbb_component})
add_library(TBB::${_tbb_component} SHARED IMPORTED)
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_CONFIGURATIONS "RELEASE;DEBUG"
IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"
IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"
INTERFACE_INCLUDE_DIRECTORIES "${_tbb_inc}")
# Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc
if (_tbb_component STREQUAL tbbmalloc_proxy)
set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc)
endif()
list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component})
set(TBB_${_tbb_component}_FOUND 1)
endif()
break()
endif()
endforeach()
endforeach()
if (NOT _lib_exists AND TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component})
message(FATAL_ERROR "Missed required Intel TBB component: ${_tbb_component}")
endif()
unset(_tbb_x32_subdir)
unset(_tbb_x64_subdir)
unset(_tbb_arch_subdir)
unset(_tbb_compiler_subdir)
unset(_tbbmalloc_proxy_ix)
unset(_tbbmalloc_ix)
unset(_tbb_lib_path)
unset(_tbb_release_lib)
unset(_tbb_debug_lib)


@@ -0,0 +1,114 @@
#===============================================================================
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# TBB_FOUND should not be set explicitly. It is defined automatically by CMake.
# Handling of TBB_VERSION is in TBBConfigVersion.cmake.
if (NOT TBB_FIND_COMPONENTS)
set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_FIND_REQUIRED_${_tbb_component} 1)
endforeach()
endif()
# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
if (NOT _tbbmalloc_proxy_ix EQUAL -1)
list(FIND TBB_FIND_COMPONENTS tbbmalloc _tbbmalloc_ix)
if (_tbbmalloc_ix EQUAL -1)
list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
endif()
endif()
if (NOT TBBROOT)
if(DEFINED ENV{TBBROOT})
set (TBBROOT $ENV{TBBROOT})
else()
message("FATAL_ERROR" "TBBROOT is unset")
endif()
endif()
set(_tbb_root ${TBBROOT})
set(_tbb_x32_subdir .)
set(_tbb_x64_subdir .)
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(_tbb_arch_subdir ${_tbb_x64_subdir})
else()
set(_tbb_arch_subdir ${_tbb_x32_subdir})
endif()
set(_tbb_compiler_subdir .)
get_filename_component(_tbb_lib_path "${_tbb_root}/lib/${_tbb_arch_subdir}/${_tbb_compiler_subdir}" ABSOLUTE)
if (TBB_FOUND)
return()
endif()
# detect version
find_file(_tbb_def_header tbb_stddef.h HINTS "${_tbb_root}/include/tbb")
if (_tbb_def_header)
file(READ "${_tbb_def_header}" _tbb_def_content)
string(REGEX MATCH "TBB_VERSION_MAJOR[ ]*[0-9]*" _tbb_version_major ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_major ${_tbb_version_major})
string(REGEX MATCH "TBB_VERSION_MINOR[ ]*[0-9]" _tbb_version_minor ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_minor ${_tbb_version_minor})
set(TBB_VERSION "${_tbb_version_major}.${_tbb_version_minor}")
else()
set(TBB_VERSION "")
endif()
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(_tbb_release_lib "${_tbb_lib_path}/lib${_tbb_component}.dylib")
set(_tbb_debug_lib "${_tbb_lib_path}/lib${_tbb_component}_debug.dylib")
if (EXISTS "${_tbb_release_lib}" AND EXISTS "${_tbb_debug_lib}")
if (NOT TARGET TBB::${_tbb_component})
add_library(TBB::${_tbb_component} SHARED IMPORTED)
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_CONFIGURATIONS "RELEASE;DEBUG"
IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"
IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"
INTERFACE_INCLUDE_DIRECTORIES "${_tbb_root}/include")
# Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc
if (_tbb_component STREQUAL tbbmalloc_proxy)
set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc)
endif()
list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component})
set(TBB_${_tbb_component}_FOUND 1)
endif()
elseif (TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component})
message(FATAL_ERROR "Missed required Intel TBB component: ${_tbb_component}")
endif()
endforeach()
unset(_tbb_x32_subdir)
unset(_tbb_x64_subdir)
unset(_tbb_arch_subdir)
unset(_tbb_compiler_subdir)
unset(_tbbmalloc_proxy_ix)
unset(_tbbmalloc_ix)
unset(_tbb_lib_path)
unset(_tbb_release_lib)
unset(_tbb_debug_lib)


@@ -0,0 +1,140 @@
#===============================================================================
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# TBB_FOUND should not be set explicitly. It is defined automatically by CMake.
# Handling of TBB_VERSION is in TBBConfigVersion.cmake.
if (NOT TBB_FIND_COMPONENTS)
set(TBB_FIND_COMPONENTS "tbb;tbbmalloc;tbbmalloc_proxy")
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(TBB_FIND_REQUIRED_${_tbb_component} 1)
endforeach()
endif()
# Add components with internal dependencies: tbbmalloc_proxy -> tbbmalloc
list(FIND TBB_FIND_COMPONENTS tbbmalloc_proxy _tbbmalloc_proxy_ix)
if (NOT _tbbmalloc_proxy_ix EQUAL -1)
list(FIND TBB_FIND_COMPONENTS tbbmalloc _tbbmalloc_ix)
if (_tbbmalloc_ix EQUAL -1)
list(APPEND TBB_FIND_COMPONENTS tbbmalloc)
set(TBB_FIND_REQUIRED_tbbmalloc ${TBB_FIND_REQUIRED_tbbmalloc_proxy})
endif()
endif()
if (NOT TBBROOT)
if(DEFINED ENV{TBBROOT})
set (TBBROOT $ENV{TBBROOT})
else()
message("FATAL_ERROR" "TBBROOT is unset")
endif()
endif()
set(_tbb_root ${TBBROOT})
set(_tbb_x32_subdir ia32)
set(_tbb_x64_subdir intel64)
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(_tbb_arch_subdir ${_tbb_x64_subdir})
else()
set(_tbb_arch_subdir ${_tbb_x32_subdir})
endif()
if (NOT MSVC)
message(FATAL_ERROR "This Intel TBB package is intended to be used only in the project with MSVC")
endif()
# Detect the most relevant MSVC subdirectory
set(_tbb_msvc_1700_subdir vc11)
set(_tbb_msvc_1800_subdir vc12)
set(_tbb_msvc_1900_subdir vc14)
set(_tbb_msvc_ver ${MSVC_VERSION})
if (MSVC_VERSION VERSION_LESS 1700)
message(FATAL_ERROR "This Intel TBB package is intended to be used only in the project with MSVC version 1700 (vc11) or higher")
elseif (MSVC_VERSION VERSION_GREATER 1900)
set(_tbb_msvc_ver 1900)
endif()
set(_tbb_compiler_subdir ${_tbb_msvc_${_tbb_msvc_ver}_subdir})
unset(_tbb_msvc_1700_subdir)
unset(_tbb_msvc_1800_subdir)
unset(_tbb_msvc_1900_subdir)
if (WINDOWS_STORE)
set(_tbb_compiler_subdir ${_tbb_compiler_subdir}_ui)
endif()
# set convenience variables to locate TBB files (these are used for a PSXE install)
get_filename_component(_tbb_lib_path "${_tbb_root}/lib/${_tbb_arch_subdir}/${_tbb_compiler_subdir}" ABSOLUTE)
get_filename_component(_tbb_inc_path "${_tbb_root}/include/" ABSOLUTE)
if (TBB_FOUND)
return()
endif()
# detect version
find_file(_tbb_def_header tbb_stddef.h HINTS "${_tbb_root}/include/tbb")
if (_tbb_def_header)
file(READ "${_tbb_def_header}" _tbb_def_content)
string(REGEX MATCH "TBB_VERSION_MAJOR[ ]*[0-9]*" _tbb_version_major ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_major ${_tbb_version_major})
string(REGEX MATCH "TBB_VERSION_MINOR[ ]*[0-9]" _tbb_version_minor ${_tbb_def_content})
string(REGEX MATCH "[0-9][0-9]*" _tbb_version_minor ${_tbb_version_minor})
set(TBB_VERSION "${_tbb_version_major}.${_tbb_version_minor}")
else()
set(TBB_VERSION "")
endif()
foreach (_tbb_component ${TBB_FIND_COMPONENTS})
set(_tbb_release_lib "${_tbb_lib_path}/${_tbb_component}.lib")
set(_tbb_debug_lib "${_tbb_lib_path}/${_tbb_component}_debug.lib")
if (EXISTS "${_tbb_release_lib}" AND EXISTS "${_tbb_debug_lib}")
if (NOT TARGET TBB::${_tbb_component})
add_library(TBB::${_tbb_component} SHARED IMPORTED)
set_target_properties(TBB::${_tbb_component} PROPERTIES
IMPORTED_CONFIGURATIONS "RELEASE;DEBUG"
IMPORTED_LOCATION_RELEASE "${_tbb_release_lib}"
IMPORTED_LOCATION_DEBUG "${_tbb_debug_lib}"
INTERFACE_INCLUDE_DIRECTORIES "${_tbb_inc_path}"
IMPORTED_IMPLIB_RELEASE "${_tbb_release_lib}"
IMPORTED_IMPLIB_DEBUG "${_tbb_debug_lib}"
INTERFACE_COMPILE_DEFINITIONS "__TBB_NO_IMPLICIT_LINKAGE=1")
# Add internal dependencies for imported targets: TBB::tbbmalloc_proxy -> TBB::tbbmalloc
if (_tbb_component STREQUAL tbbmalloc_proxy)
set_target_properties(TBB::tbbmalloc_proxy PROPERTIES INTERFACE_LINK_LIBRARIES TBB::tbbmalloc)
endif()
list(APPEND TBB_IMPORTED_TARGETS TBB::${_tbb_component})
set(TBB_${_tbb_component}_FOUND 1)
endif()
elseif (TBB_FIND_REQUIRED AND TBB_FIND_REQUIRED_${_tbb_component})
message(FATAL_ERROR "Missed required Intel TBB component: ${_tbb_component}")
endif()
endforeach()
unset(_tbb_x32_subdir)
unset(_tbb_x64_subdir)
unset(_tbb_arch_subdir)
unset(_tbb_compiler_subdir)
unset(_tbbmalloc_proxy_ix)
unset(_tbbmalloc_ix)
unset(_tbb_lib_path)
unset(_tbb_release_lib)
unset(_tbb_debug_lib)


@@ -19,7 +19,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
# Default packages
#
set(FIRMWARE_PACKAGE_VERSION 942_R10.15)
set(FIRMWARE_PACKAGE_VERSION 1656)
#
# CMake variables to override default firmware files
@@ -82,7 +82,7 @@ foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
VERBATIM)
install(FILES ${${var_name}}
DESTINATION ${IE_CPACK_LIBRARY_PATH}
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT myriad)
endforeach()
@@ -96,12 +96,12 @@ add_custom_target(vpu_copy_firmware
if(ANDROID)
RESOLVE_DEPENDENCY(LIBUSB
ARCHIVE_ANDROID "libusb_33167_android.tgz"
ARCHIVE_ANDROID "libusb_39409_android.tgz"
TARGET_PATH "${TEMP}/vpu/libusb")
debug_message(STATUS "LIBUSB=" ${LIBUSB})
set(LIBUSB_INCLUDE_DIR "${LIBUSB}/include")
set(LIBUSB_LIBRARY "${LIBUSB}/lib/libusb1.0.so")
set(LIBUSB_LIBRARY "${LIBUSB}/libs/${ANDROID_ABI}/libusb1.0.so")
log_rpath_from_dir(LIBUSB "${LIBUSB}/lib")
endif()
log_rpath_from_dir(LIBUSB "${LIBUSB}/libs/${ANDROID_ABI}")
endif()


@@ -1,7 +1,7 @@
# Overview of Inference Engine C* API
> **NOTE**: This is a preview version of the Inference Engine C* API for evaluation purposes only.
> Module structure and the API itself may be changed in future releases.
This API provides a simplified interface for Inference Engine functionality that allows you to:
@@ -11,10 +11,10 @@ This API provides a simplified interface for Inference Engine functionality that
## Supported OSes
Currently the Inference Engine C* API is supported on Ubuntu* 16.04, Microsoft Windows* 10 and CentOS* 7.3 OSes.
Supported Python* versions:
- On Ubuntu 16.04: 2.7, 3.5, 3.6
- On Windows 10: 3.5, 3.6
- On CentOS 7.3: 3.4, 3.5, 3.6
@@ -25,7 +25,7 @@ To configure the environment for the Inference Engine C* API, run:
- On Ubuntu 16.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
- On Windows 10: XXXX
The script automatically detects the latest installed C* version and configures the required environment if the version is supported.
If you want to use a certain version of C*, set the environment variable XXXXX
after running the environment configuration script.
@@ -78,7 +78,7 @@ typedef struct ie_param_config {
```
typedef struct desc {
char msg[256];
}desc_t;
```
@@ -134,6 +134,8 @@ enum precision_e{
I64 = 72, /**< 64bit signed integer value */
U64 = 73, /**< 64bit unsigned integer value */
BIN = 71, /**< 1bit integer value */
CUSTOM = 80 /**< custom precision has its own name and size of elements */
@@ -295,26 +297,26 @@ This struct represents an Inference Engine entity and allows you to manipulate w
- `IEStatusCode ie_core_create(char *xml_config_file, ie_core_t *core_result)`
> Note: creates an `ie_core_t` instance with the default configuration when `xml_config_file` is null.
- Parameters:
- `xml_config_file` - A full path to the `.xml` file containing plugin configuration. If the parameter is not specified, the default configuration is handled automatically.
- `core_result` - A pointer to the newly created `ie_core_t`.
- Return value: Status code of the operation: OK(0) for success.
- Usage examples:
Create an `ie_core_t` instance with a custom configuration location specified:
```
char *xml_config_file = "/localdisk/plugins/my_custom_cfg.xml";
ie_core_t *ie = NULL;
IEStatusCode status = ie_core_create(xml_config_file, &ie);
```
The `.xml` file has the following structure:
```
<ie>
<plugins>
@@ -329,7 +331,7 @@ This struct represents an Inference Engine entity and allows you to manipulate w
</plugins>
</ie>
```
### <a name="iecore-methods"></a>Methods
@@ -342,11 +344,11 @@ This struct represents an Inference Engine entity and allows you to manipulate w
- `core` - A pointer to `ie_core_t` instance.
- `device_name` - Name of the registered plugin.
- `version_result` - Dictionary mapping a plugin name to the version information.
- Return value: Status of the operation: OK(0) for success.
- Usage example:
```
char *xml_config_file="/localdisk/plugins/my_custom_cfg.xml";
char *device_name="CPU";
@@ -356,30 +358,30 @@ This struct represents an Inference Engine entity and allows you to manipulate w
IEStatusCode status2 = ie_core_get_versions(ie, device_name, version);
printf("description:%s, major:%d, minor:%d, build_number:%s.\n", version->description, version->major, version->minor, version->build_number);
```
- `IEStatusCode ie_core_load_network(ie_core_t *core, ie_network_t *network, const char *device_name, ie_config_t config, ie_executable_network_t *exec_network_result)`
- Description: Loads a network that was read from the Intermediate Representation (IR) to the plugin with the specified device name and creates an `ie_executable_network_t` instance.
You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware resources).
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `network` - A pointer to `ie_network_t` instance.
- `device_name` - A device name of a target plugin.
- `config` - A dictionary of plugin configuration keys and their values.
- `exec_network_result` - A pointer to the newly loaded network.
- Return value: Status code of the operation: OK(0) for success.
- Usage example:
```
```
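Since the usage example above is left empty in this draft, the following is a minimal hedged sketch (the file names, the `CPU` device choice, and the zero-initialized `ie_config_t` are illustrative assumptions; it follows the pointer-based prototypes from `ie_c_api.h` shown later in this document, and per-call error checks are elided):

```
ie_core_t *core = NULL;
ie_network_t *network = NULL;
ie_executable_network_t *exe_network = NULL;
ie_config_t config = {0};  /* assumption: a zero-initialized config means "no options" */

ie_core_create(NULL, &core);                                     /* default plugin configuration */
ie_core_read_network(core, "model.xml", "model.bin", &network);  /* hypothetical IR file names */
ie_core_load_network(core, network, "CPU", &config, &exe_network);
```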
- `IEStatusCode ie_core_set_config(ie_core_t *core, ie_config_t *ie_core_config, const char *device_name)`
- Description: Sets a configuration for a plugin.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
@@ -387,7 +389,7 @@ This struct represents an Inference Engine entity and allows you to manipulate w
- `device_name` - A device name of a target plugin.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_register_plugin(ie_core_t *core, const char *plugin_name, const char *device_name )`
- Description: Registers a new device and a plugin which implement this device inside Inference Engine.
- Parameters:
- `core`- A pointer to `ie_core_t` instance.
@@ -396,13 +398,13 @@ This struct represents an Inference Engine entity and allows you to manipulate w
a plugin with the default name.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_register_plugins(ie_core_t *core, const char *xml_config_file)`
- Description: Registers plugins specified in an `.xml` configuration file
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `xml_config_file` - A full path to `.xml` file containing plugins configuration.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_unregister_plugin(ie_core_t *core, const char *device_name)`
- Description: Unregisters a plugin with a specified device name
@@ -413,7 +415,7 @@ This struct represents an Inference Engine entity and allows you to manipulate w
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_add_extension(ie_core_t *core, const char *extension_path, const char *device_name)`
- Description: Loads extension library to the plugin with a specified device name.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
@@ -421,7 +423,7 @@ This struct represents an Inference Engine entity and allows you to manipulate w
- `device_name` - A device name of a plugin to load the extensions to.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_get_metric(ie_core_t *core, const char *device_name, const char *metric_name, ie_param_t *param_result)`
- Description: Gets a general runtime metric for dedicated hardware. It enables you to request common device properties, which are `ie_executable_network_t` agnostic, such as device name, temperature, and other device-specific values.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
@@ -429,20 +431,20 @@ This struct represents an Inference Engine entity and allows you to manipulate w
- `metric_name` - A metric name to request.
- `param_result` - A metric value corresponding to a metric key.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_get_config(ie_core_t *core, const char *device_name, const char *config_name, ie_param_t *param_result)`
- Description: Gets a configuration dedicated to device behavior. The method extracts information that can be set via the SetConfig method.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `device_name` - A name of a device to get a configuration value for.
- `config_name` - A configuration key (name) to request.
- `param_result` - A configuration value corresponding to the configuration key.
- Return value: Status code of the operation: OK(0) for success.
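For illustration, a hedged sketch of querying a metric (the `"FULL_DEVICE_NAME"` key and the `CPU` device name are assumptions; `ie_param_free()` is declared in `ie_c_api.h`):

```
ie_param_t param;
if (ie_core_get_metric(core, "CPU", "FULL_DEVICE_NAME", &param) == OK) {
    /* inspect the metric value, then release the memory it owns */
    ie_param_free(&param);
}
```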
## IENetwork
@@ -453,7 +455,7 @@ This struct contains the information about the network model read from IR and al
- `IEStatusCode ie_network_read(char *xml, char *weights_file, ie_network_t *network_result)`
- Description: Reads the model from the `.xml` and `.bin` files of the IR.
- Parameters:
- `xml_file` - `.xml` file's path of the IR.
- `weights_file` - `.bin` file's path of the IR.
- `network_result` - A pointer to the newly created network.
- Return value: Status code of the operation: OK(0) for success.
@@ -538,7 +540,7 @@ This struct contains the information about the network model read from IR and al
- `resize_algo` - Resize algorithm.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_color_format(ie_network_t *network, char *input_name, colorformat_e *colformat_result)`
- Description: Gets color format of the input data named "input_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input` - Name of input data.
@@ -594,7 +596,7 @@ This struct represents a network instance loaded to plugin and ready for inferen
### Methods
- `IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t *ie_exec_network, desc_t *desc, ie_infer_request_t **req)`
- Description: Creates an inference request instance used to infer the network. The created request has allocated input and output blobs (that can be changed later).
- Parameters:
- `ie_exec_network` - A pointer to `ie_executable_network_t` instance.
@@ -602,22 +604,22 @@ This struct represents a network instance loaded to plugin and ready for inferen
- `req` - A pointer to the newly created `ie_infer_request_t` instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_exec_network_get_metric(ie_executable_network_t *ie_exec_network, const char *metric_name, ie_param_t *param_result)`
- Description: Gets a general runtime metric for an executable network. It can be the network name, the actual device ID on which the executable network is running, or other properties which cannot be changed dynamically.
- Parameters:
- `ie_exec_network`: A pointer to `ie_executable_network_t` instance.
- `metric_name` - A metric name to request.
- `param_result` - A metric value corresponding to a metric key.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_exec_network_set_config(ie_executable_network_t *ie_exec_network, ie_param_config_t *param_config, desc_t *desc)`
- Description: Sets a configuration for current executable network.
- Parameters:
- `ie_exec_network`: A pointer to `ie_executable_network_t` instance.
- `config`: A configuration for the current executable network.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_exec_network_get_config(ie_executable_network_t *ie_exec_network, const char *metric_config, ie_param_t *param_result)`
- Description: Gets configuration for the current executable network. The method extracts information which affects executable network execution.
- Parameters:
@@ -625,47 +627,47 @@ This struct represents a network instance loaded to plugin and ready for inferen
- `metric_config` - A configuration parameter name to request.
- `param_result` - A configuration value corresponding to a configuration key.
- Return value: Status code of the operation: OK(0) for success.
## InferRequest
This struct provides an interface to infer requests of `ExecutableNetwork` and serves to handle infer requests execution and to set and get output data.
### Methods
- `IEStatusCode *ie_infer_request_get_blob(ie_infer_request_t *infer_request, const char *name, ie_blob_t **blob_result)`
- Description: Gets a Blob corresponding to the blob name.
- Parameters:
- `infer_request` - A pointer to `ie_infer_request_t` instance
- `name` - Blob name.
- `blob_result` - A pointer to the blob corresponding to the blob name.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_set_blob(ie_infer_request_t *infer_request, ie_blob_t *blob)`
- Description: Sets the blob in an inference request.
- Parameters:
- `infer_request`: A pointer to `ie_infer_request_t` instance.
- `blob ` - A pointer to `ie_blob_t` instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_infer(ie_infer_request_t *infer_request)`
- Description: Starts synchronous inference of the infer request and fills the outputs array.
- Parameters:
- `infer_request`: A pointer to `ie_infer_request_t` instance.
- Return value: Status code of the operation: OK(0) for success.
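A minimal hedged sketch of the synchronous flow, assuming `request` was created from an executable network beforehand and that the network has an input named `"data"` (a hypothetical name):

```
ie_blob_t *input_blob = NULL;
ie_infer_request_get_blob(request, "data", &input_blob);  /* "data" is a hypothetical input name */
/* ... fill the blob memory via ie_blob_buffer() ... */
IEStatusCode status = ie_infer_request_infer(request);    /* blocks until outputs are ready */
```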
- `IEStatusCode ie_infer_request_infer_async(ie_infer_request_t *infer_request)`
- Description: Starts asynchronous inference of the infer request and fills the outputs array.
- Parameters:
- `infer_request` - A pointer to `ie_infer_request_t` instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_set_completion_callback(ie_infer_request_t *infer_request,completeCallBackFunc callback)`
- Description: Sets a callback function that will be called on success or failure of an asynchronous request.
@@ -673,10 +675,10 @@ This struct provides an interface to infer requests of `ExecutableNetwork` and s
- `infer_request` - A pointer to a `ie_infer_request_t` instance.
- `callback` - A function to be called.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_wait(ie_infer_request_t *infer_request, int64_t timeout)`
- Description: Waits for the result to become available. Blocks until the specified timeout elapses or the result becomes available, whichever comes first.
> **NOTE:** There are special values of the timeout parameter:
@@ -684,7 +686,7 @@ This struct provides an interface to infer requests of `ExecutableNetwork` and s
ind statuses meaning.
- -1 - Waits until inference result becomes available (default value).
- Parameters:
- `infer_request` - A pointer to a `ie_infer_request_t` instance.
- `timeout` - Time to wait in milliseconds or special (0, -1) cases described above. If not specified, `timeout` value is set to -1 by default.
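For illustration, a hedged sketch of the asynchronous flow (error handling elided):

```
IEStatusCode status = ie_infer_request_infer_async(request);
if (status == OK) {
    /* -1 blocks until the inference result becomes available */
    status = ie_infer_request_wait(request, -1);
}
```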
@@ -695,12 +697,12 @@ This struct provides an interface to infer requests of `ExecutableNetwork` and s
- Description: Sets a new batch size for a certain infer request when dynamic batching is enabled in the executable network that created this request.
> **NOTE:** Support of dynamic batch size depends on the target plugin.
- Parameters:
- `infer_request` - A pointer to a `ie_infer_request_t` instance.
- `size` - New batch size to be used by all the following inference calls for this request.
- Return value: Status code of the operation: OK(0) for success.
@@ -712,42 +714,42 @@ This struct provides an interface to infer requests of `ExecutableNetwork` and s
- `IEStatusCode make_memory_blob(const tensor_desc *tensorDesc, ie_blob_t *blob_result)`
- Description: Creates a `ie_blob_t` instance with the specified dimensions and layout but does not allocate the memory. Use the `ie_blob_allocate()` method to allocate memory.
- Parameters:
- `tensorDesc` - Defines the layout and dims of the blob.
- `blob_result` - A pointer to an empty ie_blob_t instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode make_memory_blob_from_preallocated_memory(const tensor_desc *tensorDesc, void *ptr, size_t size = 0, ie_blob_t *blob_result)`
- Description: Creates a `ie_blob_t` instance with the specified dimensions and layout on the pre-allocated memory. The `ie_blob_allocate()` call is not required.
- Parameters:
- `tensorDesc` - Tensor description for Blob creation.
- `ptr` - A pointer to the pre-allocated memory.
- `size` - Length of the pre-allocated array. If not set, size is assumed equal to the dot product of dims.
- `blob_result` - A pointer to the newly created ie_blob_t instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode make_memory_blob_with_roi(const ie_blob_t **inputBlob, const roi_e *roi, ie_blob_t *blob_result)`
- Description: Creates a blob describing a given ROI instance based on the given blob with pre-allocated memory.
- Parameters:
- `inputBlob` - Original blob with pre-allocated memory.
- `roi` - A roi object inside of the original blob.
- `blob_result` - A pointer to the newly created blob.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_size(ie_blob_t *blob, int *size_result)`
- Description: Gets the total number of elements, which is a product of all the dimensions.
- Parameters:
- `blob` - A pointer to the blob.
- `size_result` - The total number of elements.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_byte_size(ie_blob_t *blob, int *bsize_result)`
- Description: Gets the size of the current Blob in bytes.
- Parameters:
- `blob` - A pointer to the blob.
- `bsize_result` - The size of the current Blob in bytes.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_allocate(ie_blob_t *blob)`
- Description: Allocates memory for blob.
- Parameters:
@@ -756,41 +758,41 @@ This struct provides an interface to infer requests of `ExecutableNetwork` and s
- `IEStatusCode ie_blob_deallocate(ie_blob_t *blob)`
- Description: Releases previously allocated data.
- Parameters:
- `blob` - A pointer to the blob.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_buffer(ie_blob_t *blob, void *buffer)`
- Description: Gets access to the allocated memory.
- Parameters:
- `blob` - A pointer to the blob.
- `buffer` - A pointer to the copied data from the given pointer to the blob.
- Return value: Status code of the operation: OK(0) for success.
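A hedged sketch of filling an input blob through `ie_blob_buffer`, following the prototype above (the out-pointer is passed by address, and the FP32 element type is an assumption; check the blob precision with `ie_blob_get_precision()` in real code):

```
void *buffer = NULL;
int size = 0;
if (ie_blob_buffer(blob, &buffer) == OK && buffer != NULL) {
    float *data = (float *)buffer;  /* assumes FP32 precision */
    ie_blob_size(blob, &size);
    for (int i = 0; i < size; ++i)
        data[i] = 0.0f;             /* fill with application data */
}
```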
- `IEStatusCode ie_blob_cbuffer(ie_blob_t *blob, const void *cbuffer)`
- Description: Gets read-only access to the allocated memory.
- Parameters:
- `blob` - A pointer to the blob.
- `cbuffer` - A pointer to the copied data from the given pointer to the blob; the data is read-only.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_get_dims(ie_blob_t *blob, dimensions_t *dims_result)`
- Description: Gets dimensions of blob instance's tensor.
- Parameters:
- `blob` - A pointer to the blob.
- `dims_result` - A pointer to the dimensions of blob instance's tensor.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_get_layout(ie_blob_t *blob, layout_t *layout_result)`
- Description: Gets layout of blob instance's tensor.
- Parameters:
- `blob` - A pointer to the blob.
- `layout_result` - A pointer to the layout of blob instance's tensor.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_get_precision(ie_blob_t *blob, precision_e *prec_result)`
- Description: Gets precision of blob instance's tensor.
- Parameters:
- `blob` - A pointer to the blob.
- `prec_result` - A pointer to the precision of blob instance's tensor.
- Return value: Status code of the operation: OK(0) for success.


@@ -5,12 +5,12 @@
/**
* @file ie_c_api.h
* C API of Inference Engine bridge unlocks using of OpenVINO Inference Engine
* library and all its plugins in native applications disabling usage
* of C++ API. The scope of API covers significant part of C++ API and includes
* an ability to read model from the disk, modify input and output information
* to correspond their runtime representation like data types or memory layout,
* load in-memory model to Inference Engine on different devices including
* heterogeneous and multi-device modes, manage memory where input and output
* is allocated and manage inference flow.
**/
@@ -28,6 +28,7 @@
#if defined(__GNUC__) && (__GNUC__ < 4)
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __VA_ARGS__
#define IE_NODISCARD
#else
#if defined(_WIN32)
#ifdef inference_engine_c_api_EXPORTS
@@ -35,8 +36,10 @@
#else
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __declspec(dllimport) __VA_ARGS__ __cdecl
#endif
#define IE_NODISCARD
#else
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __attribute__((visibility("default"))) __VA_ARGS__
#define IE_NODISCARD __attribute__((warn_unused_result))
#endif
#endif
@@ -46,6 +49,14 @@ typedef struct ie_executable ie_executable_network_t;
typedef struct ie_infer_request ie_infer_request_t;
typedef struct ie_blob ie_blob_t;
/**
* @struct ie_version
* @brief Represents an API version information that reflects the set of supported features
*/
typedef struct ie_version {
char *api_version;
}ie_version_t;
/**
* @struct ie_core_version
* @brief Represents version information that describes devices and the inference engine runtime library
@@ -165,6 +176,7 @@ typedef enum {
U16 = 60, /**< 16bit unsigned integer value */
I32 = 70, /**< 32bit signed integer value */
I64 = 72, /**< 64bit signed integer value */
U64 = 73, /**< 64bit unsigned integer value */
BIN = 71, /**< 1bit integer value */
CUSTOM = 80 /**< custom precision has its own name and size of elements */
}precision_e;
@@ -190,6 +202,7 @@ typedef enum {
RGBX, ///< RGBX color format with X ignored during inference
BGRX, ///< BGRX color format with X ignored during inference
NV12, ///< NV12 color format represented as compound Y+UV blob
I420, ///< I420 color format represented as compound Y+U+V blob
}colorformat_e;
/**
@@ -276,11 +289,31 @@ typedef struct ie_complete_call_back {
}ie_complete_call_back_t;
/**
* @brief Returns the version number that is exported.
* @struct ie_available_devices
* @brief Represents all available devices.
*/
typedef struct ie_available_devices {
char **devices;
size_t num_devices;
}ie_available_devices_t;
/**
* @brief Returns the version number that is exported. Use ie_version_free() to free the memory.
* @return Version number of the API.
*/
INFERENCE_ENGINE_C_API(const char *) ie_c_api_version(void);
INFERENCE_ENGINE_C_API(ie_version_t) ie_c_api_version(void);
/**
* @brief Release the memory allocated by ie_c_api_version.
* @param version A pointer to the ie_version_t to free memory.
*/
INFERENCE_ENGINE_C_API(void) ie_version_free(ie_version_t *version);
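/* Usage sketch (illustrative only; assumes the version struct is returned by value
 * as declared above and owns the api_version string):
 *     ie_version_t version = ie_c_api_version();
 *     printf("API version: %s\n", version.api_version);
 *     ie_version_free(&version);
 */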
/**
* @brief Release the memory allocated by ie_param_t.
* @param param A pointer to the ie_param_t to free memory.
*/
INFERENCE_ENGINE_C_API(void) ie_param_free(ie_param_t *param);
// Core
@@ -300,15 +333,14 @@ INFERENCE_ENGINE_C_API(const char *) ie_c_api_version(void);
* @param core A pointer to the newly created ie_core_t.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_create(const char *xml_config_file, ie_core_t **core);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_create(const char *xml_config_file, ie_core_t **core);
/**
* @brief Releases memory occupied by core.
* @ingroup Core
* @param core A pointer to the core to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_free(ie_core_t **core);
INFERENCE_ENGINE_C_API(void) ie_core_free(ie_core_t **core);
/**
* @brief Gets version information of the device specified. Use the ie_core_versions_free() method to free memory.
@@ -318,15 +350,14 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_free(ie_core_t **core);
* @param versions A pointer to versions corresponding to device_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_versions(const ie_core_t *core, const char *device_name, ie_core_versions_t *versions);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_get_versions(const ie_core_t *core, const char *device_name, ie_core_versions_t *versions);
/**
* @brief Releases memory occupied by ie_core_versions.
* @ingroup Core
* @param vers A pointer to the ie_core_versions to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_versions_free(ie_core_versions_t *vers);
INFERENCE_ENGINE_C_API(void) ie_core_versions_free(ie_core_versions_t *vers);
/**
* @brief Reads the model from the .xml and .bin files of the IR. Use the ie_network_free() method to free memory.
@@ -338,7 +369,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_versions_free(ie_core_versions_t *v
* @param network A pointer to the newly created network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_read_network(ie_core_t *core, const char *xml, const char *weights_file, ie_network_t **network);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_read_network(ie_core_t *core, const char *xml, const char *weights_file, ie_network_t **network);
/**
* @brief Creates an executable network from a network object. Users can create as many networks as they need and use
@@ -351,7 +382,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_read_network(ie_core_t *core, const
* @param exe_network A pointer to the newly created executable network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_load_network(ie_core_t *core, const ie_network_t *network, const char *device_name, \
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_load_network(ie_core_t *core, const ie_network_t *network, const char *device_name, \
const ie_config_t *config, ie_executable_network_t **exe_network);
/**
@@ -363,7 +394,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_load_network(ie_core_t *core, const
* the config is set for all the registered devices.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_set_config(ie_core_t *core, const ie_config_t *ie_core_config, const char *device_name);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_set_config(ie_core_t *core, const ie_config_t *ie_core_config, const char *device_name);
/**
* @brief Registers a new device and a plugin which implement this device inside Inference Engine.
@@ -375,7 +406,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_set_config(ie_core_t *core, const i
* a plugin with the default name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_register_plugin(ie_core_t *core, const char *plugin_name, const char *device_name);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_register_plugin(ie_core_t *core, const char *plugin_name, const char *device_name);
/**
* @brief Registers plugins specified in an ".xml" configuration file.
@@ -384,7 +415,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_register_plugin(ie_core_t *core, co
* @param xml_config_file A full path to ".xml" file containing plugins configuration.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_register_plugins(ie_core_t *core, const char *xml_config_file);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_register_plugins(ie_core_t *core, const char *xml_config_file);
/**
* @brief Unregisters a plugin with a specified device name.
@@ -393,7 +424,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_register_plugins(ie_core_t *core, c
* @param device_name A device name of the device to unregister.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_unregister_plugin(ie_core_t *core, const char *device_name);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_unregister_plugin(ie_core_t *core, const char *device_name);
/**
* @brief Loads extension library to the device with a specified device name.
@@ -403,7 +434,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_unregister_plugin(ie_core_t *core,
* @param device_name A device name of a device to load the extensions to.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_add_extension(ie_core_t *core, const char *extension_path, const char *device_name);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_add_extension(ie_core_t *core, const char *extension_path, const char *device_name);
/**
* @brief Gets general runtime metric for dedicated hardware. The method is needed to request common device properties
@@ -415,7 +446,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_add_extension(ie_core_t *core, cons
* @param param_result A metric value corresponding to the metric_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_metric(const ie_core_t *core, const char *device_name, const char *metric_name, ie_param_t *param_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_get_metric(const ie_core_t *core, const char *device_name, const char *metric_name, ie_param_t *param_result);
/**
* @brief Gets configuration dedicated to device behaviour. The method is targeted to extract information
@@ -427,7 +458,24 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_metric(const ie_core_t *core, c
* @param param_result A configuration value corresponding to the config_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_config(const ie_core_t *core, const char *device_name, const char *config_name, ie_param_t *param_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_get_config(const ie_core_t *core, const char *device_name, const char *config_name, ie_param_t *param_result);
/**
* @brief Gets available devices for neural network inference.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param avai_devices The devices are returned as { CPU, FPGA.0, FPGA.1, MYRIAD }
* If there is more than one device of a specific type, they are enumerated with the .# suffix
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_get_available_devices(const ie_core_t *core, ie_available_devices_t *avai_devices);
/**
* @brief Releases memory occupied by ie_available_devices_t
* @ingroup Core
* @param avai_devices A pointer to the ie_available_devices_t to free memory.
*/
INFERENCE_ENGINE_C_API(void) ie_core_available_devices_free(ie_available_devices_t *avai_devices);
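/* Usage sketch (illustrative only; error handling elided):
 *     ie_available_devices_t devices;
 *     if (ie_core_get_available_devices(core, &devices) == OK) {
 *         for (size_t i = 0; i < devices.num_devices; ++i)
 *             printf("%s\n", devices.devices[i]);
 *         ie_core_available_devices_free(&devices);
 *     }
 */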
/** @} */ // end of Core
@@ -443,9 +491,8 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_config(const ie_core_t *core, c
* @brief Releases memory occupied by ExecutableNetwork.
* @ingroup ExecutableNetwork
* @param ie_exec_network A pointer to the ExecutableNetwork to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_free(ie_executable_network_t **ie_exec_network);
INFERENCE_ENGINE_C_API(void) ie_exec_network_free(ie_executable_network_t **ie_exec_network);
/**
* @brief Creates an inference request instance used to infer the network. The created request has allocated input
@@ -455,7 +502,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_free(ie_executable_network_
* @param request A pointer to the newly created ie_infer_request_t instance
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_create_infer_request(ie_executable_network_t *ie_exec_network, ie_infer_request_t **request);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_exec_network_create_infer_request(ie_executable_network_t *ie_exec_network, ie_infer_request_t **request);
/**
* @brief Gets general runtime metric for an executable network. It can be network name, actual device ID on which executable network is running
@@ -466,7 +513,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_create_infer_request(ie_exe
* @param param_result A metric value corresponding to the metric_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_get_metric(const ie_executable_network_t *ie_exec_network, \
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_exec_network_get_metric(const ie_executable_network_t *ie_exec_network, \
const char *metric_name, ie_param_t *param_result);
/**
@@ -477,7 +524,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_get_metric(const ie_executa
* @param param_config A pointer to device configuration..
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_set_config(ie_executable_network_t *ie_exec_network, const ie_config_t *param_config);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_exec_network_set_config(ie_executable_network_t *ie_exec_network, const ie_config_t *param_config);
/**
* @brief Gets configuration for current executable network. The method is responsible to
@@ -488,7 +535,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_set_config(ie_executable_ne
* @param param_result A configuration value corresponding to a configuration paramter name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_get_config(const ie_executable_network_t *ie_exec_network, \
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_exec_network_get_config(const ie_executable_network_t *ie_exec_network, \
const char *metric_config, ie_param_t *param_result);
/** @} */ // end of ExecutableNetwork
@@ -506,9 +553,8 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_get_config(const ie_executa
* @brief Releases memory occupied by ie_infer_request_t instance.
* @ingroup InferRequest
* @param infer_request A pointer to the ie_infer_request_t to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_free(ie_infer_request_t **infer_request);
INFERENCE_ENGINE_C_API(void) ie_infer_request_free(ie_infer_request_t **infer_request);
/**
* @brief Gets input/output data for inference
@@ -518,7 +564,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_free(ie_infer_request_t **
* @param blob A pointer to input or output blob. The type of Blob must match the network input precision and size.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_get_blob(ie_infer_request_t *infer_request, const char *name, ie_blob_t **blob);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_get_blob(ie_infer_request_t *infer_request, const char *name, ie_blob_t **blob);
/**
* @brief Sets input/output data to inference.
@@ -528,7 +574,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_get_blob(ie_infer_request_
* @param blob Reference to input or output blob. The type of a blob must match the network input precision and size.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_set_blob(ie_infer_request_t *infer_request, const char *name, const ie_blob_t *blob);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_set_blob(ie_infer_request_t *infer_request, const char *name, const ie_blob_t *blob);
/**
* @brief Starts synchronous inference of the infer request and fills outputs.
@@ -536,7 +582,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_set_blob(ie_infer_request_
* @param infer_request A pointer to ie_infer_request_t instance.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_infer(ie_infer_request_t *infer_request);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_infer(ie_infer_request_t *infer_request);
/**
* @brief Starts asynchronous inference of the infer request and fills outputs.
@@ -544,7 +590,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_infer(ie_infer_request_t *
* @param infer_request A pointer to ie_infer_request_t instance.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_infer_async(ie_infer_request_t *infer_request);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_infer_async(ie_infer_request_t *infer_request);
/**
* @brief Sets a callback function that will be called on success or failure of an asynchronous request
@@ -553,7 +599,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_infer_async(ie_infer_reque
* @param callback A function to be called.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_set_completion_callback(ie_infer_request_t *infer_request, ie_complete_call_back_t *callback);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_set_completion_callback(ie_infer_request_t *infer_request, ie_complete_call_back_t *callback);
/**
* @brief Waits for the result to become available. Blocks until specified timeout elapses or the result becomes available, whichever comes first.
@@ -565,7 +611,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_set_completion_callback(ie_infer_r
* * -1 - waits until inference result becomes available
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_wait(ie_infer_request_t *infer_request, const int64_t timeout);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_wait(ie_infer_request_t *infer_request, const int64_t timeout);
/**
* @brief Sets new batch size for certain infer request when dynamic batching is enabled in executable network that created this request.
@@ -574,7 +620,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_wait(ie_infer_request_t *i
* @param size New batch size to be used by all the following inference calls for this request.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_set_batch(ie_infer_request_t *infer_request, const size_t size);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_infer_request_set_batch(ie_infer_request_t *infer_request, const size_t size);
/** @} */ // end of InferRequest
@@ -591,9 +637,16 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_set_batch(ie_infer_request
* @brief When the network is loaded into the Inference Engine, it is not required anymore and should be released
* @ingroup Network
* @param network The pointer to the instance of the ie_network_t to free.
*/
INFERENCE_ENGINE_C_API(void) ie_network_free(ie_network_t **network);
/**
* @brief Gets the name of the network.
* @ingroup Network
* @param name Name of the network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_free(ie_network_t **network);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_name(const ie_network_t *network, char **name);
/**
* @brief Gets number of inputs for the network.
@@ -602,7 +655,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_free(ie_network_t **network);
* @param size_result A number of the instance's input information.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_inputs_number(const ie_network_t *network, size_t *size_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_inputs_number(const ie_network_t *network, size_t *size_result);
/**
* @brief Gets name corresponding to the "number". Use the ie_network_name_free() method to free memory.
@@ -612,7 +665,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_inputs_number(const ie_netwo
* @param name Input name corresponding to the number.
* @return status Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_name(const ie_network_t *network, size_t number, char **name);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_name(const ie_network_t *network, size_t number, char **name);
/**
* @brief Gets a precision of the input data provided by user.
@@ -622,7 +675,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_name(const ie_network_
* @param prec_result A pointer to the precision used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_precision(const ie_network_t *network, const char *input_name, precision_e *prec_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_precision(const ie_network_t *network, const char *input_name, precision_e *prec_result);
/**
* @brief Changes the precision of the input data provided by the user.
@@ -633,7 +686,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_precision(const ie_net
* @param p A new precision of the input data to set (eg. precision_e.FP16).
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_precision(ie_network_t *network, const char *input_name, const precision_e p);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_set_input_precision(ie_network_t *network, const char *input_name, const precision_e p);
/**
* @brief Gets a layout of the input data.
@@ -643,7 +696,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_precision(ie_network_t
* @param layout_result A pointer to the layout used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_layout(const ie_network_t *network, const char *input_name, layout_e *layout_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_layout(const ie_network_t *network, const char *input_name, layout_e *layout_result);
/**
* @brief Changes the layout of the input data named "input_name".
@@ -654,7 +707,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_layout(const ie_networ
* @param l A new layout of the input data to set.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_layout(ie_network_t *network, const char *input_name, const layout_e l);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_set_input_layout(ie_network_t *network, const char *input_name, const layout_e l);
/**
* @brief Gets dimensions/shape of the input data with reversed order.
@@ -664,7 +717,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_layout(ie_network_t *n
* @param dims_result A pointer to the dimensions used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_dims(const ie_network_t *network, const char *input_name, dimensions_t *dims_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_dims(const ie_network_t *network, const char *input_name, dimensions_t *dims_result);
/**
* @brief Gets pre-configured resize algorithm.
@@ -674,7 +727,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_dims(const ie_network_
* @param resize_alg_result The pointer to the resize algorithm used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_resize_algorithm(const ie_network_t *network, const char *input_name, \
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_resize_algorithm(const ie_network_t *network, const char *input_name, \
resize_alg_e *resize_alg_result);
/**
@@ -685,7 +738,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_resize_algorithm(const
* @param resize_algo Resize algorithm.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_resize_algorithm(ie_network_t *network, const char *input_name, const resize_alg_e resize_algo);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_set_input_resize_algorithm(ie_network_t *network, const char *input_name, const resize_alg_e resize_algo);
/**
* @brief Gets color format of the input data.
@@ -695,7 +748,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_resize_algorithm(ie_ne
* @param colformat_result The pointer to the color format used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_color_format(const ie_network_t *network, const char *input_name, colorformat_e *colformat_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_color_format(const ie_network_t *network, const char *input_name, colorformat_e *colformat_result);
/**
* @brief Changes the color format of the input data.
@@ -705,7 +758,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_color_format(const ie_networ
* @param color_format Color format of the input data.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_color_format(ie_network_t *network, const char *input_name, const colorformat_e color_format);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_set_color_format(ie_network_t *network, const char *input_name, const colorformat_e color_format);
/**
* @brief Helper method to collect all input shapes with the input names of the corresponding input data.
@@ -715,7 +768,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_color_format(ie_network_t *n
* @param shapes A pointer to the input_shapes.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_shapes(ie_network_t *network, input_shapes_t *shapes);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_shapes(ie_network_t *network, input_shapes_t *shapes);
/**
* @brief Run shape inference with new input shapes for the network.
@@ -724,7 +777,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_shapes(ie_network_t *n
* @param shapes A new input shapes to set for the network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_reshape(ie_network_t *network, const input_shapes_t shapes);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_reshape(ie_network_t *network, const input_shapes_t shapes);
/**
* @brief Gets the number of outputs for the network.
@@ -733,7 +786,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_reshape(ie_network_t *network, c
* @param size_result A number of the network's output information.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_outputs_number(const ie_network_t *network, size_t *size_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_outputs_number(const ie_network_t *network, size_t *size_result);
/**
* @brief Gets name corresponding to the "number". Use the ie_network_name_free() method to free memory.
@@ -743,7 +796,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_outputs_number(const ie_netw
* @param name Output name corresponding to the number.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_name(const ie_network_t *network, const size_t number, char **name);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_output_name(const ie_network_t *network, const size_t number, char **name);
/**
* @brief Gets a precision of the output data named "output_name".
@@ -753,7 +806,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_name(const ie_network
* @param prec_result A pointer to the precision used for output blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_precision(const ie_network_t *network, const char *output_name, precision_e *prec_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_output_precision(const ie_network_t *network, const char *output_name, precision_e *prec_result);
/**
* @brief Changes the precision of the output data named "output_name".
@@ -763,7 +816,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_precision(const ie_ne
* @param p A new precision of the output data to set (eg. precision_e.FP16).
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_output_precision(ie_network_t *network, const char *output_name, const precision_e p);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_set_output_precision(ie_network_t *network, const char *output_name, const precision_e p);
/**
* @brief Gets a layout of the output data.
@@ -773,7 +826,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_output_precision(ie_network_
* @param layout_result A pointer to the layout used for output blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_layout(const ie_network_t *network, const char *output_name, layout_e *layout_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_output_layout(const ie_network_t *network, const char *output_name, layout_e *layout_result);
/**
* @brief Changes the layout of the output data named "output_name".
@@ -783,7 +836,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_layout(const ie_netwo
* @param l A new layout of the output data to set.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_output_layout(ie_network_t *network, const char *output_name, const layout_e l);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_set_output_layout(ie_network_t *network, const char *output_name, const layout_e l);
/**
* @brief Gets dimensions/shape of the output data with reversed order.
@@ -793,30 +846,28 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_output_layout(ie_network_t *
* @param dims_result A pointer to the dimensions used for output blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_dims(const ie_network_t *network, const char *output_name, dimensions_t *dims_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_output_dims(const ie_network_t *network, const char *output_name, dimensions_t *dims_result);
/**
* @brief Releases memory occupied by input_shapes.
* @ingroup Network
* @param inputShapes A pointer to the input_shapes to free memory.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_input_shapes_free(input_shapes_t *inputShapes);
INFERENCE_ENGINE_C_API(void) ie_network_input_shapes_free(input_shapes_t *inputShapes);
/**
* @brief Releases memory occupied by input_name or output_name.
* @ingroup Network
* @param name A pointer to the input_name or output_name to free memory.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_name_free(char **name);
INFERENCE_ENGINE_C_API(void) ie_network_name_free(char **name);
/** @} */ // end of Network
// Blob
/**
* @defgroup Blob Blob
* Set of functions for working with memory from infer requests or for making new
* memory objects to be passed to InferRequests.
* @{
@@ -829,7 +880,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_name_free(char **name);
* @param blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **blob);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **blob);
/**
* @brief Creates a blob with the given tensor descriptor from the pointer to the pre-allocated memory.
@@ -840,7 +891,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory(const tensor_desc_t *te
* @param blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDesc, void *ptr, size_t size, ie_blob_t **blob);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDesc, void *ptr, size_t size, ie_blob_t **blob);
/**
* @brief Creates a blob describing given roi_t instance based on the given blob with pre-allocated memory.
@@ -850,7 +901,28 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory_from_preallocated(const
* @param blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory_with_roi(const ie_blob_t *inputBlob, const roi_t *roi, ie_blob_t **blob);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_make_memory_with_roi(const ie_blob_t *inputBlob, const roi_t *roi, ie_blob_t **blob);
/**
* @brief Creates an NV12 blob from two planes, Y and UV.
* @ingroup Blob
* @param y A pointer to the ie_blob_t instance that represents Y plane in NV12 color format.
* @param uv A pointer to the ie_blob_t instance that represents UV plane in NV12 color format.
* @param nv12Blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_make_memory_nv12(const ie_blob_t *y, const ie_blob_t *uv, ie_blob_t **nv12Blob);
/**
* @brief Creates an I420 blob from three planes: Y, U, and V.
* @ingroup Blob
* @param y A pointer to the ie_blob_t instance that represents Y plane in I420 color format.
* @param u A pointer to the ie_blob_t instance that represents U plane in I420 color format.
* @param v A pointer to the ie_blob_t instance that represents V plane in I420 color format.
* @param i420Blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_make_memory_i420(const ie_blob_t *y, const ie_blob_t *u, const ie_blob_t *v, ie_blob_t **i420Blob);
/**
* @brief Gets the total number of elements, which is a product of all the dimensions.
@@ -859,7 +931,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory_with_roi(const ie_blob_
* @param size_result The total number of elements.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_size(ie_blob_t *blob, int *size_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_size(ie_blob_t *blob, int *size_result);
/**
* @brief Gets the size of the current Blob in bytes.
@@ -868,15 +940,14 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_size(ie_blob_t *blob, int *size_res
* @param bsize_result The size of the current blob in bytes.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_byte_size(ie_blob_t *blob, int *bsize_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_byte_size(ie_blob_t *blob, int *bsize_result);
/**
* @brief Releases previously allocated data
* @ingroup Blob
* @param blob A pointer to the blob to free memory.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_deallocate(ie_blob_t **blob);
INFERENCE_ENGINE_C_API(void) ie_blob_deallocate(ie_blob_t **blob);
/**
* @brief Gets access to the allocated memory.
@@ -885,7 +956,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_deallocate(ie_blob_t **blob);
* @param blob_buffer A pointer to the copied data from the given blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_buffer(const ie_blob_t *blob, ie_blob_buffer_t *blob_buffer);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_buffer(const ie_blob_t *blob, ie_blob_buffer_t *blob_buffer);
/**
* @brief Gets read-only access to the allocated memory.
@@ -894,7 +965,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_buffer(const ie_blob_t *blob, i
* @param blob_cbuffer A pointer to the copied data from the given blob; the data is read-only.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_cbuffer(const ie_blob_t *blob, ie_blob_buffer_t *blob_cbuffer);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_cbuffer(const ie_blob_t *blob, ie_blob_buffer_t *blob_cbuffer);
/**
* @brief Gets dimensions of blob's tensor.
@@ -903,7 +974,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_cbuffer(const ie_blob_t *blob,
* @param dims_result A pointer to the dimensions of blob's tensor.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_dims(const ie_blob_t *blob, dimensions_t *dims_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_dims(const ie_blob_t *blob, dimensions_t *dims_result);
/**
* @brief Gets layout of blob's tensor.
@@ -912,7 +983,7 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_dims(const ie_blob_t *blob, dim
* @param layout_result A pointer to the layout of blob's tensor.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_layout(const ie_blob_t *blob, layout_e *layout_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_layout(const ie_blob_t *blob, layout_e *layout_result);
/**
* @brief Gets precision of blob's tensor.
@@ -921,7 +992,14 @@ INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_layout(const ie_blob_t *blob, l
* @param prec_result A pointer to the precision of blob's tensor.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_precision(const ie_blob_t *blob, precision_e *prec_result);
INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_precision(const ie_blob_t *blob, precision_e *prec_result);
/**
* @brief Releases the memory occupied by the ie_blob_t pointer.
* @ingroup Blob
* @param blob A pointer to the blob pointer to release memory.
*/
INFERENCE_ENGINE_C_API(void) ie_blob_free(ie_blob_t **blob);
/** @} */ // end of Blob

View File

@@ -0,0 +1,29 @@
# Hello Classification C Sample
This topic describes how to run the Hello Classification C sample application.
It demonstrates how to use the following Inference Engine C API in applications:
* Synchronous Infer Request API
* Input auto-resize API. It allows setting an image of the original size as input for a network with a different input size.
Resize will be performed automatically by the corresponding plugin just before inference.
There is also an API to crop a ROI object and set it as input without additional memory re-allocation.
Properly demonstrating this API requires running several networks in a pipeline, which is out of the scope of this sample; a minimal sketch of the call is shown below.
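As an illustration only, the sketch below wraps a region of an existing blob as a new input without copying memory; `input_blob` is a hypothetical, already created `ie_blob_t`, and the `roi_t` field order noted in the comment is an assumption rather than part of this sample:
```c
// Sketch (assumed roi_t field order: id, posX, posY, sizeX, sizeY):
// describe a 100x100 region at offset (50, 50) inside an existing blob.
roi_t roi = {0, 50, 50, 100, 100};
ie_blob_t *roi_blob = NULL;
IEStatusCode status = ie_blob_make_memory_with_roi(input_blob, &roi, &roi_blob);
if (status == OK) {
    // roi_blob shares memory with input_blob and can be passed to
    // ie_infer_request_set_blob() like any other input blob.
    ie_blob_free(&roi_blob);
}
```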
> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
## Running
To run the sample, you can use public or pre-trained models. To download the pre-trained models, use the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or go to [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
You can perform inference on an image using a trained AlexNet network on a GPU with the following command:
```sh
./hello_classification_c <path_to_model>/alexnet_fp32.xml <path_to_image>/cat.bmp GPU
```
## Sample Output
The application outputs the top-10 inference results.

View File

@@ -32,14 +32,18 @@ void classify_res_sort(struct classify_res *res, size_t n) {
struct classify_res *output_blob_to_classify_res(ie_blob_t *blob, size_t *n) {
dimensions_t output_dim;
ie_blob_get_dims(blob, &output_dim);
IEStatusCode status = ie_blob_get_dims(blob, &output_dim);
if (status != OK)
return NULL;
*n = output_dim.dims[1];
struct classify_res *cls = (struct classify_res *)malloc(sizeof(struct classify_res) * (*n));
IEStatusCode status;
ie_blob_buffer_t blob_cbuffer;
status = ie_blob_get_cbuffer(blob, &blob_cbuffer);
if (status != OK) {
free(cls);
return NULL;
}
float *blob_data = (float*) (blob_cbuffer.cbuffer);
size_t i;
@@ -71,50 +75,61 @@ int main(int argc, char **argv) {
const char *input_model = argv[1];
const char *input_image_path = argv[2];
const char *device_name = argv[3];
ie_core_t *core = NULL;
ie_network_t *network = NULL;
ie_executable_network_t *exe_network = NULL;
ie_infer_request_t *infer_request = NULL;
char *input_name = NULL, *output_name = NULL;
ie_blob_t *imgBlob = NULL, *output_blob = NULL;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 1. Load inference engine instance -------------------------------------
ie_core_t *core = NULL;
IEStatusCode status = ie_core_create("", &core);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
ie_network_t *network = NULL;
ie_core_read_network(core, input_model, NULL, &network);
status = ie_core_read_network(core, input_model, NULL, &network);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 3. Configure input & output ---------------------------------------------
// --------------------------- Prepare input blobs -----------------------------------------------------
char *input_name = NULL;
status = ie_network_get_input_name(network, 0, &input_name);
if (status != OK)
goto err;
/* Mark input as resizable by setting of a resize algorithm.
* In this case we will be able to set an input blob of any shape to an infer request.
* Resize and layout conversions are executed automatically during inference */
ie_network_set_input_resize_algorithm(network, input_name, RESIZE_BILINEAR);
ie_network_set_input_layout(network, input_name, NHWC);
ie_network_set_input_precision(network, input_name, U8);
status |= ie_network_set_input_resize_algorithm(network, input_name, RESIZE_BILINEAR);
status |= ie_network_set_input_layout(network, input_name, NHWC);
status |= ie_network_set_input_precision(network, input_name, U8);
if (status != OK)
goto err;
// --------------------------- Prepare output blobs ----------------------------------------------------
char *output_name = NULL;
status = ie_network_get_output_name(network, 0, &output_name);
ie_network_set_output_precision(network, output_name, FP32);
status |= ie_network_get_output_name(network, 0, &output_name);
status |= ie_network_set_output_precision(network, output_name, FP32);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 4. Loading model to the device ------------------------------------------
ie_config_t config = {NULL, NULL, NULL};
ie_executable_network_t *exe_network = NULL;
status = ie_core_load_network(core, network, device_name, &config, &exe_network);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 5. Create infer request -------------------------------------------------
ie_infer_request_t *infer_request = NULL;
status = ie_exec_network_create_infer_request(exe_network, &infer_request);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 6. Prepare input --------------------------------------------------------
@@ -126,21 +141,31 @@ int main(int argc, char **argv) {
tensor_desc_t tensorDesc = {NHWC, dimens, U8};
size_t size = img.mat_data_size;
// just wrap IplImage data in an ie_blob_t pointer without allocating new memory
ie_blob_t *imgBlob = NULL;
ie_blob_make_memory_from_preallocated(&tensorDesc, img.mat_data, size, &imgBlob);
status = ie_blob_make_memory_from_preallocated(&tensorDesc, img.mat_data, size, &imgBlob);
if (status != OK) {
image_free(&img);
goto err;
}
//infer_request accepts input blob of any size
ie_infer_request_set_blob(infer_request, input_name, imgBlob);
status = ie_infer_request_set_blob(infer_request, input_name, imgBlob);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 7. Do inference --------------------------------------------------------
/* Running the request synchronously */
ie_infer_request_infer(infer_request);
status = ie_infer_request_infer(infer_request);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 8. Process output ------------------------------------------------------
ie_blob_t *output_blob = NULL;
ie_infer_request_get_blob(infer_request, output_name, &output_blob);
status = ie_infer_request_get_blob(infer_request, output_name, &output_blob);
if (status != OK) {
image_free(&img);
goto err;
}
size_t class_num;
struct classify_res *cls = output_blob_to_classify_res(output_blob, &class_num);
@@ -156,7 +181,9 @@ int main(int argc, char **argv) {
// -----------------------------------------------------------------------------------------------------
ie_blob_deallocate(&output_blob);
free(cls);
ie_blob_free(&output_blob);
ie_blob_free(&imgBlob);
image_free(&img);
ie_infer_request_free(&infer_request);
ie_exec_network_free(&exe_network);
@@ -166,4 +193,23 @@ int main(int argc, char **argv) {
ie_core_free(&core);
return EXIT_SUCCESS;
err:
if (core)
ie_core_free(&core);
if (network)
ie_network_free(&network);
if (input_name)
ie_network_name_free(&input_name);
if (output_name)
ie_network_name_free(&output_name);
if (exe_network)
ie_exec_network_free(&exe_network);
if (infer_request)
ie_infer_request_free(&infer_request);
if (imgBlob)
ie_blob_free(&imgBlob);
if (output_blob)
ie_blob_free(&output_blob);
return EXIT_FAILURE;
}

View File

@@ -0,0 +1,15 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "hello_nv12_input_classification_c")
# create sample target
add_executable(${TARGET_NAME} main.c)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()

View File

@@ -0,0 +1,51 @@
# Hello NV12 Input Classification C Sample
This topic describes how to run the Hello NV12 Input Classification sample application.
The sample demonstrates how to use the new NV12 automatic input pre-processing API of the Inference Engine in your applications.
Refer to [Integrate the Inference Engine New Request API with Your Application](./docs/IE_DG/Integrate_with_customer_application_new_API.md) for details.
## How It Works
Upon start-up, the sample application reads command-line parameters, loads a network, and sets an
image in the NV12 color format to an Inference Engine plugin. When inference is done, the
application outputs data to the standard output stream.
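Concretely, the automatic pre-processing is enabled by two network settings applied before the model is loaded onto the device. The sketch below only illustrates the calls used in the full sample source further down, assuming `network` and `input_name` have already been obtained:
```c
// Sketch of the NV12 pre-processing setup used by this sample.
IEStatusCode status = OK;
// Resize the input to the network input size automatically during inference.
status |= ie_network_set_input_resize_algorithm(network, input_name, RESIZE_BILINEAR);
// Convert the NV12 input to the network's expected color format automatically.
status |= ie_network_set_color_format(network, input_name, NV12);
```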
The sample accepts an uncompressed image in the NV12 color format. To run the sample, you need to
convert your BGR/RGB image to NV12. To do this, you can use one of the widely available tools such
as FFmpeg\* or GStreamer\*. The following command shows how to convert an ordinary image into an
uncompressed NV12 image using FFmpeg:
```sh
ffmpeg -i cat.jpg -pix_fmt nv12 cat.yuv
```
> **NOTE**:
>
> * Because the sample reads raw image files, you should provide a correct image size along with the
> image path. The sample expects the logical size of the image, not the buffer size. For example,
> for a 640x480 BGR/RGB image, the corresponding NV12 logical image size is also 640x480, whereas the
> buffer size is 640x720 (see the sketch after this note).
> * The sample uses the input auto-resize API of the Inference Engine to simplify user-side
> pre-processing.
> * By default, this sample expects that network input has BGR channels order. If you trained your
> model to work with RGB order, you need to reconvert your model using the Model Optimizer tool
> with `--reverse_input_channels` argument specified. For more information about the argument,
> refer to **When to Reverse Input Channels** section of
> [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
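The buffer-size arithmetic from the note above can be written out directly. The following sketch is only an illustration of the NV12 layout and mirrors the computation done in the sample source:
```c
#include <stdio.h>

int main(void) {
    // NV12 stores a full-resolution Y plane followed by a half-height
    // interleaved UV plane, i.e. 3/2 bytes per pixel of the logical image.
    size_t width = 640, height = 480;                     // logical image size
    size_t y_plane_size  = width * height;                // 307200 bytes
    size_t uv_plane_size = width * (height / 2);          // 153600 bytes
    size_t buffer_size   = y_plane_size + uv_plane_size;  // equals 640 * 720 = 460800
    printf("NV12 buffer for %zux%zu image: %zu bytes\n", width, height, buffer_size);
    return 0;
}
```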
## Running
To run the sample, you can use public or pre-trained models. To download pre-trained models, use
the OpenVINO&trade; [Model Downloader](https://github.com/opencv/open_model_zoo/tree/master/model_downloader)
or go to [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the
> Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
You can perform inference on an NV12 image using a trained AlexNet network on CPU with the following command:
```sh
./hello_nv12_input_classification_c <path_to_model>/alexnet_fp32.xml <path_to_image>/cat.yuv 640x480 CPU
```
## Sample Output
The application outputs the top-10 inference results.

View File

@@ -0,0 +1,292 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <c_api/ie_c_api.h>
struct classify_res {
size_t class_id;
float probability;
};
void classify_res_sort(struct classify_res *res, size_t n) {
size_t i, j;
for (i = 0; i < n; ++i) {
for (j = i + 1; j < n; ++j) {
if (res[i].probability < res[j].probability) {
struct classify_res temp = res[i];
res[i] = res[j];
res[j] = temp;
} else if (res[i].probability == res[j].probability && res[i].class_id > res[j].class_id) {
struct classify_res temp = res[i];
res[i] = res[j];
res[j] = temp;
}
}
}
}
struct classify_res *output_blob_to_classify_res(ie_blob_t *blob, size_t *n) {
dimensions_t output_dim;
IEStatusCode status = ie_blob_get_dims(blob, &output_dim);
if (status != OK)
return NULL;
*n = output_dim.dims[1];
struct classify_res *cls = (struct classify_res *)malloc(sizeof(struct classify_res) * (*n));
ie_blob_buffer_t blob_cbuffer;
status = ie_blob_get_cbuffer(blob, &blob_cbuffer);
if (status != OK) {
free(cls);
return NULL;
}
float *blob_data = (float*) (blob_cbuffer.cbuffer);
size_t i;
for (i = 0; i < *n; ++i) {
cls[i].class_id = i;
cls[i].probability = blob_data[i];
}
return cls;
}
void print_classify_res(struct classify_res *cls, size_t n, const char *img_path) {
printf("\nImage %s\n", img_path);
printf("\nclassid probability\n");
printf("------- -----------\n");
size_t i;
for (i = 0; i < n; ++i) {
printf("%zu %f\n", cls[i].class_id, cls[i].probability);
}
}
size_t read_image_from_file(const char *img_path, unsigned char *img_data, size_t size) {
FILE *fp = fopen(img_path, "rb+");
size_t read_size = 0;
if (fp) {
fseek(fp, 0, SEEK_END);
if (ftell(fp) >= size) {
fseek(fp, 0, SEEK_SET);
read_size = fread(img_data, 1, size, fp);
}
fclose(fp);  // close only if the file was opened successfully
}
return read_size;
}
int parse_image_size(const char *size_str, size_t *width, size_t *height) {
const char *_size = size_str;
size_t _width = 0, _height = 0;
while (_size && *_size != 'x' && *_size != '\0') {
if ((*_size <= '9') && (*_size >= '0')) {
_width = (_width * 10) + (*_size - '0');
_size++;
} else {
goto err;
}
}
if (_size)
_size++;
while (_size && *_size != '\0') {
if ((*_size <= '9') && (*_size >= '0')) {
_height = (_height * 10) + (*_size - '0');
_size++;
} else {
goto err;
}
}
if (_width > 0 && _height > 0) {
if (_width % 2 == 0 && _height % 2 == 0) {
*width = _width;
*height = _height;
return 0;
} else {
printf("Unsupported image size, width and height must be even numbers \n");
return -1;
}
} else {
goto err;
}
err:
printf("Incorrect format of image size parameter, expected WIDTHxHEIGHT, "
"actual: %s\n", size_str);
return -1;
}
int main(int argc, char **argv) {
// ------------------------------ Parsing and validation of input args ---------------------------------
if (argc != 5) {
printf("Usage : ./hello_classification <path_to_model> <path_to_image> <image_size> <device_name>\n");
return EXIT_FAILURE;
}
size_t input_width = 0, input_height = 0, img_size = 0;
if (parse_image_size(argv[3], &input_width, &input_height) == -1)
return EXIT_FAILURE;
const char *input_model = argv[1];
const char *input_image_path = argv[2];
const char *device_name = argv[4];
unsigned char *img_data = NULL;
ie_core_t *core = NULL;
ie_network_t *network = NULL;
ie_executable_network_t *exe_network = NULL;
ie_infer_request_t *infer_request = NULL;
char *input_name = NULL, *output_name = NULL;
ie_blob_t *y_blob = NULL, *uv_blob = NULL, *nv12_blob = NULL, *output_blob = NULL;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 1. Load inference engine instance -------------------------------------
IEStatusCode status = ie_core_create("", &core);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
status = ie_core_read_network(core, input_model, NULL, &network);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 3. Configure input & output ---------------------------------------------
// --------------------------- Prepare input blobs -----------------------------------------------------
status = ie_network_get_input_name(network, 0, &input_name);
if (status != OK)
goto err;
status |= ie_network_set_input_layout(network, input_name, NCHW);
status |= ie_network_set_input_precision(network, input_name, U8);
// set input resize algorithm to enable input autoresize
status |= ie_network_set_input_resize_algorithm(network, input_name, RESIZE_BILINEAR);
// set input color format to NV12 to enable automatic input color format pre-processing
status |= ie_network_set_color_format(network, input_name, NV12);
if (status != OK)
goto err;
// --------------------------- Prepare output blobs ----------------------------------------------------
status |= ie_network_get_output_name(network, 0, &output_name);
status |= ie_network_set_output_precision(network, output_name, FP32);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 4. Loading model to the device ------------------------------------------
ie_config_t config = {NULL, NULL, NULL};
status = ie_core_load_network(core, network, device_name, &config, &exe_network);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 5. Create infer request -------------------------------------------------
status = ie_exec_network_create_infer_request(exe_network, &infer_request);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 6. Prepare input --------------------------------------------------------
// read image with size converted to NV12 data size: height(NV12) = 3 / 2 * logical height
img_size = input_width * (input_height * 3 / 2);
img_data = (unsigned char *)calloc(img_size, sizeof(unsigned char));
if (NULL == img_data)
goto err;
if (img_size != read_image_from_file(input_image_path, img_data, img_size))
goto err;
// --------------------------- Create a blob to hold the NV12 input data -------------------------------
// Create tensor descriptors for Y and UV blobs
dimensions_t y_dimens = {4, {1, 1, input_height, input_width}};
dimensions_t uv_dimens = {4, {1, 2, input_height / 2, input_width / 2}};
tensor_desc_t y_tensor = {NHWC, y_dimens, U8};
tensor_desc_t uv_tensor = {NHWC, uv_dimens, U8};
size_t y_plane_size = input_height * input_width;
size_t uv_plane_size = input_width * (input_height / 2);
// Create blob for Y plane from raw data
status |= ie_blob_make_memory_from_preallocated(&y_tensor, img_data, y_plane_size, &y_blob);
// Create blob for UV plane from raw data
status |= ie_blob_make_memory_from_preallocated(&uv_tensor, img_data + y_plane_size, uv_plane_size, &uv_blob);
// Create NV12Blob from Y and UV blobs
status |= ie_blob_make_memory_nv12(y_blob, uv_blob, &nv12_blob);
if (status != OK)
goto err;
status = ie_infer_request_set_blob(infer_request, input_name, nv12_blob);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 7. Do inference --------------------------------------------------------
/* Running the request synchronously */
status = ie_infer_request_infer(infer_request);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 8. Process output ------------------------------------------------------
status = ie_infer_request_get_blob(infer_request, output_name, &output_blob);
if (status != OK)
goto err;
size_t class_num;
struct classify_res *cls = output_blob_to_classify_res(output_blob, &class_num);
classify_res_sort(cls, class_num);
// Print classification results
size_t top = 10;
if (top > class_num) {
top = class_num;
}
printf("\nTop %zu results:\n", top);
print_classify_res(cls, top, input_image_path);
// -----------------------------------------------------------------------------------------------------
free(cls);
ie_blob_free(&output_blob);
ie_blob_free(&nv12_blob);
ie_blob_free(&uv_blob);
ie_blob_free(&y_blob);
ie_infer_request_free(&infer_request);
ie_exec_network_free(&exe_network);
ie_network_name_free(&input_name);
ie_network_name_free(&output_name);
ie_network_free(&network);
ie_core_free(&core);
free(img_data);
return EXIT_SUCCESS;
err:
if (core)
ie_core_free(&core);
if (network)
ie_network_free(&network);
if (input_name)
ie_network_name_free(&input_name);
if (output_name)
ie_network_name_free(&output_name);
if (exe_network)
ie_exec_network_free(&exe_network);
if (infer_request)
ie_infer_request_free(&infer_request);
if (nv12_blob)
ie_blob_free(&nv12_blob);
if (uv_blob)
ie_blob_free(&uv_blob);
if (y_blob)
ie_blob_free(&y_blob);
if (output_blob)
ie_blob_free(&output_blob);
if (img_data)
free(img_data);
return EXIT_FAILURE;
}

View File

@@ -0,0 +1,70 @@
# Object Detection C Sample SSD
This topic demonstrates how to run the Object Detection C sample application, which does inference using object detection
networks like SSD-VGG on Intel® Processors and Intel® HD Graphics.
> **NOTE:** This topic describes usage of the C implementation of the Object Detection Sample SSD. For the C++* implementation, refer to [Object Detection C++* Sample SSD](./inference-engine/samples/object_detection_sample_ssd/README.md), and for the Python* implementation, refer to [Object Detection Python* Sample SSD](./inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/README.md).
## How It Works
Upon start-up, the sample application reads command-line parameters and loads a network and an image to the Inference
Engine device. When inference is done, the application creates output images and outputs data to the standard output stream.
> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
## Running
Running the application with the `-h` option yields the following usage message:
```sh
./object_detection_sample_ssd_c -h
[ INFO ] InferenceEngine:
<version><number>
[ INFO ] Parsing input parameters
object_detection_sample_ssd_c [OPTION]
Options:
-h Print a usage message.
-i "<path>" Required. Path to an .bmp image.
-m "<path>" Required. Path to an .xml file with a trained model.
-l "<absolute_path>" Required for CPU custom layers. Absolute path to a shared library with the kernels implementations.
Or
-c "<absolute_path>" Required for GPU custom kernels. Absolute path to the .xml file with the kernels descriptions.
-d "<device>" Optional. Specify the target device to infer on (the list of available devices is shown below). Default value is CPU. Use "-d HETERO:<comma-separated_devices_list>" format to specify HETERO plugin. Sample will look for a suitable plugin for device specified
-g Path to the configuration file. Default value: "config".
```
Running the application with the empty list of options yields the usage message given above and an error message.
To run the sample, you can use public or pre-trained models. To download the pre-trained models, use the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or go to [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
For example, to do inference on a CPU with the OpenVINO&trade; toolkit person detection SSD models, run one of the following commands:
```sh
./object_detection_sample_ssd_c -i <path_to_image>/inputImage.bmp -m <path_to_model>/person-detection-retail-0013.xml -d CPU
```
or
```sh
./object_detection_sample_ssd_c -i <path_to_image>/inputImage1.bmp <path_to_image>/inputImage2.bmp ... -m <path_to_model>/person-detection-retail-0013.xml -d CPU
```
or
```sh
./object_detection_sample_ssd_c -i <path_to_image>/inputImage.jpg -m <path_to_model>/person-detection-retail-0002.xml -d CPU
```
## Sample Output
The application outputs several images (`out_0.bmp`, `out_1.bmp`, ... ) with detected objects enclosed in rectangles. It outputs the list of
classes of the detected objects along with the respective confidence values and the coordinates of the rectangles to the standard output stream.
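Each detection in the output blob is a run of seven float values, which is what the `objectSize` check in the sample source enforces. As a hedged illustration only, assuming the conventional DetectionOutput layout `[image_id, label, confidence, xmin, ymin, xmax, ymax]` and a `blob_cbuffer` obtained via `ie_blob_get_cbuffer()`, the records could be read like this:
```c
// Sketch only: iterate detection records of 7 floats each;
// maxProposalCount comes from the output dimensions, as in the sample.
const float *data = (const float *)blob_cbuffer.cbuffer;
int i;
for (i = 0; i < maxProposalCount; ++i) {
    const float *det = data + i * 7;
    if ((int)det[0] < 0)  // a negative image_id marks the end of valid detections
        break;
    // det[3..6] are assumed to hold normalized box corners: xmin, ymin, xmax, ymax
    printf("image=%d label=%d conf=%f box=(%f,%f)-(%f,%f)\n",
           (int)det[0], (int)det[1], det[2], det[3], det[4], det[5], det[6]);
}
```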
## See Also
* [Model Optimizer](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader)

View File

@@ -5,7 +5,6 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <sys/stat.h>
#include <c_api/ie_c_api.h>
#include "object_detection_sample_ssd.h"
@@ -105,13 +104,13 @@ void readInputFilesArgument(const char *arg) {
while (NULL != (ep = readdir(dp))) {
const char *fileName = ep->d_name;
if (strcmp(fileName, ".") == 0 || strcmp(fileName, "..") == 0) continue;
char *file_path = (char *)malloc(strlen(arg) + strlen(ep->d_name) + 1);
char *file_path = (char *)calloc(strlen(arg) + strlen(ep->d_name) + 2, sizeof(char));
strcpy(file_path, arg);
strcat(file_path, "/");
strcat(file_path, ep->d_name);
if (file_num == 0) {
file_paths = (char **)malloc(sizeof(char *));
file_paths = (char **)calloc(1, sizeof(char *));
file_paths[0] = file_path;
++file_num;
} else {
@@ -131,10 +130,10 @@ void readInputFilesArgument(const char *arg) {
closedir(dp);
dp = NULL;
} else {
char *file_path = malloc(strlen(arg));
char *file_path = (char *)calloc(strlen(arg) + 1, sizeof(char));
strcpy(file_path, arg);
if (file_num == 0) {
file_paths = (char **)malloc(sizeof(char *));
file_paths = (char **)calloc(1, sizeof(char *));
}
file_paths[file_num++] = file_path;
}
@@ -186,11 +185,11 @@ ie_config_t *parseConfig(const char *config_file, char comment) {
char key[256], value[256];
if (fscanf(file, "%s", key) != EOF && fscanf(file, "%s", value) != EOF) {
char *cfg_name = (char *)malloc(strlen(key));
char *cfg_value = (char *)malloc(strlen(value));
char *cfg_name = (char *)calloc(strlen(key) + 1, sizeof(char));
char *cfg_value = (char *)calloc(strlen(value) + 1, sizeof(char));
strcpy(cfg_name, key);
strcpy(cfg_value, value);
ie_config_t *cfg_t = (ie_config_t *)malloc(sizeof(ie_config_t));
ie_config_t *cfg_t = (ie_config_t *)calloc(1, sizeof(ie_config_t));
cfg_t->name = cfg_name;
cfg_t->value = cfg_value;
cfg_t->next = NULL;
@@ -202,11 +201,11 @@ ie_config_t *parseConfig(const char *config_file, char comment) {
if (strlen(key) == 0 || key[0] == comment) {
continue;
}
char *cfg_name = (char *)malloc(strlen(key));
char *cfg_value = (char *)malloc(strlen(value));
char *cfg_name = (char *)calloc(strlen(key) + 1, sizeof(char));
char *cfg_value = (char *)calloc(strlen(value) + 1, sizeof(char));
strcpy(cfg_name, key);
strcpy(cfg_value, value);
ie_config_t *cfg_t = (ie_config_t *)malloc(sizeof(ie_config_t));
ie_config_t *cfg_t = (ie_config_t *)calloc(1, sizeof(ie_config_t));
cfg_t->name = cfg_name;
cfg_t->value = cfg_value;
cfg_t->next = NULL;
@@ -256,7 +255,7 @@ void int2str(char *str, int num) {
str[1] = '\0';
return;
}
while (num != 0) {
str[i++] = num % 10 + '0';
num = num / 10;
@@ -273,17 +272,28 @@ void int2str(char *str, int num) {
int main(int argc, char **argv) {
/** This sample covers certain topology and cannot be generalized for any object detection one **/
ie_version_t version = ie_c_api_version();
printf("%sInferenceEngine: \n", info);
printf("%s\n", ie_c_api_version());
printf("%s\n", version.api_version);
ie_version_free(&version);
char **argv_temp =(char **)malloc(sizeof(char *) * argc);
char **argv_temp =(char **)calloc(argc, sizeof(char *));
int i, j;
for (i = 0; i < argc; ++i) {
argv_temp[i] = argv[i];
}
char *input_weight = NULL, *imageInputName = NULL, *imInfoInputName = NULL, *output_name = NULL;
ie_core_t *core = NULL;
ie_network_t *network = NULL;
ie_executable_network_t *exe_network = NULL;
ie_infer_request_t *infer_request = NULL;
ie_blob_t *imageInput = NULL, *output_blob = NULL;
// --------------------------- 1. Parsing and validation of input args ---------------------------------
if (ParseAndCheckCommandLine(argc, argv) < 0) {
return -1;
free(argv_temp);
return EXIT_FAILURE;
}
// -----------------------------------------------------------------------------------------------------
@@ -292,19 +302,22 @@ int main(int argc, char **argv) {
parseInputFilesArguments(argc, argv_temp);
if (!file_num) {
printf("No suitable images were found\n");
return -1;
free(argv_temp);
return EXIT_FAILURE;
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- 3. Load inference engine ------------------------------------------------
printf("%sLoading Inference Engine\n", info);
ie_core_t *core = NULL;
IEStatusCode status = ie_core_create("", &core);
assert(core);
if (status != OK)
goto err;
ie_core_versions_t ver;
printf("%sDevice info: \n", info);
ie_core_get_versions(core, device_name, &ver);
status = ie_core_get_versions(core, device_name, &ver);
if (status != OK)
goto err;
for (i = 0; i < ver.num_vers; ++i) {
printf(" %s\n", ver.versions[i].device_name);
printf(" %s version ......... %zu.%zu\n", ver.versions[i].description, ver.versions[i].major, ver.versions[i].minor);
@@ -314,30 +327,33 @@ int main(int argc, char **argv) {
if (custom_cpu_library_msg) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
ie_core_add_extension(core, custom_cpu_library_msg, "CPU");
status = ie_core_add_extension(core, custom_cpu_library_msg, "CPU");
if (status != OK)
goto err;
printf("%sCPU Extension loaded: %s\n", info, custom_cpu_library_msg);
}
if (custom_cldnn_msg) {
// clDNN Extensions are loaded from an .xml description and OpenCL kernel files
ie_config_t cfg = {"CONFIG_FILE", custom_cldnn_msg, NULL};
ie_core_set_config(core, &cfg, "GPU");
status = ie_core_set_config(core, &cfg, "GPU");
if (status != OK)
goto err;
printf("%sGPU Extension loaded: %s\n", info, custom_cldnn_msg);
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- 4. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
char *input_weight = (char *)malloc(strlen(input_model) + 1);
input_weight = (char *)calloc(strlen(input_model) + 1, sizeof(char));
strncpy(input_weight, input_model, strlen(input_model)-4);
input_weight[strlen(input_model)-4] = '\0';
strcat(input_weight, ".bin");
printf("%sLoading network files:\n", info);
printf("\t%s\n", input_model);
printf("\t%s\n", input_weight);
ie_network_t *network = NULL;
ie_core_read_network(core, input_model, input_weight, &network);
assert(network);
status = ie_core_read_network(core, input_model, input_weight, &network);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 5. Prepare input blobs --------------------------------------------------
@@ -346,9 +362,9 @@ int main(int argc, char **argv) {
/** SSD network has one input and one output **/
size_t input_num = 0;
status = ie_network_get_inputs_number(network, &input_num);
if (input_num != 1 && input_num != 2) {
if (status != OK || (input_num != 1 && input_num != 2)) {
printf("Sample supports topologies only with 1 or 2 inputs\n");
return -1;
goto err;
}
/**
@@ -358,7 +374,6 @@ int main(int argc, char **argv) {
* Although object_detection_sample_ssd's main task is to support clean SSD, it could score
* the networks with two inputs as well. For such networks imInfoInputName will contain the "second" input name.
*/
char *imageInputName = NULL, *imInfoInputName = NULL;
size_t input_width = 0, input_height = 0;
/** Stores input image **/
@@ -366,9 +381,11 @@ int main(int argc, char **argv) {
/** Iterating over all input blobs **/
for (i = 0; i < input_num; ++i) {
char *name = NULL;
ie_network_get_input_name(network, i, &name);
status |= ie_network_get_input_name(network, i, &name);
dimensions_t input_dim;
ie_network_get_input_dims(network, name, &input_dim);
status |= ie_network_get_input_dims(network, name, &input_dim);
if (status != OK)
goto err;
/** Working with first input tensor that stores image **/
if (input_dim.ranks == 4) {
@@ -377,30 +394,35 @@ int main(int argc, char **argv) {
input_width = input_dim.dims[3];
/** Creating first input blob **/
ie_network_set_input_precision(network, name, U8);
status = ie_network_set_input_precision(network, name, U8);
if (status != OK)
goto err;
} else if (input_dim.ranks == 2) {
imInfoInputName = name;
ie_network_set_input_precision(network, name, FP32);
if(input_dim.dims[1] != 3 && input_dim.dims[1] != 6) {
status = ie_network_set_input_precision(network, name, FP32);
if (status != OK || (input_dim.dims[1] != 3 && input_dim.dims[1] != 6)) {
printf("Invalid input info. Should be 3 or 6 values length\n");
return -1;
goto err;
}
}
}
if (imageInputName == NULL) {
ie_network_get_input_name(network, 0, &imageInputName);
status = ie_network_get_input_name(network, 0, &imageInputName);
if (status != OK)
goto err;
dimensions_t input_dim;
ie_network_get_input_dims(network, imageInputName, &input_dim);
status = ie_network_get_input_dims(network, imageInputName, &input_dim);
if (status != OK)
goto err;
input_height = input_dim.dims[2];
input_width = input_dim.dims[3];
}
/** Collect images data **/
c_mat_t *originalImages = (c_mat_t *)malloc(file_num * sizeof(c_mat_t));
c_mat_t *images = (c_mat_t *)malloc(file_num * sizeof(c_mat_t));
c_mat_t *originalImages = (c_mat_t *)calloc(file_num, sizeof(c_mat_t));
c_mat_t *images = (c_mat_t *)calloc(file_num, sizeof(c_mat_t));
int image_num = 0;
for (i = 0; i < file_num; ++i) {
c_mat_t img = {NULL, 0, 0, 0, 0, 0};
@@ -410,13 +432,13 @@ int main(int argc, char **argv) {
}
/** Store image data **/
c_mat_t resized_img = {NULL, 0, 0, 0, 0, 0};
if (input_width == img.mat_width && input_height == img.mat_height) {
if ((input_width == img.mat_width) && (input_height == img.mat_height)) {
resized_img.mat_data_size = img.mat_data_size;
resized_img.mat_channels = img.mat_channels;
resized_img.mat_width = img.mat_width;
resized_img.mat_height = img.mat_height;
resized_img.mat_type = img.mat_type;
resized_img.mat_data = malloc(resized_img.mat_data_size);
resized_img.mat_data = calloc(1, resized_img.mat_data_size);
for (j = 0; j < resized_img.mat_data_size; ++j)
resized_img.mat_data[j] = img.mat_data[j];
} else {
@@ -435,17 +457,26 @@ int main(int argc, char **argv) {
if (!image_num) {
printf("Valid input images were not found!\n");
return -1;
free(originalImages);
free(images);
goto err;
}
input_shapes_t shapes;
ie_network_get_input_shapes(network, &shapes);
status = ie_network_get_input_shapes(network, &shapes);
if (status != OK)
goto err;
shapes.shapes[0].shape.dims[0] = image_num;
ie_network_reshape(network, shapes);
status = ie_network_reshape(network, shapes);
if (status != OK)
goto err;
ie_network_input_shapes_free(&shapes);
input_shapes_t shapes2;
ie_network_get_input_shapes(network, &shapes2);
status = ie_network_get_input_shapes(network, &shapes2);
if (status != OK)
goto err;
size_t batchSize = shapes2.shapes[0].shape.dims[0];
ie_network_input_shapes_free(&shapes2);
printf("%sBatch size is %zu\n", info, batchSize);
@@ -455,22 +486,24 @@ int main(int argc, char **argv) {
printf("%sPreparing output blobs\n", info);
size_t output_num = 0;
ie_network_get_outputs_number(network, &output_num);
status = ie_network_get_outputs_number(network, &output_num);
if (!output_num) {
if (status != OK || !output_num) {
printf("Can't find a DetectionOutput layer in the topology\n");
return -1;
goto err;
}
char *output_name = NULL;
ie_network_get_output_name(network, output_num-1, &output_name);
status = ie_network_get_output_name(network, output_num-1, &output_name);
if (status != OK)
goto err;
dimensions_t output_dim;
ie_network_get_output_dims(network, output_name, &output_dim);
status = ie_network_get_output_dims(network, output_name, &output_dim);
if (status != OK)
goto err;
if (output_dim.ranks != 4) {
printf("Incorrect output dimensions for SSD model\n");
return -1;
goto err;
}
const int maxProposalCount = (int)output_dim.dims[2];
@@ -478,51 +511,60 @@ int main(int argc, char **argv) {
if (objectSize != 7) {
printf("Output item should have 7 as a last dimension\n");
return -1;
goto err;
}
/** Set the precision of output data provided by the user, should be called before load of the network to the device **/
ie_network_set_output_precision(network, output_name, FP32);
status = ie_network_set_output_precision(network, output_name, FP32);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 7. Loading model to the device ------------------------------------------
printf("%sLoading model to the device\n", info);
ie_executable_network_t *exe_network = NULL;
if (config_msg) {
ie_config_t * config = parseConfig(config_msg, '#');
ie_core_load_network(core, network, device_name, config, &exe_network);
config_free(config);
status = ie_core_load_network(core, network, device_name, config, &exe_network);
if (status != OK) {
config_free(config);
goto err;
}
} else {
ie_config_t cfg = {NULL, NULL, NULL};
ie_core_load_network(core, network, device_name, &cfg, &exe_network);
status = ie_core_load_network(core, network, device_name, &cfg, &exe_network);
if (status != OK)
goto err;
}
assert(exe_network);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 8. Create infer request -------------------------------------------------
printf("%sCreate infer request\n", info);
ie_infer_request_t *infer_request = NULL;
ie_exec_network_create_infer_request(exe_network, &infer_request);
assert(infer_request);
status = ie_exec_network_create_infer_request(exe_network, &infer_request);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 9. Prepare input --------------------------------------------------------
/** Creating input blob **/
ie_blob_t *imageInput = NULL;
ie_infer_request_get_blob(infer_request, imageInputName, &imageInput);
assert(imageInput);
status = ie_infer_request_get_blob(infer_request, imageInputName, &imageInput);
if (status != OK)
goto err;
/** Filling input tensor with images. First b channel, then g and r channels **/
dimensions_t input_tensor_dims;
ie_blob_get_dims(imageInput, &input_tensor_dims);
status = ie_blob_get_dims(imageInput, &input_tensor_dims);
if (status != OK)
goto err;
size_t num_channels = input_tensor_dims.dims[1];
size_t image_size = input_tensor_dims.dims[3] * input_tensor_dims.dims[2];
ie_blob_buffer_t blob_buffer;
ie_blob_get_buffer(imageInput, &blob_buffer);
status = ie_blob_get_buffer(imageInput, &blob_buffer);
if (status != OK)
goto err;
unsigned char *data = (unsigned char *)(blob_buffer.buffer);
/** Iterate over all input images **/
@@ -540,16 +582,21 @@ int main(int argc, char **argv) {
image_free(&images[image_id]);
}
free(images);
ie_blob_free(&imageInput);
if (imInfoInputName != NULL) {
ie_blob_t *input2 = NULL;
ie_infer_request_get_blob(infer_request, imInfoInputName, &input2);
status = ie_infer_request_get_blob(infer_request, imInfoInputName, &input2);
dimensions_t imInfoDim;
ie_blob_get_dims(input2, &imInfoDim);
status |= ie_blob_get_dims(input2, &imInfoDim);
//Fill input tensor with values
ie_blob_buffer_t info_blob_buffer;
ie_blob_get_buffer(input2, &info_blob_buffer);
status |= ie_blob_get_buffer(input2, &info_blob_buffer);
if (status != OK) {
ie_blob_free(&input2);
goto err;
}
float *p = (float *)(info_blob_buffer.buffer);
for (image_id = 0; image_id < batchSize; ++image_id) {
p[image_id * imInfoDim.dims[1] + 0] = (float)input_height;
@@ -559,32 +606,37 @@ int main(int argc, char **argv) {
p[image_id * imInfoDim.dims[1] + k] = 1.0f; // all scale factors are set to 1.0
}
}
ie_blob_free(&input2);
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- 10. Do inference ---------------------------------------------------------
printf("%sStart inference\n", info);
ie_infer_request_infer_async(infer_request);
ie_infer_request_wait(infer_request, -1);
status = ie_infer_request_infer_async(infer_request);
status |= ie_infer_request_wait(infer_request, -1);
if (status != OK)
goto err;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 11. Process output -------------------------------------------------------
printf("%sProcessing output blobs\n", info);
ie_blob_t *output_blob = NULL;
ie_infer_request_get_blob(infer_request, output_name, &output_blob);
assert(output_blob);
status = ie_infer_request_get_blob(infer_request, output_name, &output_blob);
if (status != OK)
goto err;
ie_blob_buffer_t output_blob_buffer;
ie_blob_get_cbuffer(output_blob, &output_blob_buffer);
status = ie_blob_get_cbuffer(output_blob, &output_blob_buffer);
if (status != OK)
goto err;
const float* detection = (float *)(output_blob_buffer.cbuffer);
int **classes = (int **)malloc(image_num * sizeof(int *));
rectangle_t **boxes = (rectangle_t **)malloc(image_num * sizeof(rectangle_t *));
int *object_num = (int *)malloc(image_num * sizeof(int));
int **classes = (int **)calloc(image_num, sizeof(int *));
rectangle_t **boxes = (rectangle_t **)calloc(image_num, sizeof(rectangle_t *));
int *object_num = (int *)calloc(image_num, sizeof(int));
for ( i = 0; i < image_num; ++i) {
classes[i] = (int *)malloc(maxProposalCount * sizeof(int));
boxes[i] = (rectangle_t *)malloc(maxProposalCount * sizeof(rectangle_t));
classes[i] = (int *)calloc(maxProposalCount, sizeof(int));
boxes[i] = (rectangle_t *)calloc(maxProposalCount, sizeof(rectangle_t));
object_num[i] = 0;
}
@@ -625,10 +677,10 @@ int main(int argc, char **argv) {
image_add_rectangles(&originalImages[batch_id], boxes[batch_id], classes[batch_id], object_num[batch_id], 2);
}
const char *out = "out_";
char *img_path = (char *)malloc(strlen(out) + 1);
char str_num[16] = {0};
strcpy(img_path, out);
int2str(str_num, batch_id);
char *img_path = (char *)calloc(strlen(out) + strlen(str_num) + strlen(".bmp") + 1, sizeof(char));
strcpy(img_path, out);
strcat(img_path, str_num);
strcat(img_path, ".bmp");
image_save(img_path, &originalImages[batch_id]);
@@ -648,6 +700,7 @@ int main(int argc, char **argv) {
free(classes);
free(boxes);
free(object_num);
ie_blob_free(&output_blob);
ie_infer_request_free(&infer_request);
ie_exec_network_free(&exe_network);
ie_network_free(&network);
@@ -657,6 +710,28 @@ int main(int argc, char **argv) {
ie_network_name_free(&output_name);
free(input_weight);
free(argv_temp);
return 0;
return EXIT_SUCCESS;
err:
free(argv_temp);
if (input_weight)
free(input_weight);
if (core)
ie_core_free(&core);
if (network)
ie_network_free(&network);
if (imageInputName)
ie_network_name_free(&imageInputName);
if (imInfoInputName)
ie_network_name_free(&imInfoInputName);
if (output_name)
ie_network_name_free(&output_name);
if (exe_network)
ie_exec_network_free(&exe_network);
if (imageInput)
ie_blob_free(&imageInput);
if (output_blob)
ie_blob_free(&output_blob);
return EXIT_FAILURE;
}
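Taken together, these sample changes establish one error-handling convention: every C API call returns an `IEStatusCode`, related calls are folded into a single check with `|=`, and any failure jumps to the shared `err:` label that releases whatever has been allocated so far. A minimal standalone sketch of the pattern, using only API calls that appear in this diff (the model paths are placeholders):

```c
#include <stdlib.h>
#include "c_api/ie_c_api.h"

int main(void) {
    IEStatusCode status = OK;
    ie_core_t *core = NULL;
    ie_network_t *network = NULL;

    status = ie_core_create("", &core);
    if (status != OK)
        goto err;

    status = ie_core_read_network(core, "model.xml", "model.bin", &network);
    if (status != OK)
        goto err;

    /* Fold related queries into one status check with |= */
    size_t inputs = 0, outputs = 0;
    status = ie_network_get_inputs_number(network, &inputs);
    status |= ie_network_get_outputs_number(network, &outputs);
    if (status != OK)
        goto err;

    ie_network_free(&network);
    ie_core_free(&core);
    return EXIT_SUCCESS;
err:
    /* Release in reverse order; each pointer is checked before freeing */
    if (network)
        ie_network_free(&network);
    if (core)
        ie_core_free(&core);
    return EXIT_FAILURE;
}
```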

View File

@@ -2,6 +2,10 @@
# SPDX-License-Identifier: Apache-2.0
#
if(ENABLE_LTO)
ie_enable_lto()
endif()
set(TARGET_NAME inference_engine_c_api)
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
@@ -24,10 +28,9 @@ export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/ta
# install
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH}
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
COMPONENT core)
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/
DESTINATION ${IE_CPACK_IE_DIR}/include/

View File

@@ -17,6 +17,7 @@
#include <ie_extension.h>
#include "inference_engine.hpp"
#include "details/ie_exception.hpp"
#include "ie_compound_blob.h"
#include "c_api/ie_c_api.h"
namespace IE = InferenceEngine;
@@ -86,6 +87,7 @@ std::map<IE::Precision, precision_e> precision_map = {{IE::Precision::UNSPECIFIE
{IE::Precision::U16, precision_e::U16},
{IE::Precision::I32, precision_e::I32},
{IE::Precision::I64, precision_e::I64},
{IE::Precision::U64, precision_e::U64},
{IE::Precision::BIN, precision_e::BIN},
{IE::Precision::CUSTOM, precision_e::CUSTOM}};
@@ -112,7 +114,8 @@ std::map<IE::ColorFormat, colorformat_e> colorformat_map = {{IE::ColorFormat::RA
{IE::ColorFormat::BGR, colorformat_e::BGR},
{IE::ColorFormat::BGRX, colorformat_e::BGRX},
{IE::ColorFormat::RGBX, colorformat_e::RGBX},
{IE::ColorFormat::NV12, colorformat_e::NV12}};
{IE::ColorFormat::NV12, colorformat_e::NV12},
{IE::ColorFormat::I420, colorformat_e::I420}};
/**
*@brief convert the config type data to map type data.
@@ -147,7 +150,7 @@ void parameter2IEparam(const IE::Parameter param, ie_param_t *ie_param) {
if (param.is<std::string>()) {
std::unique_ptr<char> params_temp(new char[param.as<std::string>().length() + 1]);
ie_param->params = params_temp.release();
snprintf(ie_param->params, param.as<std::string>().length() + 1, "%s", param.as<std::string>().c_str());
memcpy(ie_param->params, param.as<std::string>().c_str(), param.as<std::string>().length() + 1);
} else if (param.is<std::vector<std::string>>()) {
auto val = param.as<std::vector<std::string>>();
if (val.size() > 0) {
@@ -158,11 +161,11 @@ void parameter2IEparam(const IE::Parameter param, ie_param_t *ie_param) {
std::unique_ptr<char[]> params_temp(new char[tmp.length() + 1]);
ie_param->params = params_temp.release();
snprintf(ie_param->params, tmp.length() + 1, "%s", tmp.c_str());
memcpy(ie_param->params, tmp.c_str(), tmp.length() + 1);
} else {
std::unique_ptr<char[]> params_temp(new char[1]);
ie_param->params = params_temp.release();
snprintf(ie_param->params, sizeof(char), "%s", "");
memcpy(ie_param->params, "", sizeof(char));
}
} else if (param.is<std::tuple<unsigned int, unsigned int >>()) {
auto val = param.as<std::tuple<unsigned int, unsigned int >>();
@@ -179,19 +182,34 @@ void parameter2IEparam(const IE::Parameter param, ie_param_t *ie_param) {
}
}
const char *ie_c_api_version(void) {
ie_version_t ie_c_api_version(void) {
auto version = IE::GetInferenceEngineVersion();
std::string version_str = std::to_string(version->apiVersion.major) + ".";
version_str += std::to_string(version->apiVersion.minor) + ".";
version_str += version->buildNumber;
ie_version_t version_res;
std::unique_ptr<char[]> ver(new char[version_str.length() + 1]);
char *version_res = ver.release();
snprintf(version_res, version_str.length() + 1, "%s", version_str.c_str());
version_res.api_version = ver.release();
memcpy(version_res.api_version, version_str.c_str(), version_str.length() + 1);
return version_res;
}
void ie_version_free(ie_version_t *version) {
if (version) {
delete[] version->api_version;
version->api_version = NULL;
}
}
void ie_param_free(ie_param_t *param) {
if (param && param->params) {
delete[] param->params;
param->params = NULL;
}
}
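Because `ie_c_api_version()` now returns an `ie_version_t` by value instead of a raw string, the caller owns the embedded `api_version` buffer and must release it with the new `ie_version_free()`. A short usage sketch (hypothetical caller code, consistent with the signatures above):

```c
#include <stdio.h>
#include "c_api/ie_c_api.h"

void print_api_version(void) {
    ie_version_t version = ie_c_api_version();    /* struct returned by value */
    printf("IE C API version: %s\n", version.api_version);
    ie_version_free(&version);                    /* frees and nulls api_version */
}
```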
IEStatusCode ie_core_create(const char *xml_config_file, ie_core_t **core) {
if (xml_config_file == nullptr || core == nullptr) {
return IEStatusCode::GENERAL_ERROR;
@@ -204,20 +222,18 @@ IEStatusCode ie_core_create(const char *xml_config_file, ie_core_t **core) {
*core = tmp.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
IEStatusCode ie_core_free(ie_core_t **core) {
void ie_core_free(ie_core_t **core) {
if (core) {
delete *core;
*core = NULL;
}
return IEStatusCode::OK;
}
IEStatusCode ie_core_get_versions(const ie_core_t *core, const char *device_name, ie_core_versions_t *versions) {
@@ -240,7 +256,10 @@ IEStatusCode ie_core_get_versions(const ie_core_t *core, const char *device_name
std::map<std::string, IE::Version>::iterator iter = IEversions.begin();
for (size_t i = 0; i < num; ++i, ++iter) {
vers_ptrs[i].device_name = iter->first.c_str();
std::unique_ptr<char[]> deviceName(new char[iter->first.length() + 1]);
char *_deviceName = deviceName.release();
memcpy(_deviceName, iter->first.c_str(), iter->first.length() + 1);
vers_ptrs[i].device_name = _deviceName;
vers_ptrs[i].major = iter->second.apiVersion.major;
vers_ptrs[i].minor = iter->second.apiVersion.minor;
vers_ptrs[i].build_number = iter->second.buildNumber;
@@ -249,20 +268,22 @@ IEStatusCode ie_core_get_versions(const ie_core_t *core, const char *device_name
versions->versions = vers_ptrs.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
IEStatusCode ie_core_versions_free(ie_core_versions_t *vers) {
void ie_core_versions_free(ie_core_versions_t *vers) {
if (vers) {
for (int i = 0; i < vers->num_vers; ++i) {
delete[] const_cast<char *>(vers->versions[i].device_name);
vers->versions[i].device_name = NULL;
}
delete[] vers->versions;
vers->versions = NULL;
}
return IEStatusCode::OK;
}
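With `device_name` now deep-copied into every entry, `ie_core_versions_free()` becomes the required release path. A hedged consumption sketch (`core` is assumed to be an already created `ie_core_t`; the field names match those used above):

```c
ie_core_versions_t vers;
if (ie_core_get_versions(core, "CPU", &vers) == OK) {
    for (size_t i = 0; i < vers.num_vers; ++i)
        printf("%s build %s\n", vers.versions[i].device_name,
               vers.versions[i].build_number);
    ie_core_versions_free(&vers);  /* now also deletes each copied device_name */
}
```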
IEStatusCode ie_core_read_network(ie_core_t *core, const char *xml, const char *weights_file, ie_network_t **network) {
@@ -282,7 +303,7 @@ IEStatusCode ie_core_read_network(ie_core_t *core, const char *xml, const char *
*network = network_result.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -308,7 +329,7 @@ IEStatusCode ie_core_load_network(ie_core_t *core, const ie_network_t *network,
*exe_network = exe_net.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -333,7 +354,7 @@ IEStatusCode ie_core_set_config(ie_core_t *core, const ie_config_t *ie_core_conf
core->object.SetConfig(conf_map, deviceName);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -352,7 +373,7 @@ IEStatusCode ie_core_register_plugin(ie_core_t *core, const char *plugin_name, c
core->object.RegisterPlugin(plugin_name, device_name);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -371,7 +392,7 @@ IEStatusCode ie_core_register_plugins(ie_core_t *core, const char *xml_config_fi
core->object.RegisterPlugins(xml_config_file);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -390,7 +411,7 @@ IEStatusCode ie_core_unregister_plugin(ie_core_t *core, const char *device_name)
core->object.UnregisterPlugin(device_name);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -411,7 +432,7 @@ IEStatusCode ie_core_add_extension(ie_core_t *core, const char *extension_path,
core->object.AddExtension(extension, device_name);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -431,7 +452,7 @@ IEStatusCode ie_core_get_metric(const ie_core_t *core, const char *device_name,
parameter2IEparam(param, param_result);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -453,20 +474,58 @@ IEStatusCode ie_core_get_config(const ie_core_t *core, const char *device_name,
parameter2IEparam(param, param_result);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
IEStatusCode ie_exec_network_free(ie_executable_network_t **ie_exec_network) {
IEStatusCode ie_core_get_available_devices(const ie_core_t *core, ie_available_devices_t *avai_devices) {
if (core == nullptr || avai_devices == nullptr)
return IEStatusCode::GENERAL_ERROR;
try {
std::vector<std::string> _devices = core->object.GetAvailableDevices();
avai_devices->num_devices = _devices.size();
std::unique_ptr<char*[]> dev_ptrs(new char*[avai_devices->num_devices]);
assert(dev_ptrs);
for (size_t i = 0; i < avai_devices->num_devices; ++i) {
std::unique_ptr<char[]> device_name(new char[_devices[i].length() + 1]);
assert(device_name);
dev_ptrs[i] = device_name.release();
memcpy(dev_ptrs[i], _devices[i].c_str(), _devices[i].length() + 1);
}
avai_devices->devices = dev_ptrs.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK;
}
void ie_core_available_devices_free(ie_available_devices_t *avai_devices) {
if (avai_devices->devices) {
for (int i = 0; i < avai_devices->num_devices; ++i) {
if (avai_devices->devices[i]) {
delete[] avai_devices->devices[i];
avai_devices->devices[i] = NULL;
}
}
delete[] avai_devices->devices;
avai_devices->devices = NULL;
avai_devices->num_devices = 0;
}
}
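The new enumeration pair is consumed the same way (a sketch under the same assumption that `core` is valid):

```c
ie_available_devices_t devices;
if (ie_core_get_available_devices(core, &devices) == OK) {
    for (size_t i = 0; i < devices.num_devices; ++i)
        printf("available device: %s\n", devices.devices[i]);
    ie_core_available_devices_free(&devices);  /* frees each name, then the array */
}
```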
void ie_exec_network_free(ie_executable_network_t **ie_exec_network) {
if (ie_exec_network) {
delete *ie_exec_network;
*ie_exec_network = NULL;
}
return IEStatusCode::OK;
}
IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t *ie_exec_network, ie_infer_request_t **request) {
@@ -482,7 +541,7 @@ IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t *ie_ex
*request = req.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -502,7 +561,7 @@ IEStatusCode ie_exec_network_get_metric(const ie_executable_network_t *ie_exec_n
parameter2IEparam(parameter, param_result);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -522,7 +581,7 @@ IEStatusCode ie_exec_network_set_config(ie_executable_network_t *ie_exec_network
ie_exec_network->object.SetConfig(conf_map);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -542,18 +601,35 @@ IEStatusCode ie_exec_network_get_config(const ie_executable_network_t *ie_exec_n
parameter2IEparam(parameter, param_result);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
IEStatusCode ie_network_free(ie_network_t **network) {
void ie_network_free(ie_network_t **network) {
if (network) {
delete *network;
*network = NULL;
}
}
IEStatusCode ie_network_get_name(const ie_network_t *network, char **name) {
if (network == nullptr || name == nullptr) {
return IEStatusCode::GENERAL_ERROR;
}
try {
std::string _name = network->object.getName();
std::unique_ptr<char[]> netName(new char[_name.length() + 1]);
*name = netName.release();
memcpy(*name, _name.c_str(), _name.length() + 1);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK;
}
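The newly added `ie_network_get_name()` hands the caller a copy that pairs with the existing `ie_network_name_free()`, as in this sketch (`network` is assumed to be a read `ie_network_t`):

```c
char *net_name = NULL;
if (ie_network_get_name(network, &net_name) == OK) {
    printf("network name: %s\n", net_name);
    ie_network_name_free(&net_name);  /* releases the copy made by the call */
}
```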
@@ -570,7 +646,7 @@ IEStatusCode ie_network_get_inputs_number(const ie_network_t *network, size_t *s
*size_result = inputs.size();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -597,11 +673,11 @@ IEStatusCode ie_network_get_input_name(const ie_network_t *network, size_t numbe
}
std::unique_ptr<char[]> inputName(new char[iter->first.length() + 1]);
*name = inputName.release();
snprintf(*name, iter->first.length() + 1, "%s", iter->first.c_str());
memcpy(*name, iter->first.c_str(), iter->first.length() + 1);
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -626,7 +702,7 @@ IEStatusCode ie_network_get_input_precision(const ie_network_t *network, const c
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -657,7 +733,7 @@ IEStatusCode ie_network_set_input_precision(ie_network_t *network, const char *i
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -682,7 +758,7 @@ IEStatusCode ie_network_get_input_layout(const ie_network_t *network, const char
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -713,7 +789,7 @@ IEStatusCode ie_network_set_input_layout(ie_network_t *network, const char *inpu
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -741,7 +817,7 @@ IEStatusCode ie_network_get_input_dims(const ie_network_t *network, const char *
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -766,7 +842,7 @@ IEStatusCode ie_network_get_input_resize_algorithm(const ie_network_t *network,
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -797,7 +873,7 @@ IEStatusCode ie_network_set_input_resize_algorithm(ie_network_t *network, const
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -822,7 +898,7 @@ IEStatusCode ie_network_get_color_format(const ie_network_t *network, const char
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -853,7 +929,7 @@ IEStatusCode ie_network_set_color_format(ie_network_t *network, const char *inpu
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -882,7 +958,7 @@ IEStatusCode ie_network_get_input_shapes(ie_network *network, input_shapes_t *sh
std::unique_ptr<char[]> _name(new char[iter->first.length() + 1]);
shape_ptrs[i].name = _name.release();
snprintf(shape_ptrs[i].name, iter->first.length() + 1, "%s", iter->first.c_str());
memcpy(shape_ptrs[i].name, iter->first.c_str(), iter->first.length() + 1);
shape_ptrs[i].shape.ranks = net_dim.size();
for (size_t j = 0; j < shape_ptrs[i].shape.ranks; ++j) {
@@ -893,7 +969,7 @@ IEStatusCode ie_network_get_input_shapes(ie_network *network, input_shapes_t *sh
status = IEStatusCode::OK;
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -922,7 +998,7 @@ IEStatusCode ie_network_reshape(ie_network_t *network, const input_shapes_t shap
network->object.reshape(net_shapes);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -942,7 +1018,7 @@ IEStatusCode ie_network_get_outputs_number(const ie_network_t *network, size_t *
*size_result = outputs.size();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -969,11 +1045,11 @@ IEStatusCode ie_network_get_output_name(const ie_network_t *network, const size_
}
std::unique_ptr<char[]> outputName(new char[iter->first.length() + 1]);
*name = outputName.release();
snprintf(*name, iter->first.length() + 1, "%s", iter->first.c_str());
memcpy(*name, iter->first.c_str(), iter->first.length() + 1);
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -998,7 +1074,7 @@ IEStatusCode ie_network_get_output_precision(const ie_network_t *network, const
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1029,7 +1105,7 @@ IEStatusCode ie_network_set_output_precision(ie_network_t *network, const char *
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1054,7 +1130,7 @@ IEStatusCode ie_network_get_output_layout(const ie_network_t *network, const cha
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1085,7 +1161,7 @@ IEStatusCode ie_network_set_output_layout(ie_network_t *network, const char *out
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1113,14 +1189,14 @@ IEStatusCode ie_network_get_output_dims(const ie_network_t *network, const char
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
IEStatusCode ie_network_input_shapes_free(input_shapes_t *inputShapes) {
void ie_network_input_shapes_free(input_shapes_t *inputShapes) {
if (inputShapes) {
for (size_t i = 0; i < inputShapes->shape_num; ++i) {
delete[] inputShapes->shapes[i].name;
@@ -1129,26 +1205,20 @@ IEStatusCode ie_network_input_shapes_free(input_shapes_t *inputShapes) {
delete[] inputShapes->shapes;
inputShapes->shapes = NULL;
}
return IEStatusCode::OK;
}
IEStatusCode ie_network_name_free(char **name) {
void ie_network_name_free(char **name) {
if (*name) {
delete[] *name;
*name = NULL;
}
return IEStatusCode::OK;
}
IEStatusCode ie_infer_request_free(ie_infer_request_t **infer_request) {
void ie_infer_request_free(ie_infer_request_t **infer_request) {
if (infer_request) {
delete *infer_request;
*infer_request = NULL;
}
return IEStatusCode::OK;
}
IEStatusCode ie_infer_request_get_blob(ie_infer_request_t *infer_request, const char *name, ie_blob_t **blob) {
@@ -1166,7 +1236,7 @@ IEStatusCode ie_infer_request_get_blob(ie_infer_request_t *infer_request, const
*blob = blob_result.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1185,7 +1255,7 @@ IEStatusCode ie_infer_request_set_blob(ie_infer_request_t *infer_request, const
infer_request->object.SetBlob(name, blob->object);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1204,7 +1274,7 @@ IEStatusCode ie_infer_request_infer(ie_infer_request_t *infer_request) {
infer_request->object.Infer();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1223,7 +1293,7 @@ IEStatusCode ie_infer_request_infer_async(ie_infer_request_t *infer_request) {
infer_request->object.StartAsync();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1245,7 +1315,7 @@ IEStatusCode ie_infer_set_completion_callback(ie_infer_request_t *infer_request,
infer_request->object.SetCompletionCallback(fun);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1265,7 +1335,7 @@ IEStatusCode ie_infer_request_wait(ie_infer_request_t *infer_request, const int6
status = status_map[status_code];
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1284,7 +1354,7 @@ IEStatusCode ie_infer_request_set_batch(ie_infer_request_t *infer_request, const
infer_request->object.SetBatch(size);
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1334,6 +1404,8 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl
_blob->object = IE::make_shared_blob<int32_t>(tensor);
} else if (prec == IE::Precision::I64) {
_blob->object = IE::make_shared_blob<int64_t>(tensor);
} else if (prec == IE::Precision::U64) {
_blob->object = IE::make_shared_blob<uint64_t>(tensor);
} else if (prec == IE::Precision::FP32) {
_blob->object = IE::make_shared_blob<float>(tensor);
} else {
@@ -1344,7 +1416,7 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl
*blob = _blob.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1399,6 +1471,9 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe
} else if (prec == IE::Precision::I64) {
int64_t *p = reinterpret_cast<int64_t *>(ptr);
_blob->object = IE::make_shared_blob(tensor, p, size);
} else if (prec == IE::Precision::U64) {
uint64_t *p = reinterpret_cast<uint64_t *>(ptr);
_blob->object = IE::make_shared_blob(tensor, p, size);
} else if (prec == IE::Precision::FP32) {
float *p = reinterpret_cast<float *>(ptr);
_blob->object = IE::make_shared_blob(tensor, p, size);
@@ -1409,7 +1484,7 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe
*blob = _blob.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1429,13 +1504,49 @@ IEStatusCode ie_blob_make_memory_with_roi(const ie_blob_t *inputBlob, const roi_
*blob = _blob.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
IEStatusCode ie_blob_make_memory_nv12(const ie_blob_t *y, const ie_blob_t *uv, ie_blob_t **nv12Blob) {
if (y == nullptr || uv == nullptr || nv12Blob == nullptr) {
return IEStatusCode::GENERAL_ERROR;
}
try {
std::unique_ptr<ie_blob_t> _blob(new ie_blob_t);
_blob->object = IE::make_shared_blob<IE::NV12Blob>(y->object, uv->object);
*nv12Blob = _blob.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK;
}
IEStatusCode ie_blob_make_memory_i420(const ie_blob_t *y, const ie_blob_t *u, const ie_blob_t *v, ie_blob_t **i420Blob) {
if (y == nullptr || u == nullptr || v == nullptr || i420Blob == nullptr) {
return IEStatusCode::GENERAL_ERROR;
}
try {
std::unique_ptr<ie_blob_t> _blob(new ie_blob_t);
_blob->object = IE::make_shared_blob<IE::I420Blob>(y->object, u->object, v->object);
*i420Blob = _blob.release();
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK;
}
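Both helpers compose pre-created plane blobs into a compound blob. A hedged sketch of assembling an NV12 input follows; the `tensor_desc_t` initializers assume the header's layout/dims/precision field order and the `NHWC` enumerator name, the dims use the N,C,H,W ordering seen elsewhere in this diff, and the 640x480 plane shapes are purely illustrative:

```c
/* Assumed tensor_desc_t field order: layout, dims {ranks, dims[]}, precision.
 * Y plane is 1x1xHxW; the interleaved UV plane is 1x2x(H/2)x(W/2). */
tensor_desc_t y_desc  = {NHWC, {4, {1, 1, 480, 640}}, U8};
tensor_desc_t uv_desc = {NHWC, {4, {1, 2, 240, 320}}, U8};
ie_blob_t *y = NULL, *uv = NULL, *nv12 = NULL;

IEStatusCode status = ie_blob_make_memory(&y_desc, &y);
status |= ie_blob_make_memory(&uv_desc, &uv);
status |= ie_blob_make_memory_nv12(y, uv, &nv12);
if (status == OK) {
    /* e.g. ie_infer_request_set_blob(infer_request, "input", nv12); */
    ie_blob_free(&nv12);      /* wrapper only; the planes keep their memory */
}
ie_blob_deallocate(&y);       /* plane blobs own their allocations */
ie_blob_deallocate(&uv);
```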
IEStatusCode ie_blob_size(ie_blob_t *blob, int *size_result) {
IEStatusCode status = IEStatusCode::OK;
@@ -1462,16 +1573,12 @@ IEStatusCode ie_blob_byte_size(ie_blob_t *blob, int *bsize_result) {
return status;
}
IEStatusCode ie_blob_deallocate(ie_blob_t **blob) {
IEStatusCode status = IEStatusCode::OK;
void ie_blob_deallocate(ie_blob_t **blob) {
if (*blob) {
(*blob)->object->deallocate();
delete *blob;
*blob = NULL;
}
return status;
}
IEStatusCode ie_blob_get_buffer(const ie_blob_t *blob, ie_blob_buffer_t *blob_buffer) {
@@ -1510,7 +1617,7 @@ IEStatusCode ie_blob_get_dims(const ie_blob_t *blob, dimensions_t *dims_result)
}
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1530,7 +1637,7 @@ IEStatusCode ie_blob_get_layout(const ie_blob_t *blob, layout_e *layout_result)
*layout_result = layout_map[l];
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
@@ -1550,9 +1657,16 @@ IEStatusCode ie_blob_get_precision(const ie_blob_t *blob, precision_e *prec_resu
*prec_result = precision_map[p];
} catch (const IE::details::InferenceEngineException& e) {
return e.hasStatus() ? status_map[e.getStatus()] : IEStatusCode::UNEXPECTED;
} catch (const std::exception& e) {
} catch (...) {
return IEStatusCode::UNEXPECTED;
}
return status;
}
void ie_blob_free(ie_blob_t **blob) {
if (blob) {
delete *blob;
*blob = NULL;
}
}
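This leaves two distinct release paths: `ie_blob_deallocate()` additionally calls `deallocate()` on the wrapped object, while the new `ie_blob_free()` destroys only the C wrapper. In caller terms (a sketch; `infer_request` and the blob name "data" are hypothetical):

```c
ie_blob_t *blob = NULL;
if (ie_infer_request_get_blob(infer_request, "data", &blob) == OK) {
    /* ... read or fill the blob's buffer ... */
    ie_blob_free(&blob);  /* wrapper only; the request still owns the memory */
}
```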

View File

@@ -8,6 +8,8 @@ cmake_minimum_required (VERSION 3.3)
project (ie_python_api)
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}/cmake")
option(ENABLE_CONDA_FOLDER "Create output folder with conda python bindings" OFF)
string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH)
if(ARCH STREQUAL "x86_64" OR ARCH STREQUAL "amd64") # Windows detects Intel's 64-bit CPU as AMD64
set(ARCH intel64)
@@ -15,19 +17,17 @@ elseif(ARCH STREQUAL "i386")
set(ARCH ia32)
endif()
if(ENABLE_NGRAPH)
add_definitions(-DENABLE_NGRAPH)
endif()
if(DEFINED IE_MAIN_SOURCE_DIR)
set(InferenceEngine_LIBRARIES inference_engine)
else()
find_package(InferenceEngineDeveloperPackage REQUIRED)
set(InferenceEngine_LIBRARIES IE::inference_engine)
endif()
if(UNIX)
# cython generated files requires public visibility. Force visibility required.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
set(CMAKE_CXX_VISIBILITY_PRESET default)
set(CMAKE_C_VISIBILITY_PRESET default)
endif()
include (UseCython)
@@ -38,10 +38,18 @@ else()
message(FATAL_ERROR "Python Interpreter was not found!")
endif()
if(WIN32)
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/${PYTHON_VERSION}/openvino)
if(ENABLE_CONDA_FOLDER)
if(WIN32)
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/Conda/${PYTHON_VERSION}/openvino)
else()
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/Conda/${PYTHON_VERSION}/openvino)
endif()
else()
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/${PYTHON_VERSION}/openvino)
if(WIN32)
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/${PYTHON_VERSION}/openvino)
else()
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/${PYTHON_VERSION}/openvino)
endif()
endif()
set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})

View File

@@ -29,10 +29,10 @@ To configure the environment for the Inference Engine Python\* API, run:
* On CentOS\* 7.4: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On macOS\* 10.x: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Raspbian\* 9: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Windows\* 10: `call <INSTALL_DIR>\deployment_tools\inference_engine\python_api\setenv.bat`
* On Windows\* 10: `call <INSTALL_DIR>\bin\setupvars.bat`
The script automatically detects the latest installed Python\* version and configures the required environment if the version is supported.
If you want to use a certain version of Python\*, set the environment variable `PYTHONPATH=<INSTALL_DIR>/deployment_tools/inference_engine/python_api/<desired_python_version>`
If you want to use a certain version of Python\*, set the environment variable `PYTHONPATH=<INSTALL_DIR>/python/<desired_python_version>`
after running the environment configuration script.
## API Reference

View File

@@ -1,4 +1,2 @@
opencv-python
numpy
cython
progress
cython>=0.29

View File

@@ -21,8 +21,7 @@ from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore
from openvino.inference_engine import IECore
def build_argparser():
@@ -62,7 +61,7 @@ def main():
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork(model=model_xml, weights=model_bin)
net = ie.read_network(model=model_xml, weights=model_bin)
if "CPU" in args.device:
supported_layers = ie.query_network(net, "CPU")

View File

@@ -21,8 +21,7 @@ from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore
from openvino.inference_engine import IECore
import threading
@@ -107,7 +106,7 @@ def main():
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork(model=model_xml, weights=model_bin)
net = ie.read_network(model=model_xml, weights=model_bin)
if "CPU" in args.device:
supported_layers = ie.query_network(net, "CPU")

View File

@@ -21,8 +21,7 @@ from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore
from openvino.inference_engine import IECore
def build_argparser():
@@ -30,40 +29,44 @@ def build_argparser():
args = parser.add_argument_group("Options")
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
required=True, type=str)
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to image file.",
required=True, type=str, nargs="+")
required=True, type=str, nargs="+")
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. Absolute path to a shared library with the kernels implementations.",
type=str, default=None)
help="Optional. Required for CPU custom layers. "
"Absolute path to a shared library with the kernels implementations.",
type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample will look for a suitable plugin for device specified (CPU by default)",
default="CPU", type=str)
help="Optional. Specify the target device to infer on; "
"CPU, GPU, FPGA or MYRIAD is acceptable. "
"Sample will look for a suitable plugin for device specified (CPU by default)",
default="CPU", type=str)
args.add_argument("--labels", help="Optional. Labels mapping file", default=None, type=str)
args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
return parser
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
log.info("Loading Inference Engine")
ie = IECore()
# --------------------------- 1. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork(model=model_xml, weights=model_bin)
net = ie.read_network(model=model_xml, weights=model_bin)
# -----------------------------------------------------------------------------------------------------
# ------------- 2. Load Plugin for inference engine and extensions library if specified --------------
log.info("Loading Inference Engine")
ie = IECore()
log.info("Device info:")
versions = ie.get_versions(args.device)
print("{}{}".format(" "*8, args.device))
print("{}MKLDNNPlugin version ......... {}.{}".format(" "*8, versions[args.device].major, versions[args.device].minor))
print("{}Build ........... {}".format(" "*8, versions[args.device].build_number))
print("{}{}".format(" " * 8, args.device))
print("{}MKLDNNPlugin version ......... {}.{}".format(" " * 8, versions[args.device].major,
versions[args.device].minor))
print("{}Build ........... {}".format(" " * 8, versions[args.device].build_number))
if args.cpu_extension and "CPU" in args.device:
ie.add_extension(args.cpu_extension, "CPU")
log.info("CPU extension loaded: {}".format(args.cpu_extension))
@@ -80,8 +83,15 @@ def main():
# -----------------------------------------------------------------------------------------------------
# --------------------------- 3. Read and preprocess input --------------------------------------------
input_blob = next(iter(net.inputs))
n, c, h, w = net.inputs[input_blob].shape
print("inputs number: " + str(len(net.inputs.keys())))
for input_key in net.inputs:
print("input shape: " + str(net.inputs[input_key].shape))
print("input key: " + input_key)
if len(net.inputs[input_key].layout) == 4:
n, c, h, w = net.inputs[input_key].shape
images = np.ndarray(shape=(n, c, h, w))
images_hw = []
for i in range(n):
@@ -95,13 +105,14 @@ def main():
log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW
images[i] = image
# -----------------------------------------------------------------------------------------------------
# --------------------------- 4. Configure input & output ---------------------------------------------
# --------------------------- Prepare input blobs -----------------------------------------------------
log.info("Preparing input blobs")
assert (len(net.inputs.keys()) == 1 or len(net.inputs.keys()) == 2), "Sample supports topologies only with 1 or 2 inputs"
input_blob = next(iter(net.inputs))
assert (len(net.inputs.keys()) == 1 or len(
net.inputs.keys()) == 2), "Sample supports topologies only with 1 or 2 inputs"
out_blob = next(iter(net.outputs))
input_name, input_info_name = "", ""
@@ -113,9 +124,21 @@ def main():
elif len(net.inputs[input_key].layout) == 2:
input_info_name = input_key
net.inputs[input_key].precision = 'FP32'
if net.inputs[input_key].shape[1] != 3 and net.inputs[input_key].shape[1] != 6 or net.inputs[input_key].shape[0] != 1:
if net.inputs[input_key].shape[1] != 3 and net.inputs[input_key].shape[1] != 6 or \
net.inputs[input_key].shape[0] != 1:
log.error('Invalid input info. Should be 3 or 6 values long.')
data = {}
data[input_name] = images
if input_info_name != "":
infos = np.ndarray(shape=(n, c), dtype=float)
for i in range(n):
infos[i, 0] = h
infos[i, 1] = w
infos[i, 2] = 1.0
data[input_info_name] = infos
# --------------------------- Prepare output blobs ----------------------------------------------------
log.info('Preparing output blobs')
@@ -142,7 +165,7 @@ def main():
log.info("Loading model to the device")
exec_net = ie.load_network(network=net, device_name=args.device)
log.info("Creating infer request and starting inference")
res = exec_net.infer(inputs={input_blob: images})
res = exec_net.infer(inputs=data)
# -----------------------------------------------------------------------------------------------------
# --------------------------- Read and postprocess output ---------------------------------------------
@@ -160,8 +183,8 @@ def main():
ymin = np.int(ih * proposal[4])
xmax = np.int(iw * proposal[5])
ymax = np.int(ih * proposal[6])
print("[{},{}] element, prob = {:.6} ({},{})-({},{}) batch id : {}"\
.format(number, label, confidence, xmin, ymin, xmax, ymax, imid), end="")
print("[{},{}] element, prob = {:.6} ({},{})-({},{}) batch id : {}" \
.format(number, label, confidence, xmin, ymin, xmax, ymax, imid), end="")
if proposal[2] > 0.5:
print(" WILL BE PRINTED!")
if not imid in boxes.keys():
@@ -182,8 +205,9 @@ def main():
# -----------------------------------------------------------------------------------------------------
log.info("Execution successful\n")
log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool")
log.info(
"This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool")
if __name__ == '__main__':
sys.exit(main() or 0)
sys.exit(main() or 0)

View File

@@ -21,8 +21,7 @@ from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IECore
from openvino.inference_engine import IECore
def build_argparser():
@@ -66,7 +65,7 @@ def main():
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork(model=model_xml, weights=model_bin)
net = ie.read_network(model=model_xml, weights=model_bin)
if "CPU" in args.device:
supported_layers = ie.query_network(net, "CPU")

View File

@@ -24,7 +24,6 @@ endfunction()
python_disable_deprecated_warnings()
target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
set_target_properties(${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
# Compatibility with python 2.7 which has deprecated "register" specifier
@@ -43,8 +42,9 @@ add_custom_command(TARGET ${TARGET_NAME}
# install
install(TARGETS ${TARGET_NAME}
DESTINATION python/${PYTHON_VERSION}/openvino/inference_engine
COMPONENT ${PYTHON_VERSION})
RUNTIME DESTINATION python/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_VERSION}
ARCHIVE DESTINATION python/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_VERSION}
LIBRARY DESTINATION python/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_VERSION})
install(PROGRAMS __init__.py
DESTINATION python/${PYTHON_VERSION}/openvino/inference_engine

View File

@@ -3,6 +3,7 @@ from .ie_api_impl_defs cimport Blob, TensorDesc
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool
from libcpp.memory cimport unique_ptr, shared_ptr
cdef class BlobBuffer:
@@ -36,6 +37,8 @@ cdef class ExecutableNetwork:
cdef unique_ptr[C.IEExecNetwork] impl
cdef C.IEPlugin plugin_impl
cdef C.IECore ie_core_impl
cpdef wait(self, num_requests = ?, timeout = ?)
cpdef get_idle_request_id(self)
cdef public:
_requests, _infer_requests
@@ -52,6 +55,7 @@ cdef class LayersStatsMap(dict):
cdef class IECore:
cdef C.IECore impl
cpdef IENetwork read_network(self, model : [str, bytes], weights : [str, bytes] = ?, bool init_from_buffer = ?)
cpdef ExecutableNetwork load_network(self, IENetwork network, str device_name, config = ?, int num_requests = ?)
cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config = ?, int num_requests = ?)
@@ -63,4 +67,4 @@ cdef class CDataPtr:
cdef C.CDataPtr _ptr
cdef class IENetLayer:
cdef C.CNNLayerPtr _ptr
cdef C.CNNLayerPtr _ptr

View File

@@ -4,12 +4,13 @@ from .cimport ie_api_impl_defs as C
from .ie_api_impl_defs cimport Blob, TensorDesc, SizeVector, Precision
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool
from libcpp.pair cimport pair
from libcpp.map cimport map
from libcpp.memory cimport unique_ptr, shared_ptr
from libc.stdlib cimport malloc, free
from libc.stdint cimport int64_t, uint8_t
from libc.string cimport memcpy, strcpy
from libc.string cimport memcpy
import os
import numpy as np
from copy import deepcopy
@@ -41,7 +42,7 @@ cdef c_map_to_dict(map[string, string] c_map):
py_dict[v.first.decode()] = v.second.decode()
return py_dict
supported_precisions = ["FP32", "FP16", "I64", "I32", "I16", "I8", "U16", "U8"]
supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "I16", "I8", "U16", "U8"]
layout_int_to_str_map = {0: "ANY", 1: "NCHW", 2: "NHWC", 3: "NCDHW", 4: "NDHWC", 64: "OIHW", 95: "SCALAR", 96: "C",
128: "CHW", 192: "HW", 193: "NC", 194: "CN", 200: "BLOCKED"}
@@ -66,7 +67,7 @@ layout_str_to_enum = {'ANY': C.Layout.ANY,
known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']
ctypedef enum StatusCode:
cpdef enum StatusCode:
OK = 0
GENERAL_ERROR = -1
NOT_IMPLEMENTED = -2
@@ -81,6 +82,10 @@ ctypedef enum StatusCode:
INFER_NOT_STARTED = -11
NETWORK_NOT_READ = -12
cpdef enum WaitMode:
RESULT_READY = -1
STATUS_ONLY = 0
def get_version():
return C.get_version().decode()
@@ -114,6 +119,44 @@ cdef class IECore:
versions[device].major = ver.apiVersion.major
return versions
## Reads a network from the Intermediate Representation (IR) and creates an `IENetwork`.
# @param model: A `.xml` file of the IR or string with IR.
# @param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or
# bytes with file content.
# @param init_from_buffer: Defines how the `model` and `weights` attributes are interpreted.
# If `False`, attributes are interpreted as strings with paths to .xml and .bin files
# of IR. If `True`, they are interpreted as Python `bytes` object with .xml and .bin files content.
# @return An `IENetwork` object
#
# Usage example:\n
# ```python
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# ```
cpdef IENetwork read_network(self, model: [str, bytes], weights: [str, bytes] = "", init_from_buffer: bool = False):
cdef char*xml_buffer
cdef uint8_t*bin_buffer
cdef string weights_
cdef string model_
cdef IENetwork net = IENetwork()
if init_from_buffer:
xml_buffer = <char*> malloc(len(model)+1)
bin_buffer = <uint8_t *> malloc(len(weights))
memcpy(xml_buffer, <char*> model, len(model))
memcpy(bin_buffer, <uint8_t *> weights, len(weights))
xml_buffer[len(model)] = b'\0'
net.impl = self.impl.readNetwork(xml_buffer, bin_buffer, len(weights))
free(xml_buffer)
else:
if not os.path.isfile(model):
raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
if not os.path.isfile(weights):
raise Exception("Path to the weights {} doesn't exists or it's a directory".format(weights))
model_ = model.encode()
weights_ = weights.encode()
net.impl = self.impl.readNetwork(model_, weights_)
return net
## Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name
# and creates an `ExecutableNetwork` object of the `IENetwork` class.
# You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware
@@ -122,19 +165,22 @@ cdef class IECore:
# @param device_name: A device name of a target plugin
# @param config: A dictionary of plugin configuration keys and their values
# @param num_requests: A positive integer value of infer requests to be created. Number of infer requests is limited
# by device capabilities.
# by device capabilities.
# Value `0` indicates that optimal number of infer requests will be created.
# @return An `ExecutableNetwork` object
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# exec_net = plugin.load_network(network=net, device_name="CPU", num_requsts=2)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)
# ```
cpdef ExecutableNetwork load_network(self, IENetwork network, str device_name, config=None, int num_requests=1):
cdef ExecutableNetwork exec_net = ExecutableNetwork()
cdef map[string, string] c_config
if num_requests < 0:
raise ValueError("Incorrect number of requests specified: {}. Expected positive integer number "
"or zero for auto detection".format(num_requests))
if config:
c_config = dict_to_c_map(config)
exec_net.ie_core_impl = self.impl
@@ -147,12 +193,13 @@ cdef class IECore:
# @param config: A dictionary of plugin configuration keys and their values
# @param num_requests: A positive integer value of infer requests to be created. Number of infer requests is limited
# by device capabilities.
# Value `0` indicates that optimal number of infer requests will be created.
# @return An `ExecutableNetwork` object
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# exec_net = plugin.load_network(network=net, device_name="MYRIAD", num_requsts=2)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(network=net, device_name="MYRIAD", num_requests=2)
# # export executable network
# exec_net.export(path_to_file_to_save)
# # import previously exported executable network
@@ -161,7 +208,9 @@ cdef class IECore:
cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config=None, int num_requests=1):
cdef ExecutableNetwork exec_net = ExecutableNetwork()
cdef map[string, string] c_config
if num_requests < 0:
raise ValueError("Incorrect number of requests specified: {}. Expected positive integer number "
"or zero for auto detection".format(num_requests))
if config:
c_config = dict_to_c_map(config)
exec_net.ie_core_impl = self.impl
@@ -177,9 +226,9 @@ cdef class IECore:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# layers_map = plugin.query_network(network=net, device_name="HETERO:GPU,CPU")
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU")
# ```
def query_network(self, IENetwork network, str device_name, config=None):
cdef map[string, string] c_config
@@ -189,11 +238,19 @@ cdef class IECore:
return c_map_to_dict(res)
## Sets a configuration for a plugin
#
# \note When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# @param config: a dictionary of configuration parameters as keys and their values
# @param device_name: a device name of a target plugin
# @return None
#
# Usage examples: See the `set_affinity` method of the `IENetwork` class
# Usage examples:\n
# ```python
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name="CPU")
# ```
def set_config(self, config: dict, device_name: str):
cdef map[string, string] c_config = dict_to_c_map(config)
self.impl.setConfig(c_config, device_name.encode())
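To illustrate the \note above about the omitted "KEY_" prefix: the C++ configuration key `KEY_CPU_THREADS_NUM`, for example, is passed from Python as plain `"CPU_THREADS_NUM"`. A minimal sketch, assuming the CPU plugin supports this key:
```python
from openvino.inference_engine import IECore

ie = IECore()
# KEY_CPU_THREADS_NUM in the C++ API is specified without the "KEY_" prefix here
ie.set_config(config={"CPU_THREADS_NUM": "4"}, device_name="CPU")
print(ie.get_config(device_name="CPU", config_name="CPU_THREADS_NUM"))
```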
@@ -232,8 +289,6 @@ cdef class IECore:
# Usage example:\n
# ```python
# ie = IECore()
# plugin = IEPlugin("GPU")
# ie.register_plugin(plugin=plugin, device_name="MY_NEW_GPU")
# ie.unregister_plugin(device_name="GPU")
# ```
def unregister_plugin(self, device_name: str):
@@ -268,6 +323,9 @@ cdef class IECore:
## Gets a configuration dedicated to device behavior. The method extracts information
# that can be set via the set_config method.
#
# \note When specifying a key value of a config, the "KEY_" prefix is omitted.
#
# @param device_name: A name of a device to get a config value.
# @param config_name: A config name to request.
# @return A config value corresponding to a config key.
@@ -275,7 +333,7 @@ cdef class IECore:
# Usage example:\n
# ```python
# ie = IECore()
# ie.get_config(metric_name="CPU_BIND_THREAD", device_name="CPU")
# ie.get_config(device_name="CPU", config_name="CPU_BIND_THREAD")
# ```
def get_config(self, device_name: str, config_name: str):
return self.impl.getConfig(device_name.encode(), config_name.encode())
@@ -391,7 +449,7 @@ cdef class CDataPtr:
## This class represents a network instance loaded to plugin and ready for inference.
cdef class ExecutableNetwork:
## There is no explicit class constructor. To make a valid instance of `ExecutableNetwork`,
# use `load()` method of the `IEPlugin` class.
# use `load_network()` method of the `IECore` class.
def __init__(self):
self._infer_requests = []
@@ -403,9 +461,9 @@ cdef class ExecutableNetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# plugin = IEPlugin(device="CPU")
# exec_net = plugin.load(network=net, num_requests=2)
# ie_core = IECore()
# net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie_core.load_network(net, device, num_requests=2)
# res = exec_net.infer({'data': img})
# res
# {'prob': array([[[[2.83426580e-08]],
@@ -447,15 +505,13 @@ cdef class ExecutableNetwork:
for i in range(deref(self.impl).infer_requests.size()):
infer_request = InferRequest()
infer_request.impl = &(deref(self.impl).infer_requests[i])
infer_request._inputs_list = list(self.inputs.keys())
infer_request._outputs_list = list(self.outputs.keys())
self._infer_requests.append(infer_request)
if len(self._infer_requests) != deref(self.impl).infer_requests.size():
raise Exception("Mismatch of infer requests number!")
for i in range(len(self._infer_requests)):
self._infer_requests[i]._inputs_list = list(self.inputs.keys())
self._infer_requests[i]._outputs_list = list(self.outputs.keys())
return self._infer_requests
## A dictionary that maps input layer names to DataPtr objects
@property
@@ -484,9 +540,9 @@ cdef class ExecutableNetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# plugin = IEPlugin(device="CPU")
# exec_net = plugin.load(network=net, num_requsts=2)
# ie_core = IECore()
# net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie_core.load_network(net, device, num_requests=2)
# exec_graph = exec_net.get_exec_graph_info()
# ```
def get_exec_graph_info(self):
@@ -502,7 +558,7 @@ cdef class ExecutableNetwork:
# Usage example:\n
# ```python
# ie = IECore()
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(net, "CPU")
# exec_net.get_metric("NETWORK_NAME")
# ```
@@ -517,9 +573,9 @@ cdef class ExecutableNetwork:
# Usage example:\n
# ```python
# ie = IECore()
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(net, "CPU")
# exec_net.get_metric("DEVICE_ID")
# config = exec_net.get_config("CPU_BIND_THREAD")
# ```
def get_config(self, config_name: str):
return deref(self.impl).getConfig(config_name.encode())
@@ -529,14 +585,32 @@ cdef class ExecutableNetwork:
# @return None
#
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# exec_net = plugin.load_network(network=net, device_name="MYRIAD", num_requsts=2)
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# exec_net = ie.load_network(network=net, device_name="MYRIAD", num_requests=2)
# exec_net.export(path_to_file_to_save)
# ```
def export(self, model_file: str):
deref(self.impl).exportNetwork(model_file.encode())
## Waits until the result from any request becomes available or the specified timeout elapses, whichever comes first.
# @param num_requests: Number of idle requests to wait for.
# If not specified, `num_requests` is set to the total number of requests by default.
# @param timeout: Time to wait in milliseconds or one of the special values (0, -1) described in `InferRequest.wait()`.
# If not specified, `timeout` is set to -1 by default.
# @return Request status code: OK or RESULT_NOT_READY
cpdef wait(self, num_requests=None, timeout=None):
if num_requests is None:
num_requests = len(self.requests)
if timeout is None:
timeout = WaitMode.RESULT_READY
return deref(self.impl).wait(<int> num_requests, <int64_t> timeout)
## Gets the ID of an idle request
# @return Request index, or -1 if no request is currently idle
cpdef get_idle_request_id(self):
return deref(self.impl).getIdleRequestId()
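A hypothetical throughput loop built on the two methods above; `ie`, `net`, `images`, and `input_blob` are assumed to be defined elsewhere:
```python
# num_requests=0 asks the plugin for the optimal number of infer requests
exec_net = ie.load_network(network=net, device_name="CPU", num_requests=0)
for image in images:
    request_id = exec_net.get_idle_request_id()
    if request_id < 0:
        # No request is idle yet: block until at least one completes
        exec_net.wait(num_requests=1)
        request_id = exec_net.get_idle_request_id()
    exec_net.requests[request_id].async_infer({input_blob: image})
# Drain: wait until all requests become idle again before reading results
exec_net.wait()
```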
ctypedef extern void (*cb_type)(void*, int) with gil
## This class provides an interface to infer requests of `ExecutableNetwork` and serves to handle infer requests execution
@@ -555,8 +629,9 @@ cdef class InferRequest:
cdef void user_callback(self, int status) with gil:
if self._py_callback:
self._py_callback(status, self._py_data)
# Set the flag first, since the user may call wait() inside the callback
self._py_callback_called.set()
self._py_callback(status, self._py_data)
## Description: Sets a callback function that is called on success or failure of an asynchronous request
#
@@ -567,8 +642,8 @@ cdef class InferRequest:
# Usage example:\n
# ```python
# callback = lambda status, py_data: print("Request with id {} finished with status {}".format(py_data, status))
# net = IENetwork("./model.xml", "./model.bin")
# ie = IECore()
# net = ie.read_network(model="./model.xml", weights="./model.bin")
# exec_net = ie.load_network(net, "CPU", num_requests=4)
# for id, req in enumerate(exec_net.requests):
# req.set_completion_callback(py_callback=callback, py_data=id)
@@ -597,7 +672,7 @@ cdef class InferRequest:
#
# Usage example:\n
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].infer({input_blob: image})
# res = exec_net.requests[0].outputs['prob']
# np.flip(np.sort(np.squeeze(res)),0)
@@ -618,7 +693,7 @@ cdef class InferRequest:
#
# Usage example:\n
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].async_infer({input_blob: image})
# request_status = exec_net.requests[0].wait()
# res = exec_net.requests[0].outputs['prob']
@@ -626,12 +701,14 @@ cdef class InferRequest:
cpdef async_infer(self, inputs=None):
if inputs is not None:
self._fill_inputs(inputs)
self._py_callback_called.clear()
if self._py_callback_used:
self._py_callback_called.clear()
deref(self.impl).infer_async()
## Waits for the result to become available. Blocks until the specified timeout elapses or the result
# becomes available, whichever comes first.
# NOTE: There are special values of the timeout parameter:
#
# \note There are special values of the timeout parameter:
# * 0 - Immediately returns the inference status. It does not block or interrupt execution.
# For the meaning of statuses, please refer to InferenceEngine::StatusCode in the Inference Engine C++ documentation
# * -1 - Waits until inference result becomes available (default value)
@@ -643,22 +720,34 @@ cdef class InferRequest:
# Usage example: See the `async_infer()` method of the `InferRequest` class.
cpdef wait(self, timeout=None):
if self._py_callback_used:
while not self._py_callback_called.is_set():
# check request status to avoid blocking for idle requests
status = deref(self.impl).wait(WaitMode.STATUS_ONLY)
if status != StatusCode.RESULT_NOT_READY:
return status
if not self._py_callback_called.is_set():
if timeout == WaitMode.RESULT_READY:
timeout = None
if timeout is not None:
# Convert milliseconds to seconds
timeout = float(timeout)/1000
if not self._py_callback_called.wait(timeout):
return StatusCode.REQUEST_BUSY
return StatusCode.OK
else:
if timeout is None:
timeout = -1
return deref(self.impl).wait(<int64_t> timeout)
if timeout is None:
timeout = WaitMode.RESULT_READY
return deref(self.impl).wait(<int64_t> timeout)
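A short sketch of the special timeout values handled above; `input_blob`, `output_blob`, and `image` are hypothetical, `request` is assumed to be taken from `exec_net.requests`, and `StatusCode` is assumed to be importable from `openvino.inference_engine`:
```python
from openvino.inference_engine import StatusCode

request.async_infer({input_blob: image})
status = request.wait(0)       # 0: report the inference status immediately, without blocking
if status != StatusCode.OK:
    status = request.wait(-1)  # -1: block until the inference result becomes available
res = request.outputs[output_blob]
```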
## Queries performance measures per layer to get feedback of what is the most time consuming layer.
# NOTE: Performance counters data and format depends on the plugin
#
# \note Performance counters data and format depend on the plugin
#
# @return Dictionary containing per-layer execution information.
#
# Usage example:
# ```python
# exec_net = plugin.load(network=net, num_requests=2)
# exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
# exec_net.requests[0].infer({input_blob: image})
# exec_net.requests[0].get_perf_counts()
# {'Conv2D': {'exec_type': 'jit_avx2_1x1',
@@ -709,18 +798,20 @@ cdef class InferRequest:
## Sets new batch size for certain infer request when dynamic batching is enabled in executable network
# that created this request.
# NOTE: Support of dynamic batch size depends on the target plugin.
#
# \note Support of dynamic batch size depends on the target plugin.
#
# @param size: New batch size to be used by all the following inference calls for this request
# @return None
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# # Set max batch size
# net.batch = 10
# plugin.set_config({"DYN_BATCH_ENABLED": "YES"})
# exec_net = plugin.load(network=net)
# ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name=device)
# exec_net = ie.load_network(network=net, device_name=device)
# # Set batch size for certain network.
# # NOTE: The input data shape will not be changed, but only part of it will be used in inference, which increases performance
# exec_net.requests[0].set_batch(2)
@@ -784,7 +875,11 @@ cdef class IENetLayer:
def type(self):
return deref(self._ptr).type.decode()
## Layer base operating precision. Provides getter and setter interfaces.
## \note This property is deprecated.
# Please use the out_data property to access DataPtr objects for all output ports; they contain full
# information about the layer's output data, including precision.
#
# Layer base operating precision. Provides getter and setter interfaces.
@property
def precision(self):
warnings.filterwarnings("always", category=DeprecationWarning)
@@ -798,17 +893,17 @@ cdef class IENetLayer:
def precision(self, precision: str):
deref(self._ptr).precision = C.Precision.FromStr(precision.encode())
## Layer affinity set by user or a default affinity set by the `IEPlugin.set_initial_affinity()` method.
## Layer affinity set by the user, or a default affinity that may be set using the `IECore.query_network()` method,
# which returns a dictionary {layer_name: device}.
# The affinity attribute provides getter and setter interfaces, so the layer affinity can be modified directly.
# For example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# plugin = IEPlugin(device="HETERO:FPGA,CPU")
# plugin.set_config({"TARGET_FALLBACK": "HETERO:FPGA,CPU"})
# plugin.set_initial_affinity(net)
# for l in net.layers.values():
# if l.type == "Convolution":
# l.affinity = "CPU"
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU")
# layers = net.layers
# for layer, device in layers_map.items():
# layers[layer].affinity = device
# ```
@property
def affinity(self):
@@ -851,8 +946,10 @@ cdef class IENetLayer:
input_to_list.append(deref(layer.second).name.decode())
return input_to_list
## Deprecated: use out_data property to access DataPtr objects for all output ports, which contains full
## \note This property is deprecated.
# Please use the out_data property to access DataPtr objects for all output ports; they contain full
# information about the layer's output data, including layout
#
# Returns the layout of the layer output data on the 1st port
@property
def layout(self):
@@ -865,8 +962,10 @@ cdef class IENetLayer:
cdef C.DataPtr c_input = deref(self._ptr).outData[0]
return layout_int_to_str_map[deref(c_input).getLayout()]
## Deprecated: use out_data property to access DataPtr objects for all output ports, which contains full
## \note This property is deprecated.
# Please use the out_data property to access DataPtr objects for all output ports; they contain full
# information about the layer's output data, including shape
#
# Returns the list of dimensions of the layer output data on the 1st port
@property
def shape(self):
@@ -917,7 +1016,10 @@ cdef class IENetLayer:
weights_buffer.reset(blob.second)
blobs_map[blob.first.decode()] = weights_buffer.to_numpy()
return blobs_map
## Dictionary with layer weights, biases or custom blobs if any
## \note This property is deprecated.
# Please use blobs property instead.
#
# Dictionary with layer weights, biases or custom blobs if any
@property
def weights(self):
warnings.filterwarnings("always", category=DeprecationWarning)
@@ -932,6 +1034,9 @@ cdef class IENetLayer:
cdef class IENetwork:
## Class constructor
#
# \note Reading networks using the IENetwork constructor is deprecated.
# Please use the IECore.read_network() method instead.
#
# @param model: A `.xml` file of the IR or PyCapsule containing smart pointer to nGraph function.
# In case of passing a `.xml` file, the attribute value can be a string path or bytes with the file content,
# depending on the `init_from_buffer` attribute value
@@ -958,36 +1063,33 @@ cdef class IENetwork:
# xml = f.read()
# net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
# ```
# Initializing `IENetwork` object from PyCapsule object containing smart pointer to nGraph function
# ```
# from openvino.inference_engine import IENetwork
# from ngraph.impl.op import Parameter, Relu
# from ngraph.impl import Function, Shape, Type
#
# element_type = Type.f32
# param = Parameter(element_type, Shape([1, 3, 22, 22]))
# relu = Relu(param)
# func = Function([relu], [param], 'test')
# caps = Function.to_capsule(func)
# cnnNetwork = IENetwork(caps)
# ```
def __cinit__(self, model: [str, bytes] = "", weights: [str, bytes] = "", init_from_buffer: bool = False):
# TODO: uncomment when the nGraph Python API works
# Try to create Inference Engine network from capsule
if model.__class__.__name__ == 'PyCapsule' and weights == '' and init_from_buffer is False:
self.impl = C.IENetwork(model)
return
cdef char*xml_buffer = <char*> malloc(len(model))
# if model.__class__.__name__ == 'PyCapsule' and weights == '' and init_from_buffer is False:
# self.impl = C.IENetwork(model)
# return
cdef char*xml_buffer = <char*> malloc(len(model)+1)
cdef uint8_t*bin_buffer = <uint8_t *> malloc(len(weights))
cdef string model_
cdef string weights_
if init_from_buffer:
strcpy(xml_buffer, model)
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.warn("Reading network using constructor is deprecated. "
"Please, use IECore.read_network() method instead",
DeprecationWarning)
memcpy(xml_buffer, <char*> model, len(model))
memcpy(bin_buffer, <uint8_t *> weights, len(weights))
xml_buffer[len(model)] = b'\0'
self.impl = C.IENetwork()
self.impl.load_from_buffer(xml_buffer, len(model), bin_buffer, len(weights))
else:
if model and weights:
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.warn("Reading network using constructor is deprecated. "
"Please, use IECore.read_network() method instead",
DeprecationWarning)
if not os.path.isfile(model):
raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
if not os.path.isfile(weights):
@@ -1032,18 +1134,27 @@ cdef class IENetwork:
## Batch size of the network. Provides getter and setter interfaces to get and modify the
# network batch size. For example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# print(et.batch_size)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# print(net.batch_size)
# net.batch_size = 4
# print(net.batch_size)
# print(net.inputs['data'].shape)
# ```
@property
def batch_size(self):
return self.impl.batch_size
## Precision of the network
return self.impl.getBatch()
## \note This property is deprecated:
# network precision does not make sense, use precision on edges.
#
# Precision of the network
@property
def precision(self):
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.warn("Network precision is deprecated "
"because it does not make sence, "
"use precision on egdes.",
DeprecationWarning)
return self.impl.precision.decode()
@batch_size.setter
@@ -1051,7 +1162,6 @@ cdef class IENetwork:
if batch <= 0:
raise AttributeError("Invalid batch size {}! Batch size should be positive integer value".format(batch))
self.impl.setBatch(batch)
self.impl.batch_size = batch
## Return dictionary that maps network layer names in topological order to IENetLayer
# objects containing layer properties
@@ -1066,18 +1176,25 @@ cdef class IENetwork:
layers[deref(l).name.decode()] = net_l
return layers
## Returns `LayersStatsMap` object containing dictionary that maps network layer names to calibration statistics
## \note This property is deprecated.
# The new Calibration Tool doesn't generate statistics
#
# Returns `LayersStatsMap` object containing dictionary that maps network layer names to calibration statistics
# represented by `LayerStats` objects.
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# net.stats.update({"conv1_2d" : LayerStats(min=(-25, -1, 0), max=(63, 124, 70)),
# "conv2_2d" : LayerStats(min=(-5, -1, 0, 1, -7, 2), max=(63, 124, 70, 174, 99, 106))
# })
# ```
@property
def stats(self):
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.warn("stats property of IENetwork is deprecated.",
DeprecationWarning)
cdef map[string, map[string, vector[float]]] c_stats_map = self.impl.getStats()
py_stats_map = LayersStatsMap()
py_stats_map.net_impl = self.impl
@@ -1086,7 +1203,7 @@ cdef class IENetwork:
max=tuple(it.second["max".encode()]))
return py_stats_map
## NOTE: The function is deprecated. Please use the `IENetwork()` class constructor
## \note The function is deprecated. Please use the `IENetwork()` class constructor
# to create a valid instance of `IENetwork`.
#
# Reads the model from the `.xml` and `.bin` files of the IR.
@@ -1115,7 +1232,8 @@ cdef class IENetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# net.add_outputs(["conv5_1", "conv2_1", ("split_2", 1)])
# ```
def add_outputs(self, outputs):
@@ -1139,14 +1257,16 @@ cdef class IENetwork:
#
# Usage example:
# ```python
# net = IENetwork(model=path_to_model, weights=path_to_weights)
# ie = IECore()
# net = ie.read_network(model=path_to_xml, weights=path_to_bin)
# net.serialize(path_to_xml, path_to_bin)
# ```
def serialize(self, path_to_xml, path_to_bin: str = ""):
self.impl.serialize(path_to_xml.encode(), path_to_bin.encode())
## Reshapes the network to change spatial dimensions, batch size, or any dimension.
# NOTE: Before using this method, make sure that the target shape is applicable for the network.
#
# \note Before using this method, make sure that the target shape is applicable for the network.
# Changing the network shape to an arbitrary value may lead to unpredictable behaviour.
#
# @param input_shapes: A dictionary that maps input layer names to tuples with the target shape
@@ -1154,10 +1274,11 @@ cdef class IENetwork:
#
# Usage example:\n
# ```python
# net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
# ie = IECore()
# net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
# input_layer = next(iter(net.inputs))
# n, c, h, w = net.inputs[input_layer].shape
# net.reshape({input_layer: (n, c, h*2, w*2)}]
# net.reshape({input_layer: (n, c, h*2, w*2)})
# ```
def reshape(self, input_shapes: dict):
cdef map[string, vector[size_t]] c_input_shapes;
@@ -1171,18 +1292,27 @@ cdef class IENetwork:
c_shape.push_back(v)
c_input_shapes[input.encode()] = c_shape
self.impl.reshape(c_input_shapes)
## Returns PyCapsule containing smart pointer to constant nGraph function representing the network.
def get_function(self):
return self.impl.getFunction()
# TODO: uncomment when the nGraph Python API works
# def get_function(self):
# return self.impl.getFunction()
## This class is the main plugin interface and serves to initialize and configure the plugin.
#
# \note This class is deprecated: Use IECore instead
#
cdef class IEPlugin:
## Class constructor
## Class constructor
#
# @param device: Target device name. Supported devices: CPU, GPU, FPGA, MYRIAD, HETERO, MULTI
# @param plugin_dirs: List of paths to plugin directories
# @return IEPlugin instance
def __cinit__(self, device: str, plugin_dirs=None):
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.warn("IEPlugin class is deprecated. "
"Please use IECore class instead.",
DeprecationWarning)
plugin_base = device.split(':')[0]
if plugin_base not in known_plugins:
raise ValueError("Unknown plugin: {}, expected one of: {}"
@@ -1331,6 +1461,7 @@ cdef class BlobBuffer:
'I16': 'h', # signed short
'I32': 'i', # signed int
'I64': 'q', # signed long int
'U64': 'Q',  # unsigned long int
}
if name not in precision_to_format:


@@ -14,6 +14,7 @@ std::map <std::string, InferenceEngine::Precision> precision_map = {{"FP32", Inf
{"I16", InferenceEngine::Precision::I16},
{"I32", InferenceEngine::Precision::I32},
{"I64", InferenceEngine::Precision::I64},
{"U64", InferenceEngine::Precision::U64},
{"U16", InferenceEngine::Precision::U16},
{"U8", InferenceEngine::Precision::U8}};
@@ -174,18 +175,21 @@ InferenceEnginePython::IENetwork::IENetwork(const std::string &model, const std:
actual = std::make_shared<InferenceEngine::CNNNetwork>(net);
name = actual->getName();
batch_size = actual->getBatchSize();
IE_SUPPRESS_DEPRECATED_START
precision = actual->getPrecision().name();
IE_SUPPRESS_DEPRECATED_END
}
InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork> &cnn_network)
: actual(cnn_network) {
name = actual->getName();
batch_size = actual->getBatchSize();
IE_SUPPRESS_DEPRECATED_START
precision = actual->getPrecision().name();
IE_SUPPRESS_DEPRECATED_END
}
InferenceEnginePython::IENetwork::IENetwork(PyObject* network) {
#if defined(ENABLE_NGRAPH)
auto* capsule_ptr = PyCapsule_GetPointer(network, "ngraph_function");
auto* function_sp = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
if (function_sp == nullptr)
@@ -195,10 +199,9 @@ InferenceEnginePython::IENetwork::IENetwork(PyObject* network) {
actual = std::make_shared<InferenceEngine::CNNNetwork>(cnnNetwork);
name = actual->getName();
batch_size = actual->getBatchSize();
IE_SUPPRESS_DEPRECATED_START
precision = actual->getPrecision().name();
#else
THROW_IE_EXCEPTION << "InferenceEngine was built without nGraph support!";
#endif
IE_SUPPRESS_DEPRECATED_END
}
void
@@ -214,7 +217,9 @@ InferenceEnginePython::IENetwork::load_from_buffer(const char *xml, size_t xml_s
IE_SUPPRESS_DEPRECATED_END
actual = std::make_shared<InferenceEngine::CNNNetwork>(net);
batch_size = actual->getBatchSize();
IE_SUPPRESS_DEPRECATED_START
precision = actual->getPrecision().name();
IE_SUPPRESS_DEPRECATED_END
}
void InferenceEnginePython::IENetwork::serialize(const std::string &path_to_xml, const std::string &path_to_bin) {
@@ -232,7 +237,6 @@ InferenceEnginePython::IENetwork::getLayers() {
}
PyObject* InferenceEnginePython::IENetwork::getFunction() {
#if defined(ENABLE_NGRAPH)
const char * py_capsule_name = "ngraph_function";
auto ngraph_func_ptr = actual->getFunction();
// create a shared pointer on the heap before putting it in the capsule
@@ -254,9 +258,6 @@ PyObject* InferenceEnginePython::IENetwork::getFunction() {
} else {
return nullptr;
}
#else
return nullptr;
#endif
}
const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getInputs() {
@@ -286,6 +287,10 @@ void InferenceEnginePython::IENetwork::setBatch(const size_t size) {
actual->setBatchSize(size);
}
size_t InferenceEnginePython::IENetwork::getBatch() {
return actual->getBatchSize();
}
void InferenceEnginePython::IENetwork::reshape(const std::map <std::string, std::vector<size_t>> &input_shapes) {
actual->reshape(input_shapes);
}
@@ -293,34 +298,42 @@ void InferenceEnginePython::IENetwork::reshape(const std::map <std::string, std:
const std::map <std::string, std::map<std::string, std::vector < float>>>
InferenceEnginePython::IENetwork::getStats() {
IE_SUPPRESS_DEPRECATED_START
std::map < std::string, std::map < std::string, std::vector < float >> > map;
InferenceEngine::ICNNNetworkStats *pstats = nullptr;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) *actual).getStats(&pstats, &response));
auto statsMap = pstats->getNodesStats();
std::map < std::string, std::map < std::string, std::vector < float >> > map;
for (const auto &it : statsMap) {
std::map <std::string, std::vector<float>> stats;
stats.emplace("min", it.second->_minOutputs);
stats.emplace("max", it.second->_maxOutputs);
map.emplace(it.first, stats);
auto retCode = ((InferenceEngine::ICNNNetwork &) *actual).getStats(&pstats, &response);
if (retCode == InferenceEngine::OK) {
auto statsMap = pstats->getNodesStats();
for (const auto &it : statsMap) {
std::map <std::string, std::vector<float>> stats;
stats.emplace("min", it.second->_minOutputs);
stats.emplace("max", it.second->_maxOutputs);
map.emplace(it.first, stats);
}
}
return map;
IE_SUPPRESS_DEPRECATED_END
}
void InferenceEnginePython::IENetwork::setStats(const std::map<std::string, std::map<std::string,
std::vector<float>>> &stats) {
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::ICNNNetworkStats *pstats = nullptr;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) *actual).getStats(&pstats, &response));
std::map<std::string, InferenceEngine::NetworkNodeStatsPtr> newNetNodesStats;
for (const auto &it : stats) {
InferenceEngine::NetworkNodeStatsPtr nodeStats = InferenceEngine::NetworkNodeStatsPtr(
new InferenceEngine::NetworkNodeStats());
newNetNodesStats.emplace(it.first, nodeStats);
nodeStats->_minOutputs = it.second.at("min");
nodeStats->_maxOutputs = it.second.at("max");
auto retCode = ((InferenceEngine::ICNNNetwork &) *actual).getStats(&pstats, &response);
if (retCode == InferenceEngine::OK) {
std::map<std::string, InferenceEngine::NetworkNodeStatsPtr> newNetNodesStats;
for (const auto &it : stats) {
InferenceEngine::NetworkNodeStatsPtr nodeStats = InferenceEngine::NetworkNodeStatsPtr(
new InferenceEngine::NetworkNodeStats());
newNetNodesStats.emplace(it.first, nodeStats);
nodeStats->_minOutputs = it.second.at("min");
nodeStats->_maxOutputs = it.second.at("max");
}
pstats->setNodesStats(newNetNodesStats);
}
pstats->setNodesStats(newNetNodesStats);
IE_SUPPRESS_DEPRECATED_END
}
@@ -350,7 +363,9 @@ void InferenceEnginePython::IEPlugin::setInitialAffinity(const InferenceEnginePy
THROW_IE_EXCEPTION << queryRes.resp.msg;
}
for (auto &&layer : queryRes.supportedLayersMap) {
IE_SUPPRESS_DEPRECATED_START
network->getLayerByName(layer.first.c_str())->affinity = layer.second;
IE_SUPPRESS_DEPRECATED_END
}
}
@@ -382,22 +397,12 @@ std::unique_ptr <InferenceEnginePython::IEExecNetwork>
InferenceEnginePython::IEPlugin::load(const InferenceEnginePython::IENetwork &net,
int num_requests,
const std::map <std::string, std::string> &config) {
InferenceEngine::ResponseDesc response;
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(net.name,
num_requests);
IE_SUPPRESS_DEPRECATED_START
exec_network->actual = actual.LoadNetwork(*net.actual, config);
IE_SUPPRESS_DEPRECATED_END
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(exec_network->actual);
exec_network->infer_requests.resize(num_requests);
}
for (size_t i = 0; i < num_requests; ++i) {
InferRequestWrap &infer_request = exec_network->infer_requests[i];
IE_CHECK_CALL(exec_network->actual->CreateInferRequest(infer_request.request_ptr, &response))
}
exec_network->createInferRequests(num_requests);
return exec_network;
}
@@ -410,6 +415,7 @@ void InferenceEnginePython::IEPlugin::setConfig(const std::map<std::string, std:
InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string &name, size_t num_requests) :
infer_requests(num_requests), name(name) {
request_queue_ptr = std::make_shared<IdleInferRequestQueue>();
}
void InferenceEnginePython::IEExecNetwork::infer() {
@@ -431,10 +437,10 @@ PyObject *InferenceEnginePython::IEExecNetwork::getMetric(const std::string &met
return parse_parameter(parameter);
}
PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &metric_name) {
PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &name) {
InferenceEngine::Parameter parameter;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->GetMetric(metric_name, parameter, &response));
IE_CHECK_CALL(actual->GetConfig(name, parameter, &response));
return parse_parameter(parameter);
}
@@ -487,6 +493,7 @@ void latency_callback(InferenceEngine::IInferRequest::Ptr request, InferenceEngi
auto end_time = Time::now();
auto execTime = std::chrono::duration_cast<ns>(end_time - requestWrap->start_time);
requestWrap->exec_time = static_cast<double>(execTime.count()) * 0.000001;
requestWrap->request_queue_ptr->setRequestIdle(requestWrap->index);
if (requestWrap->user_callback) {
requestWrap->user_callback(requestWrap->user_data, code);
}
@@ -506,19 +513,20 @@ void InferenceEnginePython::InferRequestWrap::infer() {
exec_time = static_cast<double>(execTime.count()) * 0.000001;
}
void InferenceEnginePython::InferRequestWrap::infer_async() {
InferenceEngine::ResponseDesc response;
request_queue_ptr->setRequestBusy(index);
start_time = Time::now();
IE_CHECK_CALL(request_ptr->SetUserData(this, &response));
request_ptr->SetCompletionCallback(latency_callback);
IE_CHECK_CALL(request_ptr->StartAsync(&response));
}
int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) {
InferenceEngine::ResponseDesc responseDesc;
InferenceEngine::StatusCode code = request_ptr->Wait(timeout, &responseDesc);
return static_cast<int >(code);
if (code != InferenceEngine::RESULT_NOT_READY) {
request_queue_ptr->setRequestIdle(index);
}
return static_cast<int>(code);
}
std::map <std::string, InferenceEnginePython::ProfileInfo>
@@ -571,25 +579,80 @@ InferenceEnginePython::IECore::getVersions(const std::string &deviceName) {
return actual.GetVersions(deviceName);
}
int InferenceEnginePython::IEExecNetwork::wait(int num_requests, int64_t timeout) {
return request_queue_ptr->wait(num_requests, timeout);
}
int InferenceEnginePython::IEExecNetwork::getIdleRequestId() {
return request_queue_ptr->getIdleRequestId();
}
int InferenceEnginePython::IdleInferRequestQueue::wait(int num_requests, int64_t timeout) {
std::unique_lock<std::mutex> lock(mutex);
if (timeout > 0) {
if (!cv.wait_for(lock, std::chrono::milliseconds(timeout), [this, num_requests](){return idle_ids.size() >= num_requests;}))
return static_cast<int>(InferenceEngine::StatusCode::RESULT_NOT_READY);
} else
cv.wait(lock, [this, num_requests](){return idle_ids.size() >= num_requests;});
return static_cast<int>(InferenceEngine::StatusCode::OK);
}
void InferenceEnginePython::IdleInferRequestQueue::setRequestIdle(int index) {
std::unique_lock<std::mutex> lock(mutex);
idle_ids.emplace_back(index);
cv.notify_all();
}
void InferenceEnginePython::IdleInferRequestQueue::setRequestBusy(int index) {
std::lock_guard<std::mutex> lock(mutex);
idle_ids.remove(index);
}
int InferenceEnginePython::IdleInferRequestQueue::getIdleRequestId() {
std::lock_guard<std::mutex> lock(mutex);
return idle_ids.size() ? idle_ids.front() : -1;
}
void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests) {
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(actual);
}
infer_requests.resize(num_requests);
InferenceEngine::ResponseDesc response;
for (size_t i = 0; i < num_requests; ++i) {
InferRequestWrap &infer_request = infer_requests[i];
infer_request.index = i;
request_queue_ptr->setRequestIdle(i);
infer_request.request_queue_ptr = request_queue_ptr;
IE_CHECK_CALL(actual->CreateInferRequest(infer_request.request_ptr, &response))
IE_CHECK_CALL(infer_request.request_ptr->SetUserData(&infer_request, &response));
infer_request.request_ptr->SetCompletionCallback(latency_callback);
}
}
InferenceEnginePython::IENetwork
InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, const std::string& binPath) {
InferenceEngine::CNNNetwork net = actual.ReadNetwork(modelPath, binPath);
return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
}
InferenceEnginePython::IENetwork
InferenceEnginePython::IECore::readNetwork(const std::string& model, uint8_t *bin, size_t bin_size) {
InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bin_size }, InferenceEngine::Layout::C);
auto weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, bin, bin_size);
InferenceEngine::CNNNetwork net = actual.ReadNetwork(model, weights_blob);
return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(net));
}
std::unique_ptr <InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetwork(IENetwork network,
const std::string &deviceName,
const std::map <std::string, std::string> &config,
int num_requests) {
InferenceEngine::ResponseDesc response;
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(network.name,
num_requests);
exec_network->actual = actual.LoadNetwork(*network.actual, deviceName, config);
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(exec_network->actual);
exec_network->infer_requests.resize(num_requests);
}
for (size_t i = 0; i < num_requests; ++i) {
InferRequestWrap &infer_request = exec_network->infer_requests[i];
IE_CHECK_CALL(exec_network->actual->CreateInferRequest(infer_request.request_ptr, &response))
}
exec_network->createInferRequests(num_requests);
return exec_network;
}
@@ -597,20 +660,10 @@ std::unique_ptr <InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IE
std::unique_ptr <InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::importNetwork(
const std::string &modelFIle, const std::string &deviceName, const std::map <std::string, std::string> &config,
int num_requests) {
InferenceEngine::ResponseDesc response;
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(EXPORTED_NETWORK_NAME,
num_requests);
exec_network->actual = actual.ImportNetwork(modelFIle, deviceName, config);
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(exec_network->actual);
exec_network->infer_requests.resize(num_requests);
}
for (size_t i = 0; i < num_requests; ++i) {
InferRequestWrap &infer_request = exec_network->infer_requests[i];
IE_CHECK_CALL(exec_network->actual->CreateInferRequest(infer_request.request_ptr, &response))
}
exec_network->createInferRequests(num_requests);
return exec_network;


@@ -16,11 +16,12 @@
#include <algorithm>
#include <sstream>
#include <chrono>
#include <queue>
#include <condition_variable>
#include <mutex>
#include <ie_extension.h>
#include "inference_engine.hpp"
#include "../../../../../src/inference_engine/ie_ir_reader.hpp"
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;
@@ -45,6 +46,8 @@ struct IENetwork {
void setBatch(const size_t size);
size_t getBatch();
void addOutput(const std::string &out_layer, size_t port_id);
const std::vector <InferenceEngine::CNNLayerPtr> getLayers();
@@ -72,7 +75,25 @@ struct IENetwork {
IENetwork() = default;
};
struct IdleInferRequestQueue {
std::list<size_t> idle_ids;
std::mutex mutex;
std::condition_variable cv;
void setRequestIdle(int index);
void setRequestBusy(int index);
int wait(int num_requests, int64_t timeout);
int getIdleRequestId();
using Ptr = std::shared_ptr<IdleInferRequestQueue>;
};
struct InferRequestWrap {
int index;
using cy_callback = void (*)(void*, int);
InferenceEngine::IInferRequest::Ptr request_ptr;
@@ -80,7 +101,7 @@ struct InferRequestWrap {
double exec_time;
cy_callback user_callback;
void *user_data;
int status;
IdleInferRequestQueue::Ptr request_queue_ptr;
void infer();
@@ -102,6 +123,7 @@ struct IEExecNetwork {
InferenceEngine::IExecutableNetwork::Ptr actual;
std::vector<InferRequestWrap> infer_requests;
std::string name;
IdleInferRequestQueue::Ptr request_queue_ptr;
IEExecNetwork(const std::string &name, size_t num_requests);
@@ -114,7 +136,12 @@ struct IEExecNetwork {
std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
PyObject* getMetric(const std::string & metric_name);
PyObject* getConfig(const std::string & metric_name);
PyObject* getConfig(const std::string & name);
int wait(int num_requests, int64_t timeout);
int getIdleRequestId();
void createInferRequests(int num_requests);
};
@@ -147,6 +174,8 @@ struct IECore {
InferenceEngine::Core actual;
explicit IECore(const std::string & xmlConfigFile = std::string());
std::map<std::string, InferenceEngine::Version> getVersions(const std::string & deviceName);
InferenceEnginePython::IENetwork readNetwork(const std::string& modelPath, const std::string& binPath);
InferenceEnginePython::IENetwork readNetwork(const std::string& model, uint8_t *bin, size_t bin_size);
std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetwork(IENetwork network, const std::string & deviceName,
const std::map<std::string, std::string> & config, int num_requests);
std::unique_ptr<InferenceEnginePython::IEExecNetwork> importNetwork(const std::string & modelFIle, const std::string & deviceName,


@@ -117,6 +117,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
void exportNetwork(const string & model_file) except +
object getMetric(const string & metric_name)
object getConfig(const string & metric_name)
int wait(int num_requests, int64_t timeout)
int getIdleRequestId()
cdef cppclass IENetwork:
IENetwork() except +
@@ -132,6 +134,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
void addOutput(string &, size_t) except +
void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except +
void setBatch(size_t size) except +
size_t getBatch() except +
void setLayerParams(map[string, map[string, string]] params_map) except +
void serialize(const string& path_to_xml, const string& path_to_bin) except +
void reshape(map[string, vector[size_t]] input_shapes) except +
@@ -153,6 +156,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
cdef cppclass InferRequestWrap:
double exec_time;
int index;
void getBlobPtr(const string & blob_name, Blob.Ptr & blob_ptr) except +
map[string, ProfileInfo] getPerformanceCounts() except +
void infer() except +
@@ -165,6 +169,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
IECore() except +
IECore(const string & xml_config_file) except +
map[string, Version] getVersions(const string & deviceName) except +
IENetwork readNetwork(const string& modelPath, const string& binPath) except +
IENetwork readNetwork(const string& modelPath, uint8_t* bin, size_t bin_size) except +
unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, const string deviceName,
const map[string, string] & config, int num_requests) except +
unique_ptr[IEExecNetwork] importNetwork(const string & modelFIle, const string & deviceName,


@@ -0,0 +1,6 @@
opencv-python
numpy
cython>=0.29
pytest==4.0.1
attrs==19.1.0
pytest-html==1.19.0

Some files were not shown because too many files have changed in this diff.