Publishing 2020.1 content

This commit is contained in:
Alexey Suhov 2020-02-11 22:48:49 +03:00
parent 949b74059f
commit b2140c083a
3865 changed files with 323006 additions and 170518 deletions

19
.clang-format Normal file

@ -0,0 +1,19 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never
---
Language: Cpp
Standard: Cpp11
AccessModifierOffset: -4
AllowAllArgumentsOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortLambdasOnASingleLine: Empty
AlwaysBreakBeforeMultilineStrings: false
ColumnLimit: 120
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
SpaceBeforeCpp11BracedList: true
SpaceBeforeCtorInitializerColon: false
---

4
.gitmodules vendored

@ -2,7 +2,7 @@
path = inference-engine/thirdparty/ade
url = https://github.com/opencv/ade.git
ignore = dirty
[submodule "inference-engine/thirdparty/ngraph"]
path = inference-engine/thirdparty/ngraph
[submodule "ngraph"]
path = ngraph
url = https://github.com/NervanaSystems/ngraph.git
ignore = dirty

135
CMakeLists.txt Normal file

@ -0,0 +1,135 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
cmake_policy(SET CMP0054 NEW)
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
endif()
project(OpenVINO)
set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
include(CTest)
include(features)
# include developer package
include(developer_package)
# These options are shared with 3rdparty plugins
# by means of developer package
include(check_features)
# resolving dependencies for the project
message (STATUS "PROJECT ............................... " ${PROJECT_NAME})
message (STATUS "CMAKE_BINARY_DIR ...................... " ${CMAKE_BINARY_DIR})
message (STATUS "OpenVINO_MAIN_SOURCE_DIR .............. " ${OpenVINO_MAIN_SOURCE_DIR})
if (ENABLE_INFERENCE_ENGINE)
set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine)
message (STATUS "IE_MAIN_SOURCE_DIR .............. " ${IE_MAIN_SOURCE_DIR})
endif()
message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})
message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID})
message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
# remove file with exported developer targets to force its regeneration
file(REMOVE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
file(REMOVE "${CMAKE_BINARY_DIR}/targets.cmake")
function(build_ngraph)
if(NOT ENABLE_NGRAPH)
return()
endif()
function(ngraph_set option value)
if(NOT DEFINED ${option})
set(${option} ${value} CACHE BOOL "" FORCE)
endif()
endfunction()
add_definitions(-DENABLE_NGRAPH)
set(NGRAPH_BUILD_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} CACHE STRING "" FORCE)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${OpenVINO_MAIN_SOURCE_DIR}/ngraph/cmake/Modules/")
ngraph_set(NGRAPH_ADDRESS_SANITIZER FALSE)
ngraph_set(NGRAPH_TOOLS_ENABLE FALSE)
ngraph_set(NGRAPH_CPU_ENABLE FALSE)
ngraph_set(NGRAPH_MLIR_ENABLE FALSE)
ngraph_set(NGRAPH_INTELGPU_ENABLE FALSE)
ngraph_set(NGRAPH_GPU_ENABLE FALSE)
ngraph_set(NGRAPH_INTERPRETER_ENABLE FALSE)
ngraph_set(NGRAPH_NOP_ENABLE FALSE)
ngraph_set(NGRAPH_GPUH_ENABLE FALSE)
ngraph_set(NGRAPH_GENERIC_CPU_ENABLE FALSE)
ngraph_set(NGRAPH_DEBUG_ENABLE FALSE)
ngraph_set(NGRAPH_DEPRECATED_ENABLE FALSE)
ngraph_set(NGRAPH_DEX_ONLY FALSE)
ngraph_set(NGRAPH_ENABLE_CPU_CONV_AUTO FALSE)
ngraph_set(NGRAPH_CODE_COVERAGE_ENABLE FALSE)
ngraph_set(NGRAPH_LIB_VERSIONING_ENABLE FALSE)
if (ENABLE_PYTHON AND NOT WIN32)
ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE TRUE)
else()
ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE FALSE)
endif()
ngraph_set(NGRAPH_PLAIDML_ENABLE FALSE)
ngraph_set(NGRAPH_DISTRIBUTED_ENABLE FALSE)
ngraph_set(NGRAPH_FAST_MATH_ENABLE FALSE)
ngraph_set(NGRAPH_JSON_ENABLE FALSE)
ngraph_set(NGRAPH_STATIC_LIB_ENABLE FALSE)
ngraph_set(NGRAPH_INTERPRETER_STATIC_LIB_ENABLE FALSE)
ngraph_set(NGRAPH_CPU_STATIC_LIB_ENABLE FALSE)
ngraph_set(NGRAPH_DYNAMIC_COMPONENTS_ENABLE FALSE)
ngraph_set(NGRAPH_NATIVE_ARCH_ENABLE FALSE)
if (NOT ANDROID)
ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE TRUE)
else()
ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE)
ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE FALSE)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
ie_add_compiler_flags(-Wno-error=uninitialized -Wno-error=literal-conversion)
elseif(UNIX)
ie_add_compiler_flags(-Wno-error=maybe-uninitialized -Wno-error=return-type -fPIC)
endif()
if(ANDROID)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=defaulted-function-deleted -Wno-error=unused-command-line-argument")
endif()
# WA for GCC 7.0
if (UNIX)
ie_add_compiler_flags(-Wno-error=return-type -Wno-undef)
elseif(WIN32)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4308 /wd4146")
endif()
if(UNIX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
endif()
if(ENABLE_LTO)
ie_enable_lto()
endif()
ie_cpack_add_component(ngraph)
add_subdirectory(ngraph)
endfunction()
build_ngraph()
if (ENABLE_INFERENCE_ENGINE)
add_subdirectory(inference-engine)
endif()
# cpack
ie_cpack(${IE_CPACK_COMPONENTS_ALL})


@ -1,42 +1,58 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2019.R3-green.svg)](https://github.com/opencv/dldt/releases/tag/2019_R3)
[![Stable release](https://img.shields.io/badge/version-2020.1-green.svg)](https://github.com/opencv/dldt/releases/tag/2020.1)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
This toolkit allows developers to deploy pre-trained deep learning models through a high-level C++ Inference Engine API integrated with application logic.
This toolkit allows developers to deploy pre-trained deep learning models
through a high-level C++ Inference Engine API integrated with application logic.
This open source version includes two components, namely Model Optimizer and Inference Engine, as well as CPU, GPU and heterogeneous plugins to accelerate deep learning inferencing on Intel(R) CPUs and Intel(R) Processor Graphics. It supports pre-trained models from the [Open Model Zoo](https://github.com/opencv/open_model_zoo/) along with 100+ open source and public models in popular formats such as Caffe*, Tensorflow*, MXNet* and ONNX*.
This open source version includes two components, namely the [Model Optimizer] and the
[Inference Engine], as well as CPU, GPU and heterogeneous plugins to accelerate
deep learning inferencing on Intel® CPUs and Intel® Processor Graphics.
It supports pre-trained models from the [Open Model Zoo], along with 100+ open
source and public models in popular formats such as Caffe\*, TensorFlow\*,
MXNet\* and ONNX\*.
## Repository components:
* [Inference Engine](https://software.intel.com/en-us/articles/OpenVINO-InferEngine)
* [Model Optimizer](https://software.intel.com/en-us/articles/OpenVINO-ModelOptimizer)
* [Inference Engine]
* [Model Optimizer]
## License
Deep Learning Deployment Toolkit is licensed under [Apache License Version 2.0](LICENSE).
By contributing to the project, you agree to the license and copyright terms therein
and release your contribution under these terms.
## Documentation
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
* [Inference Engine build instructions](inference-engine/README.md)
* [Get Started with Deep Learning Deployment Toolkit on Linux*](get-started-linux.md)
* [OpenVINO™ Inference Engine Build Instructions](build-instruction.md)
* [Get Started with Deep Learning Deployment Toolkit on Linux](get-started-linux.md)\*
* [Introduction to Deep Learning Deployment Toolkit](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Introduction.html)
* [Inference Engine Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Deep_Learning_Inference_Engine_DevGuide.html)
* [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
## How to Contribute
We welcome community contributions to the Deep Learning Deployment Toolkit repository. If you have an idea how to improve the product, please share it with us doing the following steps:
We welcome community contributions to the Deep Learning Deployment Toolkit
repository. If you have an idea of how to improve the product, please share it
with us by taking the following steps:
* Make sure you can build the product and run all tests and samples with your patch
* In case of a larger feature, provide a relevant unit tests and sample
* In case of a larger feature, provide relevant unit tests and one or more samples
* Submit a pull request at https://github.com/opencv/dldt/pulls
We will review your contribution and, if any additional fixes or modifications are necessary, may give some feedback to guide you. When accepted, your pull request will be merged into GitHub* repositories.
Deep Learning Deployment Toolkit is licensed under Apache License, Version 2.0. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.
We will review your contribution and, if any additional fixes or modifications
are necessary, may give some feedback to guide you. Your pull request will be
merged into GitHub* repositories if accepted.
## Support
Please report questions, issues and suggestions using:
* [\#openvino](https://stackoverflow.com/search?q=%23openvino) tag on StackOverflow*
* The `openvino` [tag on StackOverflow]\*
* [GitHub* Issues](https://github.com/opencv/dldt/issues)
* [Forum](https://software.intel.com/en-us/forums/computer-vision)
---
\* Other names and brands may be claimed as the property of others.
\* Other names and brands may be claimed as the property of others.
[Open Model Zoo]:https://github.com/opencv/open_model_zoo
[Inference Engine]:https://software.intel.com/en-us/articles/OpenVINO-InferEngine
[Model Optimizer]:https://software.intel.com/en-us/articles/OpenVINO-ModelOptimizer
[tag on StackOverflow]:https://stackoverflow.com/search?q=%23openvino

724
build-instruction.md Normal file

@ -0,0 +1,724 @@
# Build OpenVINO™ Inference Engine
## Contents
- [Introduction](#introduction)
- [Build on Linux\* Systems](#build-on-linux-systems)
- [Software Requirements](#software-requirements)
- [Build Steps](#build-steps)
- [Additional Build Options](#additional-build-options)
- [Build for Raspbian* Stretch OS](#build-for-raspbian-stretch-os)
- [Hardware Requirements](#hardware-requirements)
- [Native Compilation](#native-compilation)
- [Cross Compilation Using Docker\*](#cross-compilation-using-docker)
- [Additional Build Options](#additional-build-options-1)
- [Build on Windows* Systems](#build-on-windows-systems)
- [Software Requirements](#software-requirements-1)
- [Build Steps](#build-steps-1)
- [Additional Build Options](#additional-build-options-2)
- [Building Inference Engine with Ninja* Build System](#building-inference-engine-with-ninja-build-system)
- [Build on macOS\* Systems](#build-on-macos-systems)
- [Software Requirements](#software-requirements-2)
- [Build Steps](#build-steps-2)
- [Additional Build Options](#additional-build-options-3)
- [Build on Android\* Systems](#build-on-android-systems)
- [Software Requirements](#software-requirements-3)
- [Build Steps](#build-steps-3)
- [Use Custom OpenCV Builds for Inference Engine](#use-custom-opencv-builds-for-inference-engine)
- [Add Inference Engine to Your Project](#add-inference-engine-to-your-project)
- [(Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2](#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2)
- [For Linux, Raspbian Stretch* OS](#for-linux-raspbian-stretch-os)
- [For Windows](#for-windows-1)
- [Next Steps](#next-steps)
- [Additional Resources](#additional-resources)
## Introduction
The Inference Engine can infer models in different formats with various input
and output formats.
The open source version of Inference Engine includes the following plugins:
| PLUGIN | DEVICE TYPES |
| ---------------------| -------------|
| CPU plugin | Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE |
| GPU plugin | Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics |
| GNA plugin | Intel® Speech Enabling Developer Kit, Amazon Alexa\* Premium Far-Field Developer Kit, Intel® Pentium® Silver processor J5005, Intel® Celeron® processor J4005, Intel® Core™ i3-8121U processor |
| MYRIAD plugin | Intel® Movidius™ Neural Compute Stick powered by the Intel® Movidius™ Myriad™ 2, Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X |
| Heterogeneous plugin | The heterogeneous plugin enables distributing inference of a single network across several Intel® devices. |
The Inference Engine plugin for Intel® FPGA is distributed only in binary form,
as part of the [Intel® Distribution of OpenVINO™].
## Build on Linux\* Systems
The software was validated on:
- Ubuntu\* 16.04 (64-bit) with default GCC\* 5.4.0
- CentOS\* 7.4 (64-bit) with default GCC\* 4.8.5
### Software Requirements
- [CMake]\* 3.5 or higher
- GCC\* 4.8 or higher to build the Inference Engine
- Python 2.7 or higher for Inference Engine Python API wrapper
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441].
### Build Steps
1. Clone submodules:
```sh
cd dldt
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
project root folder.
```sh
chmod +x install_dependencies.sh
```
```sh
./install_dependencies.sh
```
3. By default, the build enables the Inference Engine GPU plugin to infer models
on your Intel® Processor Graphics. This requires you to
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]
before running the build. If you don't want to use the GPU plugin, use the
`-DENABLE_CLDNN=OFF` CMake build option and skip the installation of the
Intel® Graphics Compute Runtime for OpenCL™ Driver.
4. Create a build folder:
```sh
mkdir build && cd build
```
5. Inference Engine uses a CMake-based build system. In the created `build`
directory, run `cmake` to fetch project dependencies and create Unix
makefiles, then run `make` to build the project:
```sh
cmake -DCMAKE_BUILD_TYPE=Release ..
make --jobs=$(nproc --all)
```
### Additional Build Options
You can use the following additional build options; a combined example is shown after the list:
- The default build uses an internal JIT GEMM implementation.
- To switch to an OpenBLAS\* implementation, use the `GEMM=OPENBLAS` option with
`BLAS_INCLUDE_DIRS` and `BLAS_LIBRARIES` CMake options to specify a path to the
OpenBLAS headers and library. For example, use the following options on CentOS\*:
`-DGEMM=OPENBLAS -DBLAS_INCLUDE_DIRS=/usr/include/openblas -DBLAS_LIBRARIES=/usr/lib64/libopenblas.so.0`.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL`
and `-DMKLROOT=<path_to_MKL>` CMake options to specify a path to unpacked
MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded
from the Intel® [MKL-DNN repository].
- Threading Building Blocks (TBB) is used by default. To build the Inference
Engine with OpenMP\* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by
the CMake-based script. If you want to use the automatically downloaded
packages but you already have installed TBB or OpenCV packages configured in
your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR`
environment variables before running the `cmake` command, otherwise they
will not be downloaded and the build may fail if incompatible versions were
installed.
- If the CMake-based build script can not find and download the OpenCV package
that is supported on your platform, or if you want to use a custom build of
the OpenCV library, refer to the
[Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine)
section for details.
- To build the Python API wrapper:
1. Install all additional packages listed in the
`/inference-engine/ie_bridges/python/requirements.txt` file:
```sh
pip install -r requirements.txt
```
2. Use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following
options:
```
-DPYTHON_EXECUTABLE=`which python3.7` \
-DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.7m.so \
-DPYTHON_INCLUDE_DIR=/usr/include/python3.7
```
- To switch the CPU and GPU plugins off/on, use the `cmake` options
`-DENABLE_MKL_DNN=ON/OFF` and `-DENABLE_CLDNN=ON/OFF` respectively.
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
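As an illustration only, several of the options above can be combined in a single
configuration step. The flag set below is just one possible combination; adjust it
to your environment:
```sh
# Example only: Release build with the GPU plugin disabled, OpenMP threading,
# the Python API wrapper, and the nGraph ONNX importer enabled
cmake -DCMAKE_BUILD_TYPE=Release \
      -DENABLE_CLDNN=OFF \
      -DTHREADING=OMP \
      -DENABLE_PYTHON=ON \
      -DNGRAPH_ONNX_IMPORT_ENABLE=ON ..
make --jobs=$(nproc --all)
```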
## Build for Raspbian Stretch* OS
> **NOTE**: Only the MYRIAD plugin is supported.
### Hardware Requirements
* Raspberry Pi\* 2 or 3 with Raspbian\* Stretch OS (32-bit). Check that its CPU supports the ARMv7 instruction set (the `uname -m` command returns `armv7l`).
> **NOTE**: Although the Raspberry Pi\* CPU is ARMv8, the 32-bit OS detects the ARMv7 CPU instruction set. The default `gcc` compiler applies the ARMv6 architecture flag for compatibility with lower versions of boards. For more information, run the `gcc -Q --help=target` command and refer to the description of the `-march=` option.
You can compile the Inference Engine for Raspberry Pi\* in one of two ways:
* [Native Compilation](#native-compilation), which is the simplest way, but time-consuming
* [Cross Compilation Using Docker*](#cross-compilation-using-docker), which is the recommended way
### Native Compilation
Native compilation of the Inference Engine is the most straightforward solution. However, it might take at least one hour to complete on Raspberry Pi\* 3.
1. Install dependencies:
```bash
sudo apt-get update
sudo apt-get install -y git cmake libusb-1.0-0-dev
```
2. Go to the cloned `dldt` repository:
```bash
cd dldt
```
3. Initialize submodules:
```bash
git submodule update --init --recursive
```
4. Create a build folder:
```bash
mkdir build && cd build
```
5. Build the Inference Engine:
```bash
cmake -DCMAKE_BUILD_TYPE=Release \
-DENABLE_SSE42=OFF \
-DTHREADING=SEQ \
-DENABLE_GNA=OFF .. && make
```
### Cross Compilation Using Docker*
This compilation was tested on the following configuration:
* Host: Ubuntu\* 16.04 (64-bit, Intel® Core™ i7-6700K CPU @ 4.00GHz × 8)
* Target: Raspbian\* Stretch (32-bit, ARMv7, Raspberry Pi\* 3)
1. Install Docker\*:
```bash
sudo apt-get install -y docker.io
```
2. Add the current user to the `docker` group:
```bash
sudo usermod -a -G docker $USER
```
Log out and log in for this to take effect.
3. Create a directory named `ie_cross_armhf` and add a text file named `Dockerfile`
with the following content:
```docker
FROM debian:stretch
USER root
RUN dpkg --add-architecture armhf && \
apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
crossbuild-essential-armhf \
git \
cmake \
wget \
libusb-1.0-0-dev:armhf \
libgtk-3-dev:armhf \
libavcodec-dev:armhf \
libavformat-dev:armhf \
libswscale-dev:armhf \
libgstreamer1.0-dev:armhf \
libgstreamer-plugins-base1.0-dev:armhf \
libpython3-dev:armhf \
python3-pip
```
It uses the Debian\* Stretch (Debian 9) OS for compilation because it is the base of Raspbian\* Stretch.
4. Build a Docker\* image:
```bash
docker image build -t ie_cross_armhf ie_cross_armhf
```
5. Run the Docker\* container with the source code folder mounted from the host:
```bash
docker run -it -v /absolute/path/to/dldt:/dldt ie_cross_armhf /bin/bash
```
6. While in the container:
1. Go to the cloned `dldt` repository:
```bash
cd dldt
```
2. Create a build folder:
```bash
mkdir build && cd build
```
3. Build the Inference Engine:
```bash
cmake -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE="../cmake/arm.toolchain.cmake" \
-DTHREADS_PTHREAD_ARG="-pthread" \
-DENABLE_SSE42=OFF \
-DTHREADING=SEQ \
-DENABLE_GNA=OFF .. && make --jobs=$(nproc --all)
```
7. Press **Ctrl+D** to exit from Docker. You can find the resulting binaries
in the `dldt/bin/armv7l/` directory and the OpenCV\*
installation in the `dldt/inference-engine/temp` directory.
>**NOTE**: Native applications that link to the cross-compiled Inference Engine
library require the extra compilation flag `-march=armv7-a`.
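For instance, building a small native application on the Raspberry Pi against the
cross-compiled library might look like the following sketch; the include and library
paths are placeholders for your actual tree layout:
```sh
# Hypothetical example, run on the Raspberry Pi itself; adjust the paths to your tree
g++ main.cpp -march=armv7-a \
    -I/path/to/dldt/inference-engine/include \
    -L/path/to/dldt/bin/armv7l/Release/lib \
    -linference_engine -o my_app
```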
### Additional Build Options
You can use the following additional build options; a combined example is shown after the list:
- Required versions of OpenCV packages are downloaded automatically by the
CMake-based script. If you want to use the automatically downloaded packages
but you already have installed OpenCV packages configured in your environment,
you may need to clean the `OpenCV_DIR` environment variable before running
the `cmake` command; otherwise they won't be downloaded and the build may
fail if incompatible versions were installed.
- If the CMake-based build script cannot find and download the OpenCV package
that is supported on your platform, or if you want to use a custom build of
the OpenCV library, see: [Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine)
for details.
- To build the Python API wrapper, install the `libpython3-dev:armhf` and `python3-pip`
packages using `apt-get`, then install the `numpy` and `cython` Python modules
via `pip3`, and add the following options:
```sh
-DENABLE_PYTHON=ON \
-DPYTHON_EXECUTABLE=/usr/bin/python3.5 \
-DPYTHON_LIBRARY=/usr/lib/arm-linux-gnueabihf/libpython3.5m.so \
-DPYTHON_INCLUDE_DIR=/usr/include/python3.5
```
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
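Put together, a cross-compilation command that also enables the Python API wrapper
might look like the sketch below; it simply combines the flags already shown in this
section and is run inside the `ie_cross_armhf` container:
```sh
cmake -DCMAKE_BUILD_TYPE=Release \
      -DCMAKE_TOOLCHAIN_FILE="../cmake/arm.toolchain.cmake" \
      -DTHREADS_PTHREAD_ARG="-pthread" \
      -DENABLE_SSE42=OFF \
      -DTHREADING=SEQ \
      -DENABLE_GNA=OFF \
      -DENABLE_PYTHON=ON \
      -DPYTHON_EXECUTABLE=/usr/bin/python3.5 \
      -DPYTHON_LIBRARY=/usr/lib/arm-linux-gnueabihf/libpython3.5m.so \
      -DPYTHON_INCLUDE_DIR=/usr/include/python3.5 ..
make --jobs=$(nproc --all)
```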
## Build on Windows* Systems
The software was validated on:
- Microsoft\* Windows\* 10 (64-bit) with Visual Studio 2017 and Intel® C++
Compiler 2018 Update 3
### Software Requirements
- [CMake]\* 3.5 or higher
- Microsoft\* Visual Studio 2017, 2019 or [Intel® C++ Compiler] 18.0
- (Optional) Intel® Graphics Driver for Windows* (26.20) [driver package].
- Python 3.4 or higher for Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
```sh
git submodule update --init --recursive
```
2. By default, the build enables the Inference Engine GPU plugin to infer models
on your Intel® Processor Graphics. This requires you to download and install
the Intel® Graphics Driver for Windows (26.20) [driver package] before
running the build. If you don't want to use the GPU plugin, use the
`-DENABLE_CLDNN=OFF` CMake build option and skip the installation of the
Intel® Graphics Driver.
3. Create a build directory:
```sh
mkdir build
```
4. In the `build` directory, run `cmake` to fetch project dependencies and
generate a Visual Studio solution.
For Microsoft\* Visual Studio 2017:
```sh
cmake -G "Visual Studio 15 2017 Win64" -DCMAKE_BUILD_TYPE=Release ..
```
For Microsoft\* Visual Studio 2019:
```sh
cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE=Release ..
```
For Intel® C++ Compiler 18:
```sh
cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
-DCMAKE_BUILD_TYPE=Release ^
-DICCLIB="C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\compiler\lib" ..
```
5. Build the generated solution in Visual Studio or run
`cmake --build . --config Release` to build from the command line.
6. Before running the samples, add the paths to the TBB and OpenCV binaries used for
the build to the `%PATH%` environment variable. By default, TBB binaries are
downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/lib`
folder and OpenCV binaries to the `<dldt_repo>/inference-engine/temp/opencv_4.2.0/bin`
folder; see the example below.
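This is an illustration only; replace `<dldt_repo>` with the path to your cloned
repository and adjust the folder names to your build configuration:
```sh
:: Example only: add the default TBB and OpenCV locations to PATH for this session
set PATH=<dldt_repo>\inference-engine\temp\tbb\lib;%PATH%
set PATH=<dldt_repo>\inference-engine\temp\opencv_4.2.0\bin;%PATH%
```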
### Additional Build Options
- Internal JIT GEMM implementation is used by default.
- To switch to OpenBLAS GEMM implementation, use the `-DGEMM=OPENBLAS` CMake
option and specify path to OpenBLAS using the `-DBLAS_INCLUDE_DIRS=<OPENBLAS_DIR>\include`
and `-DBLAS_LIBRARIES=<OPENBLAS_DIR>\lib\libopenblas.dll.a` options. Download
a prebuilt OpenBLAS\* package via the [OpenBLAS] link. The mingw64\* runtime
dependencies can be downloaded via the [mingw64\* runtime dependencies] link.
- To switch to the optimized MKL-ML\* GEMM implementation, use the
`-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` CMake options to specify a path to
unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be
downloaded from the Intel® [MKL-DNN repository for Windows].
- Threading Building Blocks (TBB) is used by default. To build the Inference
Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by
the CMake-based script. If you want to use the automatically-downloaded
packages but you already have installed TBB or OpenCV packages configured in
your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR`
environment variables before running the `cmake` command; otherwise they won't
be downloaded and the build may fail if incompatible versions were installed.
- If the CMake-based build script can not find and download the OpenCV package
that is supported on your platform, or if you want to use a custom build of
the OpenCV library, refer to the [Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine)
section for details.
- To switch off/on the CPU and GPU plugins, use the `cmake` options
`-DENABLE_MKL_DNN=ON/OFF` and `-DENABLE_CLDNN=ON/OFF` respectively.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To
specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" ^
-DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" ^
-DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include"
```
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
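As a sketch only, several of the options above can be combined in a single generation
step; the Python paths below are the same assumptions used earlier in this section:
```sh
:: Example only: Visual Studio 2017 solution with OpenMP threading and the Python wrapper
cmake -G "Visual Studio 15 2017 Win64" -DCMAKE_BUILD_TYPE=Release ^
      -DTHREADING=OMP ^
      -DENABLE_PYTHON=ON ^
      -DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" ^
      -DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" ^
      -DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include" ..
```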
### Building Inference Engine with Ninja* Build System
```sh
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
```
## Build on macOS* Systems
> **NOTE**: The current version of the OpenVINO™ toolkit for macOS* supports
inference on Intel CPUs only.
The software was validated on:
- macOS\* 10.14, 64-bit
### Software Requirements
- [CMake]\* 3.9 or higher
- Clang\* compiler from Xcode\* 10.1 or higher
- Python\* 3.4 or higher for the Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
```sh
cd dldt
git submodule update --init --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the
project root folder:
```sh
chmod +x install_dependencies.sh
```
```sh
./install_dependencies.sh
```
3. Create a build folder:
```sh
mkdir build
```
4. Inference Engine uses a CMake-based build system. In the created `build`
directory, run `cmake` to fetch project dependencies and create Unix makefiles,
then run `make` to build the project:
```sh
cmake -DCMAKE_BUILD_TYPE=Release ..
make --jobs=$(nproc --all)
```
### Additional Build Options
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to the optimized MKL-ML\* GEMM implementation, use the `-DGEMM=MKL` and
`-DMKLROOT=<path_to_MKL>` CMake options to specify a path to unpacked MKL-ML
with the `include` and `lib` folders. The MKL-ML\* package for macOS can be
downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_mac_2019.0.5.20190502.tgz).
- Threading Building Blocks (TBB) is used by default. To build the Inference
Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by
the CMake-based script. If you want to use the automatically downloaded
packages but you already have installed TBB or OpenCV packages configured in
your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR`
environment variables before running the `cmake` command, otherwise they won't
be downloaded and the build may fail if incompatible versions were installed.
- If the CMake-based build script can not find and download the OpenCV package
that is supported on your platform, or if you want to use a custom build of
the OpenCV library, refer to the
[Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine)
section for details.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To
specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE=/Library/Frameworks/Python.framework/Versions/3.7/bin/python3.7 \
-DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib \
-DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m
```
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
## Build on Android* Systems
This section describes how to build the Inference Engine for Android x86 (64-bit) operating systems.
### Software Requirements
- [CMake]\* 3.5 or higher
- Android NDK (this guide has been validated with r20 release)
- OpenCV for Android
### Build Steps
1. Download and unpack Android NDK: https://developer.android.com/ndk/downloads. Let's assume that `~/Downloads` is used as a working folder.
```sh
cd ~/Downloads
wget https://dl.google.com/android/repository/android-ndk-r20-linux-x86_64.zip
unzip android-ndk-r20-linux-x86_64.zip
mv android-ndk-r20 android-ndk
```
2. Download and unpack OpenCV
```sh
cd ~/Downloads
wget https://github.com/opencv/opencv/releases/download/4.2.0/opencv-4.2.0-android-sdk.zip
unzip opencv-4.2.0-android-sdk.zip
```
3. Clone submodules
```sh
cd dldt
git submodule update --init --recursive
```
4. Create a build folder:
```sh
mkdir build
```
5. Change working directory to `build` and run `cmake` to create makefiles. Then run `make`.
```sh
cd build
cmake .. \
-DCMAKE_TOOLCHAIN_FILE=~/Downloads/android-ndk/build/cmake/android.toolchain.cmake \
-DANDROID_ABI=x86_64 \
-DANDROID_PLATFORM=21 \
-DANDROID_STL=c++_shared \
-DOpenCV_DIR=~/Downloads/OpenCV-android-sdk/sdk/native/jni/
make --jobs=$(nproc --all)
```
* `ANDROID_ABI` specifies the target architecture (`x86_64`)
* `ANDROID_PLATFORM` specifies the Android API version
* `ANDROID_STL` specifies that the shared C++ runtime is used. Copy `~/Downloads/android-ndk/sources/cxx-stl/llvm-libc++/libs/x86_64/libc++_shared.so` from the Android NDK along with the built binaries; see the copy example below
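This is an illustration only; `<dldt_binaries_dir>` is a placeholder for the folder
that holds your build output:
```sh
# Example only: copy the shared C++ runtime next to the built binaries
cp ~/Downloads/android-ndk/sources/cxx-stl/llvm-libc++/libs/x86_64/libc++_shared.so \
   <dldt_binaries_dir>/
```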
## Use Custom OpenCV Builds for Inference Engine
> **NOTE**: The recommended and tested version of OpenCV is 4.2. The minimum
supported version is 3.4.0.
Required versions of OpenCV packages are downloaded automatically while building
the Inference Engine library. If the build script cannot find and download
the OpenCV package that is supported on your platform, you can use one of the
following options:
* Download the most suitable version from the list of available prebuilt
packages at [https://download.01.org/opencv/2020/openvinotoolkit], in the
`<release_version>/inference_engine` directory.
* Use a system-provided OpenCV package (e.g., by running the
`apt install libopencv-dev` command). The following modules must be enabled:
`imgcodecs`, `videoio`, `highgui`.
* Get the OpenCV package using a package manager: pip, Conda, Conan, etc. The
package must include the development components (header files and CMake
scripts).
* Build OpenCV from source using the [build instructions](https://docs.opencv.org/master/df/d65/tutorial_table_of_content_introduction.html) on the OpenCV site.
After you get the custom OpenCV build, perform the following preparation steps
before running the Inference Engine build:
1. Set the `OpenCV_DIR` environment variable to the directory where the
`OpenCVConfig.cmake` file of your custom OpenCV build is located.
2. Disable automatic package downloading by passing the `-DENABLE_OPENCV=OFF`
option to the CMake-based build script for the Inference Engine; see the example below.
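For example, in a shell the two preparation steps might look like the following
sketch; the OpenCV path is a placeholder for your own build:
```sh
# Placeholder path: point OpenCV_DIR at the folder that contains OpenCVConfig.cmake
export OpenCV_DIR=/path/to/custom-opencv/build
cmake -DENABLE_OPENCV=OFF -DCMAKE_BUILD_TYPE=Release ..
```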
## Add Inference Engine to Your Project
For CMake projects, set the `InferenceEngine_DIR` environment variable:
```sh
export InferenceEngine_DIR=/path/to/dldt/build/
```
Then you can find the Inference Engine with `find_package`:
```cmake
find_package(InferenceEngine)
include_directories(${InferenceEngine_INCLUDE_DIRS})
target_link_libraries(${PROJECT_NAME} ${InferenceEngine_LIBRARIES} dl)
```
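For instance, a hypothetical out-of-source build of your own CMake project
(`my_project` and its layout are assumptions) could then be driven as follows:
```sh
# Example only: point InferenceEngine_DIR at your dldt build folder
export InferenceEngine_DIR=/path/to/dldt/build/
cd my_project && mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make
```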
## (Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2
> **NOTE**: These steps are only required if you want to perform inference on
Intel® Movidius™ Neural Compute Stick or the Intel® Neural Compute Stick 2 using
the Inference Engine MYRIAD Plugin. See also [Intel® Neural Compute Stick 2 Get Started].
### For Linux, Raspbian\* Stretch OS
1. Add the current Linux user to the `users` group; you will need to log out and
log in for it to take effect:
```sh
sudo usermod -a -G users "$(whoami)"
```
2. To perform inference on Intel® Movidius™ Neural Compute Stick and Intel®
Neural Compute Stick 2, install the USB rules as follows:
```sh
cat <<EOF > 97-myriad-usbboot.rules
SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
EOF
```
```sh
sudo cp 97-myriad-usbboot.rules /etc/udev/rules.d/
```
```sh
sudo udevadm control --reload-rules
```
```sh
sudo udevadm trigger
```
```sh
sudo ldconfig
```
```sh
rm 97-myriad-usbboot.rules
```
### For Windows
For Intel® Movidius™ Neural Compute Stick and Intel® Neural Compute Stick 2,
install the Movidius™ VSC driver:
1. Go to the `<DLDT_ROOT_DIR>/inference-engine/thirdparty/movidius/MovidiusDriver`
directory, where the `DLDT_ROOT_DIR` is the directory to which the DLDT
repository was cloned.
2. Right click on the `Movidius_VSC_Device.inf` file and choose **Install** from
the pop-up menu.
You have installed the driver for your Intel® Movidius™ Neural Compute Stick
or Intel® Neural Compute Stick 2.
## Next Steps
Congratulations, you have built the Inference Engine. To get started with the
OpenVINO™ toolkit, proceed to the Get Started guides:
* [Get Started with Deep Learning Deployment Toolkit on Linux*](../get-started-linux.md)
## Notice
To enable some additional nGraph features and use your custom nGraph library with the OpenVINO™ binary package,
make sure of the following:
- The nGraph library was built with the same version that is used in the Inference Engine.
- The nGraph library and the Inference Engine were built with the same compilers. Otherwise, you might face application binary interface (ABI) problems.
To prepare your custom nGraph library for distribution, which includes collecting all headers, copying
binaries, and so on, use the `install` CMake target.
This target collects all dependencies, prepares the nGraph package, and copies it to a separate directory.
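A minimal sketch of invoking that target from an nGraph build directory; the install
prefix is an arbitrary destination of your choice:
```sh
# Example only: the install prefix is a placeholder destination directory
cmake -DCMAKE_INSTALL_PREFIX=/path/to/ngraph_dist ..
make --jobs=$(nproc --all)
make install
```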
## Additional Resources
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
* [Introduction to Intel® Deep Learning Deployment Toolkit](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Introduction.html)
* [Inference Engine Samples Overview](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Samples_Overview.html)
* [Inference Engine Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Deep_Learning_Inference_Engine_DevGuide.html)
* [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
---
\* Other names and brands may be claimed as the property of others.
[Intel® Distribution of OpenVINO™]:https://software.intel.com/en-us/openvino-toolkit
[CMake]:https://cmake.org/download/
[Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.41.14441]:https://github.com/intel/compute-runtime/releases/tag/19.41.14441
[MKL-DNN repository]:https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_lnx_2019.0.5.20190502.tgz
[MKL-DNN repository for Windows]:https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_win_2019.0.5.20190502.zip
[OpenBLAS]:https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download
[mingw64\* runtime dependencies]:https://sourceforge.net/projects/openblas/files/v0.2.14/mingw64_dll.zip/download
[https://download.01.org/opencv/2020/openvinotoolkit]:https://download.01.org/opencv/2020/openvinotoolkit
[build instructions]:https://docs.opencv.org/master/df/d65/tutorial_table_of_content_introduction.html
[driver package]:https://downloadcenter.intel.com/download/29335/Intel-Graphics-Windows-10-DCH-Drivers
[Intel® Neural Compute Stick 2 Get Started]:https://software.intel.com/en-us/neural-compute-stick/get-started
[Intel® C++ Compiler]:https://software.intel.com/en-us/intel-parallel-studio-xe


@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#


@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#


@ -0,0 +1,55 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if (VERBOSE_BUILD)
set(CMAKE_VERBOSE_MAKEFILE ON)
endif()
# FIXME: there are compiler failures with LTO and Cross-Compile toolchains. Disabling for now, but
# this must be addressed in a proper way
if(CMAKE_CROSSCOMPILING OR NOT (LINUX OR WIN32))
set(ENABLE_LTO OFF)
endif()
#64 bits platform
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
message(STATUS "Detected 64 bit architecture")
SET(ARCH_64 ON)
else()
message(STATUS "Detected 32 bit architecture")
SET(ARCH_64 OFF)
endif()
# 32 bits
if(NOT ARCH_64)
if(UNIX)
set(ENABLE_CLDNN OFF)
endif()
set(ENABLE_MKL_DNN OFF)
endif()
# Apple specific
if (APPLE)
set(ENABLE_CLDNN OFF)
endif()
# ARM specific
if (ARM OR AARCH64)
# disable all base plugins but Myriad
set(ENABLE_CLDNN OFF)
set(ENABLE_MKL_DNN OFF)
endif()
#minGW specific - under wine no support for downloading file and applying them using git
if (WIN32)
if (MINGW)
SET(ENABLE_CLDNN OFF) # dont have mingw dll for linking
endif()
endif()
if (NOT ENABLE_MKL_DNN)
set(ENABLE_MKL OFF)
endif()
print_enabled_features()


@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@ -26,11 +26,11 @@ function (log_rpath_remove_top component component_remove_top lib lib_remove_top
# debug_message(STATUS "LIB-IN=${lib} ")
# debug_message(STATUS "TOPLIB-IN=${top_lib_dir} ")
get_filename_component(top_lib_dir ${${component}} DIRECTORY)
get_filename_component(top_lib_dir "${${component}}" DIRECTORY)
if (${component_remove_top} AND ${component})
else()
get_filename_component(add_name ${${component}} NAME)
get_filename_component(add_name "${${component}}" NAME)
set(top_lib_dir "${top_lib_dir}/${add_name}")
endif()
if (${lib_remove_top} AND lib)
@ -70,4 +70,4 @@ endfunction()
# This macro is redefined (with additional checks) within the InferenceEngineConfig.cmake file.
macro(ext_message TRACE_LEVEL)
message(${TRACE_LEVEL} "${ARGN}")
endmacro()
endmacro()


@ -2,10 +2,68 @@
# SPDX-License-Identifier: Apache-2.0
#
include(CPackComponent)
unset(IE_CPACK_COMPONENTS_ALL CACHE)
set(IE_CPACK_IE_DIR deployment_tools/inference_engine)
function(ie_cpack_set_library_dir)
string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH)
if(ARCH STREQUAL "x86_64" OR ARCH STREQUAL "amd64") # Windows detects Intel's 64-bit CPU as AMD64
set(ARCH intel64)
elseif(ARCH STREQUAL "i386")
set(ARCH ia32)
endif()
if(WIN32)
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/$<CONFIG>/${ARCH} PARENT_SCOPE)
else()
set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
endif()
endfunction()
ie_cpack_set_library_dir()
#
# ie_cpack_add_component(NAME ...)
#
# Wraps original `cpack_add_component` and adds component to internal IE list
#
macro(ie_cpack_add_component NAME)
list(APPEND IE_CPACK_COMPONENTS_ALL ${NAME})
set(IE_CPACK_COMPONENTS_ALL "${IE_CPACK_COMPONENTS_ALL}" CACHE STRING "" FORCE)
cpack_add_component(${NAME} ${ARGN})
endmacro()
macro(ie_cpack)
set(CPACK_GENERATOR "TGZ")
if(WIN32)
set(CPACK_PACKAGE_NAME inference-engine_$<CONFIG>)
else()
set(CPACK_PACKAGE_NAME inference-engine)
endif()
set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF)
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
set(CPACK_PACKAGE_VENDOR "Intel")
set(CPACK_COMPONENTS_ALL ${ARGN})
if(OS_FOLDER)
set(CPACK_SYSTEM_NAME "${OS_FOLDER}")
endif()
include(CPack)
endmacro()
# External dependencies
find_package(Threads)
# Detect target
include(target_flags)
# printing debug messages
include(debug)
if (UNIX AND NOT APPLE)
if(UNIX AND NOT APPLE)
set(LINUX ON)
endif()
@ -41,7 +99,7 @@ if(UNIX)
SET(LIB_DL ${CMAKE_DL_LIBS})
endif()
set(OUTPUT_ROOT ${IE_MAIN_SOURCE_DIR})
set(OUTPUT_ROOT ${OpenVINO_MAIN_SOURCE_DIR})
# Enable postfixes for Debug/Release builds
set(IE_DEBUG_POSTFIX_WIN "d")
@ -85,75 +143,32 @@ if(NOT UNIX)
# set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
endif()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_LIBRARY_PATH ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set(LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}) # compatibility issue: linux uses LIBRARY_OUTPUT_PATH, windows uses LIBRARY_OUTPUT_DIRECTORY
else()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib)
set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE})
set(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE})
set(LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib)
set(LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}/lib)
endif()
if(APPLE)
set(CMAKE_MACOSX_RPATH 1)
endif(APPLE)
# rpath fully disabled
if (NOT ENABLE_PLUGIN_RPATH)
set(CMAKE_SKIP_RPATH TRUE)
endif()
# prepare temporary folder
function(set_temp_directory temp_variable source_tree_dir)
if (DEFINED ENV{${DL_SDK_TEMP}} AND NOT $ENV{${DL_SDK_TEMP}} STREQUAL "")
if (WIN32)
string(REPLACE "\\" "\\\\" temp $ENV{${DL_SDK_TEMP}})
else(WIN32)
set(temp $ENV{${DL_SDK_TEMP}})
endif(WIN32)
if (ENABLE_ALTERNATIVE_TEMP)
set(ALTERNATIVE_PATH ${source_tree_dir}/temp)
endif()
else ()
message(STATUS "DL_SDK_TEMP envionment not set")
set(temp ${source_tree_dir}/temp)
endif()
set("${temp_variable}" "${temp}" PARENT_SCOPE)
if(ALTERNATIVE_PATH)
set(ALTERNATIVE_PATH "${ALTERNATIVE_PATH}" PARENT_SCOPE)
endif()
endfunction()
# Use solution folders
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
include(os_flags)
include(sdl)
include(os_flags)
include(sanitizer)
include(cpplint)
include(cppcheck)
function(set_ci_build_number)
set(IE_MAIN_SOURCE_DIR "${CMAKE_SOURCE_DIR}")
set(OpenVINO_MAIN_SOURCE_DIR "${CMAKE_SOURCE_DIR}")
include(version)
set(CI_BUILD_NUMBER "${CI_BUILD_NUMBER}" PARENT_SCOPE)
endfunction()
set_ci_build_number()
if(ENABLE_PROFILING_ITT)
find_package(ITT REQUIRED)
endif()
include(plugins/plugins)

26
cmake/features.cmake Normal file

@ -0,0 +1,26 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
include (target_flags)
include (options)
# these options are aimed to optimize build time on development system
ie_option (ENABLE_NGRAPH "Enable nGraph build" ON)
ie_option (ENABLE_INFERENCE_ENGINE "Enable Inference Engine build" ON)
ie_option (ENABLE_MKL_DNN "MKL-DNN plugin for inference engine" ON)
ie_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON)
ie_option (ENABLE_LTO "Enable Link Time Optimization" OFF)
ie_option (OS_FOLDER "create OS dedicated folder in output" OFF)
ie_option (TREAT_WARNING_AS_ERROR "Treat build warnings as errors" OFF)
ie_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF)
ie_option (ENABLE_THREAD_SANITIZER "enable checking data races via ThreadSanitizer" OFF)


@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Usage: ie_option(<option_variable> "description" <initial value or boolean expression> [IF <condition>])

174
cmake/os_flags.cmake Normal file

@ -0,0 +1,174 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#
# Disables deprecated warnings generation
# Defines ie_c_cxx_deprecated variable which contains C / C++ compiler flags
#
macro(disable_deprecated_warnings)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
set(ie_c_cxx_deprecated "/Qdiag-disable:1478,1786")
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(ie_c_cxx_deprecated "/wd4996")
endif()
else()
set(ie_c_cxx_deprecated "-Wno-deprecated-declarations")
endif()
if(NOT ie_c_cxx_deprecated)
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ie_c_cxx_deprecated}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ie_c_cxx_deprecated}")
endmacro()
#
# Enables Link Time Optimization compilation
#
macro(ie_enable_lto)
if(UNIX)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -flto")
# LTO causes issues with gcc 4.8.5 during cmake pthread check
if(NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9)
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -flto")
endif()
# modify linker and ar
if(LINUX)
set(CMAKE_AR "gcc-ar")
set(CMAKE_RANLIB "gcc-ranlib")
endif()
elseif(WIN32)
if(CMAKE_BUILD_TYPE STREQUAL Release)
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GL")
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /GL")
# set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LTCG:STATUS")
# set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LTCG:STATUS")
endif()
endif()
endmacro()
#
# Adds compiler flags to C / C++ sources
#
macro(ie_add_compiler_flags)
foreach(flag ${ARGN})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
endforeach()
endmacro()
#
# Compilation and linker flags
#
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(THREADS_PREFER_PTHREAD_FLAG ON)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsigned-char")
endif()
if(WIN32)
ie_add_compiler_flags(-D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS)
ie_add_compiler_flags(/EHsc) # no asynchronous structured exception handling
ie_add_compiler_flags(/Gy) # remove unreferenced functions: function level linking
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
if (TREAT_WARNING_AS_ERROR)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
ie_add_compiler_flags(/WX)
ie_add_compiler_flags(/Qdiag-warning:47,1740,1786)
elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
# ie_add_compiler_flags(/WX) # Too many warnings
endif()
endif()
# Compiler specific flags
ie_add_compiler_flags(/bigobj)
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
ie_add_compiler_flags(/MP /std:c++14)
endif()
# Disable noisy warnings
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
# C4251 needs to have dll-interface to be used by clients of class
ie_add_compiler_flags(/wd4251)
# C4275 non dll-interface class used as base for dll-interface class
ie_add_compiler_flags(/wd4275)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
# 161 unrecognized pragma
# 177 variable was declared but never referenced
# 2586 decorated name length exceeded, name was truncated
# 2651: attribute does not apply to any entity
# 3180 unrecognized OpenMP pragma
# 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
# 15335 was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override
ie_add_compiler_flags(/Qdiag-disable:161,177,2586,2651,3180,11075,15335)
endif()
# Debug information flags
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Z7")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7")
if(ENABLE_DEBUG_SYMBOLS)
ie_add_compiler_flags(/Z7)
set(DEBUG_SYMBOLS_LINKER_FLAGS "/DEBUG")
if (CMAKE_BUILD_TYPE STREQUAL "Release")
# Keep default /OPT values. See /DEBUG reference for details.
set(DEBUG_SYMBOLS_LINKER_FLAGS "${DEBUG_SYMBOLS_LINKER_FLAGS} /OPT:REF /OPT:ICF")
endif()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
endif()
else()
# TODO: enable for C sources as well
# ie_add_compiler_flags(-Werror)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
ie_add_compiler_flags(-ffunction-sections -fdata-sections)
ie_add_compiler_flags(-fvisibility=hidden)
ie_add_compiler_flags(-fdiagnostics-show-option)
ie_add_compiler_flags(-Wundef)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden")
# Disable noisy warnings
if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
ie_add_compiler_flags(-Wswitch)
elseif(UNIX)
ie_add_compiler_flags(-Wuninitialized -Winit-self)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
ie_add_compiler_flags(-Wno-error=switch)
else()
ie_add_compiler_flags(-Wmaybe-uninitialized)
endif()
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
ie_add_compiler_flags(-diag-disable=remark)
endif()
# Linker flags
if(APPLE)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-dead_strip")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-dead_strip")
elseif(LINUX)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
endif()
endif()


@ -1,11 +1,11 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
include(CheckCXXCompilerFlag)
if (ENABLE_SANITIZER)
set(SANITIZER_COMPILER_FLAGS "-fsanitize=address")
set(SANITIZER_COMPILER_FLAGS "-fsanitize=address -fno-omit-frame-pointer")
CHECK_CXX_COMPILER_FLAG("-fsanitize-recover=address" SANITIZE_RECOVER_SUPPORTED)
if (SANITIZE_RECOVER_SUPPORTED)
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=address")
@ -16,7 +16,18 @@ if (ENABLE_SANITIZER)
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
endif()
set(CMAKE_CC_FLAGS "${CMAKE_CC_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
endif()
if (ENABLE_THREAD_SANITIZER)
set(SANITIZER_COMPILER_FLAGS "-fsanitize=thread")
set(SANITIZER_LINKER_FLAGS "-fsanitize=thread")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")

34
cmake/sdl.cmake Normal file

@ -0,0 +1,34 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if (CMAKE_BUILD_TYPE STREQUAL "Release")
if(UNIX)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -Wformat -Wformat-security -D_FORTIFY_SOURCE=2")
if(NOT APPLE)
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -pie")
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
else()
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-strong")
endif()
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -s")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-strong -Wl,--strip-all")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -z noexecstack -z relro -z now")
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} /sdl")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${IE_C_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${IE_C_CXX_FLAGS}")
endif()

31
cmake/target_flags.cmake Normal file

@ -0,0 +1,31 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Target system specific flags
if(CMAKE_CL_64)
set(MSVC64 ON)
endif()
if(WIN32 AND CMAKE_CXX_COMPILER_ID MATCHES "GNU")
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
OUTPUT_VARIABLE OPENVINO_GCC_TARGET_MACHINE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(OPENVINO_GCC_TARGET_MACHINE MATCHES "amd64|x86_64|AMD64")
set(MINGW64 ON)
endif()
endif()
if(MSVC64 OR MINGW64)
set(X86_64 ON)
elseif(MINGW OR (MSVC AND NOT CMAKE_CROSSCOMPILING))
set(X86 ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
set(X86_64 ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*")
set(X86 ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
set(ARM ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
set(AARCH64 ON)
endif()


@ -1,11 +1,11 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
function (branchName VAR)
execute_process(
COMMAND git rev-parse --abbrev-ref HEAD
WORKING_DIRECTORY ${IE_MAIN_SOURCE_DIR}
WORKING_DIRECTORY ${OpenVINO_MAIN_SOURCE_DIR}
OUTPUT_VARIABLE GIT_BRANCH
OUTPUT_STRIP_TRAILING_WHITESPACE)
set (${VAR} ${GIT_BRANCH} PARENT_SCOPE)
@ -14,7 +14,7 @@ endfunction()
function (commitHash VAR)
execute_process(
COMMAND git rev-parse HEAD
WORKING_DIRECTORY ${IE_MAIN_SOURCE_DIR}
WORKING_DIRECTORY ${OpenVINO_MAIN_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE)
set (${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE)


@ -1,37 +1,53 @@
# Get Started with OpenVINO™ Deep Learning Deployment Toolkit (DLDT) on Linux*
This guide provides you with the information that will help you to start using the DLDT on Linux*. With this guide you will learn how to:
This guide provides you with the information that will help you to start using
the DLDT on Linux\*. With this guide, you will learn how to:
1. [Configure the Model Optimizer](#configure-the-model-optimizer)
2. [Prepare a model for sample inference:](#prepare-a-model-for-sample-inference)
2. [Prepare a model for sample inference](#prepare-a-model-for-sample-inference)
1. [Download a pre-trained model](#download-a-trained-model)
2. [Convert the model to an Intermediate Representation (IR) with the Model Optimizer](#convert-the-model-to-an-intermediate-representation-with-the-model-optimizer)
3. [Run the Image Classification Sample Application with the model](#run-the-image-classification-sample-application)
## Prerequisites
1. This guide assumes that you have already cloned the `dldt` repo and successfully built the Inference Engine and Samples using the [build instructions](inference-engine/README.md).
2. The original structure of the repository directories is kept unchanged.
1. This guide assumes that you have already cloned the `dldt` repo and
successfully built the Inference Engine and Samples using the
[build instructions](inference-engine/README.md).
2. The original structure of the repository directories remains unchanged.
> **NOTE**: Below, the directory to which the `dldt` repository is cloned is referred to as `<DLDT_DIR>`.
> **NOTE**: Below, the directory to which the `dldt` repository is cloned is
referred to as `<DLDT_DIR>`.
## Configure the Model Optimizer
The Model Optimizer is a Python\*-based command line tool for importing trained models from popular deep learning frameworks such as Caffe\*, TensorFlow\*, Apache MXNet\*, ONNX\* and Kaldi\*.
The Model Optimizer is a Python\*-based command line tool for importing trained
models from popular deep learning frameworks such as Caffe\*, TensorFlow\*,
Apache MXNet\*, ONNX\* and Kaldi\*.
You cannot perform inference on your trained model without running the model through the Model Optimizer. When you run a pre-trained model through the Model Optimizer, your output is an Intermediate Representation (IR) of the network. The Intermediate Representation is a pair of files that describe the whole model:
You cannot perform inference on your trained model without having first run the
model through the Model Optimizer. When you run a pre-trained model through the
Model Optimizer, it outputs an *Intermediate Representation (IR)* of the
network: a pair of files that describe the whole model:
- `.xml`: Describes the network topology
- `.bin`: Contains the weights and biases binary data
For more information about the Model Optimizer, refer to the [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html). 
For more information about the Model Optimizer, refer to the
[Model Optimizer Developer Guide].
### Model Optimizer Configuration Steps
You can choose to either configure all supported frameworks at once **OR** configure one framework at a time. Choose the option that best suits your needs. If you see error messages, make sure you installed all dependencies.
You can choose to either configure all supported frameworks at once **OR**
configure one framework at a time. Choose the option that best suits your needs.
If you see error messages, check for any missing dependencies.
> **NOTE**: Since the TensorFlow framework is not officially supported on CentOS*, the Model Optimizer for TensorFlow can't be configured and ran on those systems.
> **NOTE**: The TensorFlow\* framework is not officially supported on CentOS\*,
so the Model Optimizer for TensorFlow cannot be configured or run on
CentOS.
> **IMPORTANT**: The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment.
> **IMPORTANT**: Internet access is required to execute the following steps
successfully. If you can access the Internet only through a proxy server, make
sure that it is configured in your OS environment as well.
**Option 1: Configure all supported frameworks at the same time**
@ -47,7 +63,8 @@ sudo ./install_prerequisites.sh
**Option 2: Configure each framework separately**
Configure individual frameworks separately **ONLY** if you did not select **Option 1** above.
Configure individual frameworks separately **ONLY** if you did not select
**Option 1** above.
1. Go to the Model Optimizer prerequisites directory:
```sh
@ -79,25 +96,45 @@ cd <DLDT_DIR>/model_optimizer/install_prerequisites
```sh
sudo ./install_prerequisites_kaldi.sh
```
The Model Optimizer is configured for one or more frameworks. Continue to the next session to download and prepare a model for running a sample inference.
The Model Optimizer is configured for one or more frameworks. Continue to the
next section to download and prepare a model for running a sample inference.
## Prepare a Model for Sample Inference
This paragraph contains the steps to get the pre-trained model for sample inference and to prepare the model's optimized Intermediate Representation that Inference Engine uses.
This section describes how to get a pre-trained model for sample inference
and how to prepare the optimized Intermediate Representation (IR) that the
Inference Engine uses.
### Download a Trained Model
To run the Image Classification Sample you'll need a pre-trained model to run the inference on. This guide will use the public SqueezeNet 1.1 Caffe* model. You can find and download this model manually or use the OpenVINO™ [Model Downloader](https://github.com/opencv/open_model_zoo/tree/master/tools/downloader).
To run the Image Classification Sample, you need a pre-trained model to run
the inference on. This guide uses the public SqueezeNet 1.1 Caffe\* model.
You can find and download this model manually or use the OpenVINO™
[Model Downloader].
With the Model Downloader, you can download other popular public deep learning topologies and the [OpenVINO™ pre-trained models](https://github.com/opencv/open_model_zoo/tree/master/models/intel) prepared for running inference for a wide list of inference scenarios: object detection, object recognition, object re-identification, human pose estimation, action recognition and others.
With the Model Downloader, you can download other popular public deep learning
topologies and [OpenVINO™ pre-trained models], which are already prepared for
running inference in a wide range of scenarios:
To download the SqueezeNet 1.1 Caffe* model to a models folder with the Model Downloader:
1. Install the [prerequisites](https://github.com/opencv/open_model_zoo/tree/master/tools/downloader#prerequisites).
2. Run the `downloader.py` with specifying the topology name and a `<models_dir>` path. For example to download the model to the `~/public_models` directory:
* object detection,
* object recognition,
* object re-identification,
* human pose estimation,
* action recognition, and others.
To download the SqueezeNet 1.1 Caffe* model to a `models` folder (referred to
as `<models_dir>` below) with the Model Downloader:
1. Install the [prerequisites].
2. Run the `downloader.py` script, specifying the topology name and the path
to your `<models_dir>`. For example, to download the model to a directory
named `~/public_models`, run:
```sh
./downloader.py --name squeezenet1.1 --output_dir ~/public_models
```
When the model files are successfully downloaded the output similar to the following is printed:
When the model files are successfully downloaded, output similar to the
following is printed:
```sh
###############|| Downloading topologies ||###############
@ -117,7 +154,11 @@ To download the SqueezeNet 1.1 Caffe* model to a models folder with the Model Do
1. Create a `<ir_dir>` directory that will contain the Intermediate Representation (IR) of the model.
2. Inference Engine can perform inference on a [list of supported devices](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_Supported_Devices.html) using specific device plugins. Different plugins support models of [different precision formats](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_Supported_Devices.html#supported_model_formats), such as FP32, FP16, INT8. To prepare an IR to run inference on a particular hardware, run the Model Optimizer with the appropriate `--data_type` options:
2. Inference Engine can perform inference on a [list of supported devices]
using specific device plugins. Different plugins support models of
[different precision formats], such as `FP32`, `FP16`, `INT8`. To prepare an
IR to run inference on particular hardware, run the Model Optimizer with the
appropriate `--data_type` options:
**For CPU (FP32):**
```sh
@ -130,8 +171,10 @@ To download the SqueezeNet 1.1 Caffe* model to a models folder with the Model Do
```
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `<ir_dir>` directory.
3. Copy the `squeezenet1.1.labels` file from the `<DLDT_DIR>/inference-engine/samples/sample_data/` to the model IR directory. This file contains the classes that ImageNet uses so that the inference results show text instead of classification numbers:
```sh
3. Copy the `squeezenet1.1.labels` file from the `<DLDT_DIR>/inference-engine/samples/sample_data/`
folder to the model IR directory. This file contains the classes that ImageNet
uses so that the inference results show text instead of classification numbers:
```sh
cp <DLDT_DIR>/inference-engine/samples/sample_data/squeezenet1.1.labels <ir_dir>
```
@ -139,15 +182,21 @@ Now you are ready to run the Image Classification Sample Application.
## Run the Image Classification Sample Application
The Inference Engine sample applications are automatically compiled when you built the Inference Engine using the [build instructions](inference-engine/README.md). The binary files are located in the `<DLDT_DIR>/inference-engine/bin/intel64/Release` directory.
The Inference Engine sample applications are compiled automatically when you
build the Inference Engine using the [build instructions](inference-engine/README.md).
The binary files are located in the `<DLDT_DIR>/inference-engine/bin/intel64/Release`
directory.
Follow the steps below to run the Image Classification sample application on the prepared IR and with an input image:
To run the Image Classification sample application with an input image on the prepared IR:
1. Go to the samples build directory:
```sh
cd <DLDT_DIR>/inference-engine/bin/intel64/Release
```
2. Run the sample executable with specifying the `car.png` file from the `<DLDT_DIR>/inference-engine/samples/sample_data/` directory as an input image, the IR of your model and a plugin for a hardware device to perform inference on:
2. Run the sample executable, specifying the `car.png` file from the
`<DLDT_DIR>/inference-engine/samples/sample_data/` directory as an input
image, the IR of your model, and a plugin for the hardware device to perform
inference on:
**For CPU:**
```sh
@ -160,8 +209,11 @@ Follow the steps below to run the Image Classification sample application on the
```
**For MYRIAD:**
>**NOTE**: Running inference on VPU devices (Intel® Movidius™ Neural Compute Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires performing [additional hardware configuration steps](inference-engine/README.md#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2).
```sh
>**NOTE**: Running inference on VPU devices (Intel® Movidius™ Neural Compute
Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires
performing [additional hardware configuration steps](inference-engine/README.md#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2).
```sh
./classification_sample -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
```
@ -199,5 +251,12 @@ Throughput: 375.3339402 FPS
* [Inference Engine build instructions](inference-engine/README.md)
* [Introduction to Intel® Deep Learning Deployment Toolkit](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Introduction.html)
* [Inference Engine Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Deep_Learning_Inference_Engine_DevGuide.html)
* [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
* [Model Optimizer Developer Guide]
* [Inference Engine Samples Overview](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Samples_Overview.html).
[Model Optimizer Developer Guide]:https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html
[Model Downloader]:https://github.com/opencv/open_model_zoo/tree/master/tools/downloader
[OpenVINO™ pre-trained models]:https://github.com/opencv/open_model_zoo/tree/master/models/intel
[prerequisites]:https://github.com/opencv/open_model_zoo/tree/master/tools/downloader#prerequisites
[list of supported devices]:https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_Supported_Devices.html
[different precision formats]:https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_Supported_Devices.html#supported_model_formats

View File

@ -1,41 +1,57 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if (APPLE)
# due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
else()
cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
endif()
project(InferenceEngine)
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
set(IE_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(CMAKE_MODULE_PATH "${IE_MAIN_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
include(CTest)
include(features)
include(features_ie)
# include developer package
include(developer_package)
include(developer_package_ie)
# These options are shared with 3rdparty plugins
# by means of developer package
include(check_features)
include(check_features_ie)
# resolving dependencies for the project
include(dependencies)
message (STATUS "PROJECT ............................... " ${PROJECT_NAME})
message (STATUS "CMAKE_BINARY_DIR ...................... " ${CMAKE_BINARY_DIR})
message (STATUS "IE_MAIN_SOURCE_DIR .................... " ${IE_MAIN_SOURCE_DIR})
message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})
message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID})
message (STATUS "CMAKE_BUILD_TYPE ...................... " ${CMAKE_BUILD_TYPE})
if (ENABLE_FUZZING)
include(fuzzing)
enable_fuzzing()
endif()
# remove file with exported developer targets to force its regeneration
file(REMOVE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
if(ENABLE_NGRAPH)
set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph)
find_package(ngraph REQUIRED)
endif()
find_package(Threads REQUIRED)
unset(IEDeveloperPackageTargets CACHE)
function(ie_developer_export_targets)
set(IEDeveloperPackageTargets "${IEDeveloperPackageTargets};${ARGV}")
# to allow exporting of aliased targets with the original names
foreach(target_name ${IEDeveloperPackageTargets})
if(TARGET "${target_name}")
get_target_property(original_name ${target_name} ALIASED_TARGET)
if(TARGET "${original_name}")
message(STATUS "The name ${target_name} is an ALIAS for ${original_name}. "
"It will be exported to the InferenceEngineDeveloperPackage with the original name.")
list(REMOVE_ITEM IEDeveloperPackageTargets ${target_name})
list(APPEND IEDeveloperPackageTargets ${original_name})
endif()
endif()
endforeach()
list(REMOVE_DUPLICATES IEDeveloperPackageTargets)
set(IEDeveloperPackageTargets "${IEDeveloperPackageTargets}" CACHE INTERNAL
"Paths to extra Inference Engine plugins" FORCE)
endfunction()
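As a sketch of how this helper might be used, a component could register its own library for export to the developer package; the `my_custom_plugin` target below is hypothetical:
```cmake
# Hypothetical registration of an extra target for the developer package;
# the target and source file names are illustrative only.
add_library(my_custom_plugin SHARED plugin.cpp)
ie_developer_export_targets(my_custom_plugin)
```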
add_subdirectory(thirdparty)
add_subdirectory(src)
@ -43,15 +59,8 @@ if(ENABLE_TESTS)
add_subdirectory(tests)
endif()
add_subdirectory(thirdparty)
add_subdirectory(tools)
if (ENABLE_SAMPLES)
# hint for find_package(InferenceEngine) in the samples folder
set(InferenceEngine_DIR "${CMAKE_BINARY_DIR}")
endif()
# gflags and format_reader targets are kept inside of samples directory and
# they must be built even if samples build is disabled (required for tests and tools).
add_subdirectory(samples)
@ -65,4 +74,108 @@ if (ENABLE_PYTHON)
add_subdirectory(ie_bridges/python)
endif()
if (ENABLE_C)
add_subdirectory(ie_bridges/c)
endif()
add_cpplint_report_target()
# install C++ samples
ie_cpack_add_component(cpp_samples REQUIRED DEPENDS core)
if(UNIX)
install(DIRECTORY samples/
DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
COMPONENT cpp_samples
USE_SOURCE_PERMISSIONS
PATTERN *.bat EXCLUDE)
elseif(WIN32)
install(DIRECTORY samples
DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
COMPONENT cpp_samples
USE_SOURCE_PERMISSIONS
PATTERN *.sh EXCLUDE)
endif()
# install C samples
ie_cpack_add_component(c_samples REQUIRED DEPENDS core)
if(UNIX)
install(PROGRAMS samples/build_samples.sh
DESTINATION ${IE_CPACK_IE_DIR}/samples/c
COMPONENT c_samples)
elseif(WIN32)
install(PROGRAMS samples/build_samples_msvc.bat
DESTINATION ${IE_CPACK_IE_DIR}/samples/c
COMPONENT c_samples)
endif()
install(DIRECTORY ie_bridges/c/samples/
DESTINATION ${IE_CPACK_IE_DIR}/samples/c
COMPONENT c_samples
PATTERN ie_bridges/c/samples/CMakeLists.txt EXCLUDE)
install(FILES samples/CMakeLists.txt
DESTINATION ${IE_CPACK_IE_DIR}/samples/c
COMPONENT c_samples)
# install Python samples
ie_cpack_add_component(python_samples REQUIRED DEPENDS core)
install(DIRECTORY ${ie_python_api_SOURCE_DIR}/sample/
DESTINATION ${IE_CPACK_IE_DIR}/samples/python
COMPONENT python_samples)
# Custom target to build only Inference Engine Developer Package targets
add_custom_target(ie_dev_targets ALL DEPENDS inference_engine HeteroPlugin)
# Developer package
ie_developer_export_targets(format_reader)
if (ENABLE_NGRAPH)
ie_developer_export_targets(${NGRAPH_LIBRARIES})
endif()
export(TARGETS ${IEDeveloperPackageTargets} NAMESPACE IE::
APPEND FILE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
configure_file(
"${IE_MAIN_SOURCE_DIR}/cmake/developer_package_config.cmake.in"
"${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig.cmake"
@ONLY)
configure_file(
"${IE_MAIN_SOURCE_DIR}/cmake/share/InferenceEngineConfig-version.cmake.in"
"${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake"
COPYONLY)
# Add plugins
function(register_extra_plugins)
set(InferenceEngineDeveloperPackage_DIR "${CMAKE_CURRENT_BINARY_DIR}/build-plugins")
set(iedevconfig_file "${InferenceEngineDeveloperPackage_DIR}/InferenceEngineDeveloperPackageConfig.cmake")
file(REMOVE "${iedevconfig_file}")
file(WRITE "${iedevconfig_file}" "\# !! AUTOGENERATED: DON'T EDIT !!\n\n")
foreach(target IN LISTS IEDeveloperPackageTargets)
if(target)
file(APPEND "${iedevconfig_file}" "add_library(IE::${target} ALIAS ${target})\n")
endif()
endforeach()
# automatically import plugins from the 'plugins' folder
file(GLOB local_extra_plugins "plugins/*")
foreach(plugin_path IN LISTS IE_EXTRA_PLUGINS local_extra_plugins)
get_filename_component(plugin_dir "${plugin_path}" NAME)
message(STATUS "Register ${plugin_dir} to be built in build-plugins/${plugin_dir}")
add_subdirectory("${plugin_path}" "build-plugins/${plugin_dir}")
endforeach()
endfunction()
register_extra_plugins()
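For illustration, a plugin dropped into the `plugins/` folder is picked up by the glob above as long as it provides its own `CMakeLists.txt`. A minimal, hypothetical sketch of such a file (all names are illustrative):
```cmake
# Hypothetical plugins/my_extra_plugin/CMakeLists.txt discovered by
# register_extra_plugins(); target and file names are illustrative only.
cmake_minimum_required(VERSION 3.5)
project(MyExtraPlugin)
add_library(my_extra_plugin SHARED my_extra_plugin.cpp)
# Links against the in-tree Inference Engine target built by this project.
target_link_libraries(my_extra_plugin PRIVATE inference_engine)
```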

View File

@ -1,492 +0,0 @@
# Build Inference Engine
## Contents
- [Introduction](#introduction)
- [Build on Linux* Systems](#build-on-linux-systems)
- [Software Requirements](#software-requirements)
- [Build Steps](#build-steps)
- [Additional Build Options](#additional-build-options)
- [Build for Raspbian* Stretch OS](#build-for-raspbian-stretch-os)
- [Hardware Requirements](#hardware-requirements)
- [Native Compilation](#native-compilation)
- [Cross Compilation Using Docker*](#cross-compilation-using-docker)
- [Additional Build Options](#additional-build-options-1)
- [Build on Windows* Systems](#build-on-windows-systems)
- [Software Requirements](#software-requirements-1)
- [Build Steps](#build-steps-1)
- [Additional Build Options](#additional-build-options-2)
- [Building Inference Engine with Ninja* Build System](#building-inference-engine-with-ninja-build-system)
- [Build on macOS* Systems](#build-on-macos-systems)
- [Software Requirements](#software-requirements-2)
- [Build Steps](#build-steps-2)
- [Additional Build Options](#additional-build-options-3)
- [Use Custom OpenCV Builds for Inference Engine](#use-custom-opencv-builds-for-inference-engine)
- [Adding Inference Engine to your project](#adding-inference-engine-to-your-project)
- [(Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2](#optional-additional-installation-steps-for-the-intel-movidius-neural-compute-stick-and-neural-compute-stick-2)
- [For Linux, Raspbian Stretch* OS](#for-linux-raspbian-stretch-os)
- [For Windows](#for-windows-1)
- [Next Steps](#next-steps)
- [Additional Resources](#additional-resources)
## Introduction
The Inference Engine can infer models in different formats with various input and output configurations.
The open source version of Inference Engine includes the following plugins:
| PLUGIN | DEVICE TYPES |
| ---------------------| -------------|
| CPU plugin | Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE |
| GPU plugin | Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics |
| GNA plugin | Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver processor J5005, Intel® Celeron® processor J4005, Intel® Core™ i3-8121U processor |
| MYRIAD plugin | Intel® Movidius™ Neural Compute Stick powered by the Intel® Movidius™ Myriad™ 2, Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X |
| Heterogeneous plugin | Heterogeneous plugin enables computing for inference on one network on several Intel® devices. |
Inference Engine plugin for Intel® FPGA is distributed only in a binary form as a part of [Intel® Distribution of OpenVINO™](https://software.intel.com/en-us/openvino-toolkit).
## Build on Linux* Systems
The software was validated on:
- Ubuntu\* 16.04 (64-bit) with default GCC\* 5.4.0
- CentOS\* 7.4 (64-bit) with default GCC\* 4.8.5
### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.5 or higher
- GCC\* 4.8 or higher to build the Inference Engine
- Python 2.7 or higher for Inference Engine Python API wrapper
- (Optional) [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.04.12237](https://github.com/intel/compute-runtime/releases/tag/19.04.12237).
### Build Steps
1. Clone submodules:
```sh
cd dldt/inference-engine
git submodule init
git submodule update --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the project root folder:
```sh
chmod +x install_dependencies.sh
```
```sh
./install_dependencies.sh
```
3. By default, the build enables the Inference Engine GPU plugin to infer models on your Intel® Processor Graphics. This requires you to [Install Intel® Graphics Compute Runtime for OpenCL™ Driver package 19.04.12237](https://github.com/intel/compute-runtime/releases/tag/19.04.12237) before running the build. If you don't want to use the GPU plugin, use the `-DENABLE_CLDNN=OFF` CMake build option and skip the installation of the Intel® Graphics Compute Runtime for OpenCL™ Driver.
4. Create a build folder:
```sh
mkdir build && cd build
```
5. Inference Engine uses a CMake-based build system. In the created `build` directory, run `cmake` to fetch project dependencies and create Unix makefiles, then run `make` to build the project:
```sh
cmake -DCMAKE_BUILD_TYPE=Release ..
make --jobs=$(nproc --all)
```
### Additional Build Options
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to OpenBLAS\* implementation, use the `GEMM=OPENBLAS` option and `BLAS_INCLUDE_DIRS` and `BLAS_LIBRARIES` CMake options to specify path to the OpenBLAS headers and library. For example use the following options on CentOS\*: `-DGEMM=OPENBLAS -DBLAS_INCLUDE_DIRS=/usr/include/openblas -DBLAS_LIBRARIES=/usr/lib64/libopenblas.so.0`.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` CMake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded from the [MKL-DNN repository](https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_lnx_2019.0.5.20190502.tgz).
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you want to use the automatically downloaded packages but you already have installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.
- If the CMake-based build script can not find and download the OpenCV package that is supported on your platform, or if you want to use a custom build of the OpenCV library, refer to the [Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine) section for details.
- To build the Python API wrapper:
1. Install all additional packages listed in the `/inference-engine/ie_bridges/python/requirements.txt` file:
```sh
pip install -r requirements.txt
```
2. Use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE=`which python3.7` \
-DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.7m.so \
-DPYTHON_INCLUDE_DIR=/usr/include/python3.7
```
- To switch off/on the CPU and GPU plugins, use the `cmake` options `-DENABLE_MKL_DNN=ON/OFF` and `-DENABLE_CLDNN=ON/OFF` respectively.
## Build for Raspbian Stretch* OS
> **NOTE**: Only the MYRIAD plugin is supported.
### Hardware Requirements
* Raspberry Pi\* 2 or 3 with Raspbian\* Stretch OS (32-bit). Check that its CPU supports the ARMv7 instruction set (the `uname -m` command returns `armv7l`).
> **NOTE**: Although the Raspberry Pi\* CPU is ARMv8, the 32-bit OS detects the ARMv7 instruction set. The default `gcc` compiler applies the ARMv6 architecture flag for compatibility with lower versions of boards. For more information, run the `gcc -Q --help=target` command and refer to the description of the `-march=` option.
You can compile the Inference Engine for Raspberry Pi\* in one of the two ways:
* [Native Compilation](#native-compilation), which is the simplest way, but time-consuming
* [Cross Compilation Using Docker*](#cross-compilation-using-docker), which is the recommended way
### Native Compilation
Native compilation of the Inference Engine is the most straightforward solution. However, it might take at least one hour to complete on Raspberry Pi\* 3.
1. Install dependencies:
```bash
sudo apt-get update
sudo apt-get install -y git cmake libusb-1.0-0-dev
```
2. Go to the `inference-engine` directory of the cloned `dldt` repository:
```bash
cd dldt/inference-engine
```
3. Initialize submodules:
```bash
git submodule init
git submodule update --recursive
```
4. Create a build folder:
```bash
mkdir build && cd build
```
5. Build the Inference Engine:
```bash
cmake -DCMAKE_BUILD_TYPE=Release \
-DENABLE_SSE42=OFF \
-DTHREADING=SEQ \
-DENABLE_GNA=OFF .. && make
```
### Cross Compilation Using Docker*
This compilation was tested on the following configuration:
* Host: Ubuntu\* 16.04 (64-bit, Intel® Core™ i7-6700K CPU @ 4.00GHz × 8)
* Target: Raspbian\* Stretch (32-bit, ARMv7, Raspberry Pi\* 3)
1. Install Docker\*:
```bash
sudo apt-get install -y docker.io
```
2. Add the current user to the `docker` group:
```bash
sudo usermod -a -G docker $USER
```
Log out and log in for this to take effect.
3. Create a directory named `ie_cross_armhf` and add a text file named `Dockerfile`
with the following content:
```docker
FROM debian:stretch
USER root
RUN dpkg --add-architecture armhf && \
apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
crossbuild-essential-armhf \
git \
wget \
cmake \
libusb-1.0-0-dev:armhf \
libgtk-3-dev:armhf \
libavcodec-dev:armhf \
libavformat-dev:armhf \
libswscale-dev:armhf \
libgstreamer1.0-dev:armhf \
libgstreamer-plugins-base1.0-dev:armhf \
libpython3-dev:armhf \
python3-pip
```
It uses the Debian\* Stretch (Debian 9) OS for compilation because it is the base of Raspbian\* Stretch.
4. Build a Docker\* image:
```bash
docker image build -t ie_cross_armhf ie_cross_armhf
```
5. Run Docker\* container with mounted source code folder from host:
```bash
docker run -it -v /absolute/path/to/dldt:/dldt ie_cross_armhf /bin/bash
```
6. While in the container:
1. Go to the `inference-engine` directory of the cloned `dldt` repository:
```bash
cd dldt/inference-engine
```
2. Create a build folder:
```bash
mkdir build && cd build
```
3. Build the Inference Engine:
```bash
cmake -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE="../cmake/arm.toolchain.cmake" \
-DTHREADS_PTHREAD_ARG="-pthread" \
-DENABLE_SSE42=OFF \
-DTHREADING=SEQ \
-DENABLE_GNA=OFF .. && make --jobs=$(nproc --all)
```
7. Press "Ctrl"+"D" to exit from Docker\*. You can find the resulting binaries in the `dldt/inference-engine/bin/armv7l/` directory and the OpenCV* installation in the `dldt/inference-engine/temp`.
>**NOTE**: Native applications that link to cross-compiled Inference Engine library require an extra compilation flag `-march=armv7-a`.
### Additional Build Options
You can use the following additional build options:
- Required versions of OpenCV packages are downloaded automatically by the CMake-based script. If you want to use the automatically downloaded packages but you already have installed OpenCV packages configured in your environment, you may need to clean the `OpenCV_DIR` environment variable before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.
- If the CMake-based build script can not find and download the OpenCV package that is supported on your platform, or if you want to use a custom build of the OpenCV library, refer to the [Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine) section for details.
- To build the Python API wrapper, install the `libpython3-dev:armhf` and `python3-pip` packages using `apt-get`, then install the `numpy` and `cython` Python modules using the `pip3` command, and add the following CMake options:
```sh
-DENABLE_PYTHON=ON \
-DPYTHON_EXECUTABLE=/usr/bin/python3.5 \
-DPYTHON_LIBRARY=/usr/lib/arm-linux-gnueabihf/libpython3.5m.so \
-DPYTHON_INCLUDE_DIR=/usr/include/python3.5
```
## Build on Windows* Systems
The software was validated on:
- Microsoft\* Windows\* 10 (64-bit) with Visual Studio 2017 and Intel® C++ Compiler 2018 Update 3
### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.5 or higher
- [OpenBLAS\*](https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download) and [mingw64\* runtime dependencies](https://sourceforge.net/projects/openblas/files/v0.2.14/mingw64_dll.zip/download).
- [Intel® C++ Compiler](https://software.intel.com/en-us/intel-parallel-studio-xe) 18.0 to build the Inference Engine on Windows.
- (Optional) [Intel® Graphics Driver for Windows* [25.20] driver package](https://downloadcenter.intel.com/download/28646/Intel-Graphics-Windows-10-DCH-Drivers?product=80939).
- Python 3.4 or higher for Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
```sh
git submodule init
git submodule update --recursive
```
2. Download and install [Intel® C++ Compiler](https://software.intel.com/en-us/intel-parallel-studio-xe) 18.0
3. Install OpenBLAS:
1. Download [OpenBLAS\*](https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download)
2. Unzip the downloaded package to a directory on your machine. In this document, this directory is referred to as `<OPENBLAS_DIR>`.
4. By default, the build enables the Inference Engine GPU plugin to infer models on your Intel® Processor Graphics. This requires you to [download and install the Intel® Graphics Driver for Windows* [25.20] driver package](https://downloadcenter.intel.com/download/28646/Intel-Graphics-Windows-10-DCH-Drivers?product=80939) before running the build. If you don't want to use the GPU plugin, use the `-DENABLE_CLDNN=OFF` CMake build option and skip the installation of the Intel® Graphics Driver.
5. Create build directory:
```sh
mkdir build
```
6. In the `build` directory, run `cmake` to fetch project dependencies and generate a Visual Studio solution:
```sh
cd build
cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
-DCMAKE_BUILD_TYPE=Release ^
-DICCLIB="C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\compiler\lib" ..
```
7. Build generated solution in Visual Studio 2017 or run `cmake --build . --config Release` to build from the command line.
8. Before running the samples, add paths to the TBB and OpenCV binaries used for the build to the `%PATH%` environment variable. By default, TBB binaries are downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/lib` folder, and OpenCV binaries to the `<dldt_repo>/inference-engine/temp/opencv_4.1.0/bin` folder.
### Additional Build Options
- Internal JIT GEMM implementation is used by default.
- To switch to OpenBLAS GEMM implementation, use the `-DGEMM=OPENBLAS` CMake option and specify path to OpenBLAS using the `-DBLAS_INCLUDE_DIRS=<OPENBLAS_DIR>\include` and `-DBLAS_LIBRARIES=<OPENBLAS_DIR>\lib\libopenblas.dll.a` options. Prebuilt OpenBLAS\* package can be downloaded [here](https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download). mingw64* runtime dependencies can be downloaded [here](https://sourceforge.net/projects/openblas/files/v0.2.14/mingw64_dll.zip/download).
- To switch to the optimized MKL-ML\* GEMM implementation, use the `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` CMake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded from the [MKL-DNN repository](https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_win_2019.0.5.20190502.zip).
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you want to use the automatically downloaded packages but you already have installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.
- If the CMake-based build script can not find and download the OpenCV package that is supported on your platform, or if you want to use a custom build of the OpenCV library, refer to the [Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine) section for details.
- To switch off/on the CPU and GPU plugins, use the `cmake` options `-DENABLE_MKL_DNN=ON/OFF` and `-DENABLE_CLDNN=ON/OFF` respectively.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" ^
-DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" ^
-DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include"
```
### Building Inference Engine with Ninja* Build System
```sh
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
```
## Build on macOS* Systems
> **NOTE**: The current version of the OpenVINO™ toolkit for macOS* supports inference on Intel CPUs only.
The software was validated on:
- macOS\* 10.14, 64-bit
### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.5 or higher
- Clang\* compiler from Xcode\* 10.1
- Python\* 3.4 or higher for the Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
```sh
cd dldt/inference-engine
git submodule init
git submodule update --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the project root folder:
```sh
chmod +x install_dependencies.sh
```
```sh
./install_dependencies.sh
```
3. Create a build folder:
```sh
mkdir build
```
4. Inference Engine uses a CMake-based build system. In the created `build` directory, run `cmake` to fetch project dependencies and create Unix makefiles, then run `make` to build the project:
```sh
cmake -DCMAKE_BUILD_TYPE=Release ..
make --jobs=$(nproc --all)
```
### Additional Build Options
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.19/mklml_mac_2019.0.5.20190502.tgz)
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you want to use the automatically downloaded packages but you already have installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.
- If the CMake-based build script can not find and download the OpenCV package that is supported on your platform, or if you want to use a custom build of the OpenCV library, refer to the [Use Custom OpenCV Builds](#use-custom-opencv-builds-for-inference-engine) section for details.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE=/Library/Frameworks/Python.framework/Versions/3.7/bin/python3.7 \
-DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib \
-DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m
```
## Use Custom OpenCV Builds for Inference Engine
> **NOTE**: The recommended and tested version of OpenCV is 4.1. The minimum supported version is 3.4.0.
Required versions of OpenCV packages are downloaded automatically during the Inference Engine library build. If the build script cannot find and download an OpenCV package that is supported on your platform, you can use one of the following options:
* Download the most suitable version from the list of available pre-built packages at [https://download.01.org/opencv/2019/openvinotoolkit](https://download.01.org/opencv/2019/openvinotoolkit), in the `<release_version>/inference_engine` directory.
* Use a system-provided OpenCV package (for example, by running the `apt install libopencv-dev` command). The following modules must be enabled: `imgcodecs`, `videoio`, `highgui`.
* Get the OpenCV package using a package manager such as pip, conda, or conan. The package must include the development components (header files and CMake scripts).
* Build OpenCV from source using the [build instructions](https://docs.opencv.org/master/df/d65/tutorial_table_of_content_introduction.html) on the OpenCV site.
After you have the custom OpenCV build, perform the following preparation steps before running the Inference Engine build:
1. Set the `OpenCV_DIR` environment variable to the directory where the `OpenCVConfig.cmake` file of your custom OpenCV build is located.
2. Disable automatic package downloading by passing the `-DENABLE_OPENCV=OFF` option to the CMake-based Inference Engine build script.
## Adding Inference Engine to your project
For CMake projects, set the `InferenceEngine_DIR` environment variable:
```sh
export InferenceEngine_DIR=/path/to/dldt/inference-engine/build/
```
Then you can find Inference Engine by `find_package`:
```cmake
find_package(InferenceEngine)
include_directories(${InferenceEngine_INCLUDE_DIRS})
target_link_libraries(${PROJECT_NAME} ${InferenceEngine_LIBRARIES} dl)
```
## (Optional) Additional Installation Steps for the Intel® Movidius™ Neural Compute Stick and Neural Compute Stick 2
> **NOTE**: These steps are only required if you want to perform inference on Intel® Movidius™ Neural Compute Stick or the Intel® Neural Compute Stick 2 using the Inference Engine MYRIAD Plugin. See also [Intel® Neural Compute Stick 2 Get Started](https://software.intel.com/en-us/neural-compute-stick/get-started)
### For Linux, Raspbian\* Stretch OS
1. Add the current Linux user to the `users` group:
```sh
sudo usermod -a -G users "$(whoami)"
```
Log out and log in for it to take effect.
2. To perform inference on Intel® Movidius™ Neural Compute Stick and Intel® Neural Compute Stick 2, install the USB rules as follows:
```sh
cat <<EOF > 97-myriad-usbboot.rules
SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"
EOF
```
```sh
sudo cp 97-myriad-usbboot.rules /etc/udev/rules.d/
```
```sh
sudo udevadm control --reload-rules
```
```sh
sudo udevadm trigger
```
```sh
sudo ldconfig
```
```sh
rm 97-myriad-usbboot.rules
```
### For Windows
For Intel® Movidius™ Neural Compute Stick and Intel® Neural Compute Stick 2, install the Movidius™ VSC driver:
1. Go to the `<DLDT_ROOT_DIR>/inference-engine/thirdparty/movidius/MovidiusDriver` directory, where the `DLDT_ROOT_DIR` is the directory to which the DLDT repository was cloned.
2. Right-click the `Movidius_VSC_Device.inf` file and choose **Install** from the pop-up menu.
You have installed the driver for your Intel® Movidius™ Neural Compute Stick or Intel® Neural Compute Stick 2.
## Next Steps
Congratulations, you have built the Inference Engine. To get started with the OpenVINO™ DLDT, proceed to the Get Started guides:
* [Get Started with Deep Learning Deployment Toolkit on Linux*](../get-started-linux.md)
## Additional Resources
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
* [Introduction to Intel® Deep Learning Deployment Toolkit](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Introduction.html)
* [Inference Engine Samples Overview](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Samples_Overview.html)
* [Inference Engine Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_Deep_Learning_Inference_Engine_DevGuide.html)
* [Model Optimizer Developer Guide](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
---
\* Other names and brands may be claimed as the property of others.

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,20 +1,20 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# module to locate GNA libraries
if (WIN32)
set(GNA_PLATFORM_DIR win64)
set(GNA_PLATFORM_DIR win64 CACHE STRING "" FORCE)
elseif (UNIX)
set(GNA_PLATFORM_DIR linux)
set(GNA_PLATFORM_DIR linux CACHE STRING "" FORCE)
else ()
message(FATAL_ERROR "GNA not supported on this platform, only linux, and windows")
endif ()
set(libGNA_FOUND TRUE)
set(GNA_KERNEL_LIB_NAME gna)
set(GNA_KERNEL_LIB_NAME gna CACHE STRING "" FORCE)
set(GNA_LIBS_LIST
"libGNA::API"
"libGNA::KERNEL")
@ -22,20 +22,20 @@ set(GNA_LIBS_LIST
if (GNA_LIBRARY_VERSION STREQUAL "GNA1")
# use old version of GNA Library from gna_20181120
if (WIN32)
set(GNA_LIB_DIR x64)
set(GNA_LIB_DIR x64 CACHE STRING "" FORCE)
else ()
list(APPEND GNA_LIBS_LIST
"libGNA::OLD_API_LIB")
set(GNA_LIB_DIR lib)
set(GNA_KERNEL_LIB_NAME gna_kernel)
set(GNA_LIB_DIR lib CACHE STRING "" FORCE)
set(GNA_KERNEL_LIB_NAME gna_kernel CACHE STRING "" FORCE)
endif()
set(libGNA_INCLUDE_DIRS "${GNA}/${GNA_PLATFORM_DIR}/include")
set(libGNA_INCLUDE_DIRS "${GNA}/${GNA_PLATFORM_DIR}/include" CACHE STRING "" FORCE)
else()
# use current version of GNA library
set(GNA_LIB_DIR x64)
set(libGNA_INCLUDE_DIRS "${GNA}/include")
set(GNA_LIB_DIR x64 CACHE STRING "" FORCE)
set(libGNA_INCLUDE_DIRS "${GNA}/include" CACHE STRING "" FORCE)
endif()
set(libGNA_LIBRARIES_BASE_PATH ${GNA}/${GNA_PLATFORM_DIR}/${GNA_LIB_DIR})
set(libGNA_LIBRARIES_BASE_PATH ${GNA}/${GNA_PLATFORM_DIR}/${GNA_LIB_DIR} CACHE STRING "" FORCE)
add_library(libGNA::KERNEL SHARED IMPORTED)
find_library(GNA_KERNEL_LIBRARY

View File

@ -0,0 +1,113 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#[[
Function to create a CMake target and set up its options in a declarative style.
Example:
addIeTarget(
NAME core_lib
TYPE shared
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
INCLUDES
${SDL_INCLUDES}
/some/specific/path
LINK_LIBRARIES
ie::important_plugin
)
#]]
function(addIeTarget)
set(options
)
set(oneValueRequiredArgs
TYPE # type of target, shared|static|executable. shared and static correspond to add_library, executable to add_executable.
NAME # name of target
ROOT # directory used as the root for source file globbing.
)
set(oneValueOptionalArgs
)
set(multiValueArgs
INCLUDES # Extra include directories.
LINK_LIBRARIES # Link libraries (in form of target name or file name)
DEPENDENCIES # compile order dependencies (no link implied)
DEFINES # extra preprocessor definitions
ADDITIONAL_SOURCE_DIRS # list of directories, which will be used to search for source files in addition to ROOT.
)
cmake_parse_arguments(ARG "${options}" "${oneValueRequiredArgs};${oneValueOptionalArgs}" "${multiValueArgs}" ${ARGN} )
# sanity checks
foreach(argName ${oneValueRequiredArgs})
if (NOT ARG_${argName})
message(SEND_ERROR "Argument '${argName}' is required.")
endif()
endforeach()
if (ARG_UNPARSED_ARGUMENTS)
message(SEND_ERROR "Unexpected parameters have passed to function: ${ARG_UNPARSED_ARGUMENTS}")
endif()
# adding files to target
set(includeSearch)
set(sourceSearch)
foreach(directory ${ARG_ROOT} ${ARG_ADDITIONAL_SOURCE_DIRS})
list(APPEND includeSearch ${directory}/*.h ${directory}/*.hpp)
list(APPEND sourceSearch ${directory}/*.cpp)
endforeach()
file(GLOB_RECURSE includes ${includeSearch})
file(GLOB_RECURSE sources ${sourceSearch})
source_group("include" FILES ${includes})
source_group("src" FILES ${sources})
# defining a target
if (ARG_TYPE STREQUAL executable)
add_executable(${ARG_NAME} ${sources} ${includes})
elseif(ARG_TYPE STREQUAL static OR ARG_TYPE STREQUAL shared)
string(TOUPPER ${ARG_TYPE} type)
add_library(${ARG_NAME} ${type} ${sources} ${includes})
else()
message(SEND_ERROR "Invalid target type: ${ARG_TYPE}")
endif()
# filling target properties
set_property(TARGET ${ARG_NAME} PROPERTY CXX_STANDARD 11)
set_property(TARGET ${ARG_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
if (ARG_DEFINES)
target_compile_definitions(${ARG_NAME} PRIVATE ${ARG_DEFINES})
endif()
if (ARG_INCLUDES)
target_include_directories(${ARG_NAME} PRIVATE ${ARG_INCLUDES})
endif()
if (ARG_LINK_LIBRARIES)
target_link_libraries(${ARG_NAME} PRIVATE ${ARG_LINK_LIBRARIES})
endif()
if (ARG_DEPENDENCIES)
add_dependencies(${ARG_NAME} ${ARG_DEPENDENCIES})
endif()
endfunction()
#[[
Wrapper function over addIeTarget that also adds a test with the same name.
You could use
    addIeTargetTest( ... LABELS labelOne labelTwo )
to also provide labels for that test.
Important: you MUST pass LABELS as the last argument; otherwise it will consume any parameters that come after it.
#]]
function(addIeTargetTest)
set(options
)
set(oneValueRequiredArgs
NAME
)
set(oneValueOptionalArgs
)
set(multiValueArgs
LABELS
)
cmake_parse_arguments(ARG "${options}" "${oneValueRequiredArgs};${oneValueOptionalArgs}" "${multiValueArgs}" ${ARGN} )
addIeTarget(TYPE executable NAME ${ARG_NAME} ${ARG_UNPARSED_ARGUMENTS})
add_test(NAME ${ARG_NAME} COMMAND ${ARG_NAME})
set_property(TEST ${ARG_NAME} PROPERTY LABELS ${ARG_LABELS})
endfunction()
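A hedged usage sketch: arguments not recognized here (for example `ROOT` or `LINK_LIBRARIES`) are forwarded to `addIeTarget` through `ARG_UNPARSED_ARGUMENTS`, and `LABELS` comes last as required. All names below are illustrative:
```cmake
# Hypothetical test target; the name, root, and libraries are illustrative only.
addIeTargetTest(
    NAME my_unit_tests
    ROOT ${CMAKE_CURRENT_SOURCE_DIR}
    LINK_LIBRARIES gtest gtest_main
    LABELS UNIT
)
```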

View File

@ -1,46 +1,43 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#64 bits platform
if ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
message(STATUS "Detected 64 bit architecture")
SET(ARCH_64 ON)
SET(ARCH_32 OFF)
else()
message(STATUS "Detected 32 bit architecture")
SET(ARCH_64 OFF)
SET(ARCH_32 ON)
endif()
if (NOT ARCH_64)
if (UNIX OR APPLE)
SET(ENABLE_CLDNN OFF)
endif()
SET(ENABLE_MKL_DNN OFF)
endif()
#apple specific
# Apple specific
if (APPLE)
set(ENABLE_GNA OFF)
set(ENABLE_CLDNN OFF)
endif()
# Android specific
if(ANDROID)
set(ENABLE_GNA OFF)
endif()
# ARM specific
if (ARM OR AARCH64)
# disable all base plugins but Myriad
set(ENABLE_GNA OFF)
set(ENABLE_HDDL OFF)
endif()
# disable SSE
if(NOT(X86_64 OR X86))
set(ENABLE_SSE42 OFF)
endif()
# MinGW specific - under Wine there is no support for downloading files and applying them using git
if (WIN32)
if (MINGW)
SET(ENABLE_CLDNN OFF) # dont have mingw dll for linking
set(ENABLE_SAMPLES OFF)
endif()
endif()
if (NOT ENABLE_MKL_DNN)
set(ENABLE_MKL OFF)
if (NOT ENABLE_VPU OR NOT ENABLE_NGRAPH)
set(ENABLE_MYRIAD OFF)
endif()
if (NOT ENABLE_VPU)
set(ENABLE_MYRIAD OFF)
if(CMAKE_CROSSCOMPILING)
set(ENABLE_PROFILING_ITT OFF)
endif()
# The next section sets defines that are accessible in C++/C code for certain features
@ -48,10 +45,6 @@ if (ENABLE_PROFILING_RAW)
add_definitions(-DENABLE_PROFILING_RAW=1)
endif()
if (ENABLE_CLDNN)
add_definitions(-DENABLE_CLDNN=1)
endif()
if (ENABLE_MYRIAD)
add_definitions(-DENABLE_MYRIAD=1)
endif()
@ -60,6 +53,15 @@ if (ENABLE_MYRIAD_NO_BOOT AND ENABLE_MYRIAD )
add_definitions(-DENABLE_MYRIAD_NO_BOOT=1)
endif()
if (NOT ENABLE_TESTS)
SET(ENABLE_BEH_TESTS OFF)
SET(ENABLE_FUNCTIONAL_TESTS OFF)
endif()
if (ENABLE_CLDNN)
add_definitions(-DENABLE_CLDNN=1)
endif()
if (ENABLE_MKL_DNN)
add_definitions(-DENABLE_MKL_DNN=1)
endif()
@ -77,11 +79,6 @@ if (ENABLE_GNA)
message(STATUS "GNA_LIBRARY_VERSION not set. Can be GNA1, GNA1_1401 or GNA2. Default is ${GNA_LIBRARY_VERSION}")
endif()
if (GNA_LIBRARY_VERSION STREQUAL "GNA2")
message(WARNING "GNA2 is not currently supported. Fallback to ${DEFAULT_GNA_LIB}")
set(GNA_LIBRARY_VERSION ${DEFAULT_GNA_LIB})
endif()
if (UNIX AND NOT APPLE AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.4)
message(WARNING "${GNA_LIBRARY_VERSION} no supported on GCC version ${CMAKE_CXX_COMPILER_VERSION}. Fallback to GNA1")
set(GNA_LIBRARY_VERSION GNA1)
@ -91,39 +88,21 @@ if (ENABLE_GNA)
list (APPEND IE_OPTIONS GNA_LIBRARY_VERSION)
endif()
if (ENABLE_SAMPLES)
set (ENABLE_SAMPLES_CORE ON)
endif()
# model-dependent tests
if (DEVELOPMENT_PLUGIN_MODE)
message (STATUS "Enabled development plugin mode")
set (ENABLE_MKL_DNN OFF)
set (ENABLE_TESTS OFF)
message (STATUS "Initialising submodules")
execute_process (COMMAND git submodule update --init ${IE_MAIN_SOURCE_DIR}/thirdparty/pugixml
RESULT_VARIABLE git_res)
if (NOT ${git_res})
message (STATUS "Initialising submodules - done")
endif()
endif()
if (NOT ENABLE_TESTS)
set(ENABLE_GNA_MODELS OFF)
endif ()
if (VERBOSE_BUILD)
set(CMAKE_VERBOSE_MAKEFILE ON)
endif()
if(ENABLE_DUMP)
add_definitions(-DDEBUG_DUMP)
endif()
if (LINUX AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.3)
set(ENABLE_UNICODE_PATH_SUPPORT OFF)
endif()
if (ENABLE_UNICODE_PATH_SUPPORT)
add_definitions(-DENABLE_UNICODE_PATH_SUPPORT=1)
endif()
# functional tests require FormatParser which is disabled by this option
if(NOT ENABLE_IR_READER)
set(ENABLE_FUNCTIONAL_TESTS OFF)
endif()
print_enabled_features()

View File

@ -0,0 +1,124 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if (ENABLE_CLANG_FORMAT)
set(CLANG_FORMAT_FILENAME clang-format-9 clang-format)
find_program(CLANG_FORMAT NAMES ${CLANG_FORMAT_FILENAME} PATHS ENV PATH)
if (CLANG_FORMAT)
execute_process(COMMAND ${CLANG_FORMAT} --version OUTPUT_VARIABLE CLANG_VERSION)
if (NOT CLANG_VERSION OR CLANG_VERSION STREQUAL "")
message(WARNING "Supported clang-format version is 9!")
set(ENABLE_CLANG_FORMAT OFF)
else()
string(REGEX REPLACE ".*([0-9]+)\\.[0-9]+\\.[0-9]+.*" "\\1" CLANG_FORMAT_MAJOR_VERSION ${CLANG_VERSION})
if (NOT ${CLANG_FORMAT_MAJOR_VERSION} EQUAL "9")
message(WARNING "Supported clang-format version is 9!")
set(ENABLE_CLANG_FORMAT OFF)
endif()
endif()
endif()
endif()
if(ENABLE_CLANG_FORMAT)
add_custom_target(clang_format_check_all)
add_custom_target(clang_format_fix_all)
set(CLANG_FORMAT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All clang-format output files")
endif()
function(add_clang_format_target TARGET_NAME)
if(NOT ENABLE_CLANG_FORMAT)
return()
endif()
set(options ALL)
set(oneValueArgs "")
set(multiValueArgs "FOR_TARGETS" "FOR_SOURCES" "EXCLUDE_PATTERNS")
cmake_parse_arguments(CLANG_FORMAT "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if(CLANG_FORMAT_ALL)
set(all ALL)
endif()
foreach(target IN LISTS CLANG_FORMAT_FOR_TARGETS)
get_target_property(target_sources "${target}" SOURCES)
list(APPEND CLANG_FORMAT_FOR_SOURCES ${target_sources})
endforeach()
list(REMOVE_DUPLICATES CLANG_FORMAT_FOR_SOURCES)
set(all_output_files "")
foreach(source_file IN LISTS CLANG_FORMAT_FOR_SOURCES)
set(exclude FALSE)
foreach(pattern IN LISTS CLANG_FORMAT_EXCLUDE_PATTERNS)
if(source_file MATCHES "${pattern}")
set(exclude ON)
break()
endif()
endforeach()
if(exclude)
continue()
endif()
# ignore object libraries
if(NOT EXISTS "${source_file}")
continue()
endif()
file(RELATIVE_PATH source_file_relative "${CMAKE_CURRENT_SOURCE_DIR}" "${source_file}")
set(output_file "${CMAKE_CURRENT_BINARY_DIR}/clang_format/${source_file_relative}.clang")
string(REPLACE ".." "__" output_file "${output_file}")
get_filename_component(output_dir "${output_file}" DIRECTORY)
file(MAKE_DIRECTORY "${output_dir}")
add_custom_command(
OUTPUT
"${output_file}"
COMMAND
"${CMAKE_COMMAND}"
-D "CLANG_FORMAT=${CLANG_FORMAT}"
-D "INPUT_FILE=${source_file}"
-D "OUTPUT_FILE=${output_file}"
-P "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_check.cmake"
DEPENDS
"${source_file}"
"${IE_MAIN_SOURCE_DIR}/cmake/clang_format_check.cmake"
COMMENT
"[clang-format] ${source_file}"
VERBATIM)
list(APPEND all_output_files "${output_file}")
endforeach()
set(CLANG_FORMAT_ALL_OUTPUT_FILES
${CLANG_FORMAT_ALL_OUTPUT_FILES} ${all_output_files}
CACHE INTERNAL
"All clang-format output files")
add_custom_target(${TARGET_NAME}
${all}
DEPENDS ${all_output_files}
COMMENT "[clang-format] ${TARGET_NAME}")
add_custom_target(${TARGET_NAME}_fix
COMMAND
"${CMAKE_COMMAND}"
-D "CLANG_FORMAT=${CLANG_FORMAT}"
-D "INPUT_FILES=${CLANG_FORMAT_FOR_SOURCES}"
-D "EXCLUDE_PATTERNS=${CLANG_FORMAT_EXCLUDE_PATTERNS}"
-P "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_fix.cmake"
DEPENDS
"${CLANG_FORMAT_FOR_SOURCES}"
"${IE_MAIN_SOURCE_DIR}/cmake/clang_format_fix.cmake"
COMMENT
"[clang-format] ${TARGET_NAME}_fix"
VERBATIM)
# if(CLANG_FORMAT_FOR_TARGETS)
# foreach(target IN LISTS CLANG_FORMAT_FOR_TARGETS)
# add_dependencies(${target} ${TARGET_NAME})
# endforeach()
# endif()
add_dependencies(clang_format_check_all ${TARGET_NAME})
add_dependencies(clang_format_fix_all ${TARGET_NAME}_fix)
endfunction()
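As an illustration, a component could hook its sources into the `clang_format_check_all` / `clang_format_fix_all` targets like this; the target name and exclude pattern are hypothetical:
```cmake
# Hypothetical registration; 'my_component' and the pattern are illustrative only.
add_clang_format_target(my_component_clang_format
    FOR_TARGETS my_component
    EXCLUDE_PATTERNS ".*thirdparty.*")
```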

View File

@ -0,0 +1,18 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
file(REMOVE "${OUTPUT_FILE}")
execute_process(COMMAND ${CLANG_FORMAT} -style=file -output-replacements-xml ${INPUT_FILE}
OUTPUT_VARIABLE STYLE_CHECK_RESULT
)
# Display the clang-format output in the console (so it can be parsed from an IDE)
message("${STYLE_CHECK_RESULT}")
file(WRITE "${OUTPUT_FILE}" "${STYLE_CHECK_RESULT}")
if(NOT SKIP_RETURN_CODE)
if("${STYLE_CHECK_RESULT}" MATCHES ".*<replacement .*")
message(FATAL_ERROR "[clang-format] Code style check failed for : ${INPUT_FILE}")
endif()
endif()

View File

@ -0,0 +1,24 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
function(style_apply_file INPUT_FILE)
execute_process(COMMAND ${CLANG_FORMAT} -style=file -i ${INPUT_FILE}
OUTPUT_VARIABLE STYLE_CHECK_RESULT)
endfunction()
foreach(source_file IN LISTS INPUT_FILES)
set(exclude FALSE)
foreach(pattern IN LISTS EXCLUDE_PATTERNS)
if(source_file MATCHES "${pattern}")
set(exclude ON)
break()
endif()
endforeach()
if(exclude)
continue()
endif()
style_apply_file(${source_file})
endforeach()

View File

@ -1,15 +1,32 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(DEFINED IE_MAIN_SOURCE_DIR AND TARGET inference_engine)
set(InferenceEngine_INCLUDE_DIRS ${IE_MAIN_SOURCE_DIR}/include)
set(InferenceEngine_LIBRARIES inference_engine)
set(InferenceEngine_LIBRARIES inference_engine_c_api inference_engine_nn_builder inference_engine)
else()
include("${CMAKE_CURRENT_LIST_DIR}/targets.cmake")
if(NOT WIN32)
set_target_properties(IE::inference_engine PROPERTIES INTERFACE_COMPILE_OPTIONS "-Wno-error=deprecated-declarations")
endif()
file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
set (ie_options THREADING)
load_cache("${cache_path}" READ_WITH_PREFIX "" ${ie_options})
message(STATUS "The following CMake options are exported from the Inference Engine build")
message("")
foreach(option IN LISTS ie_options)
message(" ${option}: ${${option}}")
endforeach()
message("")
# inherit TBB from main IE project if enabled
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
load_cache("${cache_path}" READ_WITH_PREFIX "" TBB_DIR;ENABLE_TBB_RELEASE_ONLY)
set(TBB_FIND_RELEASE_ONLY ${ENABLE_TBB_RELEASE_ONLY})
find_package(TBB)
endif()
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine)
set(InferenceEngine_LIBRARIES IE::inference_engine_c_api IE::inference_engine_nn_builder IE::inference_engine)
endif()
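For reference, a minimal sketch of a downstream CMakeLists.txt that consumes this config; the project name ie_consumer and the source file main.cpp are placeholders, and InferenceEngine_DIR may need to be pointed at the folder containing InferenceEngineConfig.cmake.

cmake_minimum_required(VERSION 3.5)
project(ie_consumer)

# Locate the Inference Engine package (build tree or installed package).
find_package(InferenceEngine REQUIRED)

add_executable(ie_consumer main.cpp)                                    # placeholder source
target_include_directories(ie_consumer PRIVATE ${InferenceEngine_INCLUDE_DIRS})
target_link_libraries(ie_consumer PRIVATE ${InferenceEngine_LIBRARIES})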

View File

@ -1,4 +1,4 @@
# Copyright (C) 2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@ -37,7 +37,7 @@ function(add_cpplint_target TARGET_NAME)
set(exclude FALSE)
foreach(pattern IN LISTS CPPLINT_EXCLUDE_PATTERNS)
if(source_file MATCHES "${pattern}")
set(exclude TRUE)
set(exclude ON)
break()
endif()
endforeach()
@ -46,6 +46,11 @@ function(add_cpplint_target TARGET_NAME)
continue()
endif()
# ignore object libraries
if(NOT EXISTS "${source_file}")
continue()
endif()
file(RELATIVE_PATH source_file_relative "${CMAKE_CURRENT_SOURCE_DIR}" "${source_file}")
set(output_file "${CMAKE_CURRENT_BINARY_DIR}/cpplint/${source_file_relative}.cpplint")
string(REPLACE ".." "__" output_file "${output_file}")

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@ -8,6 +8,9 @@ cmake_policy(SET CMP0054 NEW)
include(dependency_solver)
set_temp_directory(TEMP "${IE_MAIN_SOURCE_DIR}")
if (CMAKE_CROSSCOMPILING)
set(CMAKE_STAGING_PREFIX "${TEMP}")
endif()
include(ExternalProject)
@ -16,6 +19,8 @@ if(COMMAND get_linux_name)
get_linux_name(LINUX_OS_NAME)
endif()
include(CMakeParseArguments)
if (ENABLE_MYRIAD)
include(vpu_dependencies)
endif()
@ -69,85 +74,126 @@ endif ()
## TBB package
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
if (WIN32)
#TODO: add target_path to be platform specific as well, to avoid following if
RESOLVE_DEPENDENCY(TBB
ARCHIVE_WIN "tbb2019_20181010_win.zip" #TODO: windows zip archive created incorrectly using old name for folder
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2019_20181010_lin.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
else(APPLE)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2019_20190414_v1_mac.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
reset_deps_cache(TBBROOT TBB_DIR)
if(NOT DEFINED TBB_DIR AND NOT DEFINED ENV{TBB_DIR})
if (WIN32)
#TODO: add target_path to be platform specific as well, to avoid following if
RESOLVE_DEPENDENCY(TBB
ARCHIVE_WIN "tbb2020_20191023_win_tbbbind_patched.zip"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(ANDROID) # Should be checked before LINUX because LINUX is detected as well
RESOLVE_DEPENDENCY(TBB
ARCHIVE_ANDROID "tbb2020_20191023_android.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2020_20191023_lin_tbbbind_patched.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
else(APPLE)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2020_20191023_mac.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
endif()
else()
if(DEFINED TBB_DIR)
get_filename_component(TBB ${TBB_DIR} DIRECTORY)
else()
get_filename_component(TBB $ENV{TBB_DIR} DIRECTORY)
endif()
endif()
log_rpath_from_dir(TBB "${TBB}/lib")
update_deps_cache(TBBROOT "${TBB}" "Path to TBB root folder")
update_deps_cache(TBB_DIR "${TBBROOT}/cmake" "Path to TBB package folder")
if (WIN32)
log_rpath_from_dir(TBB "${TBB_DIR}/../bin")
else ()
log_rpath_from_dir(TBB "${TBB_DIR}/../lib")
endif ()
debug_message(STATUS "tbb=" ${TBB})
endif ()
if (ENABLE_OPENCV)
set(OPENCV_VERSION "4.1.2")
set(OPENCV_BUILD "624")
set(OPENCV_SUFFIX "")
reset_deps_cache(OpenCV_DIR)
set(OPENCV_VERSION "4.2.0")
set(OPENCV_BUILD "082")
if (WIN32)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_WIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}.zip"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}"
ARCHIVE_WIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "\\opencv_${OPENCV_VERSION}\\bin")
elseif(APPLE)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_MAC "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_osx.tar.xz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_osx"
ARCHIVE_MAC "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_osx.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_osx/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "opencv_${OPENCV_VERSION}_osx/lib")
elseif(LINUX)
if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "armv7l")
set(OPENCV_SUFFIX "debian9arm")
elseif (${LINUX_OS_NAME} STREQUAL "CentOS 7" OR CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9")
set(OPENCV_SUFFIX "centos7")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 16.04")
set(OPENCV_SUFFIX "ubuntu16")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 18.04")
set(OPENCV_SUFFIX "ubuntu18")
elseif (${LINUX_OS_NAME} STREQUAL "CentOS 7")
set(OPENCV_SUFFIX "centos7")
endif()
endif()
if (OPENCV_SUFFIX)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_${OPENCV_SUFFIX}.tar.xz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_${OPENCV_SUFFIX}"
ARCHIVE_LIN "opencv_${OPENCV_VERSION}-${OPENCV_BUILD}_${OPENCV_SUFFIX}.txz"
TARGET_PATH "${TEMP}/opencv_${OPENCV_VERSION}_${OPENCV_SUFFIX}/opencv"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "opencv_${OPENCV_VERSION}_${OPENCV_SUFFIX}/lib")
endif()
if(ANDROID)
set(ocv_cmake_path "${OPENCV}/sdk/native/jni/")
else()
set(ocv_cmake_path "${OPENCV}/cmake")
endif()
update_deps_cache(OpenCV_DIR "${ocv_cmake_path}" "Path to OpenCV package folder")
if(WIN32)
log_rpath_from_dir(OPENCV "${OpenCV_DIR}/../bin")
elseif(ANDROID)
log_rpath_from_dir(OPENCV "${OpenCV_DIR}/../../../lib")
else()
log_rpath_from_dir(OPENCV "${OpenCV_DIR}/../lib")
endif()
debug_message(STATUS "opencv=" ${OPENCV})
# OpenCV_DIR should point to the cmake folder within the specified OpenCV binary package.
# It is required to successfully find the OpenCV libs using the find_package(OpenCV ...) command.
# So, the cached OpenCV_DIR variable should be updated if a custom value wasn't previously set here.
if (NOT DEFINED ENV{OpenCV_DIR})
set(OpenCV_DIR "${OPENCV}/cmake" CACHE PATH "Path to OpenCV in temp directory")
endif()
endif()
include(ie_parallel)
if (ENABLE_GNA)
reset_deps_cache(
GNA_PLATFORM_DIR
GNA_KERNEL_LIB_NAME
GNA_LIBS_LIST
GNA_LIB_DIR
libGNA_INCLUDE_DIRS
libGNA_LIBRARIES_BASE_PATH)
if (GNA_LIBRARY_VERSION STREQUAL "GNA1")
RESOLVE_DEPENDENCY(GNA
ARCHIVE_UNIFIED "gna_20181120.zip"
TARGET_PATH "${TEMP}/gna")
elseif(GNA_LIBRARY_VERSION STREQUAL "GNA1_1401")
set(GNA_VERSION "01.00.00.1401")
else()
if(GNA_LIBRARY_VERSION STREQUAL "GNA1_1401")
set(GNA_VERSION "01.00.00.1401")
endif()
if(GNA_LIBRARY_VERSION STREQUAL "GNA2")
set(GNA_VERSION "02.00.00.0587")
endif()
RESOLVE_DEPENDENCY(GNA
ARCHIVE_UNIFIED "GNA_${GNA_VERSION}.zip"
TARGET_PATH "${TEMP}/gna_${GNA_VERSION}"
@ -157,16 +203,16 @@ if (ENABLE_GNA)
endif()
configure_file(
"${PROJECT_SOURCE_DIR}/cmake/share/InferenceEngineConfig.cmake.in"
"${IE_MAIN_SOURCE_DIR}/cmake/share/InferenceEngineConfig.cmake.in"
"${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake"
@ONLY)
configure_file(
"${PROJECT_SOURCE_DIR}/cmake/share/InferenceEngineConfig-version.cmake.in"
"${IE_MAIN_SOURCE_DIR}/cmake/share/InferenceEngineConfig-version.cmake.in"
"${CMAKE_BINARY_DIR}/share/InferenceEngineConfig-version.cmake"
COPYONLY)
configure_file(
"${PROJECT_SOURCE_DIR}/cmake/ie_parallel.cmake"
"${IE_MAIN_SOURCE_DIR}/cmake/ie_parallel.cmake"
"${CMAKE_BINARY_DIR}/share/ie_parallel.cmake"
COPYONLY)

View File

@ -1,12 +1,12 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
include ("download")
function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHIVE_WIN ARCHIVE_LIN ARCHIVE_MAC TARGET_PATH FOLDER ENVIRONMENT)
function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHIVE_WIN ARCHIVE_LIN ARCHIVE_MAC ARCHIVE_ANDROID TARGET_PATH FOLDER ENVIRONMENT)
if (ENVIRONMENT AND (DEFINED ENV{${ENVIRONMENT}}))
if (ENVIRONMENT AND (DEFINED ${ENVIRONMENT} OR DEFINED ENV{${ENVIRONMENT}}))
set(HAS_ENV "TRUE")
endif()
@ -15,12 +15,16 @@ function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHI
#TODO: check whether this is a platform-specific binary with the same name per platform, or whether it is in a common folder
DownloadAndExtract(${COMPONENT} ${ARCHIVE} ${TARGET_PATH} result_path ${FOLDER})
else()
DownloadAndExtractPlatformSpecific(${COMPONENT} ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC} ${TARGET_PATH} result_path ${FOLDER})
DownloadAndExtractPlatformSpecific(${COMPONENT} ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC} ${ARCHIVE_ANDROID} ${TARGET_PATH} result_path ${FOLDER})
endif()
set (${VAR} ${result_path} PARENT_SCOPE)
else()
set (${VAR} $ENV{${ENVIRONMENT}} PARENT_SCOPE)
if (DEFINED ${ENVIRONMENT})
set (${VAR} ${${ENVIRONMENT}} PARENT_SCOPE)
else ()
set (${VAR} $ENV{${ENVIRONMENT}} PARENT_SCOPE)
endif ()
endif()
endfunction(resolve_archive_dependency)
@ -50,7 +54,7 @@ endfunction(read_version)
function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
list(REMOVE_AT ARGV 0)
set(SUPPORTED_ARGS FOLDER ARCHIVE ARCHIVE_UNIFIED ARCHIVE_WIN ARCHIVE_LIN ARCHIVE_MAC TARGET_PATH ENVIRONMENT GITHUB_PULL_REQUEST VERSION_REGEX)
set(SUPPORTED_ARGS FOLDER ARCHIVE ARCHIVE_UNIFIED ARCHIVE_WIN ARCHIVE_LIN ARCHIVE_MAC ARCHIVE_ANDROID TARGET_PATH ENVIRONMENT GITHUB_PULL_REQUEST VERSION_REGEX)
#unnecessary vars
@ -97,6 +101,10 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
SET(ARCHIVE_MAC "OFF")
endif()
if (NOT DEFINED ARCHIVE_ANDROID)
SET(ARCHIVE_ANDROID "OFF")
endif()
if (NOT DEFINED ENVIRONMENT)
set (ENVIRONMENT "OFF")
endif()
@ -108,15 +116,15 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
#for each dependency type, we have to do separate things
if (ARCHIVE_WIN OR ARCHIVE_LIN OR ARCHIVE_MAC OR ARCHIVE OR ARCHIVE_UNIFIED)
if (ARCHIVE_WIN OR ARCHIVE_LIN OR ARCHIVE_MAC OR ARCHIVE_ANDROID OR ARCHIVE OR ARCHIVE_UNIFIED)
if (NOT DEFINED TARGET_PATH)
message(FATAL_ERROR "TARGET_PATH should be defined for every dependency")
endif()
resolve_archive_dependency(RESULT ${NAME_OF_CMAKE_VAR} ${ARCHIVE} ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC} ${TARGET_PATH} ${FOLDER} ${ENVIRONMENT})
resolve_archive_dependency(RESULT ${NAME_OF_CMAKE_VAR} ${ARCHIVE} ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC} ${ARCHIVE_ANDROID} ${TARGET_PATH} ${FOLDER} ${ENVIRONMENT})
set(${NAME_OF_CMAKE_VAR} ${RESULT} PARENT_SCOPE)
if (VERSION_REGEX)
GetNameAndUrlToDownload(archive RELATIVE_URL ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC})
GetNameAndUrlToDownload(archive RELATIVE_URL ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC} ${ARCHIVE_ANDROID})
if (archive)
read_version(${archive} ${VERSION_REGEX} "${NAME_OF_CMAKE_VAR}_VERSION")
endif()
@ -125,7 +133,55 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
elseif (DEFINED GITHUB_PULL_REQUEST)
resolve_pull_request(${GITHUB_PULL_REQUEST} ${TARGET_PATH})
else()
message(FATAL_ERROR "Dependency of unknowntype, SHOULD set one of ARCHIVE_WIN, ARCHIVE, ARCHIVE_LIN, ARCHIVE_MAC, GITHUB_PULL_REQUEST")
message(FATAL_ERROR "Dependency of unknowntype, SHOULD set one of ARCHIVE_WIN, ARCHIVE, ARCHIVE_LIN, ARCHIVE_MAC, ARCHIVE_ANDROID, GITHUB_PULL_REQUEST")
endif()
endfunction(RESOLVE_DEPENDENCY)
function(reset_deps_cache)
#
# Reset the dependencies cache if it was set by the dependency solver
#
set(need_reset FALSE)
foreach(var_name IN LISTS ARGN)
if(DEFINED ${var_name})
if(${var_name} MATCHES ${TEMP})
set(need_reset TRUE)
endif()
endif()
endforeach()
foreach(var_name IN LISTS ARGN)
if(DEFINED ENV{${var_name}})
if($ENV{${var_name}} MATCHES ${TEMP})
set(need_reset TRUE)
endif()
endif()
endforeach()
if(need_reset)
foreach(var_name IN LISTS ARGN)
unset(${var_name} CACHE)
endforeach()
foreach(var_name IN LISTS ARGN)
unset(ENV{${var_name}})
endforeach()
endif()
endfunction()
function(update_deps_cache VAR_NAME INTERNAL_VALUE DOC_MSG)
#
# Update the variable value if it wasn't provided by the user
#
if(NOT DEFINED ${VAR_NAME} AND NOT DEFINED ENV{${VAR_NAME}})
# The user didn't provide their own value, so use INTERNAL_VALUE
set(${VAR_NAME} ${INTERNAL_VALUE} CACHE PATH ${DOC_MSG})
else()
# The variable was provided by the user, don't use INTERNAL_VALUE
if(NOT DEFINED ${VAR_NAME} AND DEFINED ENV{${VAR_NAME}})
# User provided the variable via environment, convert it to the CACHE variable
set(${VAR_NAME} $ENV{${VAR_NAME}} CACHE PATH ${DOC_MSG})
endif()
endif()
endfunction()
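The two helpers above are intended to be used around a RESOLVE_DEPENDENCY call, mirroring the TBB and OpenCV sections earlier in this commit. A minimal sketch with a hypothetical dependency FOO (archive name, paths and cache variable are placeholders):

reset_deps_cache(FOO_DIR)                       # drop cached values that point into ${TEMP}
RESOLVE_DEPENDENCY(FOO
    ARCHIVE_UNIFIED "foo_1.0.zip"               # placeholder archive name
    TARGET_PATH "${TEMP}/foo")
update_deps_cache(FOO_DIR "${FOO}/cmake" "Path to FOO package folder")
find_package(FOO)                               # would now resolve against the cached FOO_DIR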

View File

@ -1,33 +1,15 @@
# Copyright (C) 2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(IE_MAIN_SOURCE_DIR "@CMAKE_SOURCE_DIR@")
set(OpenVINO_MAIN_SOURCE_DIR "@OpenVINO_SOURCE_DIR@")
set(IE_MAIN_SOURCE_DIR "@InferenceEngine_SOURCE_DIR@")
file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
# inherit OpenCV from main IE project
load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR)
find_package(OpenCV COMPONENTS imgcodecs)
# Targets
include("${CMAKE_CURRENT_LIST_DIR}/targets_developer.cmake")
# add additional interface include directories needed for plugin development
if(NOT TARGET IE::inference_engine)
message(FATAL_ERROR "The target IE::inference_engine does not exist")
endif()
set(ie_plugin_headers "${IE_MAIN_SOURCE_DIR}/src/inference_engine")
set_property(TARGET IE::inference_engine APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${ie_plugin_headers}")
set_property(TARGET IE::inference_engine PROPERTY IMPORTED_GLOBAL TRUE)
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine)
# Variables to export in plugin's projects
set(ie_options "@IE_OPTIONS@;CMAKE_BUILD_TYPE")
set(ie_options "@IE_OPTIONS@;CMAKE_BUILD_TYPE;CMAKE_SKIP_RPATH")
load_cache("${cache_path}" READ_WITH_PREFIX "" ${ie_options})
@ -38,11 +20,38 @@ foreach(option IN LISTS ie_options)
endforeach()
message("")
set(gflags_DIR "@gflags_BINARY_DIR@")
# Targets
include("${CMAKE_CURRENT_LIST_DIR}/targets_developer.cmake")
# to allow too create ALIAS for IE::inference_engine in 3rd-party projects
set_property(TARGET IE::inference_engine PROPERTY IMPORTED_GLOBAL TRUE)
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine IE::inference_engine_nn_builder)
#
# Common cmake includes
#
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake;${IE_MAIN_SOURCE_DIR}/cmake")
list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake")
list(APPEND CMAKE_MODULE_PATH "${IE_MAIN_SOURCE_DIR}/cmake")
# generic stuff from developer package
include(developer_package)
include(developer_package_ie)
# inherit OpenCV from main IE project if enabled
if (ENABLE_OPENCV)
load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR)
find_package(OpenCV)
endif()
# inherit TBB from main IE project if enabled
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
load_cache("${cache_path}" READ_WITH_PREFIX "" TBB_DIR)
find_package(TBB)
endif()
find_package(Threads REQUIRED)

View File

@ -0,0 +1,42 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# environment variables used
# name of the environment variable that stores the path to the temp directory
set(DL_SDK_TEMP "DL_SDK_TEMP")
# prepare temporary folder
function(set_temp_directory temp_variable source_tree_dir)
if (DEFINED ENV{${DL_SDK_TEMP}} AND NOT $ENV{${DL_SDK_TEMP}} STREQUAL "")
if (WIN32)
string(REPLACE "\\" "\\\\" temp $ENV{${DL_SDK_TEMP}})
else(WIN32)
set(temp $ENV{${DL_SDK_TEMP}})
endif(WIN32)
if (ENABLE_ALTERNATIVE_TEMP)
set(ALTERNATIVE_PATH ${source_tree_dir}/temp)
endif()
else ()
message(STATUS "DL_SDK_TEMP envionment not set")
set(temp ${source_tree_dir}/temp)
endif()
set("${temp_variable}" "${temp}" PARENT_SCOPE)
if(ALTERNATIVE_PATH)
set(ALTERNATIVE_PATH "${ALTERNATIVE_PATH}" PARENT_SCOPE)
endif()
endfunction()
include(cpplint)
include(clang_format)
include(cppcheck)
if(ENABLE_PROFILING_ITT)
find_package(ITT REQUIRED)
endif()
set(TBB_FIND_RELEASE_ONLY ${ENABLE_TBB_RELEASE_ONLY})
include(plugins/plugins)

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,34 +1,29 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
include ("extract")
include ("download_and_check")
function (GetNameAndUrlToDownload name url archive_name_unified archive_name_win archive_name_lin archive_name_mac)
function (GetNameAndUrlToDownload name url archive_name_unified archive_name_win archive_name_lin archive_name_mac archive_name_android)
if (archive_name_unified)
set (${url} "${archive_name_unified}" PARENT_SCOPE)
set (${name} ${archive_name_unified} PARENT_SCOPE)
else()
if (LINUX OR (APPLE AND NOT archive_name_mac))
if (NOT archive_name_lin)
return()
endif()
if(archive_name_lin)
set (PLATFORM_FOLDER linux)
set (archive_name ${archive_name_lin})
elseif(APPLE)
if (NOT archive_name_mac)
return()
endif()
elseif(archive_name_mac)
set (PLATFORM_FOLDER mac)
set (archive_name ${archive_name_mac})
else()
#if there is no dependency for the target platform, skip it
if (NOT archive_name_win)
return()
endif()
elseif(archive_name_android)
set (PLATFORM_FOLDER android)
set (archive_name ${archive_name_android})
elseif(archive_name_win)
set (PLATFORM_FOLDER windows)
set (archive_name ${archive_name_win})
else()
return()
endif()
set (${name} ${archive_name} PARENT_SCOPE)
@ -37,17 +32,18 @@ function (GetNameAndUrlToDownload name url archive_name_unified archive_name_win
endfunction(GetNameAndUrlToDownload)
#download from a platform-specific folder on the share server
function (DownloadAndExtractPlatformSpecific
component
archive_name_unified
archive_name_win
archive_name_lin
archive_name_mac
unpacked_path
function (DownloadAndExtractPlatformSpecific
component
archive_name_unified
archive_name_win
archive_name_lin
archive_name_mac
archive_name_android
unpacked_path
result_path
folder)
GetNameAndUrlToDownload(archive_name RELATIVE_URL ${archive_name_unified} ${archive_name_win} ${archive_name_lin} ${archive_name_mac} )
GetNameAndUrlToDownload(archive_name RELATIVE_URL ${archive_name_unified} ${archive_name_win} ${archive_name_lin} ${archive_name_mac} ${archive_name_android} )
if (NOT archive_name OR NOT RELATIVE_URL)
return()
endif()
@ -61,35 +57,35 @@ function (DownloadAndExtract component archive_name unpacked_path result_path fo
set (RELATIVE_URL "${archive_name}")
set(fattal TRUE)
CheckOrDownloadAndExtract(${component} ${RELATIVE_URL} ${archive_name} ${unpacked_path} result_path2 ${folder} ${fattal} result TRUE)
if (NOT ${result})
DownloadAndExtractPlatformSpecific(${component} ${archive_name} ${archive_name} ${archive_name} ${unpacked_path} ${result_path2} ${folder})
endif()
endif()
set (${result_path} ${result_path2} PARENT_SCOPE)
endfunction(DownloadAndExtract)
function (DownloadAndExtractInternal URL archive_path unpacked_path folder fattal result123)
function (DownloadAndExtractInternal URL archive_path unpacked_path folder fattal resultExt)
set (status "ON")
DownloadAndCheck(${URL} ${archive_path} ${fattal} result1)
if ("${result1}" STREQUAL "ARCHIVE_DOWNLOAD_FAIL")
#check alternative url as well
set (status "OFF")
file(REMOVE_RECURSE "${archive_path}")
file(REMOVE_RECURSE "${archive_path}")
endif()
if ("${result1}" STREQUAL "CHECKSUM_DOWNLOAD_FAIL" OR "${result1}" STREQUAL "HASH_MISMATCH")
set(status FALSE)
file(REMOVE_RECURSE "${archive_path}")
file(REMOVE_RECURSE "${archive_path}")
endif()
if("${status}" STREQUAL "ON")
ExtractWithVersion(${URL} ${archive_path} ${unpacked_path} ${folder} result)
endif()
set (result123 ${status} PARENT_SCOPE)
set (${resultExt} ${status} PARENT_SCOPE)
endfunction(DownloadAndExtractInternal)
@ -98,36 +94,49 @@ function (ExtractWithVersion URL archive_path unpacked_path folder result)
debug_message("ExtractWithVersion : ${archive_path} : ${unpacked_path}")
extract(${archive_path} ${unpacked_path} ${folder} status)
#we don't actually need the archive after unpacking
file(REMOVE_RECURSE "${archive_path}")
file(REMOVE_RECURSE "${archive_path}")
if (${status})
set (version_file ${unpacked_path}/ie_dependency.info)
file(WRITE ${version_file} ${URL})
else()
file(REMOVE_RECURSE "${unpacked_path}")
message(FATAL_ERROR "Failed to extract the archive from ${URL}, archive ${archive_path} to folder ${unpacked_path}")
endif()
set (${result} ${status} PARENT_SCOPE)
set (${result} ${status} PARENT_SCOPE)
endfunction (ExtractWithVersion)
function (DownloadOrExtractInternal URL archive_path unpacked_path folder fattal result123)
function (DownloadOrExtractInternal URL archive_path unpacked_path folder fattal resultExt)
debug_message("checking wether archive downloaded : ${archive_path}")
set (downloadStatus "NOTOK")
if (NOT EXISTS ${archive_path})
DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
if (${result})
set (downloadStatus "OK")
endif()
else()
if (ENABLE_UNSAFE_LOCATIONS)
ExtractWithVersion(${URL} ${archive_path} ${unpacked_path} ${folder} result)
if(NOT ${result})
DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
if (${result})
set (downloadStatus "OK")
endif()
endif()
else()
debug_message("archive found on FS : ${archive_path}, however we cannot check it's checksum and think that it is invalid")
file(REMOVE_RECURSE "${archive_path}")
DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
endif()
if (${result})
set (downloadStatus "OK")
endif()
endif()
endif()
endif()
if (NOT ${downloadStatus} STREQUAL "OK")
message(FATAL_ERROR "Failed to download and extract the archive from ${URL}, archive ${archive_path} to folder ${unpacked_path}")
endif()
if (NOT ${result})
message(FATAL_ERROR "error: extract of '${archive_path}' failed")
@ -137,7 +146,7 @@ endfunction(DownloadOrExtractInternal)
file(REMOVE ${CMAKE_BINARY_DIR}/dependencies_64.txt)
function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked_path result_path folder fattal result123 use_alternatives)
function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked_path result_path folder fattal resultExt use_alternatives)
set (archive_path ${TEMP}/download/${archive_name})
set (status "ON")
set (on_master FALSE)
@ -145,7 +154,7 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
if(DEFINED ENV{IE_PATH_TO_DEPS})
set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
else()
set(URL "https://download.01.org/opencv/2019/openvinotoolkit/R3/inference_engine/${RELATIVE_URL}")
set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.1/inference_engine/${RELATIVE_URL}")
endif()
#no message on recursive calls
@ -159,7 +168,7 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
if (NOT EXISTS ${unpacked_path})
DownloadOrExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} status)
else(NOT EXISTS ${unpacked_path})
else(NOT EXISTS ${unpacked_path})
#path exists, so we would like to check what was unpacked version
set (version_file ${unpacked_path}/ie_dependency.info)
@ -176,7 +185,7 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
"\trm -rf ${unpacked_path}\n"
"and rerun cmake.\n"
"If your dependency is fine, then execute:\n\techo ${URL} > ${unpacked_path}/ie_dependency.info\n")
# file(REMOVE_RECURSE "${unpacked_path}")
# file(REMOVE_RECURSE "${unpacked_path}")
# DownloadOrExtractInternal(${URL} ${archive_path} ${unpacked_path} ${fattal} status)
else()
if (EXISTS ${version_file})
@ -196,11 +205,11 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
string(REPLACE ${TEMP} ${ALTERNATIVE_PATH} archive_path ${archive_path})
debug_message("dependency different: use local path for fetching updated version: ${alternative_path}")
CheckOrDownloadAndExtract(${component} ${RELATIVE_URL} ${archive_name} ${unpacked_path} ${result_path} ${folder} ${fattal} ${result123} FALSE)
CheckOrDownloadAndExtract(${component} ${RELATIVE_URL} ${archive_name} ${unpacked_path} ${result_path} ${folder} ${fattal} ${resultExt} FALSE)
else()
debug_message("dependency updated: download it again")
file(REMOVE_RECURSE "${unpacked_path}")
file(REMOVE_RECURSE "${unpacked_path}")
DownloadOrExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} status)
endif()
endif ()
@ -208,11 +217,10 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
endif()
if (${use_alternatives} OR ${on_master})
set (${result123} "${status}" PARENT_SCOPE)
set (${resultExt} "${status}" PARENT_SCOPE)
set (${result_path} ${unpacked_path} PARENT_SCOPE)
endif()
endfunction(CheckOrDownloadAndExtract)
endfunction(CheckOrDownloadAndExtract)

View File

@ -1,10 +1,9 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
function (extract archive_path unpacked_path folder result)
# Slurped from a generated extract-TARGET.cmake file.
if (NOT EXISTS ${unpacked_path})
get_filename_component(unpacked_dir ${unpacked_path} DIRECTORY)
file(MAKE_DIRECTORY ${unpacked_path})
@ -40,6 +39,5 @@ function (extract archive_path unpacked_path folder result)
else()
set(${result} 1 PARENT_SCOPE)
endif()
endif()
endfunction (extract)

View File

@ -1,23 +1,16 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
include (target_flags)
include (options)
#these options are aimed at optimizing build time on a development system
#backed targets
ie_option (ENABLE_GNA "GNA support for inference engine" ON)
ie_option (ENABLE_ROCKHOPER "use Rockhopper decoder for converting / output scores" ON)
ie_option (ENABLE_MKL_DNN "MKL-DNN plugin for inference engine" ON)
ie_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON)
ie_option (ENABLE_CLDNN_TESTS "Enable clDNN unit tests" OFF)
ie_option (ENABLE_CLDNN_BUILD "build clDnn from sources" OFF)
ie_option (ENABLE_PROFILING_ITT "ITT tracing of IE and plugins internals" ON)
ie_option (ENABLE_PROFILING_RAW "Raw counters profiling (just values, no start/stop time or timeline)" OFF)
@ -26,7 +19,11 @@ ie_option (ENABLE_PROFILING_RAW "Raw counters profiling (just values, no start/s
if (NOT GEMM STREQUAL "MKL"
AND NOT GEMM STREQUAL "OPENBLAS"
AND NOT GEMM STREQUAL "JIT")
set (GEMM "JIT")
if(ANDROID)
set(GEMM "JIT")
else()
set(GEMM "JIT")
endif()
message(STATUS "GEMM should be set to MKL, OPENBLAS or JIT. Default option is " ${GEMM})
endif()
set(GEMM "${GEMM}" CACHE STRING "Gemm implementation" FORCE)
@ -37,7 +34,11 @@ if (NOT THREADING STREQUAL "TBB"
AND NOT THREADING STREQUAL "TBB_AUTO"
AND NOT THREADING STREQUAL "OMP"
AND NOT THREADING STREQUAL "SEQ")
set (THREADING "TBB")
if (ARM OR AARCH64)
set (THREADING "SEQ")
else()
set (THREADING "TBB")
endif()
message(STATUS "THREADING should be set to TBB, TBB_AUTO, OMP or SEQ. Default option is " ${THREADING})
endif()
set(THREADING "${THREADING}" CACHE STRING "Threading" FORCE)
@ -49,7 +50,7 @@ ie_option (ENABLE_MYRIAD "myriad targeted plugin for inference engine" ON)
ie_option (ENABLE_MYRIAD_NO_BOOT "myriad plugin will skip device boot" OFF)
ie_option (ENABLE_TESTS "unit and functional tests" OFF)
ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF)
ie_option (ENABLE_GAPI_TESTS "tests for GAPI kernels" OFF)
@ -57,74 +58,48 @@ ie_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF)
ie_option (ENABLE_MYRIAD_MVNC_TESTS "functional and behavior tests for mvnc api" OFF)
ie_option (ENABLE_BEH_TESTS "tests oriented to check inference engine API correctness" ON)
ie_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON)
ie_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON)
ie_option (ENABLE_SAMPLES_CORE "console samples core library" ON)
ie_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF)
ie_option (ENABLE_FUZZING "instrument build for fuzzing" OFF)
ie_option (COVERAGE "enable code coverage" OFF)
ie_option (ENABLE_STRESS_UNIT_TESTS "stress unit tests" OFF)
ie_option (VERBOSE_BUILD "shows extra information about build" OFF)
ie_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF)
ie_option (ENABLE_ALTERNATIVE_TEMP "in case of dependency conflict, to avoid modification in master, use local copy of dependency" ON)
ie_option (ENABLE_SEGMENTATION_TESTS "segmentation tests" ON)
ie_option (ENABLE_OBJECT_DETECTION_TESTS "object detection tests" ON)
ie_option (ENABLE_DUMP "enables mode for dumping per layer information" OFF)
ie_option (ENABLE_OPENCV "enables OpenCV" ON)
ie_option (OS_FOLDER "create OS dedicated folder in output" OFF)
ie_option (ENABLE_PLUGIN_RPATH "enables rpath information to be present in plugins binary, and in corresponding test_applications" ON)
ie_option (ENABLE_AFFINITY_GENERATOR "enables affinity generator build" OFF)
ie_option (ENABLE_DEBUG_SYMBOLS "generates symbols for debugging" OFF)
ie_option (ENABLE_PYTHON "enables ie python bridge build" OFF)
ie_option (DEVELOPMENT_PLUGIN_MODE "Disabled build of all plugins" OFF)
ie_option (TREAT_WARNING_AS_ERROR "Treat build warnings as errors" ON)
ie_option (ENABLE_CPP_CCT "enables C++ version of Cross Check Tool" OFF)
ie_option (ENABLE_UNICODE_PATH_SUPPORT "Enable loading models from Unicode paths" ON)
ie_option (ENABLE_LTO "Enable Link Time Optimization" OFF)
ie_option (ENABLE_IR_READER "Compile with IR readers / parsers" ON)
# FIXME: there are compiler failures with LTO and Cross-Compile toolchains. Disabling for now, but
# this must be addressed in a proper way
if(CMAKE_CROSSCOMPILING OR NOT (UNIX AND NOT APPLE))
set(ENABLE_LTO OFF)
endif()
ie_option (ENABLE_C "enables ie c bridge build" ON)
if (UNIX AND NOT APPLE AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.3)
set(ENABLE_UNICODE_PATH_SUPPORT OFF)
endif()
ie_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF)
if (UNIX AND NOT APPLE)
ie_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF)
ie_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF)
else()
set(ENABLE_CPPLINT OFF)
endif()
ie_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF)
if (UNIX AND NOT APPLE)
ie_option(ENABLE_CPPCHECK "Enable cppcheck during the build" OFF)
else()
set(ENABLE_CPPCHECK OFF)
endif()
ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" OFF)
#environment variables used
ie_option(ENABLE_CPPCHECK "Enable cppcheck during the build" OFF)
#name of environment variable stored path to temp directory"
set (DL_SDK_TEMP "DL_SDK_TEMP")
set(IE_EXTRA_PLUGINS "" CACHE STRING "Extra paths for plugins to include into DLDT build tree")
if (LINUX)
ie_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the Inference Engine binaries" ON)
endif()

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,82 +1,55 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
function(set_ie_threading_interface_for TARGET_NAME)
function(ie_target_link_libraries TARGET_NAME LINK_TYPE)
if(CMAKE_VERSION VERSION_LESS "3.12.0")
get_target_property(target_type ${TARGET_NAME} TYPE)
if(NOT target_type STREQUAL "OBJECT_LIBRARY")
target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${ARGN})
else()
# An object library may not link to anything,
# so add interface include directories and compile options explicitly.
foreach(ITEM IN LISTS ARGN)
if(TARGET ${ITEM})
get_target_property(compile_options ${ITEM} INTERFACE_COMPILE_OPTIONS)
if (compile_options)
target_compile_options(${TARGET_NAME} ${LINK_TYPE} ${compile_options})
endif()
get_target_property(compile_definitions ${ITEM} INTERFACE_COMPILE_DEFINITIONS)
if (compile_definitions)
target_compile_definitions(${TARGET_NAME} ${LINK_TYPE} ${compile_definitions})
endif()
endif()
endforeach()
endif()
else()
target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${ARGN})
endif()
# include directories as SYSTEM
foreach(library IN LISTS ARGN)
if(TARGET ${library})
get_target_property(include_directories ${library} INTERFACE_INCLUDE_DIRECTORIES)
if(include_directories)
target_include_directories(${TARGET_NAME} SYSTEM BEFORE ${LINK_TYPE} ${include_directories})
endif()
endif()
endforeach()
endfunction()
set(IE_THREAD_DEFINE "IE_THREAD_SEQ")
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
if (DEFINED ENV{TBBROOT})
# Check TBB package in case if custom TBBROOT path configured
find_package(TBB QUIET PATHS "$ENV{TBBROOT}/cmake")
if (TBB_FOUND)
set(IE_THREAD_DEFINE "IE_THREAD_TBB")
if (WIN32)
target_link_libraries(${TARGET_NAME} PUBLIC "-nodefaultlib:vcomp")
endif ()
target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_IMPORTED_TARGETS})
else ()
# TBB was not found by the configured TBBROOT path, SEQ method will be used
ext_message(WARNING "TBB not found by the configured TBBROOT path $ENV{TBBROOT}")
endif ()
else()
if (NOT (IE_MAIN_SOURCE_DIR))
set(incl_path ${IE_EXTERNAL_DIR}/tbb/include)
if (WIN32)
set(lib_rel_path ${IE_LIB_REL_DIR})
set(lib_dbg_path ${IE_LIB_DBG_DIR})
else ()
set(lib_rel_path ${IE_EXTERNAL_DIR}/tbb/lib)
set(lib_dbg_path ${lib_rel_path})
endif ()
else ()
set(incl_path ${TBB}/include)
set(lib_rel_path ${TBB}/lib)
set(lib_dbg_path ${lib_rel_path})
endif ()
if (NOT TBB_INCLUDE_DIRS OR NOT TBB_LIBRARIES_RELEASE)
find_path(TBB_INCLUDE_DIRS tbb/tbb.h ${incl_path} NO_DEFAULT_PATH)
find_library(TBB_LIBRARIES_RELEASE tbb ${lib_rel_path} NO_DEFAULT_PATH)
ext_message(STATUS "TBB include: ${TBB_INCLUDE_DIRS}")
ext_message(STATUS "TBB Release lib: ${TBB_LIBRARIES_RELEASE}")
if (NOT LINUX)
find_library(TBB_LIBRARIES_DEBUG tbb_debug ${lib_dbg_path} NO_DEFAULT_PATH)
if (TBB_LIBRARIES_DEBUG)
ext_message(STATUS "TBB Debug lib: ${TBB_LIBRARIES_DEBUG}")
else ()
ext_message(WARNING "TBB Debug binaries are missed.")
endif ()
endif ()
endif ()
if (NOT TBB_INCLUDE_DIRS OR NOT TBB_LIBRARIES_RELEASE)
ext_message(WARNING "TBB not found. TBB support will be disabled. ${IE_THREAD_DEFINE} is defined")
else ()
set(IE_THREAD_DEFINE "IE_THREAD_TBB")
target_include_directories(${TARGET_NAME} PUBLIC ${TBB_INCLUDE_DIRS})
if (WIN32)
target_link_libraries(${TARGET_NAME} PUBLIC "-nodefaultlib:vcomp")
endif ()
# Debug binaries are optional.
if (TBB_LIBRARIES_DEBUG AND NOT LINUX)
if (WIN32)
target_link_libraries(${TARGET_NAME} PUBLIC "$<$<CONFIG:DEBUG>:${TBB_LIBRARIES_DEBUG}>;$<$<NOT:$<CONFIG:DEBUG>>:${TBB_LIBRARIES_RELEASE}>")
else ()
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_LIBRARIES_DEBUG})
else()
target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_LIBRARIES_RELEASE})
endif ()
endif ()
else ()
# Link Release library to all configurations.
target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_LIBRARIES_RELEASE})
endif ()
endif ()
endif()
find_package(TBB COMPONENTS tbb tbbmalloc)
if (TBB_FOUND)
set(IE_THREAD_DEFINE "IE_THREAD_TBB")
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_IMPORTED_TARGETS})
else ()
ext_message(WARNING "TBB was not found by the configured TBB_DIR path. \
SEQ method will be used for ${TARGET_NAME}")
endif ()
elseif (THREADING STREQUAL "OMP")
if (WIN32)
set(omp_lib_name libiomp5md)
@ -118,7 +91,7 @@ function(set_ie_threading_interface_for TARGET_NAME)
if (WIN32)
target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} /openmp)
target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} /Qopenmp)
target_link_libraries(${TARGET_NAME} PUBLIC "-nodefaultlib:vcomp")
ie_target_link_libraries(${TARGET_NAME} PUBLIC "-nodefaultlib:vcomp")
else()
target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} -fopenmp)
endif ()
@ -126,17 +99,17 @@ function(set_ie_threading_interface_for TARGET_NAME)
# Debug binaries are optional.
if (OMP_LIBRARIES_DEBUG AND NOT LINUX)
if (WIN32)
target_link_libraries(${TARGET_NAME} PUBLIC "$<$<CONFIG:DEBUG>:${OMP_LIBRARIES_DEBUG}>;$<$<NOT:$<CONFIG:DEBUG>>:${OMP_LIBRARIES_RELEASE}>")
ie_target_link_libraries(${TARGET_NAME} PUBLIC "$<$<CONFIG:DEBUG>:${OMP_LIBRARIES_DEBUG}>;$<$<NOT:$<CONFIG:DEBUG>>:${OMP_LIBRARIES_RELEASE}>")
else()
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_DEBUG})
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_DEBUG})
else()
target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_RELEASE})
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_RELEASE})
endif ()
endif ()
else ()
# Link Release library to all configurations.
target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_RELEASE})
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_RELEASE})
endif ()
endif ()
@ -146,6 +119,6 @@ function(set_ie_threading_interface_for TARGET_NAME)
if (NOT THREADING STREQUAL "SEQ")
find_package(Threads REQUIRED)
target_link_libraries(${TARGET_NAME} PUBLIC ${CMAKE_THREAD_LIBS_INIT})
ie_target_link_libraries(${TARGET_NAME} PUBLIC ${CMAKE_THREAD_LIBS_INIT})
endif()
endfunction(set_ie_threading_interface_for)
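A usage sketch for a library target; my_ie_module and module.cpp are placeholders, not targets in this repository.

add_library(my_ie_module SHARED module.cpp)     # placeholder target and source
# Configures the target for the threading backend selected by the THREADING
# option (TBB / TBB_AUTO / OMP / SEQ) and links the corresponding runtime.
set_ie_threading_interface_for(my_ie_module)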

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,77 +0,0 @@
# Copyright (C) 2018-2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
macro(disable_deprecated_warnings)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Qdiag-warning:1478")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4996") # disable warning on deprecated API
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations")
endif()
endmacro()
if (WIN32)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _CRT_SECURE_NO_WARNINGS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") #no asynchronous structured exception handling
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
if (TREAT_WARNING_AS_ERROR)
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Qdiag-warning:2586,177,3180,1740,1786,47,161")
elseif (CMAKE_CXX_COMPILER_ID MATCHES MSVC)
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX") # Too many warnings
endif()
endif()
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Z7")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7")
if(ENABLE_DEBUG_SYMBOLS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Z7")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Z7")
set(DEBUG_SYMBOLS_LINKER_FLAGS "/DEBUG")
if (CMAKE_BUILD_TYPE STREQUAL "Release")
# Keep default /OPT values. See /DEBUG reference for details.
set(DEBUG_SYMBOLS_LINKER_FLAGS "${DEBUG_SYMBOLS_LINKER_FLAGS} /OPT:REF /OPT:ICF")
endif()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Werror=return-type ")
if (APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-command-line-argument")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-function")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-variable")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-private-field")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reorder")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wswitch")
elseif(UNIX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wuninitialized -Winit-self")
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-switch")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wmaybe-uninitialized")
endif()
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -diag-disable=remark")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden")
if(LINUX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffunction-sections -fdata-sections")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
endif()
endif()

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@ -21,13 +21,14 @@ endif()
# ie_add_plugin(NAME <targetName>
# DEVICE_NAME <deviceName>
# SOURCES <sources>
# OBJECT_LIBRARIES <object_libs>
# VERSION_DEFINES_FOR <source>
# )
#
function(ie_add_plugin)
set(options)
set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR)
set(multiValueArgs SOURCES)
set(multiValueArgs SOURCES OBJECT_LIBRARIES)
cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if(NOT IE_PLUGIN_NAME)
@ -44,9 +45,23 @@ function(ie_add_plugin)
addVersionDefines(${IE_PLUGIN_VERSION_DEFINES_FOR} CI_BUILD_NUMBER)
endif()
add_library(${IE_PLUGIN_NAME} SHARED ${IE_PLUGIN_SOURCES})
set(input_files ${IE_PLUGIN_SOURCES})
foreach(obj_lib IN LISTS IE_PLUGIN_OBJECT_LIBRARIES)
list(APPEND input_files $<TARGET_OBJECTS:${obj_lib}>)
add_cpplint_target(${obj_lib}_cpplint FOR_TARGETS ${obj_lib})
endforeach()
add_library(${IE_PLUGIN_NAME} SHARED ${input_files})
target_compile_definitions(${IE_PLUGIN_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN)
if(TARGET inference_engine_preproc)
target_include_directories(${IE_PLUGIN_NAME} PRIVATE $<TARGET_PROPERTY:inference_engine_preproc,INTERFACE_INCLUDE_DIRECTORIES>)
target_link_libraries(${IE_PLUGIN_NAME} PRIVATE inference_engine_plugin_api)
else()
target_include_directories(${IE_PLUGIN_NAME} PRIVATE $<TARGET_PROPERTY:IE::inference_engine_preproc,INTERFACE_INCLUDE_DIRECTORIES>)
target_link_libraries(${IE_PLUGIN_NAME} PRIVATE IE::inference_engine_plugin_api)
endif()
if(WIN32)
set_target_properties(${IE_PLUGIN_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME})
endif()
@ -60,6 +75,17 @@ function(ie_add_plugin)
set(PLUGIN_FILES "${PLUGIN_FILES}" CACHE INTERNAL "" FORCE)
add_dependencies(ie_plugins ${IE_PLUGIN_NAME})
# install rules
string(TOLOWER "${IE_PLUGIN_DEVICE_NAME}" install_component)
ie_cpack_add_component(${install_component} REQUIRED DEPENDS core)
install(TARGETS ${IE_PLUGIN_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH}
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
COMPONENT ${install_component})
endfunction()
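A usage sketch of the documented signature; the device and file names below (myDevicePlugin, MYDEVICE, my_plugin.cpp) are placeholders.

ie_add_plugin(NAME myDevicePlugin               # shared library target to create
              DEVICE_NAME MYDEVICE              # device name (also used, lower-cased, as the install component)
              SOURCES my_plugin.cpp
              VERSION_DEFINES_FOR my_plugin.cpp)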
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,42 +0,0 @@
# Copyright (C) 2018-2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if (UNIX OR APPLE AND CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fPIE -fPIC -Wformat -Wformat-security")
# TODO: double check it it's OK
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
string(REPLACE "-fPIE" "" CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS}")
endif()
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie")
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -z noexecstack -z relro -z now")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fstack-protector-all")
else()
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fstack-protector-strong")
endif()
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -s -fvisibility=hidden")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -s -fvisibility=hidden")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fstack-protector-all")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -fvisibility=hidden")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fvisibility=hidden")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector-strong")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -z noexecstack -z relro -z now")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wl,--strip-all -fvisibility=hidden")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wl,--strip-all -fvisibility=hidden")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CCXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CCXX_FLAGS}")
elseif (WIN32)
if (CMAKE_CXX_COMPILER_ID STREQUAL MSVC)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MP /sdl")
endif()
endif()

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#
@ -15,7 +15,9 @@
#
# and the following imported targets:
#
# IE::inference_engine - The Inference Engine library
# IE::inference_engine - The Inference Engine library
# IE::inference_engine_c_api - The Inference Engine C API library
# IE::inference_engine_nn_builder - The Inference Engine NN Builder library
#
macro(ext_message TRACE_LEVEL)
@ -36,7 +38,7 @@ set(InferenceEngine_FOUND FALSE)
if(TARGET IE::inference_engine)
set(InferenceEngine_FOUND TRUE)
get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
set(InferenceEngine_LIBRARIES IE::inference_engine)
set(InferenceEngine_LIBRARIES IE::inference_engine IE::inference_engine_c_api IE::inference_engine_nn_builder)
else()
if (WIN32)
set(_ARCH intel64)
@ -73,7 +75,6 @@ else()
endif()
find_path(IE_INCLUDE_DIR inference_engine.hpp "${IE_ROOT_DIR}/include" NO_DEFAULT_PATH)
find_path(IE_SRC_DIR extension "${IE_ROOT_DIR}/src" NO_DEFAULT_PATH)
set(IE_LIB_DIR "${IE_ROOT_DIR}/lib/${_ARCH}")
set(IE_LIB_REL_DIR "${IE_LIB_DIR}/Release")
@ -83,73 +84,86 @@ else()
if(WIN32)
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
elseif(APPLE)
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
else()
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
endif()
find_package_handle_standard_args( InferenceEngine
FOUND_VAR INFERENCEENGINE_FOUND
REQUIRED_VARS IE_RELEASE_LIBRARY IE_INCLUDE_DIR
REQUIRED_VARS IE_RELEASE_LIBRARY IE_C_API_RELEASE_LIBRARY IE_NN_BUILDER_RELEASE_LIBRARY IE_INCLUDE_DIR
FAIL_MESSAGE "Some of mandatory Inference Engine components are not found. Please consult InferenceEgnineConfig.cmake module's help page.")
if(INFERENCEENGINE_FOUND)
# keep this line for successful execution in CMake 2.8
set(InferenceEngine_FOUND TRUE)
add_library(IE::inference_engine SHARED IMPORTED GLOBAL)
foreach(ie_library_suffix "" "_c_api" "_nn_builder")
string(TOUPPER "${ie_library_suffix}" ie_library_usuffix)
add_library(IE::inference_engine${ie_library_suffix} SHARED IMPORTED GLOBAL)
if (WIN32)
set_target_properties(IE::inference_engine PROPERTIES
IMPORTED_CONFIGURATIONS RELEASE
IMPORTED_IMPLIB_RELEASE "${IE_RELEASE_LIBRARY}"
MAP_IMPORTED_CONFIG_RELEASE Release
MAP_IMPORTED_CONFIG_RELWITHDEBINFO Release
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}")
if (WIN32)
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
IMPORTED_CONFIGURATIONS RELEASE
IMPORTED_IMPLIB_RELEASE "${IE${ie_library_usuffix}_RELEASE_LIBRARY}"
MAP_IMPORTED_CONFIG_RELEASE Release
MAP_IMPORTED_CONFIG_RELWITHDEBINFO Release
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}")
# Debug binaries are optional
find_library(IE_DEBUG_LIBRARY inference_engine@IE_DEBUG_POSTFIX_WIN@ "${IE_LIB_DBG_DIR}" NO_DEFAULT_PATH)
if (IE_DEBUG_LIBRARY)
set_property(TARGET IE::inference_engine APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG)
set_target_properties(IE::inference_engine PROPERTIES
IMPORTED_IMPLIB_DEBUG "${IE_DEBUG_LIBRARY}"
MAP_IMPORTED_CONFIG_DEBUG Debug)
# Debug binaries are optional
find_library(IE${ie_library_usuffix}_DEBUG_LIBRARY inference_engine${ie_library_suffix}@IE_DEBUG_POSTFIX_WIN@
"${IE_LIB_DBG_DIR}" NO_DEFAULT_PATH)
if (IE${ie_library_usuffix}_DEBUG_LIBRARY)
set_property(TARGET IE::inference_engine${ie_library_suffix} APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG)
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
IMPORTED_IMPLIB_DEBUG "${IE${ie_library_usuffix}_DEBUG_LIBRARY}"
MAP_IMPORTED_CONFIG_DEBUG Debug)
else()
ext_message(WARNING "Inference Engine DEBUG binaries are missed.")
endif()
elseif (APPLE)
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
IMPORTED_LOCATION_RELEASE "${IE${ie_library_usuffix}_RELEASE_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}"
INTERFACE_COMPILE_OPTIONS "-Wno-error=deprecated-declarations")
# Debug binaries are optional
find_library(IE${ie_library_usuffix}_DEBUG_LIBRARY inference_engine${ie_library_suffix}@IE_DEBUG_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
if (IE${ie_library_usuffix}_DEBUG_LIBRARY)
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
IMPORTED_LOCATION_DEBUG "${IE${ie_library_usuffix}_DEBUG_LIBRARY}")
else()
ext_message(WARNING "Inference Engine DEBUG binaries are missed")
endif()
target_link_libraries(IE::inference_engine${ie_library_suffix} INTERFACE ${CMAKE_DL_LIBS})
else()
ext_message(WARNING "Inference Engine DEBUG binaries are missed.")
# Only Release binaries are distributed for Linux systems
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
IMPORTED_LOCATION "${IE${ie_library_usuffix}_RELEASE_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}")
if(CMAKE_CXX_COMPILER_ID MATCHES Intel)
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
INTERFACE_COMPILE_OPTIONS "-diag-warning=1786")
else()
set_target_properties(IE::inference_engine${ie_library_suffix} PROPERTIES
INTERFACE_COMPILE_OPTIONS "-Wno-error=deprecated-declarations")
endif()
target_link_libraries(IE::inference_engine${ie_library_suffix} INTERFACE ${CMAKE_DL_LIBS})
endif()
elseif (APPLE)
set_target_properties(IE::inference_engine PROPERTIES
IMPORTED_LOCATION_RELEASE "${IE_RELEASE_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}"
INTERFACE_COMPILE_OPTIONS "-Wno-error=deprecated-declarations")
# Debug binaries are optional
find_library(IE_DEBUG_LIBRARY inference_engine@IE_DEBUG_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
if (IE_DEBUG_LIBRARY)
set_target_properties(IE::inference_engine PROPERTIES
IMPORTED_LOCATION_DEBUG "${IE_DEBUG_LIBRARY}")
else()
ext_message(WARNING "Inference Engine DEBUG binaries are missed")
endif()
target_link_libraries(IE::inference_engine INTERFACE ${CMAKE_DL_LIBS})
else()
# Only Release binaries are distributed for Linux systems
set_target_properties(IE::inference_engine PROPERTIES
IMPORTED_LOCATION "${IE_RELEASE_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}"
INTERFACE_COMPILE_OPTIONS "-Wno-error=deprecated-declarations")
target_link_libraries(IE::inference_engine INTERFACE ${CMAKE_DL_LIBS})
endif()
endforeach()
set(InferenceEngine_INCLUDE_DIRS ${IE_INCLUDE_DIR})
set(InferenceEngine_LIBRARIES IE::inference_engine)
set(InferenceEngine_LIBRARIES IE::inference_engine_c_api IE::inference_engine_nn_builder IE::inference_engine)
set(IE_EXTERNAL_DIR "${IE_ROOT_DIR}/external")
include("${IE_ROOT_DIR}/share/ie_parallel.cmake")
add_subdirectory(${IE_SRC_DIR}/extension EXCLUDE_FROM_ALL ie_cpu_extension)
add_library(IE::ie_cpu_extension ALIAS ie_cpu_extension)
endif()
endif()

View File

@ -1,45 +1,59 @@
# Copyright (C) 2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(VPU_SUPPORTED_SOC ma2450 ma2x8x mv0262)
if(CMAKE_VERSION VERSION_GREATER 3.9.6)
include_guard(GLOBAL)
else()
if(__CURRENT_FILE_VAR__)
return()
endif()
set(__CURRENT_FILE_VAR__ TRUE)
endif()
include(dependency_solver)
set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
#
# Default firmware packages
# Default packages
#
RESOLVE_DEPENDENCY(VPU_FIRMWARE_MA2450
ARCHIVE_UNIFIED firmware_ma2450_759W.zip
TARGET_PATH "${TEMP}/vpu/firmware/ma2450"
ENVIRONMENT "VPU_FIRMWARE_MA2450"
FOLDER)
debug_message(STATUS "ma2450=" ${VPU_FIRMWARE_MA2450})
RESOLVE_DEPENDENCY(VPU_FIRMWARE_MV0262
ARCHIVE_UNIFIED firmware_mv0262_mdk_R9.8.zip
TARGET_PATH "${TEMP}/vpu/firmware/mv0262"
ENVIRONMENT "VPU_FIRMWARE_MV0262"
FOLDER)
debug_message(STATUS "mv0262=" ${VPU_FIRMWARE_MV0262})
RESOLVE_DEPENDENCY(VPU_FIRMWARE_MA2X8X
ARCHIVE_UNIFIED firmware_ma2x8x_mdk_R9.8.zip
TARGET_PATH "${TEMP}/vpu/firmware/ma2x8x"
ENVIRONMENT "VPU_FIRMWARE_MA2X8X"
FOLDER)
debug_message(STATUS "ma2x8x=" ${VPU_FIRMWARE_MA2X8X})
set(FIRMWARE_PACKAGE_VERSION 942_R10.15)
#
# CMake variables to override default firmware files
#
foreach(soc IN LISTS VPU_SUPPORTED_SOC)
string(TOUPPER "${soc}" soc_upper)
set(var_name VPU_FIRMWARE_${soc_upper}_FILE)
foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
string(TOUPPER "${firmware_name}" firmware_name_upper)
find_file(${var_name} MvNCAPI-${soc}.mvcmd "${VPU_FIRMWARE_${soc_upper}}/mvnc")
if(NOT ${var_name})
message(FATAL_ERROR "[VPU] Missing ${soc} firmware")
set(firmware_name_full ${firmware_name}.mvcmd)
# Handle PCIe elf firmware for Windows
if (WIN32 AND "${firmware_name}" STREQUAL "pcie-ma248x")
set(firmware_name_full ${firmware_name}.elf)
endif ()
reset_deps_cache(VPU_FIRMWARE_${firmware_name_upper}_FILE)
RESOLVE_DEPENDENCY(VPU_FIRMWARE_${firmware_name_upper}
ARCHIVE_UNIFIED firmware_${firmware_name}_${FIRMWARE_PACKAGE_VERSION}.zip
TARGET_PATH "${TEMP}/vpu/firmware/${firmware_name}"
ENVIRONMENT "VPU_FIRMWARE_${firmware_name_upper}_FILE"
FOLDER)
debug_message(STATUS "${firmware_name}=" ${VPU_FIRMWARE_${firmware_name_upper}})
update_deps_cache(
VPU_FIRMWARE_${firmware_name_upper}_FILE
"${VPU_FIRMWARE_${firmware_name_upper}}/mvnc/${firmware_name_full}"
"[VPU] ${firmware_name_full} firmware")
find_file(
VPU_FIRMWARE_${firmware_name_upper}_FILE
NAMES ${firmware_name_full}
NO_CMAKE_FIND_ROOT_PATH)
if(NOT VPU_FIRMWARE_${firmware_name_upper}_FILE)
message(FATAL_ERROR "[VPU] Missing ${firmware_name_full} firmware")
endif()
endforeach()
@ -47,13 +61,18 @@ endforeach()
# `vpu_copy_firmware` CMake target
#
foreach(soc IN LISTS VPU_SUPPORTED_SOC)
string(TOUPPER "${soc}" soc_upper)
set(var_name VPU_FIRMWARE_${soc_upper}_FILE)
foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
string(TOUPPER "${firmware_name}" firmware_name_upper)
set(var_name VPU_FIRMWARE_${firmware_name_upper}_FILE)
set(firmware_out_file "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${firmware_name}.mvcmd")
# Handle PCIe elf firmware for Windows
if (WIN32 AND "${firmware_name}" STREQUAL "pcie-ma248x")
set(firmware_out_file "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${firmware_name}.elf")
endif ()
set(firmware_out_file "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/MvNCAPI-${soc}.mvcmd")
list(APPEND all_firmware_files ${firmware_out_file})
add_custom_command(
OUTPUT ${firmware_out_file}
COMMAND
@ -61,8 +80,28 @@ foreach(soc IN LISTS VPU_SUPPORTED_SOC)
MAIN_DEPENDENCY ${${var_name}}
COMMENT "[VPU] Copy ${${var_name}} to ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}"
VERBATIM)
install(FILES ${${var_name}}
DESTINATION ${IE_CPACK_LIBRARY_PATH}
COMPONENT myriad)
endforeach()
add_custom_target(vpu_copy_firmware
DEPENDS ${all_firmware_files}
COMMENT "[VPU] Copy firmware files")
#
# libusb
#
if(ANDROID)
RESOLVE_DEPENDENCY(LIBUSB
ARCHIVE_ANDROID "libusb_33167_android.tgz"
TARGET_PATH "${TEMP}/vpu/libusb")
debug_message(STATUS "LIBUSB=" ${LIBUSB})
set(LIBUSB_INCLUDE_DIR "${LIBUSB}/include")
set(LIBUSB_LIBRARY "${LIBUSB}/lib/libusb1.0.so")
log_rpath_from_dir(LIBUSB "${LIBUSB}/lib")
endif()

View File

@ -0,0 +1,11 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
project(InferenceEngine_C_API)
add_subdirectory(src)
if(ENABLE_SAMPLES)
add_subdirectory(samples)
endif()

View File

@ -0,0 +1,796 @@
# Overview of Inference Engine C* API
> **NOTE**: It is a preview version of the Inference Engine C* API for evaluation purposes only.
> Module structure and API itself may be changed in future releases.
This API provides a simplified interface for Inference Engine functionality that allows you to:
- handle the models
- load and configure Inference Engine plugins based on device names
- perform inference in synchronous and asynchronous modes with an arbitrary number of infer requests (the number of infer requests may be limited by target device capabilities)
## Supported OSes
Currently the Inference Engine C* API is supported on Ubuntu* 16.04, Microsoft Windows* 10 and CentOS* 7.3 OSes.
Supported Python* versions:
- On Ubuntu 16.04: 2.7, 3.5, 3.6
- On Windows 10: 3.5, 3.6
- On CentOS 7.3: 3.4, 3.5, 3.6
## Setting Up the Environment
To configure the environment for the Inference Engine C* API, run:
- On Ubuntu 16.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
- On Windows 10: XXXX
The script automatically detects the latest installed C* version and configures the required environment if the version is supported.
If you want to use a certain version of C*, set the environment variable XXXXX
after running the environment configuration script.
## Struct
```
typedef struct ie_core_version {
size_t major;
size_t minor;
const char *build_number;
const char *description;
}ie_core_version_t;
```
```
typedef struct ie_config {
char *name;
char *value;
}ie_config_t;
```
```
typedef struct ie_param {
union { //To be continued, to collect metric and config parameters
};
}ie_param_t;
```
```
typedef struct ie_param_config {
char *name;
ie_param_t *param;
}ie_param_config_t;
```
```
typedef struct desc {
char msg[256];
}desc_t;
```
```
typedef struct dimensions {
size_t dims[8];
}dimensions_t;
```
```
struct tensor_desc {
layout_t layout;
dimensions_t dims;
precision_e precision;
};
```
```
typedef void (*completeCallBackFunc)(ie_infer_request_t *infer_request, int *status);
```
```
enum precision_e{
UNSPECIFIED = 255, /**< Unspecified value. Used by default */
MIXED = 0, /**< Mixed value. Can be received from network. Not applicable for tensors */
FP32 = 10, /**< 32bit floating point value */
FP16 = 11, /**< 16bit floating point value */
Q78 = 20, /**< 16bit specific signed fixed point precision */
I16 = 30, /**< 16bit signed integer value */
U8 = 40, /**< 8bit unsigned integer value */
I8 = 50, /**< 8bit signed integer value */
U16 = 60, /**< 16bit unsigned integer value */
I32 = 70, /**< 32bit signed integer value */
I64 = 72, /**< 64bit signed integer value */
BIN = 71, /**< 1bit integer value */
CUSTOM = 80 /**< custom precision has its own name and size of elements */
};
```
```
enum layout_t {
ANY = 0, // "any" layout
// I/O data layouts
NCHW = 1,
NHWC = 2,
NCDHW = 3,
NDHWC = 4,
// weight layouts
OIHW = 64,
// Scalar
SCALAR = 95,
// bias layouts
C = 96,
// Single image layout (for mean image)
CHW = 128,
// 2D
HW = 192,
NC = 193,
CN = 194,
BLOCKED = 200,
};
```
```
enum colorformat_e {
RAW = 0u, ///< Plain blob (default), no extra color processing required
RGB, ///< RGB color format
BGR, ///< BGR color format, default in DLDT
RGBX, ///< RGBX color format with X ignored during inference
BGRX, ///< BGRX color format with X ignored during inference
NV12, ///< NV12 color format represented as compound Y+UV blob
};
```
```
enum resize_alg_e {
NO_RESIZE = 0,
RESIZE_BILINEAR,
RESIZE_AREA
};
```
```
struct roi_e {
size_t id; // ID of a roi
size_t posX; // W upper left coordinate of roi
size_t posY; // H upper left coordinate of roi
size_t sizeX; // W size of roi
size_t sizeY; // H size of roi
};
```
```
enum IEStatusCode {
OK = 0,
GENERAL_ERROR = -1,
NOT_IMPLEMENTED = -2,
NETWORK_NOT_LOADED = -3,
PARAMETER_MISMATCH = -4,
NOT_FOUND = -5,
OUT_OF_BOUNDS = -6,
/*
* @brief exception not of std::exception derived type was thrown
*/
UNEXPECTED = -7,
REQUEST_BUSY = -8,
RESULT_NOT_READY = -9,
NOT_ALLOCATED = -10,
INFER_NOT_STARTED = -11,
NETWORK_NOT_READ = -12
};
```
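Before the function reference, a minimal sketch of how these data types fit together may help. It uses the type names from `ie_c_api.h` shown later in this commit (`tensor_desc_t`, `layout_e`, `roi_t`, and a `ranks` field in `dimensions_t`), which differ slightly from the preview listing above; the include path is an assumption and may vary with the install layout.
```
#include "ie_c_api.h"   /* assumed include path; may differ with the install layout */

int main(void) {
    /* Shape of a typical NCHW image input: 1x3x224x224 */
    dimensions_t dims = { .ranks = 4, .dims = {1, 3, 224, 224} };

    /* Tensor descriptor combining layout, dimensions and precision */
    tensor_desc_t tdesc = { .layout = NCHW, .dims = dims, .precision = FP32 };

    /* Region of interest inside a larger blob */
    roi_t roi = { .id = 0, .posX = 10, .posY = 10, .sizeX = 100, .sizeY = 100 };

    (void)tdesc;
    (void)roi;
    return 0;
}
```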
- `const char *ie_c_api_version(void)`
- Description: Returns the version of the API that is exported.
- Parameters: None.
- Return value: Version number of the API.
- Usage example:
```
const char *ver_num=ie_c_api_version();
```
## IECore
This struct represents an Inference Engine entity and allows you to work with plugins using unified interfaces.
### Create
- `IEStatusCode ie_core_create(char *xml_config_file, ie_core_t *core_result)`
> **NOTE**: An `ie_core_t` instance is created with the default configuration when `xml_config_file` is null.
- Parameters:
- `xml_config_file` - A full path to the `.xml` file containing plugins configuration. If the parameter is not specified, the default configuration is handled automatically.
- `core_result` - A pointer to the newly created `ie_core_t`.
- Return value: Status code of the operation: OK(0) for success.
- Usage examples:
Create an `ie_core_t` instance with a custom configuration location specified:
```
char *xml_config_file="/localdisk/plugins/my_custom_cfg.xml";
ie_core_t ie;
IEStatusCode status = ie_core_create(xml_config_file,ie);
```
The `.xml` file has the following structure:
```
<ie>
<plugins>
<plugin name="" location="" optional="yes/no">
<extensions>
<extension location=""/>
</extensions>
<properties>
<property key="" value=""/>
</properties>
</plugin>
</plugins>
</ie>
```
### <a name="iecore-methods"></a>Methods
- `IEStatusCode ie_core_get_versions(ie_core_t *core, char *device_name, ie_core_version_t *version_result)`
- Description: Returns a `ie_core_version_t` with versions of the plugin specified.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `device_name` - Name of the registered plugin.
- `version_result` - A pointer to the version information of the specified plugin.
- Return value: Status of the operation: OK(0) for success.
- Usage example:
```
char *xml_config_file="/localdisk/plugins/my_custom_cfg.xml";
char *device_name="CPU";
ie_core_t *ie;
ie_core_version_t *version;
IEStatusCode status= ie_core_create(xml_config_file, ie);
IEStatusCode status2=ie_core_get_versions(ie,device_name, version);
print("description:%s, major:%d, minor:%d, build_number:%s.\n",version- >description, version->major, version->minor, version->build_number);
```
- `IEStatusCode ie_core_load_network(ie_core_t *core, ie_network_t *network, const char *device_name, ie_config_t config, ie_executable_network_t *exec_network_result)`
- Description: Loads a network that was read from the Intermediate Representation (IR) to the plugin with specified device name and creates an `ie_executable_network_t` instance of the `ie_network_t` struct.
You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware resources).
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `network` - A pointer to `ie_network_t` instance.
- `device_name` - A device name of a target plugin.
- `config` - A dictionary of plugin configuration keys and their values.
- `exec_network_result` - A pointer to the newly loaded network.
- Return value: Status code of the operation: OK(0) for success.
- Usage example: see the combined sketch at the end of this section.
- `IEStatusCode ie_core_set_config(ie_core_t *core, ie_config_t *ie_core_config, const char *device_name)`
- Description: Sets a configuration for a plugin.
- Parameters:
- `core`- A pointer to `ie_core_t` instance.
- `ie_core_config` - A dictionary of configuration parameters as keys and their values.
- `device_name` - A device name of a target plugin.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_register_plugin(ie_core_t *core, const char *plugin_name, const char *device_name )`
- Description: Registers a new device and a plugin which implement this device inside Inference Engine.
- Parameters:
- `core`- A pointer to `ie_core_t` instance.
- `plugin_name` - A name of a plugin. Depending on a platform, plugin_name is wrapped with a shared library suffix and a prefix to identify a full name of the library.
- `device_name` - A target device name for the plugin. If not specified, the method registers a plugin with the default name.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_register_plugins(ie_core_t *core, const char *xml_config_file)`
- Description: Registers plugins specified in an `.xml` configuration file
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `xml_config_file` - A full path to `.xml` file containing plugins configuration.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_unregister_plugin(ie_core_t *core, const char *device_name)`
- Description: Unregisters a plugin with a specified device name
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `device_name` - A device name of the plugin to unregister.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_add_extension(ie_core_t *core, const char *extension_path, const char *device_name)`
- Description: Loads extension library to the plugin with a specified device name.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `extension_path` - Path to the extensions library file to load to a plugin.
- `device_name` - A device name of a plugin to load the extensions to.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_get_metric(ie_core_t *core, const char *device_name, const char *metric_name, ie_param_t *param_result)`
- Description: Gets a general runtime metric for dedicated hardware. Enables requesting common device properties that are `ie_executable_network_t` agnostic, such as device name, temperature, and other device-specific values.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `device_name` - A name of a device to get a metric value.
- `metric_name` - A metric name to request.
- `param_result` - A metric value corresponding to a metric key.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_core_get_config(ie_core_t *core, const char *device_name, const char *config_name, ie_param_t *param_result)`
- Description: Gets a configuration dedicated to device behavior. The method is targeted at extracting information that can be set via the SetConfig method.
- Parameters:
- `core` - A pointer to `ie_core_t` instance.
- `device_name` - A name of a device to get a configuration value.
- `config_name` - A name of the configuration to request.
- `param_result` - A configuration value corresponding to the configuration key.
- Return value: Status code of the operation: OK(0) for success.
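The usage example for `ie_core_load_network` above is left blank in this preview. The following is a hedged sketch of a typical IECore flow, based on the prototypes declared in `ie_c_api.h` later in this commit (there, `ie_core_read_network` plays the role of the `ie_network_read` listed in the IENetwork section). The model paths and the `CPU` device name are placeholders; error handling is reduced to status checks.
```
#include <stdio.h>
#include "ie_c_api.h"   /* assumed include path; may differ with the install layout */

int main(void) {
    ie_core_t *core = NULL;
    ie_network_t *network = NULL;
    ie_executable_network_t *exe_network = NULL;
    /* Empty device configuration; the list is terminated by next == NULL */
    ie_config_t config = { NULL, NULL, NULL };

    /* An empty path makes the Core use the default plugin configuration */
    if (ie_core_create("", &core) != OK)
        return 1;

    /* Placeholder IR paths: model.xml / model.bin */
    if (ie_core_read_network(core, "model.xml", "model.bin", &network) != OK)
        goto cleanup;

    /* Load the parsed network onto the CPU plugin */
    if (ie_core_load_network(core, network, "CPU", &config, &exe_network) != OK)
        goto cleanup;

    printf("network loaded to CPU\n");

cleanup:
    if (exe_network) ie_exec_network_free(&exe_network);
    if (network) ie_network_free(&network);
    ie_core_free(&core);
    return 0;
}
```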
## IENetwork
This struct contains the information about the network model read from IR and allows you to manipulate some model parameters such as layers affinity and output layers.
### Methods
- `IEStatusCode ie_network_read(char *xml, char *weights_file, ie_network_t *network_result)`
- Description: Reads the model from the `.xml` and `.bin` files of the IR.
- Parameters:
- `xml` - The `.xml` file path of the IR.
- `weights_file` - The `.bin` file path of the IR.
- `network_result` - A pointer to the newly created network.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_free(ie_network_t *network)`
- Description: When the network is loaded into the Inference Engine, it is not required anymore and should be released.
- Parameters:
- `network` - The pointer to the instance of the `ie_network_t` to free.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_input_numbers(ie_network_t *network, size_t *size_result)`
- Description: Gets the number of inputs for the `ie_network_t` instance.
- Parameters:
- `network` - The instance of the `ie_network_t` to get size of input information for this instance.
- `size_result` - A number of the instance's input information.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_input_name(ie_network_t *network, size_t number, char *name_result)`
- Description: Gets name corresponding to the "number".
- Parameters:
- `network` - The instance of the `ie_network_t` to get input information.
- `number` - An id of the input information.
- `name_result` - Input name corresponding to the "number".
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_output_numbers(ie_network_t *network, size_t *size_result)`
- Description: Gets the number of outputs for the `ie_network_t` instance.
- Parameters:
- `network` - The instance of the `ie_network_t` to get size of output information for this instance.
- `size_result` - A number of the instance's output information.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_output_name(ie_network_t *network, size_t number, char *name_result)`
- Description: Gets output name corresponding to the "number".
- Parameters:
- `network` - The instance of the `ie_network_t` to get out information of nth layer for this instance.
- `number` - An id of output information.
- `name_result` - An output name corresponding to the "number".
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_input_precision(ie_network_t *network, char *input_name, precision_e *prec_result)`
- Description: Gets a precision of the input data named "input_name".
- Parameters:
- `network` - A pointer to ie_network_t instance.
- `input_name` - Name of input data.
- `prec_result` - A pointer to the precision used for input blob creation.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_set_input_precision(ie_network_t *network, char *input_name, precision_e p)`
- Description: Changes the precision of the input data named "input_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `p` - A new precision of the input data to set (eg. precision_e.FP16).
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_input_layout(ie_network_t *network, char *input_name, layout_t *layout_result)`
- Description: Gets a layout of the input data named "input_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `layout_result` - A pointer to the layout used for input blob creation.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_set_input_layout(ie_network_t *network, char *input_name, layout_t l)`
- Description: Changes the layout of the input data named "input_name". This function should be called before loading the network to the plugin (see the sketch at the end of this section).
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `l` - Network layer layout (e.g. layout_t.NCHW).
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_input_dims(ie_network_t *network, char *input_name, dimensions_t *dims_result)`
- Description: Gets dimensions/shape of the input data with reversed order.
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `dims_result` - A pointer to the dimensions used for input blob creation.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_input_resize_algorithm(ie_network_t *network, char *input_name, resize_alg_e *resize_alg_result)`
- Description: Gets pre-configured resize algorithm.
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `resize_alg_result` - The pointer to the resize algorithm used for input blob creation.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_set_input_resize_algorithm(ie_network_t *network, char *input_name, resize_alg_e resize_algo)`
- Description: Sets resize algorithm to be used during pre-processing
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `resize_algo` - Resize algorithm.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_color_format(ie_network_t *network, char *input_name, colorformat_e *colformat_result)`
- Description: Gets color format of the input data named "input_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `colformat_result` - Input color format of the input data named "input_name".
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_set_color_format(ie_network_t *network, char *input_name, colorformat_e color_format)`
- Description: Changes the color format of the input data named "input_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `input_name` - Name of input data.
- `color_format` - Color format of the input data.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_output_precision(ie_network_t *network, char *output_name, precision_e *prec_result)`
- Description: Gets the output precision of the output data named "output_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `output_name` - Name of output data.
- `prec_result` - Output precision of the output data named "output_name".
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_set_output_precision(ie_network_t *network, char *output_name, precision_e p)`
- Description: Sets a precision type of the output data named "output_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `output_name` - Name of output data.
- `p` - Precision of the output data (eg. precision_e.FP16).
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_output_layout(ie_network_t *network, char *output_name, layout_t *layout_result)`
- Description: Gets the output layout of the output data named "output_name" in the network.
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `output_name` - Name of output data.
- `layout_result` - Layout value of the output data named "output_name".
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_set_output_layout(ie_network_t *network, char *output_name, layout_t l)`
- Description: Sets the layout value for output data named "output_name".
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `output_name` - Name of output data.
- `l` - Layout value to set (e.g. layout_t.NCHW).
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_network_get_output_dims(ie_network_t *network, char *output_name, dimensions_t *dims_result)`
- Description: Gets the output dimensions of the output data named "output_name" in the network.
- Parameters:
- `network` - A pointer to `ie_network_t` instance.
- `output_name` - Name of output data.
- `dims_result` - Dimensions value of the output data named "output_name".
- Return value: Status code of the operation: OK(0) for success.
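A sketch of inspecting and configuring network inputs, referenced from `ie_network_set_input_layout` above. It follows the declarations in `ie_c_api.h` later in this commit, whose names differ slightly from the preview list (`ie_network_get_inputs_number` instead of `ie_network_get_input_numbers`); releasing the returned input name is omitted because the `ie_network_name_free()` prototype is not shown in this excerpt.
```
#include <stdio.h>
#include "ie_c_api.h"   /* assumed include path; may differ with the install layout */

/* Configure every input of an already-read network for FP32 NCHW data. */
static IEStatusCode configure_inputs(ie_network_t *network) {
    size_t num_inputs = 0;
    IEStatusCode status = ie_network_get_inputs_number(network, &num_inputs);
    if (status != OK)
        return status;

    for (size_t i = 0; i < num_inputs; ++i) {
        char *name = NULL;
        status = ie_network_get_input_name(network, i, &name);
        if (status != OK)
            return status;

        dimensions_t dims;
        ie_network_get_input_dims(network, name, &dims);
        printf("input %zu: %s (rank %zu)\n", i, name, dims.ranks);

        /* Accept FP32 data in NCHW layout and resize it on the fly */
        ie_network_set_input_precision(network, name, FP32);
        ie_network_set_input_layout(network, name, NCHW);
        ie_network_set_input_resize_algorithm(network, name, RESIZE_BILINEAR);

        /* NOTE: the name should be released with ie_network_name_free(),
         * whose prototype is not shown in this excerpt. */
    }
    return OK;
}
```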
## ExecutableNetwork
This struct represents a network instance loaded to plugin and ready for inference.
### Methods
- `IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t *ie_exec_network, desc_t *desc, ie_infer_request_t **req)`
- Description: Creates an inference request instance used to infer the network. The created request has allocated input and output blobs (that can be changed later).
- Parameters:
- `ie_exec_network` - A pointer to `ie_executable_network_t` instance.
- `desc` - A pointer to a `desc_t` instance.
- `req` - A pointer to the newly created `ie_infer_request_t` instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_exec_network_get_metric(ie_executable_network_t *ie_exec_network, const char *metric_name, ie_param_t *param_result)`
- Description: Gets a general runtime metric for an executable network. It can be the network name, the actual device ID on which the executable network is running, or other properties that cannot be changed dynamically.
- Parameters:
- `ie_exec_network`: A pointer to `ie_executable_network_t` instance.
- `metric_name` - A metric name to request.
- `param_result` - A metric value corresponding to a metric key.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_exec_network_set_config(ie_executable_network_t *ie_exec_network, ie_param_config_t *param_config, desc_t *desc)`
- Description: Sets a configuration for the current executable network.
- Parameters:
- `ie_exec_network`: A pointer to `ie_executable_network_t` instance.
- `param_config` - A configuration for the current executable network.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_exec_network_get_config(ie_executable_network_t *ie_exec_network, const char *metric_config, ie_param_t *param_result)`
- Description: Gets configuration for the current executable network. The method is responsible for extracting information which affects executable network execution.
- Parameters:
- `ie_exec_network` - A pointer to `ie_executable_network_t` instance.
- `metric_config` - A configuration parameter name to request.
- `param_result` - A configuration value corresponding to a configuration key.
- Return value: Status code of the operation: OK(0) for success.
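A short sketch of creating an infer request and querying one executable-network metric. It uses the `ie_exec_network_create_infer_request` prototype from `ie_c_api.h` later in this commit, which drops the `desc_t` argument shown above; the `"NETWORK_NAME"` metric key is an assumption, since valid keys are device and runtime specific.
```
#include <stdio.h>
#include "ie_c_api.h"   /* assumed include path; may differ with the install layout */

/* Create an infer request and query one metric of the loaded network. */
static IEStatusCode prepare_request(ie_executable_network_t *exe_network,
                                    ie_infer_request_t **request) {
    IEStatusCode status = ie_exec_network_create_infer_request(exe_network, request);
    if (status != OK)
        return status;

    /* "NETWORK_NAME" is an assumed metric key; valid keys are device and runtime specific */
    ie_param_t param;
    if (ie_exec_network_get_metric(exe_network, "NETWORK_NAME", &param) == OK)
        printf("network name: %s\n", param.params);

    return OK;
}
```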
## InferRequest
This struct provides an interface to infer requests of `ExecutableNetwork` and serves to handle infer requests execution and to set and get output data.
### Methods
- `IEStatusCode *ie_infer_request_get_blob(ie_infer_request_t *infer_request, const char *name, ie_blob_t **blob_result)`
- Description: Gets a blob corresponding to the blob name.
- Parameters:
- `infer_request` - A pointer to `ie_infer_request_t` instance
- `name` - Blob name.
- `blob_result` - A pointer to the blob corresponding to the blob name.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_set_blob(ie_infer_request_t *infer_request, ie_blob_t *blob)`
- Description: Sets the blob in an inference request.
- Parameters:
- `infer_request`: A pointer to `ie_infer_request_t` instance.
- `blob` - A pointer to `ie_blob_t` instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_infer(ie_infer_request_t *infer_request)`
- Description: Starts synchronous inference of the infer request and fills the outputs array (see the sketch at the end of this section).
- Parameters:
- `infer_request`: A pointer to `ie_infer_request_t` instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_infer_async(ie_infer_request_t *infer_request)`
- Description: Starts asynchronous inference of the infer request and fills the outputs array.
- Parameters:
- `infer_request` - A pointer to `ie_infer_request_t` instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_set_completion_callback(ie_infer_request_t *infer_request,completeCallBackFunc callback)`
- Description: Sets a callback function that will be called on success or failure of asynchronous request.
- Parameters:
- `infer_request` - A pointer to a `ie_infer_request_t` instance.
- `callback` - A function to be called.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_wait(ie_infer_request_t *infer_request, int64_t timeout)`
- Description: Waits for the result to become available. Blocks until specified timeout elapses or the result becomes available, whichever comes first.
> **NOTE**: There are special values of the timeout parameter:
- 0 - Immediately returns the inference status. It does not block or interrupt execution.
- -1 - Waits until the inference result becomes available (default value).
- Parameters:
- `infer_request` -A pointer to a `ie_infer_request_t` instance.
- `timeout` - Time to wait in milliseconds or special (0, -1) cases described above. If not specified, `timeout` value is set to -1 by default.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_infer_request_set_batch(ie_infer_request_t *infer_request, const size_t size)`
- Description: Sets new batch size for certain infer request when dynamic batching is enabled in executable network that created this request.
> **NOTE**: Support of dynamic batch size depends on the target plugin.
- Parameters:
- `infer_request` -A pointer to a `ie_infer_request_t` instance.
- `size` - New batch size to be used by all the following inference calls for this request.
- Return value: Status code of the operation: OK(0) for success.
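The sketch referenced from `ie_infer_request_infer` above: one synchronous and one asynchronous inference on a prepared request. It uses the `ie_infer_request_set_blob` prototype from `ie_c_api.h` later in this commit, which takes a blob name; the `"data"` input name is an assumption.
```
#include "ie_c_api.h"   /* assumed include path; may differ with the install layout */

/* Run one synchronous and one asynchronous inference on a prepared request.
 * "data" is an assumed input name; query it with ie_network_get_input_name(). */
static IEStatusCode run_inference(ie_infer_request_t *request, ie_blob_t *input_blob) {
    IEStatusCode status = ie_infer_request_set_blob(request, "data", input_blob);
    if (status != OK)
        return status;

    /* Synchronous path: blocks until the outputs are ready */
    status = ie_infer_request_infer(request);
    if (status != OK)
        return status;

    /* Asynchronous path: start, then block with timeout -1 until the result is ready */
    status = ie_infer_request_infer_async(request);
    if (status != OK)
        return status;
    return ie_infer_request_wait(request, -1);
}
```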
## Blob
### Methods
Blobs have a complex structure. The functions below create memory blobs from scratch or on top of existing memory and return a handle to the blob to be used in other `ie_*` functions (a short sketch follows at the end of this section).
- `IEStatusCode make_memory_blob(const tensor_desc *tensorDesc, ie_blob_t *blob_result)`
- Description: Creates a `ie_blob_t` instance with the specified dimensions and layout but does not allocate the memory. Use the allocate() method to allocate memory. `tensor_desc` Defines the layout and dims of the blob.
- Parameters:
- `tensorDesc` - Defines the layout and dims of the blob.
- `blob_result` - A pointer to an empty ie_blob_t instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode make_memory_blob_from_preallocated_memory(const tensor_desc *tensorDesc, void *ptr, size_t size = 0, ie_blob_t *blob_result)`
- Description: Creates a `ie_blob_t` instance with the specified dimensions and layout on the pre-allocated memory. The allocate() call is not required.
- Parameters:
- `tensorDesc` - Tensor description for Blob creation.
- `ptr` - A pointer to the pre-allocated memory.
- `size` - Length of the pre-allocated array. If not set, size is assumed to be equal to the product of dims.
- `blob_result` - A pointer to the newly created ie_blob_t instance.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode make_memory_blob_with_roi(const ie_blob_t **inputBlob, const roi_e *roi, ie_blob_t *blob_result)`
- Description: Creates a blob describing the given ROI instance based on the given blob with pre-allocated memory.
- Parameters:
- `inputBlob` - Original blob with pre-allocated memory.
- `roi` - A roi object inside of the original blob.
- `blob_result` - A pointer to the newly created blob.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_size(ie_blob_t *blob, int *size_result)`
- Description: Gets the total number of elements, which is a product of all the dimensions.
- Parameters:
- `blob` - A pointer to the blob.
- `size_result` - The total number of elements.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_byte_size(ie_blob_t *blob, int *bsize_result)`
- Description: Gets the size of the current Blob in bytes.
- Parameters:
- `blob` - A pointer to the blob.
- `bsize_result` - The size of the current Blob in bytes.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_allocate(ie_blob_t *blob)`
- Description: Allocates memory for blob.
- Parameters:
- `blob` - A pointer to an empty blob.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_deallocate(ie_blob_t *blob)`
- Description: Releases previously allocated data.
- Parameters:
- `blob` - A pointer to the blob.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_buffer(ie_blob_t *blob, void *buffer)`
- Description: Gets access to the allocated memory.
- Parameters:
- `blob` - A pointer to the blob.
- `buffer` - A pointer to the copied data from the blob.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_cbuffer(ie_blob_t *blob, const void *cbuffer)`
- Description: Gets read-only access to the allocated memory.
- Parameters:
- `blob` - A pointer to the blob.
- `cbuffer` - A read-only pointer to the copied data from the blob.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_get_dims(ie_blob_t *blob, dimensions_t *dims_result)`
- Description: Gets dimensions of blob instance's tensor.
- Parameters:
- `blob` - A pointer to the blob.
- `dims_result` - A pointer to the dimensions of blob instance's tensor.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_get_layout(ie_blob_t *blob, layout_t *layout_result)`
- Description: Gets layout of blob instance's tensor.
- Parameters:
- `blob` - A pointer to the blob.
- `layout_result` - A pointer to the layout of blob instance's tensor.
- Return value: Status code of the operation: OK(0) for success.
- `IEStatusCode ie_blob_get_precision(ie_blob_t *blob, precision_e *prec_result)`
- Description: Gets precision of blob instance's tensor.
- Parameters:
- `blob` - A pointer to the blob.
- `prec_result` - A pointer to the precision of blob instance's tensor.
- Return value: Status code of the operation: OK(0) for success.
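A closing sketch for the Blob functions above: fetching an output blob from a finished request and inspecting it with `ie_blob_get_dims`, `ie_blob_get_precision`, and `ie_blob_byte_size`. The `"prob"` output name is an assumption, and a matching blob release function is not shown in this excerpt.
```
#include <stdio.h>
#include "ie_c_api.h"   /* assumed include path; may differ with the install layout */

/* Inspect an output blob of a finished request.
 * "prob" is an assumed output name; query it with ie_network_get_output_name(). */
static void dump_output_info(ie_infer_request_t *request) {
    ie_blob_t *blob = NULL;
    if (ie_infer_request_get_blob(request, "prob", &blob) != OK)
        return;

    dimensions_t dims;
    precision_e prec;
    int byte_size = 0;
    ie_blob_get_dims(blob, &dims);
    ie_blob_get_precision(blob, &prec);
    ie_blob_byte_size(blob, &byte_size);

    printf("output rank: %zu, precision: %d, bytes: %d\n",
           dims.ranks, (int)prec, byte_size);

    /* The blob should be released when no longer needed;
     * the matching free function is not shown in this excerpt. */
}
```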

View File

@ -0,0 +1,928 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file ie_c_api.h
* The C API of the Inference Engine bridge enables use of the OpenVINO Inference Engine
* library and all its plugins in native applications without the C++ API. The scope of the API
* covers a significant part of the C++ API and includes an ability to read a model from disk,
* modify input and output information to match their runtime representation (such as data
* types or memory layout), load an in-memory model to the Inference Engine on different
* devices including heterogeneous and multi-device modes, manage memory where input and
* output is allocated, and manage the inference flow.
**/
#ifndef IE_C_API_H
#define IE_C_API_H
#include <stdint.h>
#include <stdio.h>
#ifdef __cplusplus
#define INFERENCE_ENGINE_C_API_EXTERN extern "C"
#else
#define INFERENCE_ENGINE_C_API_EXTERN
#endif
#if defined(__GNUC__) && (__GNUC__ < 4)
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __VA_ARGS__
#else
#if defined(_WIN32)
#ifdef inference_engine_c_api_EXPORTS
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __declspec(dllexport) __VA_ARGS__ __cdecl
#else
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __declspec(dllimport) __VA_ARGS__ __cdecl
#endif
#else
#define INFERENCE_ENGINE_C_API(...) INFERENCE_ENGINE_C_API_EXTERN __attribute__((visibility("default"))) __VA_ARGS__
#endif
#endif
typedef struct ie_core ie_core_t;
typedef struct ie_network ie_network_t;
typedef struct ie_executable ie_executable_network_t;
typedef struct ie_infer_request ie_infer_request_t;
typedef struct ie_blob ie_blob_t;
/**
* @struct ie_core_version
* @brief Represents version information that describes devices and the inference engine runtime library
*/
typedef struct ie_core_version {
size_t major;
size_t minor;
const char *device_name;
const char *build_number;
const char *description;
}ie_core_version_t;
/**
* @struct ie_core_versions
* @brief Represents all versions information that describes all devices and the inference engine runtime library
*/
typedef struct ie_core_versions {
ie_core_version_t *versions;
size_t num_vers;
}ie_core_versions_t;
/**
* @struct ie_config
* @brief Represents configuration information that describes devices
*/
typedef struct ie_config {
const char *name;
const char *value;
struct ie_config *next;
}ie_config_t;
/**
* @struct ie_param
* @brief metric and config parameters.
*/
typedef struct ie_param {
union {
char *params;
unsigned int number;
unsigned int range_for_async_infer_request[3];
unsigned int range_for_streams[2];
};
}ie_param_t;
/**
* @struct ie_param_config
* @brief Represents configuration parameter information
*/
typedef struct ie_param_config {
char *name;
ie_param_t *param;
}ie_param_config_t;
/**
* @struct desc
* @brief Represents detailed information for an error
*/
typedef struct desc {
char msg[256];
}desc_t;
/**
* @struct dimensions
* @brief Represents dimensions for input or output data
*/
typedef struct dimensions {
size_t ranks;
size_t dims[8];
}dimensions_t;
/**
* @enum layout_e
* @brief Layouts that the inference engine supports
*/
typedef enum {
ANY = 0, // "any" layout
// I/O data layouts
NCHW = 1,
NHWC = 2,
NCDHW = 3,
NDHWC = 4,
// weight layouts
OIHW = 64,
// Scalar
SCALAR = 95,
// bias layouts
C = 96,
// Single image layout (for mean image)
CHW = 128,
// 2D
HW = 192,
NC = 193,
CN = 194,
BLOCKED = 200,
}layout_e;
/**
* @enum precision_e
* @brief Precisions that the inference engine supports
*/
typedef enum {
UNSPECIFIED = 255, /**< Unspecified value. Used by default */
MIXED = 0, /**< Mixed value. Can be received from network. Not applicable for tensors */
FP32 = 10, /**< 32bit floating point value */
FP16 = 11, /**< 16bit floating point value */
Q78 = 20, /**< 16bit specific signed fixed point precision */
I16 = 30, /**< 16bit signed integer value */
U8 = 40, /**< 8bit unsigned integer value */
I8 = 50, /**< 8bit signed integer value */
U16 = 60, /**< 16bit unsigned integer value */
I32 = 70, /**< 32bit signed integer value */
I64 = 72, /**< 64bit signed integer value */
BIN = 71, /**< 1bit integer value */
CUSTOM = 80 /**< custom precision has its own name and size of elements */
}precision_e;
/**
* @struct tensor_desc
* @brief Represents detailed information for a tensor
*/
typedef struct tensor_desc {
layout_e layout;
dimensions_t dims;
precision_e precision;
}tensor_desc_t;
/**
* @enum colorformat_e
* @brief Extra information about input color format for preprocessing
*/
typedef enum {
RAW = 0u, ///< Plain blob (default), no extra color processing required
RGB, ///< RGB color format
BGR, ///< BGR color format, default in DLDT
RGBX, ///< RGBX color format with X ignored during inference
BGRX, ///< BGRX color format with X ignored during inference
NV12, ///< NV12 color format represented as compound Y+UV blob
}colorformat_e;
/**
* @enum resize_alg_e
* @brief Represents the list of supported resize algorithms.
*/
typedef enum {
NO_RESIZE = 0,
RESIZE_BILINEAR,
RESIZE_AREA
}resize_alg_e;
/**
* @enum IEStatusCode
* @brief This enum contains codes for all possible return values of the interface functions
*/
typedef enum {
OK = 0,
GENERAL_ERROR = -1,
NOT_IMPLEMENTED = -2,
NETWORK_NOT_LOADED = -3,
PARAMETER_MISMATCH = -4,
NOT_FOUND = -5,
OUT_OF_BOUNDS = -6,
/*
* @brief exception not of std::exception derived type was thrown
*/
UNEXPECTED = -7,
REQUEST_BUSY = -8,
RESULT_NOT_READY = -9,
NOT_ALLOCATED = -10,
INFER_NOT_STARTED = -11,
NETWORK_NOT_READ = -12
}IEStatusCode;
/**
* @struct roi_t
* @brief This structure describes roi data.
*/
typedef struct roi {
size_t id; // ID of a roi
size_t posX; // W upper left coordinate of roi
size_t posY; // H upper left coordinate of roi
size_t sizeX; // W size of roi
size_t sizeY; // H size of roi
}roi_t;
/**
* @struct input_shape
* @brief Represents shape for input data
*/
typedef struct input_shape {
char *name;
dimensions_t shape;
}input_shape_t;
/**
* @struct input_shapes
* @brief Represents shapes for all input data
*/
typedef struct input_shapes {
input_shape_t *shapes;
size_t shape_num;
}input_shapes_t;
/**
* @struct ie_blob_buffer
* @brief Represents copied data from the given blob.
*/
typedef struct ie_blob_buffer {
union {
void *buffer; // buffer can be written
const void *cbuffer; // cbuffer is read-only
};
}ie_blob_buffer_t;
/**
* @struct ie_complete_call_back
* @brief Completion callback definition about the function and args
*/
typedef struct ie_complete_call_back {
void (*completeCallBackFunc)(void *args);
void *args;
}ie_complete_call_back_t;
/**
* @brief Returns the version of the API that is exported.
* @return Version number of the API.
*/
INFERENCE_ENGINE_C_API(const char *) ie_c_api_version(void);
// Core
/**
* @defgroup Core Core
* Set of functions dedicated to working with registered plugins and loading
* network to the registered devices.
* @{
*/
/**
* @brief Constructs Inference Engine Core instance using XML configuration file with devices description.
* See RegisterPlugins for more details. Use the ie_core_free() method to free memory.
* @ingroup Core
* @param xml_config_file A path to .xml file with devices to load from. If XML configuration file is not specified,
* then default Inference Engine devices are loaded from the default plugin.xml file.
* @param core A pointer to the newly created ie_core_t.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_create(const char *xml_config_file, ie_core_t **core);
/**
* @brief Releases memory occupied by core.
* @ingroup Core
* @param core A pointer to the core to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_free(ie_core_t **core);
/**
* @brief Gets version information of the device specified. Use the ie_core_versions_free() method to free memory.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param device_name Name to identify the device.
* @param versions A pointer to versions corresponding to device_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_versions(const ie_core_t *core, const char *device_name, ie_core_versions_t *versions);
/**
* @brief Releases memory occupied by ie_core_versions.
* @ingroup Core
* @param vers A pointer to the ie_core_versions to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_versions_free(ie_core_versions_t *vers);
/**
* @brief Reads the model from the .xml and .bin files of the IR. Use the ie_network_free() method to free memory.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param xml .xml file's path of the IR.
* @param weights_file The .bin file path of the IR. If the path is empty, the API tries to read a .bin file with the same name as the xml and,
* if a .bin file with the same name is not found, loads the IR without weights.
* @param network A pointer to the newly created network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_read_network(ie_core_t *core, const char *xml, const char *weights_file, ie_network_t **network);
/**
* @brief Creates an executable network from a network object. Users can create as many networks as they need and use
* them simultaneously (up to the limitation of the hardware resources). Use the ie_exec_network_free() method to free memory.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param network A pointer to ie_network instance.
* @param device_name Name of device to load network to.
* @param config Device configuration.
* @param exe_network A pointer to the newly created executable network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_load_network(ie_core_t *core, const ie_network_t *network, const char *device_name, \
const ie_config_t *config, ie_executable_network_t **exe_network);
/**
* @brief Sets configuration for device.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param ie_core_config Device configuration.
* @param device_name An optional name of a device. If device name is not specified,
* the config is set for all the registered devices.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_set_config(ie_core_t *core, const ie_config_t *ie_core_config, const char *device_name);
/**
* @brief Registers a new device and a plugin which implement this device inside Inference Engine.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param plugin_name A name of a plugin. Depending on a platform, plugin_name is wrapped with
* a shared library suffix and a prefix to identify a full name of the library.
* @param device_name A device name to register plugin for. If not specified, the method registers
* a plugin with the default name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_register_plugin(ie_core_t *core, const char *plugin_name, const char *device_name);
/**
* @brief Registers plugins specified in an ".xml" configuration file.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param xml_config_file A full path to ".xml" file containing plugins configuration.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_register_plugins(ie_core_t *core, const char *xml_config_file);
/**
* @brief Unregisters a plugin with a specified device name.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param device_name A device name of the device to unregister.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_unregister_plugin(ie_core_t *core, const char *device_name);
/**
* @brief Loads extension library to the device with a specified device name.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param extension_path Path to the extensions library file to load to a device.
* @param device_name A device name of a device to load the extensions to.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_add_extension(ie_core_t *core, const char *extension_path, const char *device_name);
/**
* @brief Gets general runtime metric for dedicated hardware. The method is needed to request common device properties
* which are executable network agnostic. It can be device name, temperature, and other device-specific values.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param device_name A name of a device to get a metric value.
* @param metric_name A metric name to request.
* @param param_result A metric value corresponding to the metric_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_metric(const ie_core_t *core, const char *device_name, const char *metric_name, ie_param_t *param_result);
/**
* @brief Gets configuration dedicated to device behaviour. The method is targeted to extract information
* which can be set via SetConfig method.
* @ingroup Core
* @param core A pointer to ie_core_t instance.
* @param device_name A name of a device to get a configuration value.
* @param config_name Name of a configuration.
* @param param_result A configuration value corresponding to the config_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_core_get_config(const ie_core_t *core, const char *device_name, const char *config_name, ie_param_t *param_result);
/** @} */ // end of Core
// ExecutableNetwork
/**
* @defgroup ExecutableNetwork ExecutableNetwork
* Set of functions representing neural networks that have been loaded to a device.
* @{
*/
/**
* @brief Releases memory occupied by ExecutableNetwork.
* @ingroup ExecutableNetwork
* @param ie_exec_network A pointer to the ExecutableNetwork to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_free(ie_executable_network_t **ie_exec_network);
/**
* @brief Creates an inference request instance used to infer the network. The created request has allocated input
* and output blobs (that can be changed later). Use the ie_infer_request_free() method to free memory.
* @ingroup ExecutableNetwork
* @param ie_exec_network A pointer to ie_executable_network_t instance.
* @param request A pointer to the newly created ie_infer_request_t instance
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_create_infer_request(ie_executable_network_t *ie_exec_network, ie_infer_request_t **request);
/**
* @brief Gets general runtime metric for an executable network. It can be network name, actual device ID on which executable network is running
* or all other properties which cannot be changed dynamically.
* @ingroup ExecutableNetwork
* @param ie_exec_network A pointer to ie_executable_network_t instance.
* @param metric_name A metric name to request.
* @param param_result A metric value corresponding to the metric_name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_get_metric(const ie_executable_network_t *ie_exec_network, \
const char *metric_name, ie_param_t *param_result);
/**
* @brief Sets configuration for the current executable network. Currently, the method can be used
* when the network runs on the Multi device and the only configuration parameter that can be set is "MULTI_DEVICE_PRIORITIES".
* @ingroup ExecutableNetwork
* @param ie_exec_network A pointer to ie_executable_network_t instance.
* @param param_config A pointer to the device configuration.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_set_config(ie_executable_network_t *ie_exec_network, const ie_config_t *param_config);
/**
* @brief Gets configuration for current executable network. The method is responsible to
* extract information which affects executable network execution.
* @ingroup ExecutableNetwork
* @param ie_exec_network A pointer to ie_executable_network_t instance.
* @param metric_config A configuration parameter name to request.
* @param param_result A configuration value corresponding to a configuration parameter name.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_exec_network_get_config(const ie_executable_network_t *ie_exec_network, \
const char *metric_config, ie_param_t *param_result);
/** @} */ // end of ExecutableNetwork
// InferRequest
/**
* @defgroup InferRequest InferRequest
* Set of functions responsible for dedicated inference for certain
* ExecutableNetwork.
* @{
*/
/**
* @brief Releases memory occupied by ie_infer_request_t instance.
* @ingroup InferRequest
* @param infer_request A pointer to the ie_infer_request_t to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_free(ie_infer_request_t **infer_request);
/**
* @brief Gets input/output data for inference
* @ingroup InferRequest
* @param infer_request A pointer to ie_infer_request_t instance.
* @param name Name of input or output blob.
* @param blob A pointer to input or output blob. The type of Blob must match the network input precision and size.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_get_blob(ie_infer_request_t *infer_request, const char *name, ie_blob_t **blob);
/**
* @brief Sets input/output data to inference.
* @ingroup InferRequest
* @param infer_request A pointer to ie_infer_request_t instance.
* @param name Name of input or output blob.
* @param blob Reference to input or output blob. The type of a blob must match the network input precision and size.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_set_blob(ie_infer_request_t *infer_request, const char *name, const ie_blob_t *blob);
/**
* @brief Starts synchronous inference of the infer request and fills outputs.
* @ingroup InferRequest
* @param infer_request A pointer to ie_infer_request_t instance.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_infer(ie_infer_request_t *infer_request);
/**
* @brief Starts asynchronous inference of the infer request and fills outputs.
* @ingroup InferRequest
* @param infer_request A pointer to ie_infer_request_t instance.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_infer_async(ie_infer_request_t *infer_request);
/**
* @brief Sets a callback function that will be called on success or failure of asynchronous request
* @ingroup InferRequest
* @param infer_request A pointer to ie_infer_request_t instance.
* @param callback A function to be called.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_set_completion_callback(ie_infer_request_t *infer_request, ie_complete_call_back_t *callback);
/**
* @brief Waits for the result to become available. Blocks until specified timeout elapses or the result becomes available, whichever comes first.
* @ingroup InferRequest
* @param infer_request A pointer to ie_infer_request_t instance.
* @param timeout Maximum duration in milliseconds to block for
* @note There are special cases when timeout is equal to some value of the WaitMode enum:
* * 0 - Immediately returns the inference status. It does not block or interrupt execution.
* * -1 - waits until inference result becomes available
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_wait(ie_infer_request_t *infer_request, const int64_t timeout);
/**
* @brief Sets new batch size for certain infer request when dynamic batching is enabled in executable network that created this request.
* @ingroup InferRequest
* @param infer_request A pointer to ie_infer_request_t instance.
* @param size New batch size to be used by all the following inference calls for this request.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_infer_request_set_batch(ie_infer_request_t *infer_request, const size_t size);
/** @} */ // end of InferRequest
// Network
/**
* @defgroup Network Network
* Set of functions managing a network that has been read from the IR before loading
* it to the device.
* @{
*/
/**
* @brief When the network is loaded into the Inference Engine, it is not required anymore and should be released
* @ingroup Network
* @param network The pointer to the instance of the ie_network_t to free.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_free(ie_network_t **network);
/**
* @brief Gets number of inputs for the network.
* @ingroup Network
* @param network A pointer to the instance of the ie_network_t to get number of input information.
* @param size_result A number of the instance's input information.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_inputs_number(const ie_network_t *network, size_t *size_result);
/**
* @brief Gets name corresponding to the "number". Use the ie_network_name_free() method to free memory.
* @ingroup Network
* @param network A pointer to the instance of the ie_network_t to get input information.
* @param number An id of the input information.
* @param name Input name corresponding to the number.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_name(const ie_network_t *network, size_t number, char **name);
/**
* @brief Gets a precision of the input data provided by user.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param prec_result A pointer to the precision used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_precision(const ie_network_t *network, const char *input_name, precision_e *prec_result);
/**
* @brief Changes the precision of the input data provided by the user.
* This function should be called before loading the network to the device.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param p A new precision of the input data to set (eg. precision_e.FP16).
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_precision(ie_network_t *network, const char *input_name, const precision_e p);
/**
* @brief Gets a layout of the input data.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param layout_result A pointer to the layout used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_layout(const ie_network_t *network, const char *input_name, layout_e *layout_result);
/**
* @brief Changes the layout of the input data named "input_name".
* This function should be called before loading the network to the device.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param l A new layout of the input data to set.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_layout(ie_network_t *network, const char *input_name, const layout_e l);
/**
* @brief Gets dimensions/shape of the input data with reversed order.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param dims_result A pointer to the dimensions used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_dims(const ie_network_t *network, const char *input_name, dimensions_t *dims_result);
/**
* @brief Gets pre-configured resize algorithm.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param resize_alg_result The pointer to the resize algorithm used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_resize_algorithm(const ie_network_t *network, const char *input_name, \
resize_alg_e *resize_alg_result);
/**
* @brief Sets a resize algorithm to be used during pre-processing.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param resize_algo Resize algorithm.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_input_resize_algorithm(ie_network_t *network, const char *input_name, const resize_alg_e resize_algo);
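/*
 * A minimal sketch of configuring input pre-processing; as noted for the
 * precision and layout setters above, these calls are expected to happen
 * before the network is loaded to a device. `net` and `input_name` are
 * assumed to be valid, and status codes are not checked here.
 *
 *   ie_network_set_input_resize_algorithm(net, input_name, RESIZE_BILINEAR);
 *   ie_network_set_input_layout(net, input_name, NHWC);
 *   ie_network_set_input_precision(net, input_name, U8);
 */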
/**
* @brief Gets color format of the input data.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param colformat_result The pointer to the color format used for input blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_color_format(const ie_network_t *network, const char *input_name, colorformat_e *colformat_result);
/**
* @brief Changes the color format of the input data.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param input_name Name of input data.
* @param color_format Color format of the input data.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_color_format(ie_network_t *network, const char *input_name, const colorformat_e color_format);
/**
* @brief Helper method to collect all input shapes with the input names of the corresponding input data.
* Use the ie_network_input_shapes_free() method to free memory.
* @ingroup Network
* @param network A pointer to the instance of the ie_network_t to get input shapes.
* @param shapes A pointer to the input_shapes.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_input_shapes(ie_network_t *network, input_shapes_t *shapes);
/**
* @brief Run shape inference with new input shapes for the network.
* @ingroup Network
* @param network A pointer to the instance of the ie_network_t to reshape.
* @param shapes A new input shapes to set for the network.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_reshape(ie_network_t *network, const input_shapes_t shapes);
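/*
 * A minimal reshape sketch (assumes `net` is valid, the first input's batch
 * dimension is dims[0], and status codes are not checked); this mirrors what
 * the object detection sample in this commit does:
 *
 *   input_shapes_t shapes;
 *   ie_network_get_input_shapes(net, &shapes);
 *   shapes.shapes[0].shape.dims[0] = 2;  // e.g. set the batch size to 2
 *   ie_network_reshape(net, shapes);
 *   ie_network_input_shapes_free(&shapes);
 */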
/**
* @brief Gets the number of outputs for the network.
* @ingroup Network
* @param network A pointer to the instance of the ie_network_t to get number of output information.
* @param size_result A number of the network's output information.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_outputs_number(const ie_network_t *network, size_t *size_result);
/**
* @brief Gets name corresponding to the "number". Use the ie_network_name_free() method to free memory.
* @ingroup Network
* @param network A pointer to the instance of the ie_network_t to get output information.
* @param number An id of output information.
* @param name Output name corresponding to the number.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_name(const ie_network_t *network, const size_t number, char **name);
/**
* @brief Gets a precision of the output data named "output_name".
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param output_name Name of output data.
* @param prec_result A pointer to the precision used for output blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_precision(const ie_network_t *network, const char *output_name, precision_e *prec_result);
/**
* @brief Changes the precision of the output data named "output_name".
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param output_name Name of output data.
* @param p A new precision of the output data to set (eg. precision_e.FP16).
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_output_precision(ie_network_t *network, const char *output_name, const precision_e p);
/**
* @brief Gets a layout of the output data.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param output_name Name of output data.
* @param layout_result A pointer to the layout used for output blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_layout(const ie_network_t *network, const char *output_name, layout_e *layout_result);
/**
* @brief Changes the layout of the output data named "output_name".
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param output_name Name of output data.
* @param l A new layout of the output data to set.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_set_output_layout(ie_network_t *network, const char *output_name, const layout_e l);
/**
* @brief Gets dimensions/shape of the output data with reversed order.
* @ingroup Network
* @param network A pointer to ie_network_t instance.
* @param output_name Name of output data.
* @param dims_result A pointer to the dimensions used for output blob creation.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_get_output_dims(const ie_network_t *network, const char *output_name, dimensions_t *dims_result);
/**
* @brief Releases memory occupied by input_shapes.
* @ingroup Network
* @param inputShapes A pointer to the input_shapes to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_input_shapes_free(input_shapes_t *inputShapes);
/**
* @brief Releases memory occupied by input_name or output_name.
* @ingroup Network
* @param name A pointer to the input_name or output_name to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_network_name_free(char **name);
/** @} */ // end of Network
// Blob
/**
* @defgroup Blob Blob
* Set of functions allowing to access memory from infer requests or to create new
* memory objects to be passed to InferRequests.
* @{
*/
/**
* @brief Creates a blob with the specified dimensions and layout and allocates memory.
* @ingroup Blob
* @param tensorDesc Tensor descriptor for Blob creation.
* @param blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **blob);
/**
* @brief Creates a blob with the given tensor descriptor from the pointer to the pre-allocated memory.
* @ingroup Blob
* @param tensorDesc Tensor descriptor for Blob creation.
* @param ptr Pointer to the pre-allocated memory.
* @param size Length of the pre-allocated array.
* @param blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDesc, void *ptr, size_t size, ie_blob_t **blob);
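/*
 * A minimal sketch of wrapping caller-owned memory into a blob. The shape and
 * types are illustrative (a 1x3x32x32 U8 tensor, dims given in NCHW order with
 * an NHWC layout, as in the hello_classification sample); the memory must stay
 * valid for the lifetime of the blob, and status codes are not checked.
 *
 *   unsigned char data[1 * 3 * 32 * 32];
 *   dimensions_t dims = {4, {1, 3, 32, 32}};
 *   tensor_desc_t desc = {NHWC, dims, U8};
 *   ie_blob_t *blob = NULL;
 *   ie_blob_make_memory_from_preallocated(&desc, data, sizeof(data), &blob);
 */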
/**
* @brief Creates a blob describing given roi_t instance based on the given blob with pre-allocated memory.
* @ingroup Blob
* @param inputBlob original blob with pre-allocated memory.
* @param roi A roi_t instance inside of the original blob.
* @param blob A pointer to the newly created blob.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_make_memory_with_roi(const ie_blob_t *inputBlob, const roi_t *roi, ie_blob_t **blob);
/**
* @brief Gets the total number of elements, which is a product of all the dimensions.
* @ingroup Blob
* @param blob A pointer to the blob.
* @param size_result The total number of elements.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_size(ie_blob_t *blob, int *size_result);
/**
* @brief Gets the size of the current Blob in bytes.
* @ingroup Blob
* @param blob A pointer to the blob.
* @param bsize_result The size of the current blob in bytes.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_byte_size(ie_blob_t *blob, int *bsize_result);
/**
* @brief Releases previously allocated data
* @ingroup Blob
* @param blob A pointer to the blob to free memory.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_deallocate(ie_blob_t **blob);
/**
* @brief Gets access to the allocated memory.
* @ingroup Blob
* @param blob A pointer to the blob.
* @param blob_buffer A pointer to the structure that provides access to the blob data.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_buffer(const ie_blob_t *blob, ie_blob_buffer_t *blob_buffer);
/**
* @brief Gets read-only access to the allocated memory.
* @ingroup Blob
* @param blob A pointer to the blob.
* @param blob_cbuffer A pointer to the structure that provides read-only access to the blob data.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_cbuffer(const ie_blob_t *blob, ie_blob_buffer_t *blob_cbuffer);
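/*
 * A minimal read-only access sketch (assumes `blob` holds FP32 data, as the
 * classification output in the samples of this commit does; status codes are
 * not checked):
 *
 *   ie_blob_buffer_t buf;
 *   ie_blob_get_cbuffer(blob, &buf);
 *   const float *values = (const float *)(buf.cbuffer);
 */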
/**
* @brief Gets dimensions of blob's tensor.
* @ingroup Blob
* @param blob A pointer to the blob.
* @param dims_result A pointer to the dimensions of blob's tensor.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_dims(const ie_blob_t *blob, dimensions_t *dims_result);
/**
* @brief Gets layout of blob's tensor.
* @ingroup Blob
* @param blob A pointer to the blob.
* @param layout_result A pointer to the layout of blob's tensor.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_layout(const ie_blob_t *blob, layout_e *layout_result);
/**
* @brief Gets precision of blob's tensor.
* @ingroup Blob
* @param blob A pointer to the blob.
* @param prec_result A pointer to the precision of blob's tensor.
* @return Status code of the operation: OK(0) for success.
*/
INFERENCE_ENGINE_C_API(IEStatusCode) ie_blob_get_precision(const ie_blob_t *blob, precision_e *prec_result);
/** @} */ // end of Blob
#endif // IE_C_API_H

View File

@ -0,0 +1,5 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
include("${InferenceEngine_SOURCE_DIR}/samples/CMakeLists.txt")

View File

@ -0,0 +1,29 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
project(OpenCV_C_Wraper)
set(TARGET_NAME opencv_c_wraper)
#set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--exclude-libs,ALL")
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
# create library
add_library(${TARGET_NAME} SHARED ${HEADERS} ${SOURCES})
# Find OpenCV components if exist
find_package(OpenCV COMPONENTS imgcodecs videoio QUIET)
if(NOT OpenCV_FOUND)
message(WARNING "OPENCV is disabled or not found, " ${TARGET_NAME} " is built without OPENCV support")
else()
add_definitions(-DUSE_OPENCV)
endif()
target_link_libraries(${TARGET_NAME} PUBLIC ${OpenCV_LIBRARIES})
target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()

View File

@ -0,0 +1,160 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "opencv_c_wraper.h"
#ifndef USE_OPENCV
int image_read(const char *img_path, c_mat_t *img) { return -1; }
int image_resize(const c_mat_t *src_img, c_mat_t *dst_img, const int width, const int height) { return -1; }
int image_save(const char *img_path, c_mat_t *img) { return -1; }
int image_free(c_mat_t *img) { return -1; }
int image_add_rectangles(c_mat_t *img, rectangle_t rects[], int classes[], int num, int thickness) { return -1; }
#else
#include <opencv2/opencv.hpp>
#include <algorithm>
int image_read(const char *img_path, c_mat_t *img) {
if (img_path == nullptr || img == nullptr) {
return -1;
}
cv::Mat mat = cv::imread(img_path);
if (mat.data == NULL) {
return -1;
}
img->mat_channels = mat.channels();
img->mat_width = mat.size().width;
img->mat_height = mat.size().height;
img->mat_type = mat.type();
img->mat_data_size = img->mat_channels * img->mat_width * img->mat_height;
img->mat_data = (unsigned char *)malloc(sizeof(unsigned char) * img->mat_data_size);
for (int i = 0; i < img->mat_data_size; ++i) {
img->mat_data[i] = mat.data[i];
}
return 0;
}
int image_resize(const c_mat_t *src_img, c_mat_t *dst_img, const int width, const int height) {
if (src_img == nullptr || dst_img == nullptr) {
return -1;
}
cv::Mat mat_src(cv::Size(src_img->mat_width, src_img->mat_height), src_img->mat_type, src_img->mat_data);
cv::Mat mat_dst;
cv::resize(mat_src, mat_dst, cv::Size(width, height));
if (mat_dst.data) {
dst_img->mat_channels = mat_dst.channels();
dst_img->mat_width = mat_dst.size().width;
dst_img->mat_height = mat_dst.size().height;
dst_img->mat_type = mat_dst.type();
dst_img->mat_data_size = dst_img->mat_channels * dst_img->mat_width * dst_img->mat_height;
dst_img->mat_data = (unsigned char *)malloc(sizeof(unsigned char) * dst_img->mat_data_size);
for (int i = 0; i < dst_img->mat_data_size; ++i) {
dst_img->mat_data[i] = mat_dst.data[i];
}
} else {
return -1;
}
return 0;
}
int image_save(const char *img_path, c_mat_t *img) {
if (img_path == nullptr || img == nullptr) {
return -1;
}
cv::Mat mat(cv::Size(img->mat_width, img->mat_height), img->mat_type, img->mat_data);
return cv::imwrite(img_path, mat) ? 0 : -1;
}
int image_free(c_mat_t *img) {
if (img) {
free(img->mat_data);
img->mat_data = NULL;
return 0;
}
return -1;
}
int image_add_rectangles(c_mat_t *img, rectangle_t rects[], int classes[], int num, int thickness) {
int colors_num = 21;
color_t colors[21] = { // colors to be used for bounding boxes
{ 128, 64, 128 },
{ 232, 35, 244 },
{ 70, 70, 70 },
{ 156, 102, 102 },
{ 153, 153, 190 },
{ 153, 153, 153 },
{ 30, 170, 250 },
{ 0, 220, 220 },
{ 35, 142, 107 },
{ 152, 251, 152 },
{ 180, 130, 70 },
{ 60, 20, 220 },
{ 0, 0, 255 },
{ 142, 0, 0 },
{ 70, 0, 0 },
{ 100, 60, 0 },
{ 90, 0, 0 },
{ 230, 0, 0 },
{ 32, 11, 119 },
{ 0, 74, 111 },
{ 81, 0, 81 }
};
for (int i = 0; i < num; i++) {
int x = rects[i].x_min;
int y = rects[i].y_min;
int w = rects[i].rect_width;
int h = rects[i].rect_height;
int cls = classes[i] % colors_num; // color of a bounding box line
if (x < 0) x = 0;
if (y < 0) y = 0;
if (w < 0) w = 0;
if (h < 0) h = 0;
if (x >= img->mat_width) { x = img->mat_width - 1; w = 0; thickness = 1; }
if (y >= img->mat_height) { y = img->mat_height - 1; h = 0; thickness = 1; }
if ((x + w) >= img->mat_width) { w = img->mat_width - x - 1; }
if ((y + h) >= img->mat_height) { h = img->mat_height - y - 1; }
thickness = std::min(std::min(thickness, w / 2 + 1), h / 2 + 1);
size_t shift_first;
size_t shift_second;
for (int t = 0; t < thickness; t++) {
shift_first = (y + t) * img->mat_width * 3;
shift_second = (y + h - t) * img->mat_width * 3;
for (int ii = x; ii < x + w + 1; ii++) {
img->mat_data[shift_first + ii * 3] = colors[cls].r;
img->mat_data[shift_first + ii * 3 + 1] = colors[cls].g;
img->mat_data[shift_first + ii * 3 + 2] = colors[cls].b;
img->mat_data[shift_second + ii * 3] = colors[cls].r;
img->mat_data[shift_second + ii * 3 + 1] = colors[cls].g;
img->mat_data[shift_second + ii * 3 + 2] = colors[cls].b;
}
}
for (int t = 0; t < thickness; t++) {
shift_first = (x + t) * 3;
shift_second = (x + w - t) * 3;
for (int ii = y; ii < y + h + 1; ii++) {
img->mat_data[shift_first + ii * img->mat_width * 3] = colors[cls].r;
img->mat_data[shift_first + ii * img->mat_width * 3 + 1] = colors[cls].g;
img->mat_data[shift_first + ii * img->mat_width * 3 + 2] = colors[cls].b;
img->mat_data[shift_second + ii * img->mat_width * 3] = colors[cls].r;
img->mat_data[shift_second + ii * img->mat_width * 3 + 1] = colors[cls].g;
img->mat_data[shift_second + ii * img->mat_width * 3 + 2] = colors[cls].b;
}
}
}
return 0;
}
#endif // USE_OPENCV

View File

@ -0,0 +1,104 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <stdint.h>
#include <stdio.h>
#ifdef __cplusplus
#define OPENCV_C_EXTERN extern "C"
#else
#define OPENCV_C_EXTERN
#endif
#if defined(__GNUC__) && (__GNUC__ < 4)
#define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __VA_ARGS__
#else
#if defined(_WIN32)
#ifdef opencv_c_wraper_EXPORTS
#define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __declspec(dllexport) __VA_ARGS__ __cdecl
#else
#define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __declspec(dllimport) __VA_ARGS__ __cdecl
#endif
#else
#define OPENCV_C_WRAPPER(...) OPENCV_C_EXTERN __attribute__((visibility("default"))) __VA_ARGS__
#endif
#endif
/**
* @struct c_mat
* @brief OpenCV Mat Wrapper
*/
typedef struct c_mat {
unsigned char *mat_data;
int mat_data_size;
int mat_width;
int mat_height;
int mat_channels;
int mat_type;
}c_mat_t;
/**
* @struct rectangle
* @brief This structure describes rectangle data.
*/
typedef struct rectangle {
int x_min;
int y_min;
int rect_width;
int rect_height;
}rectangle_t;
/**
* @struct color
* @brief Stores channels of a given color
*/
typedef struct color {
unsigned char r;
unsigned char g;
unsigned char b;
}color_t;
/**
* @brief Loads an image from a file. If the image cannot be read, the function returns -1.
* @param img_path Path of file to be loaded.
* @param img A pointer to the newly created c_mat_t.
* @return Status of the operation: 0 for success, -1 for fail.
*/
OPENCV_C_WRAPPER(int) image_read(const char *img_path, c_mat_t *img);
/**
* @brief Resizes an image.
* @param src_img A pointer to the input image.
* @param dst_img A pointer to the output image.
* @param width The width of dst_img.
* @param height The height of dst_img.
* @return Status of the operation: 0 for success, -1 for fail.
*/
OPENCV_C_WRAPPER(int) image_resize(const c_mat_t *src_img, c_mat_t *dst_img, const int width, const int height);
/**
* @brief Saves an image to a specified file. The image format is chosen based on the filename extension.
* @param img_path Path of the file to be saved.
* @param img Image to be saved.
* @return Status of the operation: 0 for success, -1 for fail.
*/
OPENCV_C_WRAPPER(int) image_save(const char *img_path, c_mat_t *img);
/**
* @brief Releases memory occupied by a c_mat_t instance.
* @param img A pointer to the c_mat_t instance to free memory.
* @return Status of the operation: 0 for success, -1 for fail.
*/
OPENCV_C_WRAPPER(int) image_free(c_mat_t *img);
/**
* @brief Adds colored rectangles to the image.
* @param img Image where rectangles are drawn.
* @param rects Array of rectangles to draw.
* @param classes Array of classes used to pick a color for each rectangle.
* @param num Number of rectangles and classes.
* @param thickness Thickness of a line (in pixels) to be used for bounding boxes.
* @return Status of the operation: 0 for success, -1 for fail.
*/
OPENCV_C_WRAPPER(int) image_add_rectangles(c_mat_t *img, rectangle_t rects[], int classes[], int num, int thickness);

View File

@ -0,0 +1,15 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "hello_classification_c")
# create sample target
add_executable(${TARGET_NAME} main.c)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES} opencv_c_wraper)
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()

View File

@ -0,0 +1,169 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <opencv_c_wraper.h>
#include <c_api/ie_c_api.h>
struct classify_res {
size_t class_id;
float probability;
};
void classify_res_sort(struct classify_res *res, size_t n) {
size_t i, j;
for (i = 0; i < n; ++i) {
for (j = i + 1; j < n; ++j) {
if (res[i].probability < res[j].probability) {
struct classify_res temp = res[i];
res[i] = res[j];
res[j] = temp;
} else if (res[i].probability == res[j].probability && res[i].class_id > res[j].class_id) {
struct classify_res temp = res[i];
res[i] = res[j];
res[j] = temp;
}
}
}
}
struct classify_res *output_blob_to_classify_res(ie_blob_t *blob, size_t *n) {
dimensions_t output_dim;
ie_blob_get_dims(blob, &output_dim);
*n = output_dim.dims[1];
struct classify_res *cls = (struct classify_res *)malloc(sizeof(struct classify_res) * (*n));
IEStatusCode status;
ie_blob_buffer_t blob_cbuffer;
status = ie_blob_get_cbuffer(blob, &blob_cbuffer);
float *blob_data = (float*) (blob_cbuffer.cbuffer);
size_t i;
for (i = 0; i < *n; ++i) {
cls[i].class_id = i;
cls[i].probability = blob_data[i];
}
return cls;
}
void print_classify_res(struct classify_res *cls, size_t n, const char *img_path) {
printf("\nImage %s\n", img_path);
printf("\nclassid probability\n");
printf("------- -----------\n");
size_t i;
for (i = 0; i < n; ++i) {
printf("%zu %f\n", cls[i].class_id, cls[i].probability);
}
}
int main(int argc, char **argv) {
// ------------------------------ Parsing and validation of input args ---------------------------------
if (argc != 4) {
printf("Usage : ./hello_classification <path_to_model> <path_to_image> <device_name>\n");
return EXIT_FAILURE;
}
const char *input_model = argv[1];
const char *input_image_path = argv[2];
const char *device_name = argv[3];
// -----------------------------------------------------------------------------------------------------
// --------------------------- 1. Load inference engine instance -------------------------------------
ie_core_t *core = NULL;
IEStatusCode status = ie_core_create("", &core);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
ie_network_t *network = NULL;
ie_core_read_network(core, input_model, NULL, &network);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 3. Configure input & output ---------------------------------------------
// --------------------------- Prepare input blobs -----------------------------------------------------
char *input_name = NULL;
status = ie_network_get_input_name(network, 0, &input_name);
/* Mark input as resizable by setting of a resize algorithm.
* In this case we will be able to set an input blob of any shape to an infer request.
* Resize and layout conversions are executed automatically during inference */
ie_network_set_input_resize_algorithm(network, input_name, RESIZE_BILINEAR);
ie_network_set_input_layout(network, input_name, NHWC);
ie_network_set_input_precision(network, input_name, U8);
// --------------------------- Prepare output blobs ----------------------------------------------------
char *output_name = NULL;
status = ie_network_get_output_name(network, 0, &output_name);
ie_network_set_output_precision(network, output_name, FP32);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 4. Loading model to the device ------------------------------------------
ie_config_t config = {NULL, NULL, NULL};
ie_executable_network_t *exe_network = NULL;
status = ie_core_load_network(core, network, device_name, &config, &exe_network);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 5. Create infer request -------------------------------------------------
ie_infer_request_t *infer_request = NULL;
status = ie_exec_network_create_infer_request(exe_network, &infer_request);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 6. Prepare input --------------------------------------------------------
/* Read input image to a blob and set it to an infer request without resize and layout conversions. */
c_mat_t img;
image_read(input_image_path, &img);
dimensions_t dimens = {4, {1, (size_t)img.mat_channels, (size_t)img.mat_height, (size_t)img.mat_width}};
tensor_desc_t tensorDesc = {NHWC, dimens, U8};
size_t size = img.mat_data_size;
// just wrap the image data into an ie_blob_t pointer without allocating new memory
ie_blob_t *imgBlob = NULL;
ie_blob_make_memory_from_preallocated(&tensorDesc, img.mat_data, size, &imgBlob);
// infer_request accepts an input blob of any size
ie_infer_request_set_blob(infer_request, input_name, imgBlob);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 7. Do inference --------------------------------------------------------
/* Running the request synchronously */
ie_infer_request_infer(infer_request);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 8. Process output ------------------------------------------------------
ie_blob_t *output_blob = NULL;
ie_infer_request_get_blob(infer_request, output_name, &output_blob);
size_t class_num;
struct classify_res *cls = output_blob_to_classify_res(output_blob, &class_num);
classify_res_sort(cls, class_num);
// Print classification results
size_t top = 10;
if (top > class_num) {
top = class_num;
}
printf("\nTop %zu results:\n", top);
print_classify_res(cls, top, input_image_path);
// -----------------------------------------------------------------------------------------------------
free(cls);
ie_blob_deallocate(&output_blob);
image_free(&img);
ie_infer_request_free(&infer_request);
ie_exec_network_free(&exe_network);
ie_network_name_free(&input_name);
ie_network_name_free(&output_name);
ie_network_free(&network);
ie_core_free(&core);
return EXIT_SUCCESS;
}

View File

@ -0,0 +1,15 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set (TARGET_NAME "object_detection_sample_ssd_c")
# create sample target
add_executable(${TARGET_NAME} main.c)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES} opencv_c_wraper)
if(COMMAND add_cpplint_target)
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
endif()

View File

@ -0,0 +1,134 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#if defined(_WIN32)
#ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN_UNDEF
#endif
#ifndef NOMINMAX
# define NOMINMAX
# define NOMINMAX_UNDEF
#endif
#if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_)
# define _X86_
#endif
#if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_)
# define _AMD64_
#endif
#include <stdlib.h>
#include <string.h>
#include <windef.h>
#include <fileapi.h>
#include <Winbase.h>
#include <sys/stat.h>
// Copied from linux libc sys/stat.h:
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
typedef struct dirent {
char *d_name;
}dirent;
static dirent *createDirent(const wchar_t *wsFilePath) {
dirent *d = (dirent *)malloc(sizeof(dirent));
size_t i;
size_t slen = wcslen(wsFilePath);
d->d_name = (char *)(malloc(slen + 1));
wcstombs_s(&i, d->d_name, slen + 1, wsFilePath, slen);
return d;
}
static void freeDirent(dirent **d) {
free((*d)->d_name);
(*d)->d_name = NULL;
free(*d);
*d = NULL;
}
typedef struct DIR {
WIN32_FIND_DATAA FindFileData;
HANDLE hFind;
dirent *next;
}DIR;
static int endsWith(const char *src, const char *with) {
int wl = (int)(strlen(with));
int so = (int)(strlen(src)) - wl;
if (so < 0) return 0;
if (strncmp(with, &(src[so]), wl) == 0)
return 1;
else
return 0;
}
static int isValid(DIR* dp) {
if (dp->hFind != INVALID_HANDLE_VALUE && dp->FindFileData.dwReserved0) {
return 1;
} else {
return 0;
}
}
static DIR *opendir(const char *dirPath) {
DIR *dp = (DIR *)malloc(sizeof(DIR));
dp->next = NULL;
char *ws = (char *)(malloc(strlen(dirPath) + 3));  // extra room for the appended wildcard and the terminator
strcpy(ws, dirPath);
if (endsWith(ws, "\\"))
strcat(ws, "*");
else
strcat(ws, "\\*");
dp->hFind = FindFirstFileA(ws, &dp->FindFileData);
dp->FindFileData.dwReserved0 = dp->hFind != INVALID_HANDLE_VALUE;
free(ws);
if (!isValid(dp)) {
free(dp);
return NULL;
}
return dp;
}
static struct dirent *readdir(DIR *dp) {
if (dp->next != NULL) freeDirent(&(dp->next));
if (!dp->FindFileData.dwReserved0) return NULL;
wchar_t wbuf[4096];
size_t outSize;
mbstowcs_s(&outSize, wbuf, 4094, dp->FindFileData.cFileName, 4094);
dp->next = createDirent(wbuf);
dp->FindFileData.dwReserved0 = FindNextFileA(dp->hFind, &(dp->FindFileData));
return dp->next;
}
static void closedir(DIR *dp){
if (dp->next) {
freeDirent(&(dp->next));
}
free(dp);
}
#ifdef WIN32_LEAN_AND_MEAN_UNDEF
# undef WIN32_LEAN_AND_MEAN
# undef WIN32_LEAN_AND_MEAN_UNDEF
#endif
#ifdef NOMINMAX_UNDEF
# undef NOMINMAX_UNDEF
# undef NOMINMAX
#endif
#else
#include <sys/types.h>
#include <dirent.h>
#endif

View File

@ -0,0 +1,662 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <sys/stat.h>
#include <c_api/ie_c_api.h>
#include "object_detection_sample_ssd.h"
#include <opencv_c_wraper.h>
#ifdef _WIN32
#include "c_w_dirent.h"
#else
#include <dirent.h>
#endif
#define MAX_IMAGE 20
static const char *img_msg = NULL;
static const char *input_model = NULL;
static const char *device_name = "CPU";
static const char *custom_cldnn_msg = NULL;
static const char *custom_cpu_library_msg = NULL;
static const char *config_msg = NULL;
static int file_num = 0;
static char **file_paths = NULL;
const char *info = "[ INFO ] ";
const char *warn = "[ WARNING ] ";
int ParseAndCheckCommandLine(int argc, char *argv[]) {
int opt = 0;
int help = 0;
char *string = "hi:m:d:c:l:g:";
printf("%sParsing input parameters\n", info);
while ((opt = getopt(argc, argv, string)) != -1) {
switch(opt) {
case 'h':
showUsage();
help = 1;
break;
case 'i':
img_msg = optarg;
break;
case 'm':
input_model = optarg;
break;
case 'd':
device_name = optarg;
break;
case 'c':
custom_cldnn_msg = optarg;
break;
case 'l':
custom_cpu_library_msg = optarg;
break;
case 'g':
config_msg = optarg;
break;
default:
return -1;
}
}
if (help)
return -1;
if (img_msg == NULL) {
printf("Parameter -i is not set\n");
return -1;
}
if (input_model == NULL) {
printf("Parameter -m is not set \n");
return -1;
}
return 1;
}
/**
* @brief This function checks input args and the existence of specified files in a given folder. Updates file_paths and file_num.
* @param arg Path to a file or a directory to be checked for existence.
* @return none.
*/
void readInputFilesArgument(const char *arg) {
struct stat sb;
int i;
if (stat(arg, &sb) != 0) {
printf("%sFile %s cannot be opened!\n", warn, arg);
return;
}
if (S_ISDIR(sb.st_mode)) {
DIR *dp;
dp = opendir(arg);
if (dp == NULL) {
printf("%sFile %s cannot be opened!\n", warn, arg);
return;
}
struct dirent *ep;
while (NULL != (ep = readdir(dp))) {
const char *fileName = ep->d_name;
if (strcmp(fileName, ".") == 0 || strcmp(fileName, "..") == 0) continue;
char *file_path = (char *)malloc(strlen(arg) + strlen(ep->d_name) + 2);
strcpy(file_path, arg);
strcat(file_path, "/");
strcat(file_path, ep->d_name);
if (file_num == 0) {
file_paths = (char **)malloc(sizeof(char *));
file_paths[0] = file_path;
++file_num;
} else {
char **temp = (char **)realloc(file_paths, sizeof(char *) * (file_num +1));
if (temp) {
file_paths = temp;
file_paths[file_num++] = file_path;
} else {
for (i = 0; i < file_num; ++i) {
free(file_paths[i]);
}
free(file_paths);
file_num = 0;
}
}
}
closedir(dp);
dp = NULL;
} else {
char *file_path = (char *)malloc(strlen(arg) + 1);
strcpy(file_path, arg);
if (file_num == 0) {
file_paths = (char **)malloc(sizeof(char *));
}
file_paths[file_num++] = file_path;
}
if (file_num < MAX_IMAGE) {
printf("%sFiles were added: %d\n", info, file_num);
for (i = 0; i < file_num; ++i) {
printf("%s %s\n", info, file_paths[i]);
}
} else {
printf("%sFiles were added: %d. Too many to display each of them.\n", info, file_num);
}
}
/**
* @brief This function finds the -i key in input args. It is necessary to process multiple values for a single key.
* @return none.
*/
void parseInputFilesArguments(int argc, char **argv) {
int readArguments = 0, i;
for (i = 0; i < argc; ++i) {
if (strcmp(argv[i], "-i") == 0) {
readArguments = 1;
continue;
}
if (!readArguments) {
continue;
}
if (argv[i][0] == '-') {
break;
}
readInputFilesArgument(argv[i]);
}
}
/**
* @brief Convert the contents of configuration file to the ie_config_t type.
* @param config_file File path.
* @param comment Comment character (pairs whose key starts with it are skipped).
* @return A pointer to the ie_config_t instance.
*/
ie_config_t *parseConfig(const char *config_file, char comment) {
FILE *file = fopen(config_file, "r");
if (!file) {
return NULL;
}
ie_config_t *cfg = NULL;
char key[256], value[256];
if (fscanf(file, "%s", key)!= EOF && fscanf(file, "%s", value) != EOF) {
char *cfg_name = (char *)malloc(strlen(key) + 1);
char *cfg_value = (char *)malloc(strlen(value) + 1);
strcpy(cfg_name, key);
strcpy(cfg_value, value);
ie_config_t *cfg_t = (ie_config_t *)malloc(sizeof(ie_config_t));
cfg_t->name = cfg_name;
cfg_t->value = cfg_value;
cfg_t->next = NULL;
cfg = cfg_t;
}
if (cfg) {
ie_config_t *cfg_temp = cfg;
while (fscanf(file, "%s", key)!= EOF && fscanf(file, "%s", value) != EOF) {
if (strlen(key) == 0 || key[0] == comment) {
continue;
}
char *cfg_name = (char *)malloc(strlen(key) + 1);
char *cfg_value = (char *)malloc(strlen(value) + 1);
strcpy(cfg_name, key);
strcpy(cfg_value, value);
ie_config_t *cfg_t = (ie_config_t *)malloc(sizeof(ie_config_t));
cfg_t->name = cfg_name;
cfg_t->value = cfg_value;
cfg_t->next = NULL;
cfg_temp->next = cfg_t;
cfg_temp = cfg_temp->next;
}
}
fclose(file);
return cfg;
}
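/*
 * A sketch of a configuration file this parser accepts; the format is an
 * assumption derived from the fscanf("%s") calls above (whitespace-separated
 * key/value pairs, one pair per line), and the key shown is only illustrative:
 *
 *   PERF_COUNT YES
 */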
/**
* @brief Releases memory occupied by config
* @param config A pointer to the config to free memory.
* @return none
*/
void config_free(ie_config_t *config) {
while (config) {
ie_config_t *temp = config;
if (config->name) {
free((char *)config->name);
config->name = NULL;
}
if(config->value) {
free((char *)config->value);
config->value = NULL;
}
config = config->next;
free(temp);
temp = NULL;
}
}
/**
* @brief Converts a number to a string.
* @param str A pointer to the buffer that receives the converted string.
* @param num The number to convert.
* @return none.
*/
void int2str(char *str, int num) {
int i = 0, j;
if (num == 0) {
str[0] = '0';
str[1] = '\0';
return;
}
while (num != 0) {
str[i++] = num % 10 + '0';
num = num / 10;
}
str[i] = '\0';
--i;
for (j = 0; j < i; ++j, --i) {
char temp = str[j];
str[j] = str[i];
str[i] = temp;
}
}
int main(int argc, char **argv) {
/** This sample covers a certain topology and cannot be generalized for any object detection one **/
printf("%sInferenceEngine: \n", info);
printf("%s\n", ie_c_api_version());
char **argv_temp =(char **)malloc(sizeof(char *) * argc);
int i, j;
for (i = 0; i < argc; ++i) {
argv_temp[i] = argv[i];
}
// --------------------------- 1. Parsing and validation of input args ---------------------------------
if (ParseAndCheckCommandLine(argc, argv) < 0) {
return -1;
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- 2. Read input -----------------------------------------------------------
/** This file_paths stores paths to the processed images **/
parseInputFilesArguments(argc, argv_temp);
if (!file_num) {
printf("No suitable images were found\n");
return -1;
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- 3. Load inference engine ------------------------------------------------
printf("%sLoading Inference Engine\n", info);
ie_core_t *core = NULL;
IEStatusCode status = ie_core_create("", &core);
assert(core);
ie_core_versions_t ver;
printf("%sDevice info: \n", info);
ie_core_get_versions(core, device_name, &ver);
for (i = 0; i < ver.num_vers; ++i) {
printf(" %s\n", ver.versions[i].device_name);
printf(" %s version ......... %zu.%zu\n", ver.versions[i].description, ver.versions[i].major, ver.versions[i].minor);
printf(" Build ......... %s\n", ver.versions[i].build_number);
}
ie_core_versions_free(&ver);
if (custom_cpu_library_msg) {
// CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
ie_core_add_extension(core, custom_cpu_library_msg, "CPU");
printf("%sCPU Extension loaded: %s\n", info, custom_cpu_library_msg);
}
if (custom_cldnn_msg) {
// clDNN Extensions are loaded from an .xml description and OpenCL kernel files
ie_config_t cfg = {"CONFIG_FILE", custom_cldnn_msg, NULL};
ie_core_set_config(core, &cfg, "GPU");
printf("%sGPU Extension loaded: %s\n", info, custom_cldnn_msg);
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- 4. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
char *input_weight = (char *)malloc(strlen(input_model) + 1);
strncpy(input_weight, input_model, strlen(input_model)-4);
input_weight[strlen(input_model)-4] = '\0';
strcat(input_weight, ".bin");
printf("%sLoading network files:\n", info);
printf("\t%s\n", input_model);
printf("\t%s\n", input_weight);
ie_network_t *network = NULL;
ie_core_read_network(core, input_model, input_weight, &network);
assert(network);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 5. Prepare input blobs --------------------------------------------------
printf("%sPreparing input blobs\n", info);
/** SSD network has one input and one output **/
size_t input_num = 0;
status = ie_network_get_inputs_number(network, &input_num);
if (input_num != 1 && input_num != 2) {
printf("Sample supports topologies only with 1 or 2 inputs\n");
return -1;
}
/**
* Some networks have SSD-like output format (ending with DetectionOutput layer), but
* having 2 inputs as Faster-RCNN: one for image and one for "image info".
*
* Although object_detection_sample_ssd's main task is to support clean SSD, it could score
* the networks with two inputs as well. For such networks imInfoInputName will contain the "second" input name.
*/
char *imageInputName = NULL, *imInfoInputName = NULL;
size_t input_width = 0, input_height = 0;
/** Stores input image **/
/** Iterating over all input blobs **/
for (i = 0; i < input_num; ++i) {
char *name = NULL;
ie_network_get_input_name(network, i, &name);
dimensions_t input_dim;
ie_network_get_input_dims(network, name, &input_dim);
/** Working with first input tensor that stores image **/
if(input_dim.ranks == 4) {
imageInputName = name;
input_height = input_dim.dims[2];
input_width = input_dim.dims[3];
/** Creating first input blob **/
ie_network_set_input_precision(network, name, U8);
} else if (input_dim.ranks == 2) {
imInfoInputName = name;
ie_network_set_input_precision(network, name, FP32);
if(input_dim.dims[1] != 3 && input_dim.dims[1] != 6) {
printf("Invalid input info. Should be 3 or 6 values length\n");
return -1;
}
}
}
if (imageInputName == NULL) {
ie_network_get_input_name(network, 0, &imageInputName);
dimensions_t input_dim;
ie_network_get_input_dims(network, imageInputName, &input_dim);
input_height = input_dim.dims[2];
input_width = input_dim.dims[3];
}
/** Collect images data **/
c_mat_t *originalImages = (c_mat_t *)malloc(file_num * sizeof(c_mat_t));
c_mat_t *images = (c_mat_t *)malloc(file_num * sizeof(c_mat_t));
int image_num = 0;
for (i = 0; i < file_num; ++i) {
c_mat_t img = {NULL, 0, 0, 0, 0, 0};
if (image_read(file_paths[i], &img) == -1) {
printf("%sImage %s cannot be read!\n", warn, file_paths[i]);
continue;
}
/** Store image data **/
c_mat_t resized_img = {NULL, 0, 0, 0, 0, 0};
if (input_width == img.mat_width && input_height == img.mat_height) {
resized_img.mat_data_size = img.mat_data_size;
resized_img.mat_channels = img.mat_channels;
resized_img.mat_width = img.mat_width;
resized_img.mat_height = img.mat_height;
resized_img.mat_type = img.mat_type;
resized_img.mat_data = malloc(resized_img.mat_data_size);
for (j = 0; j < resized_img.mat_data_size; ++j)
resized_img.mat_data[j] = img.mat_data[j];
} else {
printf("%sImage is resized from (%d, %d) to (%zu, %zu)\n", \
warn, img.mat_width, img.mat_height, input_width, input_height);
image_resize(&img, &resized_img, (int)input_width, (int)input_height);
}
if (resized_img.mat_data) {
originalImages[image_num] = img;
images[image_num] = resized_img;
++image_num;
}
}
if (!image_num) {
printf("Valid input images were not found!\n");
return -1;
}
input_shapes_t shapes;
ie_network_get_input_shapes(network, &shapes);
shapes.shapes[0].shape.dims[0] = image_num;
ie_network_reshape(network, shapes);
ie_network_input_shapes_free(&shapes);
input_shapes_t shapes2;
ie_network_get_input_shapes(network, &shapes2);
size_t batchSize = shapes2.shapes[0].shape.dims[0];
ie_network_input_shapes_free(&shapes2);
printf("%sBatch size is %zu\n", info, batchSize);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 6. Prepare output blobs -------------------------------------------------
printf("%sPreparing output blobs\n", info);
size_t output_num = 0;
ie_network_get_outputs_number(network, &output_num);
if (!output_num) {
printf("Can't find a DetectionOutput layer in the topology\n");
return -1;
}
char *output_name = NULL;
ie_network_get_output_name(network, output_num-1, &output_name);
dimensions_t output_dim;
ie_network_get_output_dims(network, output_name, &output_dim);
if (output_dim.ranks != 4) {
printf("Incorrect output dimensions for SSD model\n");
return -1;
}
const int maxProposalCount = (int)output_dim.dims[2];
const int objectSize = (int)output_dim.dims[3];
if (objectSize != 7) {
printf("Output item should have 7 as a last dimension\n");
return -1;
}
/** Set the precision of output data provided by the user, should be called before load of the network to the device **/
ie_network_set_output_precision(network, output_name, FP32);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 7. Loading model to the device ------------------------------------------
printf("%sLoading model to the device\n", info);
ie_executable_network_t *exe_network = NULL;
if (config_msg) {
ie_config_t * config = parseConfig(config_msg, '#');
ie_core_load_network(core, network, device_name, config, &exe_network);
config_free(config);
} else {
ie_config_t cfg = {NULL, NULL, NULL};
ie_core_load_network(core, network, device_name, &cfg, &exe_network);
}
assert(exe_network);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 8. Create infer request -------------------------------------------------
printf("%sCreate infer request\n", info);
ie_infer_request_t *infer_request = NULL;
ie_exec_network_create_infer_request(exe_network, &infer_request);
assert(infer_request);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 9. Prepare input --------------------------------------------------------
/** Creating input blob **/
ie_blob_t *imageInput = NULL;
ie_infer_request_get_blob(infer_request, imageInputName, &imageInput);
assert(imageInput);
/** Filling input tensor with images. First b channel, then g and r channels **/
dimensions_t input_tensor_dims;
ie_blob_get_dims(imageInput, &input_tensor_dims);
size_t num_channels = input_tensor_dims.dims[1];
size_t image_size = input_tensor_dims.dims[3] * input_tensor_dims.dims[2];
ie_blob_buffer_t blob_buffer;
ie_blob_get_buffer(imageInput, &blob_buffer);
unsigned char *data = (unsigned char *)(blob_buffer.buffer);
/** Iterate over all input images **/
int image_id, pid, ch, k;
for (image_id = 0; image_id < batchSize; ++image_id) {
/** Iterate over all pixel in image (b,g,r) **/
for (pid = 0; pid < image_size; ++pid) {
/** Iterate over all channels **/
for (ch = 0; ch < num_channels; ++ch) {
/** [images stride + channels stride + pixel id ] all in bytes **/
data[image_id * image_size * num_channels + ch * image_size + pid] =
images[image_id].mat_data[pid * num_channels + ch];
}
}
image_free(&images[image_id]);
}
free(images);
if (imInfoInputName != NULL) {
ie_blob_t *input2 = NULL;
ie_infer_request_get_blob(infer_request, imInfoInputName, &input2);
dimensions_t imInfoDim;
ie_blob_get_dims(input2, &imInfoDim);
//Fill input tensor with values
ie_blob_buffer_t info_blob_buffer;
ie_blob_get_buffer(input2, &info_blob_buffer);
float *p = (float *)(info_blob_buffer.buffer);
for (image_id = 0; image_id < batchSize; ++image_id) {
p[image_id * imInfoDim.dims[1] + 0] = (float)input_height;
p[image_id * imInfoDim.dims[1] + 1] = (float)input_width;
for (k = 2; k < imInfoDim.dims[1]; k++) {
p[image_id * imInfoDim.dims[1] + k] = 1.0f; // all scale factors are set to 1.0
}
}
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- 10. Do inference ---------------------------------------------------------
printf("%sStart inference\n", info);
ie_infer_request_infer_async(infer_request);
ie_infer_request_wait(infer_request, -1);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 11. Process output -------------------------------------------------------
printf("%sProcessing output blobs\n", info);
ie_blob_t *output_blob = NULL;
ie_infer_request_get_blob(infer_request, output_name, &output_blob);
assert(output_blob);
ie_blob_buffer_t output_blob_buffer;
ie_blob_get_cbuffer(output_blob, &output_blob_buffer);
const float* detection = (float *)(output_blob_buffer.cbuffer);
int **classes = (int **)malloc(image_num * sizeof(int *));
rectangle_t **boxes = (rectangle_t **)malloc(image_num * sizeof(rectangle_t *));
int *object_num = (int *)malloc(image_num * sizeof(int));
for ( i = 0; i < image_num; ++i) {
classes[i] = (int *)malloc(maxProposalCount * sizeof(int));
boxes[i] = (rectangle_t *)malloc(maxProposalCount * sizeof(rectangle_t));
object_num[i] = 0;
}
/* Each detection has image_id that denotes processed image */
int curProposal;
for (curProposal = 0; curProposal < maxProposalCount; curProposal++) {
image_id = (int)(detection[curProposal * objectSize + 0]);
if (image_id < 0) {
break;
}
float confidence = detection[curProposal * objectSize + 2];
int label = (int)(detection[curProposal * objectSize + 1]);
int xmin = (int)(detection[curProposal * objectSize + 3] * originalImages[image_id].mat_width);
int ymin = (int)(detection[curProposal * objectSize + 4] * originalImages[image_id].mat_height);
int xmax = (int)(detection[curProposal * objectSize + 5] * originalImages[image_id].mat_width);
int ymax = (int)(detection[curProposal * objectSize + 6] * originalImages[image_id].mat_height);
printf("[%d, %d] element, prob = %f (%d, %d)-(%d, %d) batch id : %d", \
curProposal, label, confidence, xmin, ymin, xmax, ymax, image_id);
if (confidence > 0.5) {
/** Drawing only objects with >50% probability **/
classes[image_id][object_num[image_id]] = label;
boxes[image_id][object_num[image_id]].x_min = xmin;
boxes[image_id][object_num[image_id]].y_min = ymin;
boxes[image_id][object_num[image_id]].rect_width = xmax - xmin;
boxes[image_id][object_num[image_id]].rect_height = ymax - ymin;
printf(" WILL BE PRINTED!");
++object_num[image_id];
}
printf("\n");
}
/** Adds rectangles to the image and save **/
int batch_id;
for (batch_id = 0; batch_id < batchSize; ++batch_id) {
if (object_num[batch_id] > 0) {
image_add_rectangles(&originalImages[batch_id], boxes[batch_id], classes[batch_id], object_num[batch_id], 2);
}
const char *out = "out_";
char str_num[16] = {0};
int2str(str_num, batch_id);
char *img_path = (char *)malloc(strlen(out) + strlen(str_num) + strlen(".bmp") + 1);
strcpy(img_path, out);
strcat(img_path, str_num);
strcat(img_path, ".bmp");
image_save(img_path, &originalImages[batch_id]);
printf("%sImage %s created!\n", info, img_path);
free(img_path);
image_free(&originalImages[batch_id]);
}
free(originalImages);
// -----------------------------------------------------------------------------------------------------
printf("%sExecution successful\n", info);
for (i = 0; i < image_num; ++i) {
free(classes[i]);
free(boxes[i]);
}
free(classes);
free(boxes);
free(object_num);
ie_infer_request_free(&infer_request);
ie_exec_network_free(&exe_network);
ie_network_free(&network);
ie_core_free(&core);
ie_network_name_free(&imageInputName);
ie_network_name_free(&imInfoInputName);
ie_network_name_free(&output_name);
free(input_weight);
free(argv_temp);
return 0;
}

View File

@ -0,0 +1,105 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/// @brief message for help argument
static const char *help_message = "Print a usage message.";
/// @brief message for images argument
static const char *image_message = "Required. Path to an .bmp image.";
/// @brief message for model argument
static const char *model_message = "Required. Path to an .xml file with a trained model.";
/// @brief message for plugin argument
static const char *plugin_message = "Plugin name. For example MKLDNNPlugin. If this parameter is specified, " \
"the sample will look for this plugin only";
/// @brief message for assigning cnn calculation to device
static const char *target_device_message = "Optional. Specify the target device to infer on (the list of available devices is shown below). " \
"Default value is CPU. Use \"-d HETERO:<comma-separated_devices_list>\" format to specify HETERO plugin. " \
"Sample will look for a suitable plugin for device specified";
/// @brief message for clDNN custom kernels desc
static const char *custom_cldnn_message = "Required for GPU custom kernels. "\
"Absolute path to the .xml file with the kernels descriptions.";
/// @brief message for user library argument
static const char *custom_cpu_library_message = "Required for CPU custom layers. " \
"Absolute path to a shared library with the kernels implementations.";
/// @brief message for config argument
static const char *config_message = "Path to the configuration file (key/value pairs, one pair per line).";
/**
* \brief This function shows a help message
*/
static void showUsage() {
printf("\nobject_detection_sample_ssd [OPTION]\n");
printf("Options:\n\n");
printf(" -h %s\n", help_message);
printf(" -i \"<path>\" %s\n", image_message);
printf(" -m \"<path>\" %s\n", model_message);
printf(" -l \"<absolute_path>\" %s\n", custom_cpu_library_message);
printf(" Or\n");
printf(" -c \"<absolute_path>\" %s\n", custom_cldnn_message);
printf(" -d \"<device>\" %s\n", target_device_message);
printf(" -g %s\n", config_message);
}
int opterr = 1;
int optind = 1;
int optopt;
char *optarg;
#define ERR(s, c) if(opterr){\
fputs(argv[0], stderr);\
fputs(s, stderr);\
fputc('\'', stderr);\
fputc(c, stderr);\
fputs("\'\n", stderr);}
static int getopt(int argc, char **argv, char *opts) {
static int sp = 1;
register int c = 0;
register char *cp = NULL;
if (sp == 1) {
if(optind >= argc || argv[optind][0] != '-' || argv[optind][1] == '\0')
return -1;
else if(strcmp(argv[optind], "--") == 0) {
optind++;
return -1;
}
optopt = c = argv[optind][sp];
if(c == ':' || (cp = strchr(opts, c)) == 0) {
ERR(": unrecognized option -- ", c);
if(argv[optind][++sp] == '\0') {
optind++;
sp = 1;
}
return('?');
}
if(*++cp == ':') {
if(argv[optind][sp+1] != '\0')
optarg = &argv[optind++][sp+1];
else if(++optind >= argc) {
ERR(": option requires an argument -- ", c);
sp = 1;
return('?');
} else
optarg = argv[optind++];
sp = 1;
} else {
if(argv[optind][++sp] == '\0') {
sp = 1;
optind++;
}
optarg = NULL;
}
}
return(c);
}

View File

@ -0,0 +1,34 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME inference_engine_c_api)
file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB HEADERS ${InferenceEngine_C_API_SOURCE_DIR}/include/*.h)
# create library
add_library(${TARGET_NAME} SHARED ${HEADERS} ${SOURCES})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine)
target_include_directories(${TARGET_NAME} PUBLIC "${InferenceEngine_C_API_SOURCE_DIR}/include")
add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
# export
export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/targets.cmake")
# install
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH}
ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH}
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH}
COMPONENT core)
install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/
DESTINATION ${IE_CPACK_IE_DIR}/include/
COMPONENT core)

File diff suppressed because it is too large

View File

@ -1,9 +1,12 @@
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Defines the CMake commands/policies
cmake_minimum_required (VERSION 3.3)
# Set the project name
project (ie_python_api)
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR}/cmake)
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}/cmake")
string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH)
if(ARCH STREQUAL "x86_64" OR ARCH STREQUAL "amd64") # Windows detects Intel's 64-bit CPU as AMD64
@ -12,45 +15,48 @@ elseif(ARCH STREQUAL "i386")
set(ARCH ia32)
endif()
# in case of independent python api build (out of Inference Engine root Cmake)
if (NOT DEFINED IE_MAIN_SOURCE_DIR)
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used")
set(CMAKE_BUILD_TYPE "Release")
endif()
message(STATUS "BUILD_CONFIGURATION: ${CMAKE_BUILD_TYPE}")
if(ENABLE_NGRAPH)
add_definitions(-DENABLE_NGRAPH)
endif()
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/bin/${ARCH})
if(NOT(WIN32))
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE})
endif()
if(DEFINED IE_MAIN_SOURCE_DIR)
set(InferenceEngine_LIBRARIES inference_engine)
else()
if (UNIX OR APPLE)
# cython generated files requires public visibility. Force visibility required.
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fvisibility=default")
endif()
find_package(InferenceEngineDeveloperPackage REQUIRED)
endif()
if(UNIX)
# cython generated files requires public visibility. Force visibility required.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
endif()
include (UseCython)
if (PYTHONINTERP_FOUND)
set (PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR})
if(PYTHONINTERP_FOUND)
set(PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR})
else()
message(FATAL_ERROR "Python Interpretator was not found!")
endif()
if(WIN32)
set (PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/${PYTHON_VERSION}/openvino)
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/${PYTHON_VERSION}/openvino)
else()
set (PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/${PYTHON_VERSION}/openvino)
endif()
if(DEFINED IE_MAIN_SOURCE_DIR)
find_package(InferenceEngine REQUIRED)
else()
find_package(InferenceEngineDeveloperPackage REQUIRED)
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/${PYTHON_VERSION}/openvino)
endif()
set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory (src/openvino/inference_engine)
add_subdirectory (src/openvino/tools/statistics_collector)
# install
ie_cpack_add_component(${PYTHON_VERSION} REQUIRED)
install(FILES requirements.txt
DESTINATION python/${PYTHON_VERSION}
COMPONENT ${PYTHON_VERSION})
install(PROGRAMS src/openvino/__init__.py
DESTINATION python/${PYTHON_VERSION}/openvino
COMPONENT ${PYTHON_VERSION})
ie_cpack(${PYTHON_VERSION})

View File

@ -1,4 +1,4 @@
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@ -46,7 +46,7 @@
#
# See also FindCython.cmake
# Copyright (C) 2018-2019 Intel Corporation
# Copyright (C) 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@ -1,18 +1,15 @@
# Overview of Inference Engine Python* API
> **NOTE:** This is a preview version of the Inference Engine Python\* API for evaluation purposes only.
> Module structure and API itself may be changed in future releases.
This API provides a simplified interface for Inference Engine functionality that allows you to:
This API provides a simplified interface for Inference Engine functionality that allows to:
* handle the models
* load and configure Inference Engine plugins based on device names
* perform inference in synchronous and asynchronous modes with arbitrary number of infer requests (the number of infer requests may be limited by target device capabilities)
* Handle the models
* Load and configure Inference Engine plugins based on device names
* Perform inference in synchronous and asynchronous modes with arbitrary number of infer requests (the number of infer requests may be limited by target device capabilities)
## Supported OSes
Currently the Inference Engine Python\* API is supported on Ubuntu\* 16.04 and 18.04, Windows\* 10, macOS\* 10.x and
CentOS\* 7.3 OSes.
Inference Engine Python\* API is supported on Ubuntu\* 16.04 and 18.04, CentOS\* 7.3 OSes, Raspbian\* 9, Windows\* 10
and macOS\* 10.x.
Supported Python* versions:
| Operating System | Supported Python\* versions: |
@ -22,753 +19,21 @@ Supported Python* versions:
| Windows\* 10 | 3.5, 3.6, 3.7 |
| CentOS\* 7.3 | 3.4, 3.5, 3.6, 3.7 |
| macOS\* 10.x | 3.5, 3.6, 3.7 |
| Raspbian\* 9 | 3.5, 3.6, 3.7 |
## Setting Up the Environment
## Set Up the Environment
To configure the environment for the Inference Engine Python\* API, run:
* On Ubuntu\* 16.04 or 18.04, CentOS\* 7.4 or macOS\* 10.x: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Ubuntu\* 16.04 or 18.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On CentOS\* 7.4: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On macOS\* 10.x: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Raspbian\* 9: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Windows\* 10: `call <INSTALL_DIR>\deployment_tools\inference_engine\python_api\setenv.bat`
The script automatically detects the latest installed Python\* version and configures the required environment if the version is supported.
If you want to use a certain version of Python\*, set the environment variable `PYTHONPATH=<INSTALL_DIR>/deployment_tools/inference_engine/python_api/<desired_python_version>`
after running the environment configuration script.
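To quickly check that the environment is configured, the following minimal sketch (assuming the setup script above has been run in the current shell) prints the devices visible to the Inference Engine:
```py
from openvino.inference_engine import IECore

ie = IECore()
print("Available devices:", ie.available_devices)
```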
## <a name="iecore-class"></a>IECore
This class represents an Inference Engine entity and allows you to manipulate plugins using unified interfaces.
### <a name="iecore-constructor"></a>Class Constructor
`__init__(xml_config_file: str = "")`
* Parameters:
* `xml_config_file` - A full path to `.xml` file containing plugins configuration.
If the parameter is not specified, the default configuration is handled automatically.
* Usage examples:
* Initialize an `IECore` object with default configuration:
```py
ie = IECore()
```
* Initialize an `IECore` object with a custom configuration location specified:
```py
ie = IECore("/localdisk/plugins/my_custom_cfg.xml")
```
The `.xml` file has the following structure:
```xml
<ie>
<plugins>
<plugin name="" location="" optional="yes/no">
<extensions>
<extension location=""/>
</extensions>
<properties>
<property key="" value=""/>
</properties>
</plugin>
</plugins>
</ie>
```
### <a name="iecore-attributes"></a>Class Attributes
* `available_devices` - A list of available devices. The devices are returned as \[CPU, FPGA.0, FPGA.1, MYRIAD\].
If there is more than one device of a specific type, they are all listed, each followed by a dot and a number.
### <a name="iecore-methods"></a>Instance Methods
* `get_versions(device_name: str)`
* Description: Returns a `namedtuple` object with versions of the plugin specified
* Parameters:
* `device_name` - Name of the registered plugin
* Return value:
Dictionary mapping a plugin name to a `Versions` `namedtuple` object with the following fields:
* `major` - major plugin integer version
* `minor` - minor plugin integer version
* `build_number` - plugin build number string
* `description` - plugin description string
* Usage example:
```py
ie = IECore()
ver = ie.get_versions("CPU")["CPU"]
print("{descr}: {maj}.{min}.{num}".format(descr=ver.description, maj=ver.major, min=ver.minor, num=ver.build_number))
```
* `load_network(network: IENetwork, device_name: str, config=None, num_requests: int=1)`
* Description: Loads a network that was read from the Intermediate Representation (IR) to the plugin with the specified device name and creates an `ExecutableNetwork` object from the network.
You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware
resources).
* Parameters:
* `network` - A valid `IENetwork` instance
* `device_name` - A device name of a target plugin
* `num_requests` - A positive integer value of infer requests to be created. Number of infer requests is limited
by device capabilities.
* `config` - A dictionary of plugin configuration keys and their values
* Return value: An <a href="#executablenetwork">`ExecutableNetwork`</a> object
* Usage example:
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
ie = IECore()
exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)
```
* `query_network(network: IENetwork, device_name: str, config=None)`
* Description:
Queries the plugin with the specified device name about which network layers are supported in the current configuration.
Note that layer support depends on the plugin configuration and loaded extensions.
* Parameters:
* `network` - A valid `IENetwork` instance
* `device_name` - A device name of a target plugin
* `config` - A dictionary of plugin configuration keys and their values
* Return value: A dictionary mapping layers and device names on which they are supported
* Usage example:
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
ie = IECore()
layers_map = ie.query_network(network=net, device_name="HETERO:GPU,CPU")
```
* `set_config(config: dict, device_name: str)`
* Description: Sets a configuration for a plugin
* Parameters:
* `config` - a dictionary of configuration parameters as keys and their values
* `device_name` - a device name of a target plugin
* Return value: None
* Usage examples:
See the `set_affinity` method of the <a href="#ienetwork-class">`IENetwork` class</a>.
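* Additional usage example (a minimal sketch; `"PERF_COUNT"` is an assumed configuration key for the CPU plugin, and the set of supported keys depends on the target plugin):
```py
ie = IECore()
# Enable per-layer performance counters for the CPU plugin
ie.set_config({"PERF_COUNT": "YES"}, device_name="CPU")
```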
* `register_plugin(plugin_name: str, device_name: str = "")`
* Description: Registers a new device and a plugin which implement this device inside Inference Engine.
* Parameters:
* `plugin_name` - A name of a plugin. Depending on the platform, `plugin_name` is wrapped with a shared
library suffix and a prefix to identify the full library name
* `device_name` - A target device name for the plugin. If not specified, the method registers
the plugin with the default name.
* Return value: None
* Usage examples:
```py
ie = IECore()
ie.register_plugin(plugin_name="MKLDNNPlugin", device_name="MY_NEW_PLUGIN")
```
* `register_plugins(xml_config_file: str)`
* Description: Registers plugins specified in an `.xml` configuration file
* Parameters:
* `xml_config_file` - A full path to `.xml` file containing plugins configuration
* Return value: None
* Usage examples:
```py
ie = IECore()
ie.register_plugins("/localdisk/plugins/my_custom_cfg.xml")
```
* `unregister_plugin(device_name: str = "")`
* Description: Unregisters a plugin with a specified device name
* Parameters:
* `device_name` - A device name of the plugin to unregister
* Return value: None
* Usage examples:
```py
ie = IECore()
ie.register_plugin(plugin_name="MKLDNNPlugin", device_name="MY_NEW_CPU")
ie.unregister_plugin(device_name="MY_NEW_CPU")
```
* `add_extension(extension_path: str, device_name: str)`
* Description: Loads extension library to the plugin with a specified device name
* Parameters:
* `extension_path` - Path to the extensions library file to load to a plugin
* `device_name` - A device name of a plugin to load the extensions to
* Return value: None
* Usage examples:
```py
ie = IECore()
ie.add_extension(extension_path="/some_dir/libcpu_extension_avx2.so", device_name="CPU")
```
* `get_metric(device_name: str, metric_name: str)`
* Description: Gets a general runtime metric for dedicated hardware. Enables requesting common device properties
that are <a href="#executablenetwork">`ExecutableNetwork`</a>-agnostic, such as device name,
temperature, and other device-specific values.
* Parameters:
* device_name - A name of a device to get a metric value.
* metric_name - A metric name to request.
* Return value: A metric value corresponding to a metric key.
* Usage example
```py
ie = IECore()
ie.get_metric(metric_name="SUPPORTED_METRICS", device_name="CPU")
```
* `get_config(device_name: str, metric_name: str)`
* Description: Gets a configuration dedicated to device behavior. The method extracts information
that can be set via the SetConfig method.
* Parameters:
* device_name - A name of a device to get a metric value.
* metric_name - A metric name to request.
* Return value: A metric value corresponding to a metric key.
* Usage example
```py
ie = IECore()
ie.get_config(metric_name="CPU_BIND_THREAD", device_name="CPU")
```
## <a name="ienetlayer-class"></a>IENetLayer
This class stores the main information about a layer and allows you to modify some layer parameters
### <a name="ienetlayer-attributes"></a>Class Attributes
* `name` - Name of the layer
* `type`- Layer type
* `precision` - Layer base operating precision. Provides getter and setter interfaces.
* `layout` - Returns the layout of the layer
* `shape` - Returns the shape of the layer as a list
* `parents` - Returns a list, which contains names of layers preceding this layer
* `children` - Returns a list, which contains names of layers following this layer
* `affinity` - Layer affinity set by user or a default affinity set by the `IEPlugin.set_initial_affinity()` method.
The affinity attribute provides getter and setter interfaces, so the layer affinity can be modified directly.
For example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="HETERO:FPGA,CPU")
>>> plugin.set_config({"TARGET_FALLBACK": "HETERO:FPGA,CPU"})
>>> plugin.set_initial_affinity(net)
>>> for l in net.layers.values():
... if l.type == "Convolution":
... l.affinity = "CPU"
```
To correctly set affinity for the network, you must first initialize and properly configure the HETERO plugin.
The `set_config({"TARGET_FALLBACK": "HETERO:FPGA,CPU"})` call configures the plugin fallback devices and their order.
The `plugin.set_initial_affinity(net)` call sets the affinity parameter of model layers according to their support
on the specified devices.
After the default affinity is set by the plugin, override the default values by setting affinity manually as
described in the example above.
To understand how default and non-default affinities are set:
1. Call `net.layers` right after model loading and check that the layer affinity parameter is empty.
2. Call `plugin.set_initial_affinity(net)`.
3. Call `net.layers` and check the layer affinity parameters to see how the plugin set the default affinity.
4. Set layer affinity as described above (a short sketch follows this list).
5. Call `net.layers` again and check the layer affinity parameters to see how they changed after manual affinity
setting.
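A minimal sketch of these steps (the layer name `"conv1"` is hypothetical; use a layer name from your own model):
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
plugin = IEPlugin(device="HETERO:FPGA,CPU")
plugin.set_config({"TARGET_FALLBACK": "HETERO:FPGA,CPU"})
print([l.affinity for l in net.layers.values()][:5])  # affinities are empty right after loading
plugin.set_initial_affinity(net)
print([l.affinity for l in net.layers.values()][:5])  # default affinities assigned by the plugin
net.layers["conv1"].affinity = "CPU"                  # "conv1" is a hypothetical layer name
```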
* `weights` - Dictionary with layer weights, biases, or custom blobs, if any
* `params` - Layer-specific parameters. Provides getter and setter interfaces to get and modify layer parameters
(see the sketch below).
Note that some modifications can be ignored and/or overwritten by the target plugin. For example, a modified
convolution kernel size will be reflected in the layer parameters, but the plugin will ignore it and
use the initial kernel size.
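A minimal sketch that inspects layer parameters and weights (the `"kernel"` parameter key is an assumption and depends on the layer type and IR version):
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
for name, layer in net.layers.items():
    if layer.type == "Convolution":
        # "kernel" is an assumed parameter key; available keys depend on the layer
        print(name, layer.params.get("kernel"), list(layer.weights.keys()))
```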
## <a name="ienetwork-class"></a>IENetwork
This class contains the information about the network model read from the IR and allows you to manipulate some model parameters, such as
layers affinity and output layers.
### <a name="ienetwork-constructor"></a>Class Constructor
`__init__(model: [bytes, str], weights: [bytes, str], init_from_buffer: bool=False, ngraph_compatibility: bool=False)`
* Parameters:
* `model` - An `.xml` file of the IR. Depending on `init_from_buffer` value, can be a string path or bytes with file content.
* `weights` - A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or bytes with file content.
* `init_from_buffer` - Defines how the `model` and `weights` attributes are interpreted.
If `True`, the attributes are interpreted as Python `bytes` objects with the content of the .xml and .bin files of the IR.
If `False`, they are interpreted as string paths to the .xml and .bin files.
* Usage examples:
* Initializing `IENetwork` object from IR files:
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
```
* Initializing an `IENetwork` object from bytes with the content of IR files:
```py
with open(path_to_bin_file, 'rb') as f:
bin = f.read()
with open(path_to_xml_file, 'rb') as f:
xml = f.read()
net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
```
### <a name="ienetwork-attributes"></a>Class Attributes
* `name` - Name of the loaded network
* `inputs` - A dictionary that maps input layer names to <a href="#inputinfo-class">InputInfo</a> objects.
For example, to get a shape of the input layer:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.inputs
{'data': <inference_engine.ie_api.InputInfo object at 0x7efe042dedd8>}
>>> net.inputs['data'].shape
[1, 3, 224, 224]
```
* `outputs` - A dictionary that maps output layer names to <a href="#outputinfo-class">OutputInfo</a> objects
For example, to get a shape of the output layer:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.outputs
{'prob': <inference_engine.ie_api.OutputInfo object at 0x7efe03ab95d0>}
>>> net.outputs['prob'].shape
[1, 1000]
```
* `batch_size` - Batch size of the network. Provides getter and setter interfaces to get and modify the
network batch size. For example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.batch_size
1
>>> net.batch_size = 4
>>> net.batch_size
4
>>> net.inputs['data'].shape
[4, 3, 224, 224]
```
* `layers` - Returns a dictionary that maps network layer names to <a href="#ienetlayer-class">`IENetLayer`</a>
objects containing layer properties in topological order. For example, to list all network layers:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.layers
{'conv0': <inference_engine.ie_api.IENetLayer object at 0x7f3a4c102370>
...
}
```
* `stats` - Returns a `LayersStatsMap` object containing a dictionary that maps network layer names to calibration statistics
represented by <a href="#layerstats-class">`LayerStats`</a> objects.
The `LayersStatsMap` class inherits from the built-in Python `dict` and overrides the default `update()` method to allow
setting or modifying layer calibration statistics.
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.stats.update({
"conv1_2d" : LayerStats(min=(-25, -1, 0), max=(63, 124, 70)),
"conv2_2d" : LayerStats(min=(-5, -1, 0, 1, -7, 2), max=(63, 124, 70, 174, 99, 106)),
})
```
For more details about low-precision inference, refer to the "Low-Precision 8-bit Integer Inference"
section in the Inference Engine Developers Guide documentation.
### <a name="ienetwork-methods"></a>Class Methods
* `from_ir(model: str, weights: str)`
> **NOTE:** The function is deprecated. Please use the `IENetwork()` class constructor to create valid instance of `IENetwork`.
* Description: Reads the model from the `.xml` and `.bin` files of the IR.
* Parameters:
* model - Path to `.xml` file of the IR
* weights - Path to `.bin` file of the IR
* Return value: An instance of the `IENetwork` class
* Usage example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net
<inference_engine.ie_api.IENetwork object at 0x7fd7dbce54b0>
```
### <a name="ienetwork-inference-methods"></a>Instance Methods
* `add_outputs(outputs)`
* Description: Marks any intermediate layer as an output layer to retrieve the inference results
from the specified layers.
* Parameters:
* `outputs` - List of layers to be set as model outputs. The list can contain strings with layer names to be set
as outputs or tuples with a layer name as the first element and an output port ID as the second element.
To set a single layer as an output, a string or a tuple with one layer can be provided.
* Return value: None
* Usage example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.add_outputs(["conv5_1/dwise", "conv2_1/expand"])
>>> net.outputs
['prob', 'conv5_1/dwise', 'conv2_1/expand']
```
> **NOTE**: The last layers (nodes without successors in graph representation of the model) are set as output
> by default. In the case above, `prob` layer is a default output and `conv5_1/dwise`, `conv2_1/expand` are user-defined
> outputs.
* `reshape(input_shapes: dict)`
* Description: Reshapes the network to change spatial dimensions, batch size, or any dimension.
> **NOTE:** Before using this method, make sure that the target shape is applicable for the network. Changing the network shape to an arbitrary value may lead to unpredictable behaviour.
* Parameters:
* `input_shapes` - A dictionary that maps input layer names to tuples with the target shape
* Return value: None
* Usage example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> input_layer = next(iter(net.inputs))
>>> n, c, h, w = net.inputs[input_layer].shape
>>> net.reshape({input_layer: (n, c, h*2, w*2)})
```
* `serialize(path_to_xml, path_to_bin)`
* Description:
Serializes the network and stores it in files.
* Parameters:
* `path_to_xml` - Path to a file, where a serialized model will be stored
* `path_to_bin` - Path to a file, where serialized weights will be stored
* Return value:
None
* Usage example:
```py
>>> net = IENetwork(model=path_to_model, weights=path_to_weights)
>>> net.serialize(path_to_xml, path_to_bin)
```
## <a name="layerstats-class"></a>LayerStats
Layer calibration statistic container.
### <a name="layerstats-constructor"></a>Class Constructor
* `__init__(min: tuple = (), max: tuple = ())`
* Parameters:
* `min` - Tuple with per-channel minimum layer activation values
* `max` - Tuple with per-channel maximum layer activation values
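* Usage example (a minimal sketch; it assumes `LayerStats` is importable from `openvino.inference_engine`):
```py
from openvino.inference_engine import IENetwork, LayerStats  # assumed import location

net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
net.stats.update({"conv1_2d": LayerStats(min=(-25, -1, 0), max=(63, 124, 70))})
```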
## <a name="inputinfo-class"></a>InputInfo
This class contains the information about the network input layers
### <a name="inputinfo-attributes"></a>Class Attributes
* `precision` - Precision of the input data provided by the user. Provides setter and getter interfaces
to get and modify the input layer precision.
List of applicable precisions: FP32, FP16, I32, I16, I8, U32, U16
> **NOTE**: Support of any calculation precision depends on the target plugin.
* `layout` - Layout of the input data provided by user. Provides setter and getter interfaces
to get and modify input layer layout.
List of applicable layouts: NCHW, NHWC, OIHW, C, CHW, HW, NC, CN, BLOCKED
* `shape` - input layer data shape
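A minimal sketch that reads and modifies the input layer settings:
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
input_blob = next(iter(net.inputs))
net.inputs[input_blob].precision = "FP32"
net.inputs[input_blob].layout = "NCHW"
print(net.inputs[input_blob].shape)
```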
## <a name="outputinfo-class"></a>OutputInfo
This class contains the information about the network output layers
### <a name="outputinfo-attributes"></a>Class Attributes
* `precision` - Precision of the output data. Provides setter and getter interfaces
to get and modify output layer precision.
* `layout` - Layout of the output data
* `shape` - Output layer data shape
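A minimal sketch that inspects the output layers:
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
for name, out in net.outputs.items():
    print(name, out.precision, out.shape)
```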
## <a name="ieplugin-class"></a>IEPlugin Class
This class is the main plugin interface and serves to initialize and configure the plugin.
### <a name="ieplugin-constructor"></a>Class Constructor
* `__init__(device: str, plugin_dirs=None)`
* Parameters:
* `device` - Target device name. Supported devices: CPU, GPU, FPGA, MYRIAD, HETERO
* `plugin_dirs` - List of paths to plugin directories
### <a name="ieplugin-properties"></a>Properties
* `device` - a name of the device that was specified to initialize IEPlugin
* `version` - a version of the plugin
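A minimal sketch of plugin initialization:
```py
plugin = IEPlugin(device="CPU", plugin_dirs=None)
print(plugin.device, plugin.version)
```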
### <a name="ieplugin-instance-methods"></a>Instance Methods
* `load(network: IENetwork, num_requests: int=1, config=None)`
* Description: Loads a network that was read from the IR to the plugin and creates an executable network from a network object.
You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware
resources).
* Parameters:
* `network` - A valid `IENetwork` instance
* `num_requests` - A positive integer value of infer requests to be created. Number of infer requests may be limited
by device capabilities.
* `config` - A dictionary of plugin configuration keys and their values
* Return value: None
* Usage example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net
<inference_engine.ie_api.ExecutableNetwork object at 0x7f5140bbcd38>
```
* `set_initial_affinity(net: IENetwork)`
* Description: Sets initial affinity for model layers according to the HETERO plugin logic. Applicable only if
`IEPlugin` was initialized for a HETERO device.
* Parameters:
* `net` - A valid instance of IENetwork
* Return value: None
* Usage example:
See `affinity` attribute of the `IENetLayer` class.
* `add_cpu_extension(extension_path: str)`
* Description: Loads extensions library to the plugin. Applicable only for a CPU device and a HETERO device with CPU
* Parameters:
* `extension_path` - A full path to CPU extensions library
* Return value: None
* Usage example:
```py
>>> plugin = IEPlugin(device="CPU")
>>> plugin.add_cpu_extension(ext_lib_path)
```
* `set_config(config: dict)`
* Description: Sets a configuration for the plugin. Refer to `SetConfig()` in Inference Engine C++ documentation for acceptable
keys and values list.
* Parameters:
* `config` - A dictionary of keys and values of acceptable configuration parameters
* Return value: None
* Usage examples:
See `set_affinity` method of the `IENetwork` class.
* `get_supported_layers(net: IENetwork)`
* Description: Returns the set of layers supported by the plugin. Note that for the CPU plugin, support of
a layer may depend on extensions loaded by the `add_cpu_extension()` method.
* Parameters:
* `net` - A valid instance of IENetwork
* Return value: Set of layers supported by the plugin
* Usage example: See the `affinity` attribute of the `IENetLayer` class, or the sketch below.
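A minimal sketch that checks which layers of a model are not supported by the plugin:
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
plugin = IEPlugin(device="CPU")
supported_layers = plugin.get_supported_layers(net)
not_supported = set(net.layers.keys()) - supported_layers
if not_supported:
    print("Layers not supported by the {} plugin: {}".format(plugin.device, ", ".join(not_supported)))
```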
## <a name="executablenetwork"></a>ExecutableNetwork Class
This class represents a network instance loaded to plugin and ready for inference.
### <a name="executablenetwork-contructor"></a>Class Constructor
There is no explicit class constructor. To make a valid instance of `ExecutableNetwork`, use `load()` method of the `IEPlugin` class.
### <a name="executablenetwork-attributes"></a>Class Attributes
* `requests` - A tuple of `InferRequest` instances
* Usage example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=3)
>>> exec_net.requests
(<inference_engine.ie_api.InferRequest object at 0x7f66f56c57e0>,
<inference_engine.ie_api.InferRequest object at 0x7f66f56c58b8>,
<inference_engine.ie_api.InferRequest object at 0x7f66f56c5900>)
```
### <a name="executablenetwork-methods"></a>Instance Methods
* `infer(inputs=None)`
* Description:
Starts synchronous inference for the first infer request of the executable network and returns output data.
Wraps `infer()` method of the `InferRequest` class
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value:
A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer
* Usage example:
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> res = exec_net.infer({'data': img})
>>> res
{'prob': array([[[[2.83426580e-08]],
[[2.40166020e-08]],
[[1.29469613e-09]],
[[2.95946148e-08]]
......
]])}
```
For illustration of input data preparation, please see the samples (for example, `classification_sample.py`).
* `start_async(request_id, inputs=None)`
* Description: Starts asynchronous inference for specified infer request.
Wraps `async_infer()` method of the `InferRequest` class.
* Parameters:
* `request_id` - Index of infer request to start inference
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value: A handler of specified infer request, which is an instance of the `InferRequest` class.
* Usage example:
```py
>>> infer_request_handle = exec_net.start_async(request_id=0, inputs={input_blob: image})
>>> infer_status = infer_request_handle.wait()
>>> res = infer_request_handle.outputs[out_blob]
```
For more details about infer requests processing, see `classification_sample_async.py` (simplified case) and
`object_detection_demo_ssd_async.py` (real asynchronous use case) samples.
* `get_exec_graph_info()`
* Description: Gets executable graph information from a device
* Parameters: None
* Return value: An instance of <a href="#ienetwork-class">`IENetwork`</a>
* Usage_example:
```py
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
plugin = IEPlugin(device="CPU")
exec_net = plugin.load(network=net, num_requests=2)
exec_graph = exec_net.get_exec_graph_info()
```
* `get_metric(metric_name: str)`
* Description: Gets a general runtime metric for an executable network. It can be the network name, the actual device ID on
which the executable network is running, or other properties that cannot be changed dynamically.
* Parameters:
* metric_name - A metric name to request.
* Return value: A metric value corresponding to a metric key.
* Usage example
```py
ie = IECore()
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
exec_net = ie.load_network(net, "CPU")
exec_net.get_metric("NETWORK_NAME")
```
* `get_config(config_name: str)`
* Description: Gets a configuration value for the current executable network. The method extracts information
that affects executable network execution.
* Parameters:
* config_name - A configuration parameter name to request.
* Return value: A configuration value corresponding to a configuration key.
* Usage example
```py
ie = IECore()
net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
exec_net = ie.load_network(net, "CPU")
exec_net.get_config("DEVICE_ID")
```
## <a name="inferrequest"></a>InferRequest Class
This class provides an interface to infer requests of `ExecutableNetwork` and serves to handle infer requests execution
and to set and get output data.
### <a name="inferrequest-constructor"></a>Class Constructor
There is no explicit class constructor. To make a valid `InferRequest` instance, use the `load()` method of the `IEPlugin`
class with a specified number of requests to get an `ExecutableNetwork` instance, which stores the infer requests.
### <a name="inferrequest-attributes"></a>Class Attributes
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* `outputs` - A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer
Usage example:
```py
>>> exec_net.requests[0].inputs['data'][:] = image
>>> exec_net.requests[0].infer()
>>> res = exec_net.requests[0].outputs['prob']
>>> np.flip(np.sort(np.squeeze(res)),0)
array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
2.26027006e-03, 2.12283316e-03 ...])
```
### <a name="inferrequest-methods"></a>Instance Methods
It is not recommended to run inference directly on `InferRequest` instance.
To run inference, please use simplified methods `infer()` and `start_async()` of `ExecutableNetwork`.
* `infer(inputs=None)`
* Description: Starts synchronous inference of the infer request and fills the outputs array
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value: None
* Usage example:
```py
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net.requests[0].infer({input_blob: image})
>>> res = exec_net.requests[0].outputs['prob']
>>> np.flip(np.sort(np.squeeze(res)),0)
array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
2.26027006e-03, 2.12283316e-03 ...])
```
* `async_infer(inputs=None)`
* Description: Starts asynchronous inference of the infer request and fills the outputs array
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value: None
* Usage example:
```py
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net.requests[0].async_infer({input_blob: image})
>>> exec_net.requests[0].wait()
>>> res = exec_net.requests[0].outputs['prob']
>>> np.flip(np.sort(np.squeeze(res)),0)
array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
2.26027006e-03, 2.12283316e-03 ...])
```
* `wait(timeout=-1)`
* Description: Waits for the result to become available. Blocks until specified timeout elapses or the result
becomes available, whichever comes first.
> **NOTE:** There are special values of the timeout parameter:
* 0 - Immediately returns the inference status. It does not block or interrupt execution.
For the meaning of the statuses, refer to InferenceEngine::StatusCode in the Inference Engine C++ documentation.
* -1 - Waits until the inference result becomes available (default value)
* Parameters:
* `timeout` - Time to wait in milliseconds or special (0, -1) cases described above.
If not specified, `timeout` value is set to -1 by default.
* Usage example: See the `async_infer()` method of the `InferRequest` class, or the sketch below.
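* A minimal sketch of waiting for an asynchronous request (`plugin`, `net`, `input_blob`, `out_blob`, and `image` are assumed to be defined as in the examples above; the status value 0 corresponds to InferenceEngine::StatusCode::OK):
```py
exec_net = plugin.load(network=net, num_requests=2)
exec_net.requests[0].async_infer({input_blob: image})
status = exec_net.requests[0].wait(timeout=-1)  # block until the result is available
if status == 0:  # 0 corresponds to InferenceEngine::StatusCode::OK
    res = exec_net.requests[0].outputs[out_blob]
```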
* `get_perf_counts()`
* Description: Queries per-layer performance measures to get feedback on which layer is the most time-consuming.
> **NOTE**: Performance counters data and format depend on the plugin
* Parameters: None
* Usage example:
```py
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net.requests[0].infer({input_blob: image})
>>> exec_net.requests[0].get_perf_counts()
{'Conv2D': {'exec_type': 'jit_avx2_1x1',
'real_time': 154,
'cpu_time': 154,
'status': 'EXECUTED',
'layer_type': 'Convolution'},
'Relu6': {'exec_type': 'undef',
'real_time': 0,
'cpu_time': 0,
'status': 'NOT_RUN',
'layer_type': 'Clamp'}
...
}
```
* `set_batch(size)`
* Description:
Sets a new batch size for this infer request when dynamic batching is enabled in the executable network that created this request.
> **NOTE:** Support of dynamic batch size depends on the target plugin.
* Parameters:
* `size` - New batch size to be used by all subsequent inference calls for this request
* Usage example:
```py
>>> plugin.set_config({"DYN_BATCH_ENABLED": "YES"})
>>> exec_net = plugin.load(network=net)
>>> exec_net.requests[0].set_batch(inputs_count)
```
* `set_completion_callback(py_callback, py_data = None)`
* Description: Sets a callback function that is called on success or failure of an asynchronous request
* Parameters:
* `py_callback` - Any defined or lambda function
* `py_data` - Data that is passed to the callback function
* Return value: None
* Usage example:
```py
callback = lambda status, py_data: print("Request with id {} finished with status {}".format(py_data, status))
net = IENetwork("./model.xml", "./model.bin")
ie = IECore()
exec_net = ie.load_network(net, "CPU", num_requests=4)
for id, req in enumerate(exec_net.requests):
req.set_completion_callback(py_callback=callback, py_data=id)
for req in exec_net.requests:
req.async_infer({"data": img})
```
## API Reference
For the complete API Reference, see [Inference Engine Python* API Reference](ie_python_api.html)

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (C) 2018-2019 Intel Corporation
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (C) 2018-2019 Intel Corporation
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -149,7 +149,7 @@ def main():
num_iter = 10
request_wrap = InferReqWrap(infer_request, request_id, num_iter)
# Start inference request execution. Wait for last execution being completed
request_wrap.execute("sync", {input_blob: images})
request_wrap.execute("async", {input_blob: images})
# Processing output blob
log.info("Processing output blob")

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (C) 2018-2019 Intel Corporation
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,32 +1,51 @@
# If the pyx file is a C++ file, we should specify that here.
set (CMAKE_INCLUDE_CURRENT_DIR ON)
set (TARGET_NAME "ie_api")
# Copyright (C) 2018-2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine)
set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
set(TARGET_NAME "ie_api")
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine)
file(GLOB SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/*.pyx
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
)
${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX TRUE
)
set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX ON)
## Compatibility with python 2.7 which has deprecated "register" specifier
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
add_definitions("-Wno-register")
# create target
cython_add_module(${TARGET_NAME} ${SOURCE})
function(python_disable_deprecated_warnings)
disable_deprecated_warnings()
set(pyx_file "${CMAKE_CURRENT_BINARY_DIR}/ie_api.cxx")
set_source_files_properties(${pyx_file} PROPERTIES COMPILE_FLAGS ${ie_c_cxx_deprecated})
endfunction()
python_disable_deprecated_warnings()
target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
set_target_properties(${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
# Compatibility with python 2.7 which has deprecated "register" specifier
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
target_compile_options(${TARGET_NAME} PRIVATE "-Wno-error=register")
endif()
cython_add_module (${TARGET_NAME} ${SOURCE})
set_target_properties (${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
target_link_libraries (${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
# perform copy
ADD_CUSTOM_COMMAND (TARGET ${TARGET_NAME}
add_custom_command(TARGET ${TARGET_NAME}
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/inference_engine/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py
COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/requirements.txt ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../../requirements.txt
COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../__init__.py
)
)
# install
install(TARGETS ${TARGET_NAME}
DESTINATION python/${PYTHON_VERSION}/openvino/inference_engine
COMPONENT ${PYTHON_VERSION})
install(PROGRAMS __init__.py
DESTINATION python/${PYTHON_VERSION}/openvino/inference_engine
COMPONENT ${PYTHON_VERSION})

View File

@ -3,7 +3,7 @@ from .ie_api_impl_defs cimport Blob, TensorDesc
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp.memory cimport unique_ptr
from libcpp.memory cimport unique_ptr, shared_ptr
cdef class BlobBuffer:
cdef Blob.Ptr ptr
@ -37,7 +37,7 @@ cdef class ExecutableNetwork:
cdef C.IEPlugin plugin_impl
cdef C.IECore ie_core_impl
cdef public:
_requests, _infer_requests, inputs, outputs
_requests, _infer_requests
cdef class IEPlugin:
cdef C.IEPlugin impl
@ -47,18 +47,20 @@ cdef class IEPlugin:
cpdef void set_initial_affinity(self, IENetwork network) except *
cpdef set get_supported_layers(self, IENetwork net)
cdef class IENetLayer:
cdef C.IENetLayer impl
cdef class InputInfo:
cdef C.InputInfo impl
cdef class OutputInfo:
cdef C.OutputInfo impl
cdef class LayersStatsMap(dict):
cdef C.IENetwork net_impl
cdef class IECore:
cdef C.IECore impl
cpdef ExecutableNetwork load_network(self, IENetwork network, str device_name, config = ?, int num_requests = ?)
cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config = ?, int num_requests = ?)
cdef class DataPtr:
cdef C.DataPtr _ptr
cdef class CDataPtr:
cdef C.CDataPtr _ptr
cdef class IENetLayer:
cdef C.CNNLayerPtr _ptr

View File

@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@ -7,26 +7,27 @@
#include "ie_iinfer_request.hpp"
#include "details/ie_cnn_network_tools.h"
std::map<std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
{"FP16", InferenceEngine::Precision::FP16},
{"Q78", InferenceEngine::Precision::Q78},
{"I32", InferenceEngine::Precision::I32},
{"I16", InferenceEngine::Precision::I16},
{"I8", InferenceEngine::Precision::I8},
{"U16", InferenceEngine::Precision::U16},
{"U8", InferenceEngine::Precision::U8}};
const std::string EXPORTED_NETWORK_NAME = "undefined";
std::map <std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
{"FP16", InferenceEngine::Precision::FP16},
{"I8", InferenceEngine::Precision::I8},
{"I16", InferenceEngine::Precision::I16},
{"I32", InferenceEngine::Precision::I32},
{"I64", InferenceEngine::Precision::I64},
{"U16", InferenceEngine::Precision::U16},
{"U8", InferenceEngine::Precision::U8}};
std::map<std::string, InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
{"NCHW", InferenceEngine::Layout::NCHW},
{"NHWC", InferenceEngine::Layout::NHWC},
{"OIHW", InferenceEngine::Layout::OIHW},
{"C", InferenceEngine::Layout::C},
{"CHW", InferenceEngine::Layout::CHW},
{"HW", InferenceEngine::Layout::HW},
{"NC", InferenceEngine::Layout::NC},
{"CN", InferenceEngine::Layout::CN},
{"NCDHW", InferenceEngine::Layout::NCDHW},
{"BLOCKED", InferenceEngine::Layout::BLOCKED}};
std::map <std::string, InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
{"NCHW", InferenceEngine::Layout::NCHW},
{"NHWC", InferenceEngine::Layout::NHWC},
{"OIHW", InferenceEngine::Layout::OIHW},
{"C", InferenceEngine::Layout::C},
{"CHW", InferenceEngine::Layout::CHW},
{"HW", InferenceEngine::Layout::HW},
{"NC", InferenceEngine::Layout::NC},
{"CN", InferenceEngine::Layout::CN},
{"NCDHW", InferenceEngine::Layout::NCDHW},
{"BLOCKED", InferenceEngine::Layout::BLOCKED}};
#define stringify(name) # name
#define IE_CHECK_CALL(expr) { \
auto ret = (expr); \
@ -35,12 +36,13 @@ std::map<std::string, InferenceEngine::Layout> layout_map = {{"ANY", Inferen
} \
} \
uint32_t getOptimalNumberOfRequests(const InferenceEngine::IExecutableNetwork::Ptr actual) {
try {
InferenceEngine::ResponseDesc response;
InferenceEngine::Parameter parameter_value;
IE_CHECK_CALL(actual->GetMetric(METRIC_KEY(SUPPORTED_METRICS), parameter_value, &response));
auto supported_metrics = parameter_value.as<std::vector<std::string>>();
auto supported_metrics = parameter_value.as < std::vector < std::string >> ();
std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS);
if (std::find(supported_metrics.begin(), supported_metrics.end(), key) != supported_metrics.end()) {
IE_CHECK_CALL(actual->GetMetric(key, parameter_value, &response));
@ -48,20 +50,20 @@ uint32_t getOptimalNumberOfRequests(const InferenceEngine::IExecutableNetwork::P
return parameter_value.as<unsigned int>();
else
THROW_IE_EXCEPTION << "Unsupported format for " << key << "!"
<< " Please specify number of infer requests directly!";
<< " Please specify number of infer requests directly!";
} else {
THROW_IE_EXCEPTION << "Can't load network: " << key << " is not supported!"
<< " Please specify number of infer requests directly!";
}
} catch (const std::exception& ex) {
} catch (const std::exception &ex) {
THROW_IE_EXCEPTION << "Can't load network: " << ex.what()
<< " Please specify number of infer requests directly!";
}
}
PyObject* parse_parameter(const InferenceEngine::Parameter & param){
PyObject *parse_parameter(const InferenceEngine::Parameter &param) {
// Check for std::string
if (param.is<std::string>()){
if (param.is<std::string>()) {
return PyUnicode_FromString(param.as<std::string>().c_str());
}
// Check for int
@ -107,7 +109,7 @@ PyObject* parse_parameter(const InferenceEngine::Parameter & param){
else if (param.is<std::vector<unsigned int>>()){
auto val = param.as<std::vector<unsigned int>>();
PyObject *list = PyList_New(0);
for (const auto & it : val){
for (const auto &it : val) {
PyList_Append(list, PyLong_FromLong(it));
}
return list;
@ -116,8 +118,8 @@ PyObject* parse_parameter(const InferenceEngine::Parameter & param){
else if (param.is<std::vector<float>>()){
auto val = param.as<std::vector<float>>();
PyObject *list = PyList_New(0);
for (const auto & it : val){
PyList_Append(list, PyFloat_FromDouble((double)it));
for (const auto &it : val) {
PyList_Append(list, PyFloat_FromDouble((double) it));
}
return list;
}
@ -161,156 +163,143 @@ PyObject* parse_parameter(const InferenceEngine::Parameter & param){
return (PyObject *) NULL;
}
}
InferenceEnginePython::IENetwork::IENetwork(const std::string &model, const std::string &weights, bool ngraph_compatibility = false) {
if (ngraph_compatibility){
InferenceEngine::IRReader ir_reader;
auto ngraph_function = ir_reader.read(model, weights);
actual = InferenceEngine::CNNNetwork(InferenceEngine::convertFunctionToICNNNetwork(ngraph_function));
} else {
InferenceEngine::CNNNetReader net_reader;
net_reader.ReadNetwork(model);
net_reader.ReadWeights(weights);
actual = net_reader.getNetwork();
}
name = actual.getName();
batch_size = actual.getBatchSize();
precision = actual.getPrecision().name();
InferenceEnginePython::IENetwork::IENetwork(const std::string &model, const std::string &weights) {
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::CNNNetReader net_reader;
net_reader.ReadNetwork(model);
net_reader.ReadWeights(weights);
auto net = net_reader.getNetwork();
IE_SUPPRESS_DEPRECATED_END
actual = std::make_shared<InferenceEngine::CNNNetwork>(net);
name = actual->getName();
batch_size = actual->getBatchSize();
precision = actual->getPrecision().name();
}
InferenceEnginePython::IENetwork::IENetwork(const InferenceEngine::CNNNetwork& cnn_network)
: actual(cnn_network) {
name = actual.getName();
batch_size = actual.getBatchSize();
precision = actual.getPrecision().name();
InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork> &cnn_network)
: actual(cnn_network) {
name = actual->getName();
batch_size = actual->getBatchSize();
precision = actual->getPrecision().name();
}
void InferenceEnginePython::IENetwork::load_from_buffer(const char *xml, size_t xml_size, uint8_t *bin, size_t bin_size) {
InferenceEnginePython::IENetwork::IENetwork(PyObject* network) {
#if defined(ENABLE_NGRAPH)
auto* capsule_ptr = PyCapsule_GetPointer(network, "ngraph_function");
auto* function_sp = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
if (function_sp == nullptr)
THROW_IE_EXCEPTION << "Cannot create CNNNetwork from capsule! Capsule doesn't contain nGraph function!";
InferenceEngine::CNNNetwork cnnNetwork(*function_sp);
actual = std::make_shared<InferenceEngine::CNNNetwork>(cnnNetwork);
name = actual->getName();
batch_size = actual->getBatchSize();
precision = actual->getPrecision().name();
#else
THROW_IE_EXCEPTION << "InferenceEngine was built without nGraph support!";
#endif
}
void
InferenceEnginePython::IENetwork::load_from_buffer(const char *xml, size_t xml_size, uint8_t *bin, size_t bin_size) {
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::CNNNetReader net_reader;
net_reader.ReadNetwork(xml, xml_size);
InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, {bin_size}, InferenceEngine::Layout::C);
InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bin_size }, InferenceEngine::Layout::C);
auto weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, bin, bin_size);
net_reader.SetWeights(weights_blob);
name = net_reader.getName();
actual = net_reader.getNetwork();
batch_size = actual.getBatchSize();
precision = actual.getPrecision().name();
auto net = net_reader.getNetwork();
IE_SUPPRESS_DEPRECATED_END
actual = std::make_shared<InferenceEngine::CNNNetwork>(net);
batch_size = actual->getBatchSize();
precision = actual->getPrecision().name();
}
void InferenceEnginePython::IENetwork::serialize(const std::string &path_to_xml, const std::string &path_to_bin) {
actual.serialize(path_to_xml, path_to_bin);
actual->serialize(path_to_xml, path_to_bin);
}
const std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>>
const std::vector <InferenceEngine::CNNLayerPtr>
InferenceEnginePython::IENetwork::getLayers() {
std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>> result;
std::vector<InferenceEngine::CNNLayerPtr> sorted_layers = InferenceEngine::details::CNNNetSortTopologically(actual);
std::vector<InferenceEngine::CNNLayerPtr> result;
std::vector<InferenceEngine::CNNLayerPtr> sorted_layers = InferenceEngine::details::CNNNetSortTopologically(*actual);
for (const auto &layer : sorted_layers) {
InferenceEnginePython::IENetLayer layer_info;
layer_info.layer_ptr = layer;
layer_info.network_ptr = actual;
layer_info.name = layer->name;
layer_info.type = layer->type;
layer_info.precision = layer->precision.name();
layer_info.params = layer->params;
layer_info.affinity = layer->affinity;
std::vector<std::string> parents;
for (const auto &i : layer->insData) {
auto data = i.lock();
if (data) {
parents.emplace_back(data->getName());
}
}
layer_info.parents = parents;
std::vector<std::string> children;
for (const auto &data : layer->outData) {
auto inputTo = data->getInputTo();
for (auto layer_iter : inputTo) {
InferenceEngine::CNNLayerPtr layer_in_data = layer_iter.second;
if (!layer_in_data) {
THROW_IE_EXCEPTION << "Layer which takes data " << data->getName() << " is nullptr";
}
children.emplace_back(layer_in_data->name);
}
}
layer_info.children = children;
const InferenceEngine::TensorDesc &inputTensorDesc = layer->outData[0]->getTensorDesc();
for (const auto &it : layout_map) {
if (it.second == inputTensorDesc.getLayout()) {
layer_info.layout = it.first;
}
}
auto dims = inputTensorDesc.getDims();
std::string string_dims = "";
for (const auto &it : dims) {
string_dims += std::to_string(it) + " ";
}
string_dims = string_dims.substr(0, string_dims.size() - 1);
layer_info.shape = string_dims;
result.emplace_back(std::make_pair(layer->name, layer_info));
result.emplace_back(layer);
}
return result;
}
const std::map<std::string, InferenceEnginePython::InputInfo> InferenceEnginePython::IENetwork::getInputs() {
std::map<std::string, InferenceEnginePython::InputInfo> inputs;
const InferenceEngine::InputsDataMap &inputsInfo = actual.getInputsInfo();
PyObject* InferenceEnginePython::IENetwork::getFunction() {
#if defined(ENABLE_NGRAPH)
const char * py_capsule_name = "ngraph_function";
auto ngraph_func_ptr = actual->getFunction();
// create a shared pointer on the heap before putting it in the capsule
// this secures the lifetime of the object transferred by the capsule
auto* sp_copy = new std::shared_ptr<const ngraph::Function>(ngraph_func_ptr);
// a destructor callback that will delete the heap allocated shared_ptr
// when the capsule is destructed
auto sp_deleter = [](PyObject* capsule) {
auto* capsule_ptr = PyCapsule_GetPointer(capsule, "ngraph_function");
auto* function_sp = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
if (function_sp) {
delete function_sp;
}
};
if (ngraph_func_ptr) {
//return PyCapsule_New(&ngraph_func_ptr, py_capsule_name, NULL);
return PyCapsule_New(sp_copy, py_capsule_name, sp_deleter);
} else {
return nullptr;
}
#else
return nullptr;
#endif
}
const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getInputs() {
std::map <std::string, InferenceEngine::DataPtr> inputs;
const InferenceEngine::InputsDataMap &inputsInfo = actual->getInputsInfo();
for (auto &in : inputsInfo) {
InferenceEnginePython::InputInfo info;
info.actual = in.second;
const InferenceEngine::TensorDesc &inputTensorDesc = in.second->getTensorDesc();
info.dims = inputTensorDesc.getDims();
for (auto it : precision_map)
if (it.second == in.second->getPrecision())
info.precision = it.first;
for (auto it : layout_map)
if (it.second == in.second->getLayout())
info.layout = it.first;
inputs[in.first] = info;
inputs[in.first] = in.second->getInputData();
}
return inputs;
}
const std::map<std::string, InferenceEnginePython::OutputInfo> InferenceEnginePython::IENetwork::getOutputs() {
std::map<std::string, InferenceEnginePython::OutputInfo> outputs;
const InferenceEngine::OutputsDataMap &outputsInfo = actual.getOutputsInfo();
const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IENetwork::getOutputs() {
std::map <std::string, InferenceEngine::DataPtr> outputs;
const InferenceEngine::OutputsDataMap &outputsInfo = actual->getOutputsInfo();
for (auto &out : outputsInfo) {
InferenceEnginePython::OutputInfo info;
info.actual = out.second;
const InferenceEngine::TensorDesc &inputTensorDesc = out.second->getTensorDesc();
info.dims = inputTensorDesc.getDims();
for (auto it : precision_map)
if (it.second == out.second->getPrecision())
info.precision = it.first;
for (auto it : layout_map)
if (it.second == out.second->getLayout())
info.layout = it.first;
outputs[out.first] = info;
outputs[out.first] = out.second;
}
return outputs;
}
void
InferenceEnginePython::IENetwork::addOutput(const std::string &out_layer, size_t port_id) {
actual.addOutput(out_layer, port_id);
actual->addOutput(out_layer, port_id);
}
void InferenceEnginePython::IENetwork::setBatch(const size_t size) {
actual.setBatchSize(size);
actual->setBatchSize(size);
}
void InferenceEnginePython::IENetwork::reshape(const std::map<std::string, std::vector<size_t>> &input_shapes) {
actual.reshape(input_shapes);
void InferenceEnginePython::IENetwork::reshape(const std::map <std::string, std::vector<size_t>> &input_shapes) {
actual->reshape(input_shapes);
}
const std::map<std::string, std::map<std::string, std::vector<float>>> InferenceEnginePython::IENetwork::getStats() {
const std::map <std::string, std::map<std::string, std::vector < float>>>
InferenceEnginePython::IENetwork::getStats() {
InferenceEngine::ICNNNetworkStats *pstats = nullptr;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) actual).getStats(&pstats, &response));
IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) *actual).getStats(&pstats, &response));
auto statsMap = pstats->getNodesStats();
std::map<std::string, std::map<std::string, std::vector<float>>> map;
std::map < std::string, std::map < std::string, std::vector < float >> > map;
for (const auto &it : statsMap) {
std::map<std::string, std::vector<float>> stats;
std::map <std::string, std::vector<float>> stats;
stats.emplace("min", it.second->_minOutputs);
stats.emplace("max", it.second->_maxOutputs);
map.emplace(it.first, stats);
@ -322,7 +311,7 @@ void InferenceEnginePython::IENetwork::setStats(const std::map<std::string, std:
std::vector<float>>> &stats) {
InferenceEngine::ICNNNetworkStats *pstats = nullptr;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) actual).getStats(&pstats, &response));
IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) *actual).getStats(&pstats, &response));
std::map<std::string, InferenceEngine::NetworkNodeStatsPtr> newNetNodesStats;
for (const auto &it : stats) {
InferenceEngine::NetworkNodeStatsPtr nodeStats = InferenceEngine::NetworkNodeStatsPtr(
@ -334,53 +323,46 @@ void InferenceEnginePython::IENetwork::setStats(const std::map<std::string, std:
pstats->setNodesStats(newNetNodesStats);
}
void InferenceEnginePython::InputInfo::setPrecision(std::string precision) {
actual->setPrecision(precision_map[precision]);
}
void InferenceEnginePython::InputInfo::setLayout(std::string layout) {
actual->setLayout(layout_map[layout]);
}
IE_SUPPRESS_DEPRECATED_START
InferenceEnginePython::IEPlugin::IEPlugin(const std::string &device, const std::vector <std::string> &plugin_dirs) {
void InferenceEnginePython::OutputInfo::setPrecision(std::string precision) {
actual->setPrecision(precision_map[precision]);
}
InferenceEnginePython::IEPlugin::IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs) {
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::PluginDispatcher dispatcher{plugin_dirs};
actual = dispatcher.getPluginByDevice(device);
IE_SUPPRESS_DEPRECATED_END
auto pluginVersion = actual.GetVersion();
version = std::to_string(pluginVersion->apiVersion.major) + ".";
version += std::to_string(pluginVersion->apiVersion.minor) + ".";
version += pluginVersion->buildNumber;
device_name = device;
}
IE_SUPPRESS_DEPRECATED_END
void InferenceEnginePython::IEPlugin::setInitialAffinity(const InferenceEnginePython::IENetwork &net) {
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::InferenceEnginePluginPtr hetero_plugin(actual);
InferenceEngine::QueryNetworkResult queryRes;
auto &network = net.actual;
hetero_plugin->QueryNetwork(network, {}, queryRes);
hetero_plugin->QueryNetwork(*network, {}, queryRes);
IE_SUPPRESS_DEPRECATED_END
if (queryRes.rc != InferenceEngine::StatusCode::OK) {
THROW_IE_EXCEPTION << queryRes.resp.msg;
}
for (auto && layer : queryRes.supportedLayersMap) {
network.getLayerByName(layer.first.c_str())->affinity = layer.second;
for (auto &&layer : queryRes.supportedLayersMap) {
network->getLayerByName(layer.first.c_str())->affinity = layer.second;
}
}
std::set<std::string> InferenceEnginePython::IEPlugin::queryNetwork(const InferenceEnginePython::IENetwork &net) {
const InferenceEngine::CNNNetwork &network = net.actual;
std::set <std::string> InferenceEnginePython::IEPlugin::queryNetwork(const InferenceEnginePython::IENetwork &net) {
const std::shared_ptr<InferenceEngine::CNNNetwork> &network = net.actual;
InferenceEngine::QueryNetworkResult queryRes;
actual.QueryNetwork(network, {}, queryRes);
IE_SUPPRESS_DEPRECATED_START
actual.QueryNetwork(*network, {}, queryRes);
IE_SUPPRESS_DEPRECATED_END
std::set<std::string> supportedLayers;
for (auto && layer : queryRes.supportedLayersMap) {
std::set <std::string> supportedLayers;
for (auto &&layer : queryRes.supportedLayersMap) {
supportedLayers.insert(layer.first);
}
@ -388,61 +370,24 @@ std::set<std::string> InferenceEnginePython::IEPlugin::queryNetwork(const Infere
}
void InferenceEnginePython::IENetLayer::setAffinity(const std::string &target_affinity) {
layer_ptr->affinity = target_affinity;
}
void InferenceEnginePython::IENetLayer::setParams(const std::map<std::string, std::string> &params_map) {
layer_ptr->params = params_map;
}
std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::IENetLayer::getWeights() {
auto w_layer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(layer_ptr);
// IF current layer is weightable gather weights and biases from casted WeightableLayer and all other blobs
// considered as custom and gathered from blobs field pf CNNLayer.
std::map<std::string, InferenceEngine::Blob::Ptr> weights;
if (w_layer != nullptr) {
if (w_layer->_weights != nullptr) {
weights["weights"] = w_layer->_weights;
}
if (w_layer->_biases != nullptr) {
weights["biases"] = w_layer->_biases;
}
for (auto it : w_layer->blobs) {
if (it.first == "weights" || it.first == "biases") {
continue;
}
weights[it.first] = it.second;
}
} else {
// Otherwise all layer's blobs are considered as custom and gathered from CNNLayer
std::map<std::string, InferenceEngine::Blob::Ptr> map_placeholder;
weights = map_placeholder; // If layer has no blobs it should not be missed from weights map
for (auto it : layer_ptr->blobs) {
weights[it.first] = it.second;
}
}
return weights;
}
void InferenceEnginePython::IENetLayer::setPrecision(std::string precision) {
layer_ptr->precision = precision_map[precision];
}
void InferenceEnginePython::IEPlugin::addCpuExtension(const std::string &extension_path) {
auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(extension_path);
auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
IE_SUPPRESS_DEPRECATED_START
actual.AddExtension(extension);
IE_SUPPRESS_DEPRECATED_END
}
std::unique_ptr<InferenceEnginePython::IEExecNetwork>
std::unique_ptr <InferenceEnginePython::IEExecNetwork>
InferenceEnginePython::IEPlugin::load(const InferenceEnginePython::IENetwork &net,
int num_requests,
const std::map<std::string, std::string> &config) {
const std::map <std::string, std::string> &config) {
InferenceEngine::ResponseDesc response;
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(net.name,
num_requests);
exec_network->actual = actual.LoadNetwork(net.actual, config);
IE_SUPPRESS_DEPRECATED_START
exec_network->actual = actual.LoadNetwork(*net.actual, config);
IE_SUPPRESS_DEPRECATED_END
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(exec_network->actual);
@ -458,7 +403,9 @@ InferenceEnginePython::IEPlugin::load(const InferenceEnginePython::IENetwork &ne
}
void InferenceEnginePython::IEPlugin::setConfig(const std::map<std::string, std::string> &config) {
IE_SUPPRESS_DEPRECATED_START
actual.SetConfig(config);
IE_SUPPRESS_DEPRECATED_END
}
InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string &name, size_t num_requests) :
@ -474,23 +421,50 @@ InferenceEnginePython::IENetwork InferenceEnginePython::IEExecNetwork::GetExecGr
InferenceEngine::ResponseDesc response;
InferenceEngine::ICNNNetwork::Ptr graph;
IE_CHECK_CALL(actual->GetExecGraphInfo(graph, &response));
return IENetwork(InferenceEngine::CNNNetwork(graph));
return IENetwork(std::make_shared<InferenceEngine::CNNNetwork>(graph));
}
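A small usage sketch (not taken from this patch): the execution graph returned by GetExecGraphInfo() can be dumped for offline inspection; the function name and file names are placeholders.

#include <inference_engine.hpp>

// "exec" is assumed to be an ExecutableNetwork compiled earlier via Core::LoadNetwork.
void dumpExecGraph(InferenceEngine::ExecutableNetwork& exec) {
    InferenceEngine::CNNNetwork execGraph = exec.GetExecGraphInfo();
    execGraph.serialize("exec_graph.xml", "exec_graph.bin");  // runtime graph with execution-related layer params
}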
PyObject* InferenceEnginePython::IEExecNetwork::getMetric(const std::string &metric_name) {
PyObject *InferenceEnginePython::IEExecNetwork::getMetric(const std::string &metric_name) {
InferenceEngine::Parameter parameter;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->GetMetric(metric_name, parameter, &response));
return parse_parameter(parameter);
}
PyObject* InferenceEnginePython::IEExecNetwork::getConfig(const std::string &metric_name) {
PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &metric_name) {
InferenceEngine::Parameter parameter;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->GetConfig(metric_name, parameter, &response));
return parse_parameter(parameter);
}
void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string &model_file) {
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->Export(model_file, &response));
}
std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IEExecNetwork::getInputs() {
InferenceEngine::ConstInputsDataMap inputsDataMap;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->GetInputsInfo(inputsDataMap, &response));
std::map <std::string, InferenceEngine::DataPtr> pyInputs;
for (const auto &item : inputsDataMap) {
pyInputs[item.first] = item.second->getInputData();
}
return pyInputs;
}
std::map <std::string, InferenceEngine::CDataPtr> InferenceEnginePython::IEExecNetwork::getOutputs() {
InferenceEngine::ConstOutputsDataMap outputsDataMap;
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->GetOutputsInfo(outputsDataMap, &response));
std::map <std::string, InferenceEngine::CDataPtr> pyOutputs;
for (const auto &item : outputsDataMap) {
pyOutputs[item.first] = item.second;
}
return pyOutputs;
}
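A hedged consumer sketch for the two maps above, showing how the const input/output descriptors of a compiled network might be inspected from C++ (the helper name and print format are illustrative):

#include <inference_engine.hpp>

#include <iostream>

void printExecIO(const InferenceEngine::ExecutableNetwork& exec) {
    for (const auto& in : exec.GetInputsInfo()) {    // ConstInputsDataMap
        const auto& desc = in.second->getTensorDesc();
        std::cout << "input  " << in.first << ": " << desc.getPrecision().name()
                  << ", " << desc.getDims().size() << "D\n";
    }
    for (const auto& out : exec.GetOutputsInfo()) {  // ConstOutputsDataMap
        const auto& desc = out.second->getTensorDesc();
        std::cout << "output " << out.first << ": " << desc.getPrecision().name()
                  << ", " << desc.getDims().size() << "D\n";
    }
}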
void InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string &blob_name,
InferenceEngine::Blob::Ptr &blob_ptr) {
InferenceEngine::ResponseDesc response;
@ -547,12 +521,12 @@ int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) {
return static_cast<int>(code);
}
std::map<std::string, InferenceEnginePython::ProfileInfo>
std::map <std::string, InferenceEnginePython::ProfileInfo>
InferenceEnginePython::InferRequestWrap::getPerformanceCounts() {
std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts;
std::map <std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts;
InferenceEngine::ResponseDesc response;
request_ptr->GetPerformanceCounts(perf_counts, &response);
std::map<std::string, InferenceEnginePython::ProfileInfo> perf_map;
std::map <std::string, InferenceEnginePython::ProfileInfo> perf_map;
for (auto it : perf_counts) {
InferenceEnginePython::ProfileInfo profile_info;
@ -588,21 +562,24 @@ std::string InferenceEnginePython::get_version() {
}
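Relating to the performance-counter conversion above, a minimal C++ consumer sketch (it assumes an InferRequest that has already executed; the filtering and print format are illustrative):

#include <inference_engine.hpp>

#include <iostream>

void printPerfCounts(InferenceEngine::InferRequest& request) {
    // Keys are layer names, values are InferenceEngineProfileInfo entries.
    for (const auto& entry : request.GetPerformanceCounts()) {
        const auto& info = entry.second;
        if (info.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED) {
            std::cout << entry.first << ": " << info.realTime_uSec << " us (" << info.exec_type << ")\n";
        }
    }
}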
InferenceEnginePython::IECore::IECore(const std::string & xmlConfigFile) {
InferenceEnginePython::IECore::IECore(const std::string &xmlConfigFile) {
actual = InferenceEngine::Core(xmlConfigFile);
}
std::map<std::string, InferenceEngine::Version> InferenceEnginePython::IECore::getVersions(const std::string &deviceName) {
std::map <std::string, InferenceEngine::Version>
InferenceEnginePython::IECore::getVersions(const std::string &deviceName) {
return actual.GetVersions(deviceName);
}
std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetwork(IENetwork network,
const std::string & deviceName, const std::map<std::string, std::string> & config, int num_requests){
std::unique_ptr <InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::loadNetwork(IENetwork network,
const std::string &deviceName,
const std::map <std::string, std::string> &config,
int num_requests) {
InferenceEngine::ResponseDesc response;
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(network.name,
num_requests);
exec_network->actual = actual.LoadNetwork(network.actual, deviceName, config);
exec_network->actual = actual.LoadNetwork(*network.actual, deviceName, config);
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(exec_network->actual);
@ -617,46 +594,69 @@ std::unique_ptr<InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IEC
return exec_network;
}
std::map<std::string, std::string> InferenceEnginePython::IECore::queryNetwork(InferenceEnginePython::IENetwork network,
const std::string &deviceName,
const std::map<std::string, std::string> &config) {
auto res = actual.QueryNetwork(network.actual, deviceName, config);
std::unique_ptr <InferenceEnginePython::IEExecNetwork> InferenceEnginePython::IECore::importNetwork(
const std::string &modelFile, const std::string &deviceName, const std::map <std::string, std::string> &config,
int num_requests) {
InferenceEngine::ResponseDesc response;
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(EXPORTED_NETWORK_NAME,
num_requests);
exec_network->actual = actual.ImportNetwork(modelFile, deviceName, config);
if (0 == num_requests) {
num_requests = getOptimalNumberOfRequests(exec_network->actual);
exec_network->infer_requests.resize(num_requests);
}
for (size_t i = 0; i < num_requests; ++i) {
InferRequestWrap &infer_request = exec_network->infer_requests[i];
IE_CHECK_CALL(exec_network->actual->CreateInferRequest(infer_request.request_ptr, &response))
}
return exec_network;
}
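A sketch of the export/import round trip this binding exposes; "model.blob" and "MYRIAD" are placeholders for a compiled blob path and a device that supports import:

#include <inference_engine.hpp>

void exportCompiled(InferenceEngine::ExecutableNetwork& exec) {
    exec.Export("model.blob");  // re-loadable later without the original IR
}

InferenceEngine::ExecutableNetwork reimport(InferenceEngine::Core& core) {
    return core.ImportNetwork("model.blob", "MYRIAD", {});
}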
std::map <std::string, std::string>
InferenceEnginePython::IECore::queryNetwork(InferenceEnginePython::IENetwork network,
const std::string &deviceName,
const std::map <std::string, std::string> &config) {
auto res = actual.QueryNetwork(*network.actual, deviceName, config);
return res.supportedLayersMap;
}
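A sketch of how the returned supported-layers map is typically used to pin layers before loading on a heterogeneous device; "HETERO:GPU,CPU" and the helper name are placeholders:

#include <inference_engine.hpp>

void pinSupportedLayers(InferenceEngine::Core& core, InferenceEngine::CNNNetwork& network) {
    auto res = core.QueryNetwork(network, "HETERO:GPU,CPU", {});
    IE_SUPPRESS_DEPRECATED_START
    for (const auto& kv : res.supportedLayersMap) {
        network.getLayerByName(kv.first.c_str())->affinity = kv.second;
    }
    IE_SUPPRESS_DEPRECATED_END
}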
void InferenceEnginePython::IECore::setConfig(const std::map<std::string, std::string> &config,
void InferenceEnginePython::IECore::setConfig(const std::map <std::string, std::string> &config,
const std::string &deviceName) {
actual.SetConfig(config, deviceName);
}
void InferenceEnginePython::IECore::registerPlugin(const std::string & pluginName, const std::string &deviceName) {
void InferenceEnginePython::IECore::registerPlugin(const std::string &pluginName, const std::string &deviceName) {
actual.RegisterPlugin(pluginName, deviceName);
}
void InferenceEnginePython::IECore::unregisterPlugin(const std::string & deviceName){
void InferenceEnginePython::IECore::unregisterPlugin(const std::string &deviceName) {
actual.UnregisterPlugin(deviceName);
}
void InferenceEnginePython::IECore::registerPlugins(const std::string & xmlConfigFile){
void InferenceEnginePython::IECore::registerPlugins(const std::string &xmlConfigFile) {
actual.RegisterPlugins(xmlConfigFile);
}
void InferenceEnginePython::IECore::addExtension(const std::string & ext_lib_path, const std::string &deviceName) {
void InferenceEnginePython::IECore::addExtension(const std::string &ext_lib_path, const std::string &deviceName) {
auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(ext_lib_path);
auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
actual.AddExtension(extension, deviceName);
}
std::vector<std::string> InferenceEnginePython::IECore::getAvailableDevices() {
std::vector <std::string> InferenceEnginePython::IECore::getAvailableDevices() {
return actual.GetAvailableDevices();
}
PyObject* InferenceEnginePython::IECore::getMetric(const std::string &deviceName, const std::string &name) {
PyObject *InferenceEnginePython::IECore::getMetric(const std::string &deviceName, const std::string &name) {
InferenceEngine::Parameter param = actual.GetMetric(deviceName, name);
return parse_parameter(param);
}
PyObject* InferenceEnginePython::IECore::getConfig(const std::string &deviceName, const std::string &name) {
PyObject *InferenceEnginePython::IECore::getConfig(const std::string &deviceName, const std::string &name) {
InferenceEngine::Parameter param = actual.GetConfig(deviceName, name);
return parse_parameter(param);
}
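A brief sketch of the calls behind getMetric(): enumerating devices and asking each for its full name (the METRIC_KEY values come from ie_plugin_config.hpp; the print format is illustrative):

#include <inference_engine.hpp>

#include <iostream>

void printDeviceNames(InferenceEngine::Core& core) {
    for (const auto& device : core.GetAvailableDevices()) {
        InferenceEngine::Parameter name = core.GetMetric(device, METRIC_KEY(FULL_DEVICE_NAME));
        std::cout << device << ": " << name.as<std::string>() << "\n";
    }
}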
@ -1,4 +1,4 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@ -26,47 +26,6 @@ typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;
namespace InferenceEnginePython {
struct IENetLayer {
InferenceEngine::CNNLayerPtr layer_ptr;
InferenceEngine::CNNNetwork network_ptr;
std::string name;
std::string type;
std::string precision;
std::string shape;
std::string layout;
std::vector<std::string> children;
std::vector<std::string> parents;
std::string affinity;
std::map<std::string, std::string> params;
void setAffinity(const std::string &target_affinity);
void setParams(const std::map<std::string, std::string> &params_map);
std::map<std::string, InferenceEngine::Blob::Ptr> getWeights();
void setPrecision(std::string precision);
};
struct InputInfo {
InferenceEngine::InputInfo::Ptr actual;
std::vector<size_t> dims;
std::string precision;
std::string layout;
void setPrecision(std::string precision);
void setLayout(std::string layout);
};
struct OutputInfo {
InferenceEngine::DataPtr actual;
std::vector<size_t> dims;
std::string precision;
std::string layout;
void setPrecision(std::string precision);
};
struct ProfileInfo {
std::string status;
@ -78,20 +37,21 @@ struct ProfileInfo {
};
struct IENetwork {
InferenceEngine::CNNNetwork actual;
std::shared_ptr<InferenceEngine::CNNNetwork> actual;
std::string name;
std::size_t batch_size;
std::string precision;
PyObject* getFunction();
void setBatch(const size_t size);
void addOutput(const std::string &out_layer, size_t port_id);
const std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>> getLayers();
const std::vector <InferenceEngine::CNNLayerPtr> getLayers();
const std::map<std::string, InferenceEnginePython::InputInfo> getInputs();
const std::map<std::string, InferenceEngine::DataPtr> getInputs();
const std::map<std::string, InferenceEnginePython::OutputInfo> getOutputs();
const std::map<std::string, InferenceEngine::DataPtr> getOutputs();
void reshape(const std::map<std::string, std::vector<size_t>> &input_shapes);
@ -103,9 +63,11 @@ struct IENetwork {
void load_from_buffer(const char* xml, size_t xml_size, uint8_t* bin, size_t bin_size);
IENetwork(const std::string &model, const std::string &weights, bool ngraph_compatibility);
IENetwork(const std::string &model, const std::string &weights);
IENetwork(const InferenceEngine::CNNNetwork& cnn_network);
IENetwork(const std::shared_ptr<InferenceEngine::CNNNetwork> &cnn_network);
IENetwork(PyObject* network);
IENetwork() = default;
};
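Since IENetwork::actual is now a std::shared_ptr<CNNNetwork>, here is a sketch of how such a pointer can be produced on the C++ side (it assumes Core::ReadNetwork is available in this release; the model file names are placeholders):

#include <inference_engine.hpp>

#include <memory>

std::shared_ptr<InferenceEngine::CNNNetwork> readNetwork(InferenceEngine::Core& core) {
    return std::make_shared<InferenceEngine::CNNNetwork>(core.ReadNetwork("model.xml", "model.bin"));
}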
@ -146,6 +108,10 @@ struct IEExecNetwork {
IENetwork GetExecGraphInfo();
void infer();
void exportNetwork(const std::string & model_file);
std::map<std::string, InferenceEngine::DataPtr> getInputs();
std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
PyObject* getMetric(const std::string & metric_name);
PyObject* getConfig(const std::string & metric_name);
@ -172,7 +138,9 @@ struct IEPlugin {
std::set<std::string> queryNetwork(const InferenceEnginePython::IENetwork &net);
IE_SUPPRESS_DEPRECATED_START
InferenceEngine::InferencePlugin actual;
IE_SUPPRESS_DEPRECATED_END
};
struct IECore {
@ -181,6 +149,8 @@ struct IECore {
std::map<std::string, InferenceEngine::Version> getVersions(const std::string & deviceName);
std::unique_ptr<InferenceEnginePython::IEExecNetwork> loadNetwork(IENetwork network, const std::string & deviceName,
const std::map<std::string, std::string> & config, int num_requests);
std::unique_ptr<InferenceEnginePython::IEExecNetwork> importNetwork(const std::string & modelFile, const std::string & deviceName,
const std::map<std::string, std::string> & config, int num_requests);
std::map<std::string, std::string> queryNetwork(IENetwork network, const std::string & deviceName,
const std::map<std::string, std::string> & config);
void setConfig(const std::map<std::string, std::string> &config, const std::string & deviceName = std::string());
@ -5,17 +5,33 @@ from libcpp.vector cimport vector
from libcpp.map cimport map
from libcpp.set cimport set
from libcpp.pair cimport pair
from libcpp.memory cimport unique_ptr, shared_ptr
from libcpp.memory cimport unique_ptr, shared_ptr, weak_ptr
from libc.stdint cimport int64_t, uint8_t
cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
ctypedef vector[size_t] SizeVector
cdef cppclass TensorDesc:
SizeVector& getDims()
const Precision& getPrecision() const
cdef cppclass Data:
const Precision getPrecision() const
void setPrecision(const Precision& precision) const
const SizeVector getDims()
const string& getName() const
const Layout getLayout() const
void setLayout(Layout layout) const
const bool isInitialized() const
weak_ptr[CNNLayer] & getCreatorLayer()
map[string, shared_ptr[CNNLayer]] & getInputTo()
ctypedef shared_ptr[Data] DataPtr
ctypedef weak_ptr[Data] DataWeakPtr
ctypedef shared_ptr[const Data] CDataPtr
cdef cppclass Blob:
ctypedef shared_ptr[Blob] Ptr
const TensorDesc& getTensorDesc() const
@ -23,6 +39,22 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
cdef cppclass Precision:
const char*name() const
@staticmethod
const Precision FromStr(const string& str)
cdef cppclass CNNLayer:
string name
string type
Precision precision
vector[DataPtr] outData
vector[DataWeakPtr] insData
string affinity
map[string, string] params
map[string, Blob.Ptr] blobs
ctypedef weak_ptr[CNNLayer] CNNLayerWeakPtr
ctypedef shared_ptr[CNNLayer] CNNLayerPtr
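The Data and CNNLayer accessors declared above allow layer-graph traversal; a small C++ sketch (printConsumers is an illustrative helper, not part of the bindings):

#include <ie_layers.h>

#include <iostream>

void printConsumers(const InferenceEngine::CNNLayerPtr& layer) {
    for (const auto& data : layer->outData) {              // vector<DataPtr>
        for (const auto& consumer : data->getInputTo()) {  // map<name, CNNLayerPtr>
            std::cout << layer->name << " -> " << consumer.second->name << "\n";
        }
    }
}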
cdef struct apiVersion:
int minor
@ -33,34 +65,36 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
const char *description
apiVersion apiVersion
cdef enum Layout:
ANY
NCHW
NHWC
NCDHW
NDHWC
OIHW
GOIHW
OIDHW
GOIDHW
SCALAR
C
CHW
HW
NC
CN
BLOCKED
cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
cdef cppclass IENetLayer:
string name
string type
string precision
string affinity
string shape
string layout
vector[string] children
vector[string] parents
map[string, string] params
void setAffinity(const string & target_affinity) except +
void setParams(const map[string, string] & params_map) except +
map[string, Blob.Ptr] getWeights() except +
void setPrecision(string precision) except +
cdef cppclass InputInfo:
vector[size_t] dims
string precision
string layout
void setPrecision(string precision) except +
void setLayout(string layout) except +
# cdef cppclass IENetLayer:
# string layout
# vector[string] children
# vector[string] parents
# void setAffinity(const string & target_affinity) except +
# void setParams(const map[string, string] & params_map) except +
# map[string, Blob.Ptr] getWeights() except +
# void setPrecision(string precision) except +
# vector[DataPtr] getOutData() except +
cdef cppclass OutputInfo:
vector[size_t] dims
string precision
string layout
void setPrecision(string precision) except +
cdef cppclass ProfileInfo:
string status
@ -78,19 +112,23 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
cdef cppclass IEExecNetwork:
vector[InferRequestWrap] infer_requests
IENetwork GetExecGraphInfo() except +
map[string, DataPtr] getInputs()
map[string, CDataPtr] getOutputs()
void exportNetwork(const string & model_file) except +
object getMetric(const string & metric_name)
object getConfig(const string & metric_name)
cdef cppclass IENetwork:
IENetwork() except +
IENetwork(const string &, const string &, bool ngraph_compatibility) except +
IENetwork(object) except +
IENetwork(const string &, const string &) except +
string name
size_t batch_size
string precision
map[string, vector[size_t]] inputs
const vector[pair[string, IENetLayer]] getLayers() except +
map[string, InputInfo] getInputs() except +
map[string, OutputInfo] getOutputs() except +
const vector[CNNLayerPtr] getLayers() except +
map[string, DataPtr] getInputs() except +
map[string, DataPtr] getOutputs() except +
void addOutput(string &, size_t) except +
void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except +
void setBatch(size_t size) except +
@ -100,6 +138,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
void setStats(map[string, map[string, vector[float]]] & stats) except +
map[string, map[string, vector[float]]] getStats() except +
void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
object getFunction() except +
cdef cppclass IEPlugin:
IEPlugin() except +
@ -128,6 +167,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
map[string, Version] getVersions(const string & deviceName) except +
unique_ptr[IEExecNetwork] loadNetwork(IENetwork network, const string deviceName,
const map[string, string] & config, int num_requests) except +
unique_ptr[IEExecNetwork] importNetwork(const string & modelFile, const string & deviceName,
const map[string, string] & config, int num_requests) except +
map[string, string] queryNetwork(IENetwork network, const string deviceName,
const map[string, string] & config) except +
void setConfig(const map[string, string] & config, const string & deviceName) except +
@ -1,39 +0,0 @@
# If the pyx file is a C++ file, we should specify that here.
set (CMAKE_INCLUDE_CURRENT_DIR ON)
set (TARGET_NAME "statistics_collector_api")
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/tools/statistics_collector)
set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
file(GLOB SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/*.pyx
)
set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX TRUE
)
include_directories (
${CMAKE_SOURCE_DIR}/samples/common
)
## Compatibility with Python 2.7, whose headers use the "register" specifier deprecated in newer C++ standards
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
add_definitions("-Wno-register")
endif()
cython_add_module (${TARGET_NAME} ${SOURCE})
set_target_properties (${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
target_link_libraries (${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
if(TARGET IE::statistics_collector_s)
target_link_libraries(${TARGET_NAME} PRIVATE IE::statistics_collector_s)
else()
target_link_libraries(${TARGET_NAME} PRIVATE statistics_collector_s)
endif()
# perform copy
ADD_CUSTOM_COMMAND (TARGET ${TARGET_NAME}
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/tools/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../__init__.py
COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/tools/statistics_collector/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py
)
@ -1,2 +0,0 @@
from .statistics_collector_api import *
__all__ = ['StatisticsCollector']
@ -1,8 +0,0 @@
from .cimport statistics_collector_c as C
from libcpp.string cimport string
cdef class StatisticsCollector:
cdef C.StatisticsCollector* _impl
cdef C.ct_preprocessingOptions ppOptions
cpdef void collectStatisticsToIR(self, str outModelName, str output_precision)
@ -1,25 +0,0 @@
#distutils: language=c++
from .cimport statistics_collector_c as C
cdef class StatisticsCollector:
def __cinit__(self,
deviceName: [str, bytes],
custom_cpu_library: [str, bytes],
custom_cldnn: [str, bytes],
modelFilePath: [str, bytes],
imagesPath: [str, bytes],
img_number: int,
batch: int,
progress: [str, bytes]):
self.ppOptions._pp_size = 0
self.ppOptions._pp_width = 0
self.ppOptions._pp_height = 0
self._impl = new C.StatisticsCollector(deviceName.encode(), custom_cpu_library.encode(), custom_cldnn.encode(), modelFilePath.encode(), imagesPath.encode(), img_number, batch, self.ppOptions, progress.encode())
cpdef void collectStatisticsToIR(self, str outModelName, str output_precision):
self._impl.collectStatisticsToIR(outModelName.encode(), output_precision.encode())
def __dealloc__(self):
if self._impl is not NULL:
del self._impl
@ -1,24 +0,0 @@
from libc.stddef cimport size_t
from libcpp.string cimport string
cdef extern from "<statistics_processor.hpp>":
cdef struct ct_preprocessingOptions:
string _pp_type
size_t _pp_size
size_t _pp_width
size_t _pp_height
cdef cppclass StatisticsCollector:
StatisticsCollector(const string& deviceName,
const string& custom_cpu_library,
const string& custom_cldnn,
const string& modelFilePath,
const string& imagesPath,
size_t img_number,
size_t batch,
const ct_preprocessingOptions& preprocessingOptions,
const string& progress) except +
void collectStatisticsToIR(const string& outModelName, const string& output_precision)
ct_preprocessingOptions ppOptions
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file ie_argmax_layer.hpp
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -12,9 +16,11 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for ArgMax layer
*/
class INFERENCE_ENGINE_API_CLASS(ArgMaxLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ArgMaxLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -83,6 +89,7 @@ public:
*/
ArgMaxLayer& setOutMaxVal(size_t size);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
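A sketch of consuming one of these deprecated builders while keeping the deprecation warnings silenced, mirroring the IE_SUPPRESS_DEPRECATED markers added above (the layer name and setter value are placeholders):

#include <builders/ie_argmax_layer.hpp>

IE_SUPPRESS_DEPRECATED_START
InferenceEngine::Builder::ArgMaxLayer makeArgMax() {
    InferenceEngine::Builder::ArgMaxLayer argMax("argmax");  // builder constructed with a name
    argMax.setOutMaxVal(0);
    return argMax;
}
IE_SUPPRESS_DEPRECATED_END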
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -12,9 +16,10 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for BatchNormalization layer
*/
class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public LayerDecorator {
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(BatchNormalizationLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -48,7 +53,7 @@ public:
* @param port Port with shapes
* @return reference to layer builder
*/
BatchNormalizationLayer& setPort(const Port &port);
BatchNormalizationLayer& setPort(const Port& port);
/**
* @brief Returns epsilon
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -12,9 +16,11 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for Clamp layer
*/
class INFERENCE_ENGINE_API_CLASS(ClampLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ClampLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -72,6 +78,7 @@ public:
*/
ClampLayer& setMaxValue(float maxValue);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -13,9 +17,11 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for Concat layer
*/
class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ConcatLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -73,6 +79,7 @@ public:
*/
ConcatLayer& setAxis(size_t axis);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -12,9 +16,11 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for Const layer
*/
class INFERENCE_ENGINE_API_CLASS(ConstLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ConstLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -63,6 +69,7 @@ public:
*/
const Blob::CPtr& getData() const;
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
@ -1,21 +1,27 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
* @brief The class represents a builder for ArgMax layer
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for Convolution layer
*/
class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ConvolutionLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -139,6 +145,7 @@ public:
*/
ConvolutionLayer& setOutDepth(size_t outDepth);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -13,9 +17,11 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for Crop layer
*/
class INFERENCE_ENGINE_API_CLASS(CropLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(CropLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -84,6 +90,7 @@ public:
*/
CropLayer& setOffset(const std::vector<size_t>& offsets);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -13,9 +17,11 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for CTCGreedyDecoder layer
*/
class INFERENCE_ENGINE_API_CLASS(CTCGreedyDecoderLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(CTCGreedyDecoderLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -73,7 +79,7 @@ public:
*/
CTCGreedyDecoderLayer& setCTCMergeRepeated(bool flag);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_convolution_layer.hpp>
@ -12,9 +16,11 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for Deconvolution layer
*/
class INFERENCE_ENGINE_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
public:
/**
* @brief The constructor creates a builder with the name
@ -32,6 +38,7 @@ public:
*/
explicit DeconvolutionLayer(const Layer::CPtr& layer);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine
@ -1,7 +1,11 @@
// Copyright (C) 2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_convolution_layer.hpp>
@ -12,9 +16,10 @@ namespace InferenceEngine {
namespace Builder {
/**
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for DeformableConvolution layer
*/
class INFERENCE_ENGINE_API_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
public:
/**
* @brief The constructor creates a builder with the name
@ -1,7 +1,11 @@
// Copyright (C) 2018-2019 Intel Corporation
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @file
*/
#pragma once
#include <builders/ie_layer_decorator.hpp>
@ -13,9 +17,12 @@ namespace InferenceEngine {
namespace Builder {
/**
* @brief The class represents a builder for ArgMax layer
* @deprecated Use ngraph API instead.
* @brief The class represents a builder for Detection Output layer
*/
class INFERENCE_ENGINE_API_CLASS(DetectionOutputLayer): public LayerDecorator {
IE_SUPPRESS_DEPRECATED_START
class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(DetectionOutputLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
@ -183,6 +190,7 @@ public:
*/
DetectionOutputLayer& setVariantEncodedInTarget(bool flag);
};
IE_SUPPRESS_DEPRECATED_END
} // namespace Builder
} // namespace InferenceEngine

Some files were not shown because too many files have changed in this diff Show More