Compare commits

...

10 Commits

Author SHA1 Message Date
Alexey Suhov
e206d06f18 Publishing 2019 R1.0.1 content 2019-04-30 18:55:07 +03:00
Viacheslav Matveichev
b235c73481 Merge pull request #129 from asuhov/2019-r1
Publishing 2019 R1 content
2019-04-13 01:02:28 +03:00
Alexey Suhov
72660e9a4d Publishing 2019 R1 content 2019-04-12 18:25:53 +03:00
Dmitry Kurtaev
669bee86e5 Add a section of how to link IE with CMake project (#99) 2019-03-14 13:13:27 +03:00
Alexey Suhov
17e66dc5a6 Added unit tests and readme for model optimizer (#79)
* added unit tests
* added readme for model optimizer
* added a list of supported IE plugins
2019-01-23 20:23:27 +03:00
openvino-pushbot
30594bb309 Update readme 2019-01-21 23:30:11 +03:00
Alexey Suhov
9de27f16bc Publishing R5 content (#72)
* Publishing R5 content

* Updated ade revision

* updated readme

* add possibility to build CPU plugin with Intel MKL package
2019-01-21 21:31:31 +03:00
Alexey Suhov
fbc7a4a710 updated readme files (#54) 2018-12-14 21:26:38 +03:00
RachelRen05
e5d4940a0f update the dependency file to support ubuntu 18.04 (#17)
* update dependency file to support ubuntu 18.04

* update dependency file to support ubuntu 18.04
2018-12-14 21:09:44 +03:00
Alexey Suhov
3600f36d7b updated install_dependencies.sh and readme for python api (#43)
* use absolute path in readme for python api
* Update install_dependencies.sh
2018-11-29 21:04:21 +03:00
3750 changed files with 403982 additions and 65979 deletions

View File

@@ -1,5 +1,5 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2018.R4-green.svg)](https://github.com/opencv/dldt/releases/tag/2018_R4)
[![Stable release](https://img.shields.io/badge/version-2019.R1-green.svg)](https://github.com/opencv/dldt/releases/tag/2019_R1)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)
This toolkit allows developers to deploy pre-trained deep learning models through a high-level C++ Inference Engine API integrated with application logic.

View File

@@ -1,12 +1,15 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 3.3)
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
project(InferenceEngine)
set(DEV_BUILD TRUE)
include(CTest)
## WA for problem with gtest submodule. It cannot detect uint32 type.
## remove Gtest submodule and these two lines together
include (CheckTypeSize)
@@ -18,7 +21,9 @@ endif()
option (OS_FOLDER "create OS dedicated folder in output" OFF)
if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l")
set (ARCH_FOLDER armv7l)
elseif("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set (ARCH_FOLDER intel64)
else()
set (ARCH_FOLDER ia32)
@@ -46,7 +51,6 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "")
debug_message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used")
set(CMAKE_BUILD_TYPE "Release")
endif()
message(STATUS "BUILD_CONFIGURATION: ${CMAKE_BUILD_TYPE}")
if(COVERAGE)
@@ -55,17 +59,38 @@ endif()
if (UNIX)
SET(LIB_DL ${CMAKE_DL_LIBS})
else()
endif()
set (OUTPUT_ROOT ${IE_MAIN_SOURCE_DIR})
if(NOT(UNIX))
if (WIN32)
#set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
#set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
endif()
include(os_flags)
#resolving dependencies for the project
include (dependencies)
set(CMAKE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX})
set(CMAKE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX})
if (WIN32)
# Support CMake multiconfiguration for Visual Studio build
set(IE_BUILD_POSTFIX $<$<CONFIG:Debug>:${IE_DEBUG_POSTFIX}>$<$<CONFIG:Release>:${IE_RELEASE_POSTFIX}>)
set(IE_BUILD_CONFIGURATION $<CONFIG>)
else ()
if (${CMAKE_BUILD_TYPE} STREQUAL "Debug" )
set(IE_BUILD_POSTFIX ${IE_DEBUG_POSTFIX})
else()
set(IE_BUILD_POSTFIX ${IE_RELEASE_POSTFIX})
endif()
set(IE_BUILD_CONFIGURATION ${CMAKE_BUILD_TYPE})
endif()
add_definitions(-DIE_BUILD_POSTFIX=\"${IE_BUILD_POSTFIX}\")
if(NOT(UNIX))
if (WIN32)
#set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
#set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
endif()
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set (CMAKE_LIBRARY_PATH ${OUTPUT_ROOT}/${BIN_FOLDER})
set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
@@ -75,20 +100,15 @@ if(NOT(UNIX))
set (LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER})
set (LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}) # compatibility issue: linux uses LIBRARY_OUTPUT_PATH, windows uses LIBRARY_OUTPUT_DIRECTORY
else ()
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib)
set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib)
set (CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE})
set (CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE})
set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE})
set (LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib)
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)
set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)
set (CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION})
set (CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION})
set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION})
set (LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)
set (LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}/lib)
endif()
include(os_flags)
#resolving dependencies for the project
include (dependencies)
if (APPLE)
set(CMAKE_MACOSX_RPATH 1)
endif(APPLE)
@@ -108,29 +128,35 @@ message (STATUS "IE_MAIN_SOURCE_DIR .................... " ${IE_MAIN_SOURCE_DIR}
message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})
message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID})
if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
include(sdl)
endif()
include(sdl)
set (CMAKE_POSITION_INDEPENDENT_CODE ON)
include (sanitizer)
include(CheckCXXCompilerFlag)
if(UNIX)
CHECK_CXX_COMPILER_FLAG("-fvisibility=hidden" COMPILER_SUPPORTS_VISIBILITY)
if (COMPILER_SUPPORTS_VISIBILITY)
#add_definitions(-fvisibility=hidden) todo: should be hidden? if so, define default visibility explicitly for each function
add_definitions(-fvisibility=default)
endif(COMPILER_SUPPORTS_VISIBILITY)
endif(UNIX)
include(cpplint)
add_subdirectory(src)
add_subdirectory(tests)
add_subdirectory(thirdparty)
if (ENABLE_SAMPLES_CORE)
set(InferenceEngine_DIR "${CMAKE_BINARY_DIR}")
set(InferenceEngine_DIR "${CMAKE_BINARY_DIR}")
#to be able to link
set (LIB_FOLDER ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib)
add_subdirectory(samples)
#to be able to link
set (LIB_FOLDER ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)
# gflags and format_reader targets are kept inside of samples directory and
# they must be built even if samples build is disabled (required for tests and tools).
add_subdirectory(samples)
file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
add_cpplint_target(sample_cpplint
FOR_SOURCES ${SAMPLES_SOURCES}
EXCLUDE_PATTERNS "thirdparty/*" "pugixml/*")
if (ENABLE_PYTHON)
add_subdirectory(ie_bridges/python)
endif()
add_cpplint_report_target()

View File

@@ -1,17 +1,34 @@
## Repository components
The Inference Engine can infer models in different formats with various input and output formats.
The open source version of Inference Engine includes the following plugins:
| PLUGIN | DEVICE TYPES |
| ---------------------| -------------|
| CPU plugin | Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE |
| GPU plugin | Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics |
| GNA plugin | Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver processor J5005, Intel® Celeron® processor J4005, Intel® Core™ i3-8121U processor |
| Heterogeneous plugin | Heterogeneous plugin enables computing for inference on one network on several Intel® devices. |
Inference Engine plugins for Intel® FPGA and Intel® Movidius™ Neural Compute Stick are distributed only in a binary form as a part of [Intel® Distribution of OpenVINO™](https://software.intel.com/en-us/openvino-toolkit).
## Build on Linux\* Systems
The software was validated on:
- Ubuntu\* 16.04 with default GCC\* 5.4.0
- CentOS\* 7.4 with default GCC\* 4.8.5
- Ubuntu\* 16.04 (64-bit) with default GCC\* 5.4.0
- CentOS\* 7.4 (64-bit) with default GCC\* 4.8.5
- [Intel® Graphics Compute Runtime for OpenCL™ Driver package 18.28.11080](https://github.com/intel/compute-runtime/releases/tag/18.28.11080).
### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.9 or higher
- GCC\* 4.8 or higher to build the Inference Engine
- Python 2.7 or higher for Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
```sh
cd dldt/inference-engine
git submodule init
git submodule update --recursive
```
@@ -28,19 +45,51 @@ The software was validated on:
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to OpenBLAS\* implementation, use `GEMM=OPENBLAS` option and `BLAS_INCLUDE_DIRS` and `BLAS_LIBRARIES` cmake options to specify path to OpenBLAS headers and library, for example use the following options on CentOS\*: `-DGEMM=OPENBLAS -DBLAS_INCLUDE_DIRS=/usr/include/openblas -DBLAS_LIBRARIES=/usr/lib64/libopenblas.so.0`
- To switch to optimized MKL-ML\* GEMM implementation, use `GEMM=MKL` and `MKLROOT` cmake options to specify path to unpacked MKL-ML with `include` and `lib` folders, for example use the following options: `-DGEMM=MKL -DMKLROOT=<path_to_MKL>`. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz)
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz)
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you already have installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE=`which python3.7` \
-DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.7m.so \
-DPYTHON_INCLUDE_DIR=/usr/include/python3.7
```
- To switch on/off the CPU and GPU plugins, use `cmake` options `-DENABLE_MKL_DNN=ON/OFF` and `-DENABLE_CLDNN=ON/OFF`.
5. Adding to your project
For CMake projects, set an environment variable `InferenceEngine_DIR`:
```sh
export InferenceEngine_DIR=/path/to/dldt/inference-engine/build/
```
Then you can find Inference Engine by `find_package`:
```cmake
find_package(InferenceEngine)
include_directories(${InferenceEngine_INCLUDE_DIRS})
target_link_libraries(${PROJECT_NAME} ${InferenceEngine_LIBRARIES} dl)
```
## Build on Windows\* Systems:
The software was validated on:
- Microsoft\* Windows\* 10 with Visual Studio 2017 and Intel® C++ Compiler 2018 Update 3
- Microsoft\* Windows\* 10 (64-bit) with Visual Studio 2017 and Intel® C++ Compiler 2018 Update 3
- [Intel® Graphics Driver for Windows* [24.20] driver package](https://downloadcenter.intel.com/download/27803/Graphics-Intel-Graphics-Driver-for-Windows-10?v=t).
### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.9 or higher
- [OpenBLAS\*](https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download) and [mingw64\* runtime dependencies](https://sourceforge.net/projects/openblas/files/v0.2.14/mingw64_dll.zip/download).
- [Intel® C++ Compiler](https://software.intel.com/en-us/intel-parallel-studio-xe) 18.0 to build the Inference Engine on Windows.
- Python 3.4 or higher for Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
@@ -59,15 +108,82 @@ The software was validated on:
5. In the `build` directory, run `cmake` to fetch project dependencies and generate a Visual Studio solution:
```sh
cd build
cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" -DOS_FOLDER=ON ^
cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
-DCMAKE_BUILD_TYPE=Release ^
-DICCLIB="C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\compiler\lib" ..
```
- Internal JIT GEMM implementation is used by default.
- To switch to OpenBLAS GEMM implementation, use -DGEMM=OPENBLAS cmake option and specify path to OpenBLAS using `-DBLAS_INCLUDE_DIRS=<OPENBLAS_DIR>\include` and `-DBLAS_LIBRARIES=<OPENBLAS_DIR>\lib\libopenblas.dll.a` options. Prebuilt OpenBLAS\* package can be downloaded [here](https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download), mingw64* runtime dependencies [here](https://sourceforge.net/projects/openblas/files/v0.2.14/mingw64_dll.zip/download)
- To switch to optimized MKL-ML GEMM implementation, use `GEMM=MKL` and `MKLROOT` cmake options to specify path to unpacked MKL-ML with `include` and `lib` folders, for example use the following options: `-DGEMM=MKL -DMKLROOT=<path_to_MKL>`. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_win_2019.0.1.20180928.zip)
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_win_2019.0.1.20180928.zip)
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you already have installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" ^
-DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" ^
-DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include"
```
6. Build generated solution in Visual Studio 2017 or run `cmake --build . --config Release` to build from the command line.
7. Before running the samples, add paths to TBB and OpenCV binaries used for the build to the %PATH% environment variable. By default, TBB binaries are downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/lib` folder, OpenCV binaries - to the `<dldt_repo>/inference-engine/temp/opencv_4.1.0/bin` folder.
### Building Inference Engine with Ninja
```sh
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
```
## Build on macOS\* Systems
The software was validated on:
- macOS\* 10.14, 64-bit
### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.9 or higher
- Clang\* compiler from Xcode\* 10.1
- Python\* 3.4 or higher for the Inference Engine Python API wrapper
### Build Steps
1. Clone submodules:
```sh
cd dldt/inference-engine
git submodule init
git submodule update --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the project root folder.
3. Create a build folder:
```sh
mkdir build
```
4. Inference Engine uses a CMake-based build system. In the created `build` directory, run `cmake` to fetch project dependencies and create Unix makefiles, then run `make` to build the project:
```sh
cmake -DCMAKE_BUILD_TYPE=Release ..
make -j16
```
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17.1/mklml_mac_2019.0.1.20180928.tgz)
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE=/Library/Frameworks/Python.framework/Versions/3.7/bin/python3.7 \
-DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib \
-DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m
```
---
\* Other names and brands may be claimed as the property of others.

View File

@@ -0,0 +1,37 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Module to locate the GNA (Gaussian & Neural Accelerator) libraries under ${GNA}.
# Expects the GNA variable to point at an unpacked GNA package.
# Sets:
#   libGNA_INCLUDE_DIRS - GNA public headers
#   libGNA_LIBRARY      - the GNA API library
#   libGNA_LIBRARIES    - all libraries to link against (API + kernel lib on Linux)
if (WIN32)
    set(GNA_PLATFORM_DIR win64)
    set(GNA_LIB_DIR x64)
    set(GNA_LIB gna)
elseif (UNIX)
    set(GNA_PLATFORM_DIR linux)
    set(GNA_LIB_DIR lib)
    set(GNA_LIB gna_api)
    # On Linux the implementation is split into an API and a kernel library.
    set(GNA_KERNEL_LIB gna_kernel)
else ()
    message(FATAL_ERROR "GNA is not supported on this platform; only Linux and Windows are supported")
endif ()

find_library(GNA_API_LIBRARY
        NAMES ${GNA_LIB}
        HINTS "${GNA}/${GNA_PLATFORM_DIR}/${GNA_LIB_DIR}")

set(libGNA_INCLUDE_DIRS "${GNA}/${GNA_PLATFORM_DIR}/include")
set(libGNA_LIBRARY ${GNA_API_LIBRARY})

if (UNIX)
    # The kernel library is expected next to the API library.
    find_library(GNA_KERNEL_LIBRARY
            NAMES ${GNA_KERNEL_LIB}
            HINTS "${GNA}/${GNA_PLATFORM_DIR}/${GNA_LIB_DIR}")
endif ()

# On Windows GNA_KERNEL_LIBRARY is unset and expands to nothing here.
set(libGNA_LIBRARIES ${libGNA_LIBRARY} ${GNA_KERNEL_LIBRARY})

View File

@@ -0,0 +1,10 @@
# CMake toolchain file for cross-compiling to 32-bit ARM (armv7l) Linux
# using the arm-linux-gnueabihf GCC toolchain.
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR armv7l)
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)
# Search programs on the host only; search libraries, headers and packages
# only inside the target root (CMAKE_FIND_ROOT_PATH), never on the host.
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)

View File

@@ -1,12 +1,10 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
include("features")
include("mode")
if (THREADING STREQUAL "OMP")
include("omp")
endif()
include("itt")
#64 bits platform
@@ -28,17 +26,15 @@ else()
SET(ENABLE_MKL_DNN OFF)
endif()
#apple specific
if (APPLE)
set(ENABLE_GNA OFF)
set(ENABLE_CLDNN OFF)
endif()
#minGW specific - under wine no support for downloading file and applying them using git
if (WIN32)
enable_omp()
if (MINGW)
SET(ENABLE_CLDNN OFF) # dont have mingw dll for linking
set(ENABLE_SAMPLES OFF)
@@ -61,7 +57,7 @@ if (LINUX)
endif ()
if (NOT ENABLE_MKL_DNN)
set(GEMM OPENBLAS)
set(ENABLE_MKL OFF)
endif()
#next section sets defines to be accessible in c++/c code for certain features
@@ -69,10 +65,6 @@ if (ENABLE_PROFILING_RAW)
add_definitions(-DENABLE_PROFILING_RAW=1)
endif()
if (ENABLE_GTEST_PATCHES)
add_definitions(-DENABLE_GTEST_PATCHES=1)
endif()
if (ENABLE_CLDNN)
add_definitions(-DENABLE_CLDNN=1)
endif()
@@ -81,16 +73,12 @@ if (ENABLE_MKL_DNN)
add_definitions(-DENABLE_MKL_DNN=1)
endif()
if (ENABLE_STRESS_UNIT_TESTS)
add_definitions(-DENABLE_STRESS_UNIT_TESTS=1)
if (ENABLE_GNA)
add_definitions(-DENABLE_GNA)
endif()
if (ENABLE_SEGMENTATION_TESTS)
add_definitions(-DENABLE_SEGMENTATION_TESTS=1)
endif()
if (ENABLE_OBJECT_DETECTION_TESTS)
add_definitions(-DENABLE_OBJECT_DETECTION_TESTS=1)
if (ENABLE_SAMPLES)
set (ENABLE_SAMPLES_CORE ON)
endif()
if (DEVELOPMENT_PLUGIN_MODE)
@@ -112,9 +100,4 @@ if (VERBOSE_BUILD)
set(CMAKE_VERBOSE_MAKEFILE ON)
endif()
if (THREADING STREQUAL "TBB" OR THREADING STREQUAL "SEQ")
set(ENABLE_INTEL_OMP OFF)
message(STATUS "ENABLE_INTEL_OMP should be disabled if THREADING is TBB or Sequential. ENABLE_INTEL_OMP option is " ${ENABLE_INTEL_OMP})
endif()
print_enabled_features()
print_enabled_features()

View File

@@ -1,6 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
if(DEFINED IE_MAIN_SOURCE_DIR AND TARGET inference_engine)
set(InferenceEngine_INCLUDE_DIRS ${IE_MAIN_SOURCE_DIR}/include)
set(InferenceEngine_LIBRARIES inference_engine)

View File

@@ -0,0 +1,162 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# cpplint integration setup: the check requests Python 2.7 exactly.
if(ENABLE_CPPLINT)
find_package(PythonInterp 2.7 EXACT)
if(NOT PYTHONINTERP_FOUND)
# Degrade gracefully: warn and disable the check instead of failing configure.
message(WARNING "Python was not found (required for cpplint check)")
set(ENABLE_CPPLINT OFF)
endif()
endif()
if(ENABLE_CPPLINT)
# Umbrella target aggregating every per-target cpplint check target.
add_custom_target(cpplint_all ALL)
# INTERNAL cache entry: accumulates per-file output paths across
# add_cpplint_target() calls so add_cpplint_report_target() can merge them.
set(CPPLINT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All cpplint output files")
endif()
# add_cpplint_target(<name> [FOR_TARGETS <tgt>...] [FOR_SOURCES <src>...]
#                    [EXCLUDE_PATTERNS <regex>...])
#
# Creates a target <name> that runs cpplint over the given sources plus the
# sources of the given targets. One invocation per file, with a per-file
# result cached in the binary dir so only changed files are re-checked.
# No-op when ENABLE_CPPLINT is off.
function(add_cpplint_target TARGET_NAME)
    if(NOT ENABLE_CPPLINT)
        return()
    endif()

    cmake_parse_arguments(lint "" "" "FOR_TARGETS;FOR_SOURCES;EXCLUDE_PATTERNS" ${ARGN})

    # Expand FOR_TARGETS into their source file lists.
    foreach(tgt IN LISTS lint_FOR_TARGETS)
        get_target_property(tgt_sources "${tgt}" SOURCES)
        list(APPEND lint_FOR_SOURCES ${tgt_sources})
    endforeach()
    list(REMOVE_DUPLICATES lint_FOR_SOURCES)

    set(produced_files "")
    foreach(src IN LISTS lint_FOR_SOURCES)
        # Skip any source matching one of the exclude regexes.
        set(skip_this FALSE)
        foreach(pattern IN LISTS lint_EXCLUDE_PATTERNS)
            if(src MATCHES "${pattern}")
                set(skip_this TRUE)
                break()
            endif()
        endforeach()
        if(skip_this)
            continue()
        endif()

        # Mirror the source tree under <build>/cpplint with one ".cpplint"
        # result file per source; ".." is rewritten so out-of-tree sources
        # cannot escape the mirror directory.
        file(RELATIVE_PATH src_rel "${CMAKE_CURRENT_SOURCE_DIR}" "${src}")
        set(result_file "${CMAKE_CURRENT_BINARY_DIR}/cpplint/${src_rel}.cpplint")
        string(REPLACE ".." "__" result_file "${result_file}")
        get_filename_component(result_dir "${result_file}" DIRECTORY)
        file(MAKE_DIRECTORY "${result_dir}")

        # The actual check runs in script mode so the cpplint invocation and
        # its output handling live in cmake/cpplint_run.cmake.
        add_custom_command(
            OUTPUT
                "${result_file}"
            COMMAND
                "${CMAKE_COMMAND}"
                -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
                -D "CPPLINT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py"
                -D "INPUT_FILE=${src}"
                -D "OUTPUT_FILE=${result_file}"
                -D "WORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}"
                -D "SKIP_RETURN_CODE=${ENABLE_CPPLINT_REPORT}"
                -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake"
            DEPENDS
                "${src}"
                "${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py"
                "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake"
            COMMENT
                "[cpplint] ${src}"
            VERBATIM)

        list(APPEND produced_files "${result_file}")
    endforeach()

    # Publish this target's outputs for add_cpplint_report_target().
    set(CPPLINT_ALL_OUTPUT_FILES
        ${CPPLINT_ALL_OUTPUT_FILES} ${produced_files}
        CACHE INTERNAL
        "All cpplint output files")

    add_custom_target(${TARGET_NAME} ALL
        DEPENDS ${produced_files}
        COMMENT "[cpplint] ${TARGET_NAME}")

    # Make the checked targets depend on their lint target (empty list = no-op).
    foreach(tgt IN LISTS lint_FOR_TARGETS)
        add_dependencies(${tgt} ${TARGET_NAME})
    endforeach()

    add_dependencies(cpplint_all ${TARGET_NAME})
endfunction()
# Creates the "cpplint_report" target: merges all per-file cpplint outputs
# collected in CPPLINT_ALL_OUTPUT_FILES, converts the result to cppcheck-style
# XML, and renders an HTML report under <IE_MAIN_SOURCE_DIR>/report/cpplint.
# No-op unless both ENABLE_CPPLINT and ENABLE_CPPLINT_REPORT are enabled.
function(add_cpplint_report_target)
if(NOT ENABLE_CPPLINT OR NOT ENABLE_CPPLINT_REPORT)
return()
endif()
# Step 1: concatenate every per-file cpplint output into one file.
set(cpplint_output_file "${CMAKE_BINARY_DIR}/cpplint/final_output.cpplint")
add_custom_command(
OUTPUT
"${cpplint_output_file}"
COMMAND
"${CMAKE_COMMAND}"
-D "FINAL_OUTPUT_FILE=${cpplint_output_file}"
-D "OUTPUT_FILES=${CPPLINT_ALL_OUTPUT_FILES}"
-P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake"
DEPENDS
${CPPLINT_ALL_OUTPUT_FILES}
"${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake"
COMMENT
"[cpplint] Merge all output files"
VERBATIM)
# Step 2: convert the merged text output into cppcheck-compatible XML.
set(cppcheck_output_file "${CMAKE_BINARY_DIR}/cpplint/cpplint-cppcheck-result.xml")
add_custom_command(
OUTPUT
"${cppcheck_output_file}"
COMMAND
"${CMAKE_COMMAND}"
-D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
-D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
-D "INPUT_FILE=${cpplint_output_file}"
-D "OUTPUT_FILE=${cppcheck_output_file}"
-P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake"
DEPENDS
${cpplint_output_file}
"${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
"${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake"
COMMENT
"[cpplint] Convert to cppcheck XML format"
VERBATIM)
# Step 3: render the XML as an HTML report (cppcheck-htmlreport.py).
set(report_dir "${IE_MAIN_SOURCE_DIR}/report/cpplint")
set(html_output_file "${report_dir}/index.html")
add_custom_command(
OUTPUT
"${html_output_file}"
COMMAND
"${CMAKE_COMMAND}"
-D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
-D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
-D "INPUT_FILE=${cppcheck_output_file}"
-D "REPORT_DIR=${report_dir}"
-D "SOURCE_DIR=${IE_MAIN_SOURCE_DIR}"
-D "TITLE=${CMAKE_PROJECT_NAME}"
-P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake"
DEPENDS
"${cppcheck_output_file}"
"${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
"${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake"
COMMENT
"[cpplint] Generate HTML report"
VERBATIM)
# Report generation is on demand: the target is not added to ALL.
add_custom_target(cpplint_report
DEPENDS "${html_output_file}"
COMMENT "[cpplint] Generate report")
endfunction()

View File

@@ -0,0 +1,30 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Script mode (cmake -P): generate the cpplint HTML report.
# Expected -D variables: PYTHON_EXECUTABLE, CONVERT_SCRIPT, INPUT_FILE,
# REPORT_DIR, SOURCE_DIR, TITLE.

# Start from a clean report directory.
if(EXISTS "${REPORT_DIR}")
    file(REMOVE_RECURSE "${REPORT_DIR}")
endif()
file(MAKE_DIRECTORY "${REPORT_DIR}")

# cppcheck-htmlreport.py renders the cppcheck-style XML into HTML pages.
execute_process(
    COMMAND
        "${PYTHON_EXECUTABLE}"
        "${CONVERT_SCRIPT}"
        "--file=${INPUT_FILE}"
        "--report-dir=${REPORT_DIR}"
        "--source-dir=${SOURCE_DIR}"
        "--title=${TITLE}")

# Change cppcheck things to cpplint.
# NOTE: the content must be expanded quoted — an unquoted ${cur_file_content}
# is treated as a list, silently dropping every ';' from the HTML (entities
# like &quot; would break) and failing outright on an empty file.
file(READ "${REPORT_DIR}/index.html" cur_file_content)
string(REPLACE "Cppcheck" "cpplint" cur_file_content "${cur_file_content}")
string(REPLACE "a tool for static C/C++ code analysis" "an open source lint-like tool from Google" cur_file_content "${cur_file_content}")
string(REPLACE "http://cppcheck.sourceforge.net" "http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py" cur_file_content "${cur_file_content}")
string(REPLACE "IRC: <a href=\"irc://irc.freenode.net/cppcheck\">irc://irc.freenode.net/cppcheck</a>" " " cur_file_content "${cur_file_content}")
file(WRITE "${REPORT_DIR}/index.html" "${cur_file_content}")

View File

@@ -0,0 +1,11 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Script mode (cmake -P): concatenate every file listed in OUTPUT_FILES into
# FINAL_OUTPUT_FILE, separating the chunks with newlines.
set(merged_content "")
foreach(chunk_path IN LISTS OUTPUT_FILES)
    file(READ "${chunk_path}" chunk)
    string(APPEND merged_content "${chunk}\n")
endforeach()
file(WRITE "${FINAL_OUTPUT_FILE}" "${merged_content}")

View File

@@ -0,0 +1,37 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Script mode (cmake -P): run cpplint.py on a single source file and store the
# escaped output. Expected -D variables: PYTHON_EXECUTABLE, CPPLINT_SCRIPT,
# INPUT_FILE, OUTPUT_FILE, WORKING_DIRECTORY, SKIP_RETURN_CODE.

file(REMOVE "${OUTPUT_FILE}")

execute_process(
    COMMAND
        "${PYTHON_EXECUTABLE}"
        "${CPPLINT_SCRIPT}"
        "--linelength=160"
        "--counting=detailed"
        "--filter=-readability/fn_size"
        "${INPUT_FILE}"
    WORKING_DIRECTORY "${WORKING_DIRECTORY}"
    RESULT_VARIABLE result
    OUTPUT_VARIABLE output
    ERROR_VARIABLE output)

# Display the cpplint output to console (to parse it from an IDE)
message("${output}")

# Store cpplint output to file, escaping XML-special characters.
# '&' must be replaced FIRST — otherwise the '&' introduced by the other
# replacements (&quot; &lt; ...) would itself be escaped into '&amp;...'.
# The variable is expanded quoted so empty output and embedded ';' survive.
string(REPLACE "&" "&amp\;" output "${output}")
string(REPLACE "\"" "&quot\;" output "${output}")
string(REPLACE "<" "&lt\;" output "${output}")
string(REPLACE ">" "&gt\;" output "${output}")
string(REPLACE "'" "&apos\;" output "${output}")
file(WRITE "${OUTPUT_FILE}" "${output}")

if(NOT SKIP_RETURN_CODE)
    # Pass through the cpplint return code
    if(NOT result EQUAL 0)
        message(FATAL_ERROR "[cpplint] Code style check failed for : ${INPUT_FILE}")
    endif()
endif()

View File

@@ -0,0 +1,12 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Script mode (cmake -P): convert the merged cpplint output to cppcheck XML.
# Expected -D variables: PYTHON_EXECUTABLE, CONVERT_SCRIPT, INPUT_FILE, OUTPUT_FILE.
execute_process(
COMMAND
"${PYTHON_EXECUTABLE}"
"${CONVERT_SCRIPT}"
# INPUT_FILE/OUTPUT_FILE/ERROR_FILE below are execute_process() keywords, not
# arguments of COMMAND: the converter reads the cpplint text on stdin, and its
# stdout and stderr both go to OUTPUT_FILE (same file, so they are merged).
INPUT_FILE "${INPUT_FILE}"
OUTPUT_FILE "${OUTPUT_FILE}"
ERROR_FILE "${OUTPUT_FILE}")

View File

@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
function (debug_message)
if (VERBOSE_BUILD)
message(${ARGV})
@@ -67,3 +65,8 @@ function (log_rpath component lib_path)
log_rpath_remove_top(${component} TRUE ${lib_path} TRUE)
endfunction()
# Just wrapping of the original message() function to make this macro known during IE build.
# This macro is redefined (with additional checks) within the InferenceEngineConfig.cmake file.
macro(ext_message TRACE_LEVEL)
message(${TRACE_LEVEL} "${ARGN}")
endmacro()

View File

@@ -1,9 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required(VERSION 2.8)
cmake_policy(SET CMP0054 NEW)
#features trigger supported by build system
@@ -14,7 +13,7 @@ include(debug)
include(dependency_solver)
#prepare temporary folder
if (DEFINED ENV{${DL_SDK_TEMP}})
if (DEFINED ENV{${DL_SDK_TEMP}} AND NOT $ENV{${DL_SDK_TEMP}} STREQUAL "")
if (WIN32)
string(REPLACE "\\" "\\\\" TEMP $ENV{${DL_SDK_TEMP}})
else(WIN32)
@@ -38,9 +37,6 @@ else()
set(MODELS_BRANCH "master")
endif()
set(MODELS_PATH "${TEMP}/models")
debug_message(STATUS "MODELS_PATH=" ${MODELS_PATH})
## enable cblas_gemm from OpenBLAS package
if (GEMM STREQUAL "OPENBLAS")
if(NOT BLAS_LIBRARIES OR NOT BLAS_INCLUDE_DIRS)
@@ -59,10 +55,12 @@ if (GEMM STREQUAL "MKL")
if(NOT MKLROOT)
message(FATAL_ERROR "MKLROOT not found: install MKL and set -DMKLROOT=<path_to_MKL>")
endif()
set(MKL ${MKLROOT})
debug_message(STATUS "mkl_ml=" ${MKLROOT})
endif ()
if (ENABLE_INTEL_OMP)
## Intel OMP package
if (THREADING STREQUAL "OMP")
if (WIN32)
RESOLVE_DEPENDENCY(OMP
ARCHIVE_WIN "iomp.zip"
@@ -75,67 +73,116 @@ elseif(LINUX)
TARGET_PATH "${TEMP}/omp"
ENVIRONMENT "OMP"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
else(APPLE)
RESOLVE_DEPENDENCY(OMP
ARCHIVE_MAC "iomp_20190130_mac.tgz"
TARGET_PATH "${TEMP}/omp"
ENVIRONMENT "OMP"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
endif()
log_rpath_from_dir(OMP "${OMP}/lib")
debug_message(STATUS "intel_omp=" ${OMP})
endif ()
#TBB package
## TBB package
if (THREADING STREQUAL "TBB")
if (WIN32)
#TODO: add target_path to be platform specific as well, to avoid following if
RESOLVE_DEPENDENCY(TBB
ARCHIVE_WIN "tbb2018_20180618_win.zip" #TODO: windows zip archive created incorrectly using old name for folder
ARCHIVE_WIN "tbb2019_20181010_win.zip" #TODO: windows zip archive created incorrectly using old name for folder
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
elseif(LINUX)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_LIN "tbb2018_20180618_lin.tgz"
ARCHIVE_LIN "tbb2019_20181010_lin.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT")
else(APPLE)
RESOLVE_DEPENDENCY(TBB
ARCHIVE_MAC "tbb2019_20190414_mac.tgz"
TARGET_PATH "${TEMP}/tbb"
ENVIRONMENT "TBBROOT"
VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
endif()
set(TBB_INCLUDE_DIRS "${TBB}/include")
find_path(TBB_INCLUDE_DIRS tbb/tbb.h)
find_library(TBB_LIBRARIES_RELEASE tbb HINTS "${TBB}/lib")
if (TBB_INCLUDE_DIRS AND TBB_LIBRARIES_RELEASE)
log_rpath_from_dir(TBB "${TBB}/lib")
else()
message("FATAL_ERROR" "TBB is unset")
endif()
log_rpath_from_dir(TBB "${TBB}/lib")
debug_message(STATUS "tbb=" ${TBB})
endif ()
if (ENABLE_OPENCV)
if (WIN32)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_WIN "opencv_4.0.0-0256.zip"
TARGET_PATH "${TEMP}/opencv_4.0.0"
ARCHIVE_WIN "opencv_4.1.0-0437.zip"
TARGET_PATH "${TEMP}/opencv_4.1.0"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "\\opencv_4.0.0\\bin")
log_rpath_from_dir(OPENCV "\\opencv_4.1.0\\bin")
set( ENV{OpenCV_DIR} ${OPENCV}/cmake )
elseif(APPLE)
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_MAC "opencv_4.1.0-0437_osx.tar.xz"
TARGET_PATH "${TEMP}/opencv_4.1.0_osx"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "opencv_4.1.0_osx/lib")
set( ENV{OpenCV_DIR} ${OPENCV}/cmake )
elseif(LINUX)
if (${LINUX_OS_NAME} STREQUAL "Ubuntu 16.04")
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv_4.0.0-0256_ubuntu16.tgz"
TARGET_PATH "${TEMP}/opencv_4.0.0_ubuntu"
ARCHIVE_LIN "opencv_4.1.0-0437_ubuntu16.tar.xz"
TARGET_PATH "${TEMP}/opencv_4.1.0_ubuntu16"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "opencv_4.0.0_ubuntu/lib")
log_rpath_from_dir(OPENCV "opencv_4.1.0_ubuntu16/lib")
elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 18.04")
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv_4.1.0-0437_ubuntu18.tar.xz"
TARGET_PATH "${TEMP}/opencv_4.1.0_ubuntu18"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "opencv_4.1.0_ubuntu18/lib")
elseif (${LINUX_OS_NAME} STREQUAL "CentOS 7")
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv_4.0.0-0256_centos.tgz"
TARGET_PATH "${TEMP}/opencv_4.0.0_centos"
ARCHIVE_LIN "opencv_4.1.0-0437_centos7.tar.xz"
TARGET_PATH "${TEMP}/opencv_4.1.0_centos"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "opencv_4.0.0_centos/lib")
log_rpath_from_dir(OPENCV "opencv_4.1.0_centos/lib")
elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "armv7l" AND
(${LINUX_OS_NAME} STREQUAL "Debian 9" OR
${LINUX_OS_NAME} STREQUAL "Raspbian 9"))
RESOLVE_DEPENDENCY(OPENCV
ARCHIVE_LIN "opencv_4.1.0-0437_debian9arm.tar.xz"
TARGET_PATH "${TEMP}/opencv_4.1.0_debian9arm"
ENVIRONMENT "OpenCV_DIR"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
log_rpath_from_dir(OPENCV "opencv_4.1.0_debian9arm/lib")
endif()
set( ENV{OpenCV_DIR} ${OPENCV}/cmake )
endif()
debug_message(STATUS "opencv=" ${OPENCV})
endif()
if (THREADING STREQUAL "OMP")
include(omp)
endif ()
include(ie_parallel)
if (ENABLE_GNA)
RESOLVE_DEPENDENCY(GNA
ARCHIVE_UNIFIED "gna_20181120.zip"
TARGET_PATH "${TEMP}/gna")
endif()
configure_file(
"${CMAKE_SOURCE_DIR}/cmake/share/InferenceEngineConfig.cmake.in"
"${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake"
@ONLY)
configure_file(
"${CMAKE_SOURCE_DIR}/cmake/share/InferenceEngineConfig-version.cmake.in"
"${CMAKE_BINARY_DIR}/share/InferenceEngineConfig-version.cmake"
COPYONLY)
configure_file(
"${CMAKE_SOURCE_DIR}/cmake/ie_parallel.cmake"
"${CMAKE_BINARY_DIR}/share/ie_parallel.cmake"
COPYONLY)

View File

@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
include ("download")
function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHIVE_WIN ARCHIVE_LIN ARCHIVE_MAC TARGET_PATH FOLDER ENVIRONMENT)
@@ -15,7 +13,7 @@ function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHI
if (NOT DEFINED HAS_ENV)
if (ARCHIVE)
#TODO: check wether this is platform specific binary with same name per or it is in common folder
#TODO: check whether this is platform specific binary with same name per or it is in common folder
DownloadAndExtract(${COMPONENT} ${ARCHIVE} ${TARGET_PATH} result_path ${FOLDER})
else()
DownloadAndExtractPlatformSpecific(${COMPONENT} ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC} ${TARGET_PATH} result_path ${FOLDER})
@@ -130,11 +128,3 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
endif()
endfunction(RESOLVE_DEPENDENCY)
# Fetch (or locate) a model archive and report its location relative to
# ${MODELS_PATH}, prefixed with ".", through the caller's variable.
#
#   network            - model subdirectory name under ${MODELS_PATH}
#   archive            - archive file name inside models_archives/
#   network_model_path - name of the variable to set in the parent scope
function (resolve_model_dependency network archive network_model_path)
    RESOLVE_DEPENDENCY(${network_model_path}
                       ARCHIVE "models_archives/${archive}"
                       TARGET_PATH "${MODELS_PATH}/${network}")
    # Turn the absolute location into "./<path inside MODELS_PATH>".
    string(REPLACE ${MODELS_PATH} "" model_rel_path ${${network_model_path}})
    set(${network_model_path} ".${model_rel_path}" PARENT_SCOPE)
endfunction()

View File

@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
function (Download from to fatal result output)
if((NOT EXISTS "${to}"))

View File

@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
function (DownloadAndApply URL apply_to)
if (EXISTS ${apply_to})

View File

@@ -1,23 +1,22 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
include (FindWget)
function (DownloadAndCheck from to fatal result)
set(status_res "ON")
set(output 1)
set(status_res "ON")
set(output 1)
get_filename_component(download_dir ${to} DIRECTORY)
if (NOT EXISTS ${download_dir})
file(MAKE_DIRECTORY ${download_dir})
endif()
get_filename_component(download_dir ${to} DIRECTORY)
if (NOT EXISTS ${download_dir})
file(MAKE_DIRECTORY ${download_dir})
endif()
if(NOT EXISTS "${to}")
if(NOT EXISTS "${to}")
if (${from} MATCHES "(http:)|(https:)|(ftp:)")
message(STATUS "Downloading from ${from} to ${to} ...")
find_program(aria2c "aria2c")
if (${aria2c} STREQUAL "aria2c-NOTFOUND")
if (NOT ${WGET_FOUND})
@@ -48,9 +47,13 @@ function (DownloadAndCheck from to fatal result)
status_code: ${status_code}")
endif()
endif()
else()
message(STATUS "Copying from local folder ${from} to ${to} ... ")
file(COPY ${from} DESTINATION ${download_dir})
endif()
endif()
file(REMOVE ${to}.md5)
set(${result} "${status_res}" PARENT_SCOPE)
endfunction(DownloadAndCheck)
endfunction(DownloadAndCheck)

View File

@@ -1,9 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
include ("extract")
include ("download_and_check")
@@ -120,12 +119,12 @@ function (DownloadOrExtractInternal URL archive_path unpacked_path folder fattal
if (ENABLE_UNSAFE_LOCATIONS)
ExtractWithVersion(${URL} ${archive_path} ${unpacked_path} ${folder} result)
if(NOT ${result})
DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
endif()
else()
debug_message("archive found on FS : ${archive_path}, however we cannot check it's checksum and think that it is invalid")
file(REMOVE_RECURSE "${archive_path}")
DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
endif()
@@ -144,7 +143,11 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
set (status "ON")
set (on_master FALSE)
set (URL "https://download.01.org/openvinotoolkit/2018_R4/dldt/inference_engine/${RELATIVE_URL}")
if(DEFINED ENV{IE_PATH_TO_DEPS})
set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
else()
set(URL "https://download.01.org/opencv/2019/openvinotoolkit/R1/inference_engine/${RELATIVE_URL}")
endif()
#no message on recursive calls
if (${use_alternatives})

View File

@@ -1,17 +1,15 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
function (extract archive_path unpacked_path folder result)
# Slurped from a generated extract-TARGET.cmake file.
if (NOT EXISTS ${unpacked_path})
get_filename_component(unpacked_dir ${unpacked_path} DIRECTORY)
file(MAKE_DIRECTORY ${unpacked_path})
message(STATUS "extracting...
src='${archive_path}'
dst='${unpacked_path}'")

View File

@@ -1,16 +1,16 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 2.8)
include ("options")
include (options)
#this options are aimed to optimize build time on development system
#backed targets
ie_option (ENABLE_GNA "GNA support for inference engine" ON)
ie_option (ENABLE_MKL_DNN "MKL-DNN plugin for inference engine" ON)
ie_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON)
@@ -19,26 +19,57 @@ ie_option (ENABLE_PROFILING_ITT "ITT tracing of IE and plugins internals" ON)
ie_option (ENABLE_PROFILING_RAW "Raw counters profiling (just values, no start/stop time or timeline)" OFF)
#
# "MKL-DNN library might use MKL-ML or OpenBLAS for gemm tasks: MKL|OPENBLAS|JIT"
if (NOT GEMM STREQUAL "MKL" AND NOT GEMM STREQUAL "OPENBLAS" AND NOT GEMM STREQUAL "JIT")
if (NOT GEMM STREQUAL "MKL"
AND NOT GEMM STREQUAL "OPENBLAS"
AND NOT GEMM STREQUAL "JIT")
set (GEMM "JIT")
message(STATUS "GEMM should be set to MKL|OPENBLAS|JIT. Default option is " ${GEMM})
message(STATUS "GEMM should be set to MKL, OPENBLAS or JIT. Default option is " ${GEMM})
endif()
set(GEMM "${GEMM}" CACHE STRING "Gemm implementation" FORCE)
list (APPEND IE_OPTIONS GEMM)
# "MKL-DNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ"
if (NOT THREADING STREQUAL "TBB" AND NOT THREADING STREQUAL "OMP" AND NOT THREADING STREQUAL "SEQ")
set (THREADING "OMP")
message(STATUS "THREADING should be set to TBB|OMP|SEQ. Default option is " ${THREADING})
if (NOT THREADING STREQUAL "TBB"
AND NOT THREADING STREQUAL "OMP"
AND NOT THREADING STREQUAL "SEQ")
set (THREADING "TBB")
message(STATUS "THREADING should be set to TBB, OMP or SEQ. Default option is " ${THREADING})
endif()
set(THREADING "${THREADING}" CACHE STRING "Threading" FORCE)
list (APPEND IE_OPTIONS THREADING)
ie_option (ENABLE_INTEL_OMP "MKL-DNN library based on Intel OMP implementation" ON)
# Enable postfixes for Debug/Release builds
set (IE_DEBUG_POSTFIX_WIN "d")
set (IE_RELEASE_POSTFIX_WIN "")
set (IE_DEBUG_POSTFIX_LIN "")
set (IE_RELEASE_POSTFIX_LIN "")
set (IE_DEBUG_POSTFIX_MAC "d")
set (IE_RELEASE_POSTFIX_MAC "")
if (WIN32)
set (IE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX_WIN})
set (IE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX_WIN})
elseif(APPLE)
set (IE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX_MAC})
set (IE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX_MAC})
else()
set (IE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX_LIN})
set (IE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX_LIN})
endif()
set(IE_DEBUG_POSTFIX "${IE_DEBUG_POSTFIX}" CACHE STRING "Debug postfix" FORCE)
list (APPEND IE_OPTIONS IE_DEBUG_POSTFIX)
set(IE_RELEASE_POSTFIX "${IE_RELEASE_POSTFIX}" CACHE STRING "Release postfix" FORCE)
list (APPEND IE_OPTIONS IE_RELEASE_POSTFIX)
ie_option (ENABLE_TESTS "unit and functional tests" OFF)
ie_option (ENABLE_GAPI_TESTS "unit tests for GAPI kernels" OFF)
ie_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF)
ie_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON)
ie_option (ENABLE_SAMPLES_CORE "console samples core library" ON)
ie_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF)
@@ -63,6 +94,17 @@ ie_option (OS_FOLDER "create OS dedicated folder in output" OFF)
ie_option (ENABLE_PLUGIN_RPATH "enables rpath information to be present in plugins binary, and in corresponding test_applications" ON)
ie_option (ENABLE_AFFINITY_GENERATOR "enables affinity generator build" OFF)
ie_option (ENABLE_DEBUG_SYMBOLS "generates symbols for debugging" OFF)
ie_option (ENABLE_PYTHON "enables ie python bridge build" OFF)
ie_option (TREAT_WARNING_AS_ERROR "Treat build warnings as errors" ON)
ie_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF)
ie_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF)
#environment variables used
#name of environment variable stored path to temp directory"

View File

@@ -0,0 +1,106 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Attach the threading runtime selected by the global THREADING option
# (TBB | OMP | anything else = sequential) to TARGET_NAME:
#   * locates the runtime's headers/libraries (reusing any cached results),
#   * links them with PUBLIC visibility so consumers inherit the dependency,
#   * defines IE_THREAD=<IE_THREAD_TBB | IE_THREAD_OMP | IE_THREAD_SEQ>.
# When the requested runtime cannot be found, the function degrades to the
# sequential mode with a warning instead of aborting the configure step.
function(set_ie_threading_interface_for TARGET_NAME)
    # Sequential is the fallback; overwritten once a runtime is located below.
    set(IE_THREAD_DEFINE "IE_THREAD_SEQ")

    if (THREADING STREQUAL "TBB")
        # Out-of-tree builds (no IE_MAIN_SOURCE_DIR) consume TBB from the
        # packaged external/ layout; in-tree builds use the ${TBB} location
        # resolved by the dependency-download step.
        if (NOT (IE_MAIN_SOURCE_DIR))
            set(incl_path ${IE_EXTERNAL_DIR}/tbb/include)
            if (WIN32)
                set(lib_rel_path ${IE_LIB_REL_DIR})
                set(lib_dbg_path ${IE_LIB_DBG_DIR})
            else ()
                set(lib_rel_path ${IE_EXTERNAL_DIR}/tbb/lib)
                set(lib_dbg_path ${lib_rel_path})
            endif ()
        else ()
            set(incl_path ${TBB}/include)
            set(lib_rel_path ${TBB}/lib)
            set(lib_dbg_path ${lib_rel_path})
        endif ()

        # Probe only the expected locations (NO_DEFAULT_PATH); skip the probe
        # entirely when all three cache entries are already populated.
        if (NOT TBB_INCLUDE_DIRS OR NOT TBB_LIBRARIES_RELEASE OR NOT TBB_LIBRARIES_DEBUG)
            find_path(TBB_INCLUDE_DIRS tbb/tbb.h ${incl_path} NO_DEFAULT_PATH)
            find_library(TBB_LIBRARIES_RELEASE tbb ${lib_rel_path} NO_DEFAULT_PATH)
            find_library(TBB_LIBRARIES_DEBUG tbb_debug ${lib_dbg_path} NO_DEFAULT_PATH)
            ext_message(STATUS "TBB include: ${TBB_INCLUDE_DIRS}")
            ext_message(STATUS "TBB Release lib: ${TBB_LIBRARIES_RELEASE}")
            ext_message(STATUS "TBB Debug lib: ${TBB_LIBRARIES_DEBUG}")
        endif ()

        if (NOT TBB_INCLUDE_DIRS OR NOT TBB_LIBRARIES_RELEASE OR NOT TBB_LIBRARIES_DEBUG)
            # Fall back to sequential mode rather than failing the configure.
            ext_message(WARNING "TBB not found. TBB support will be disabled. ${IE_THREAD_DEFINE} is defined")
        else ()
            set(IE_THREAD_DEFINE "IE_THREAD_TBB")
            target_include_directories(${TARGET_NAME} PUBLIC ${TBB_INCLUDE_DIRS})
            if (WIN32)
                # Exclude MSVC's own OpenMP runtime, then select the debug or
                # release TBB library per configuration via generator expression
                # (works with multi-config generators such as Visual Studio).
                target_link_libraries(${TARGET_NAME} PUBLIC "-nodefaultlib:vcomp")
                target_link_libraries(${TARGET_NAME} PUBLIC "$<$<CONFIG:DEBUG>:${TBB_LIBRARIES_DEBUG}>;$<$<NOT:$<CONFIG:DEBUG>>:${TBB_LIBRARIES_RELEASE}>")
            else()
                # Single-config generators: decide at configure time.
                if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
                    target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_LIBRARIES_DEBUG})
                else()
                    target_link_libraries(${TARGET_NAME} PUBLIC ${TBB_LIBRARIES_RELEASE})
                endif ()
            endif ()
        endif ()

    elseif (THREADING STREQUAL "OMP")
        # The Intel OpenMP runtime library is named differently per platform.
        if (WIN32)
            set(omp_lib_name libiomp5md)
        else ()
            set(omp_lib_name iomp5)
        endif ()

        # Same packaged-vs-in-tree layout split as for TBB above.
        if (NOT(IE_MAIN_SOURCE_DIR))
            if (WIN32)
                set(lib_rel_path ${IE_LIB_REL_DIR})
                set(lib_dbg_path ${IE_LIB_DBG_DIR})
            else ()
                set(lib_rel_path ${IE_EXTERNAL_DIR}/omp/lib)
                set(lib_dbg_path ${lib_rel_path})
            endif ()
        else ()
            set(lib_rel_path ${OMP}/lib)
            set(lib_dbg_path ${lib_rel_path})
        endif ()

        if (NOT OMP_LIBRARIES_RELEASE OR NOT OMP_LIBRARIES_DEBUG)
            find_library(OMP_LIBRARIES_RELEASE ${omp_lib_name} ${lib_rel_path} NO_DEFAULT_PATH)
            find_library(OMP_LIBRARIES_DEBUG ${omp_lib_name} ${lib_dbg_path} NO_DEFAULT_PATH)
            ext_message(STATUS "OMP Release lib: ${OMP_LIBRARIES_RELEASE}")
            ext_message(STATUS "OMP Debug lib: ${OMP_LIBRARIES_DEBUG}")
        endif ()

        if (NOT OMP_LIBRARIES_RELEASE OR NOT OMP_LIBRARIES_DEBUG)
            ext_message(WARNING "Intel OpenMP not found. Intel OpenMP support will be disabled. ${IE_THREAD_DEFINE} is defined")
        else ()
            set(IE_THREAD_DEFINE "IE_THREAD_OMP")
            if (WIN32)
                # Both /openmp and /Qopenmp spellings are passed —
                # NOTE(review): presumably to cover both MSVC and the Intel
                # compiler; confirm which toolchains are expected here.
                target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} /openmp)
                target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} /Qopenmp)
                target_link_libraries(${TARGET_NAME} PUBLIC "-nodefaultlib:vcomp")
                target_link_libraries(${TARGET_NAME} PUBLIC "$<$<CONFIG:DEBUG>:${OMP_LIBRARIES_DEBUG}>;$<$<NOT:$<CONFIG:DEBUG>>:${OMP_LIBRARIES_RELEASE}>")
            else()
                target_compile_options(${TARGET_NAME} PUBLIC ${OpenMP_CXX_FLAGS} -fopenmp)
                if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
                    target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_DEBUG})
                else()
                    target_link_libraries(${TARGET_NAME} PUBLIC ${OMP_LIBRARIES_RELEASE})
                endif ()
            endif ()
        endif ()
    endif ()

    # Expose the selected mode to the target's sources and to its consumers.
    target_compile_definitions(${TARGET_NAME} PUBLIC -DIE_THREAD=${IE_THREAD_DEFINE})

    # Any parallel mode additionally needs the platform thread library.
    if (NOT THREADING STREQUAL "SEQ")
        find_package(Threads REQUIRED)
        target_link_libraries(${TARGET_NAME} PUBLIC ${CMAKE_THREAD_LIBS_INIT})
    endif()
endfunction(set_ie_threading_interface_for)

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

View File

@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required(VERSION 2.8)
if (UNIX)
function(get_linux_name res_var)
if (NOT EXISTS "/etc/lsb-release")

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

View File

@@ -1,59 +0,0 @@
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_policy(SET CMP0054 NEW)
if (APPLE OR WIN32)
find_path(OMP_INC omp.h)
find_library(OMP_LIB iomp5
PATHS ${OMP}/lib)
if (OMP_INC AND OMP_LIB)
set(HAVE_OMP TRUE)
get_filename_component(OMP_LIB_DIR "${OMP_LIB}" PATH)
else()
if (THREADING STREQUAL "OMP")
find_package(OpenMP)
if (NOT OPENMP_FOUND)
message(WARNING "OpenMP not found. OpenMP support will be disabled.")
endif()
endif()
endif()
endif()
# Enable OpenMP compilation for the current directory scope (this file is the
# pre-2019-R1 omp.cmake, removed by this change in favour of ie_parallel.cmake).
# Per-platform: MacOS requires a previously detected Intel OMP (HAVE_OMP);
# Linux adds -fopenmp globally; Windows adds /Qopenmp /openmp when
# THREADING=OMP. Optionally locates the Intel OMP runtime library.
macro(enable_omp)
if (APPLE) ## MacOS
if (HAVE_OMP)
# Use the Intel OpenMP runtime found earlier; add its directory to link paths.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp=libiomp5")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -L${OMP_LIB_DIR}")
else()
message(WARNING "Was trying to enable OMP for some target. However OpenMP was not detected on system.")
endif()
elseif(UNIX) # Linux
add_definitions(-fopenmp)
elseif(WIN32) # Windows
if (THREADING STREQUAL "OMP")
set(OPENMP_FLAGS "/Qopenmp /openmp")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CCXX_FLAGS} ${OPENMP_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CCXX_FLAGS} ${OPENMP_FLAGS}")
endif()
endif()
if (ENABLE_INTEL_OMP)
if (WIN32)
# Locate Intel's OpenMP runtime and keep MSVC's vcomp out of the link.
find_library(intel_omp_lib
libiomp5md
PATHS ${OMP}/lib ${ICCLIB})
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /nodefaultlib:vcomp")
# NOTE(review): the line below seeds SHARED linker flags from
# CMAKE_EXE_LINKER_FLAGS — looks like a copy-paste slip; shared flags are
# replaced by the exe flags rather than appended to. Moot here since the
# file is being deleted, but worth noting for history.
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /nodefaultlib:vcomp")
else()
find_library(intel_omp_lib
iomp5
PATHS ${OMP}/lib)
endif()
endif()
endmacro(enable_omp)

View File

@@ -1,9 +1,9 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Usage: ie_option(<option_variable> "description" <initial value or boolean expression> [IF <condition>])
function (ie_option variable description value)
option(${variable} "${description}" ${value})
list (APPEND IE_OPTIONS "${variable}")

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -7,12 +7,36 @@ if (WIN32)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _CRT_SECURE_NO_WARNINGS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") #no asynchronous structured exception handling
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Z7")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7")
if(ENABLE_DEBUG_SYMBOLS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Z7")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Z7")
set(DEBUG_SYMBOLS_LINKER_FLAGS "/DEBUG")
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
# Keep default /OPT values. See /DEBUG reference for details.
set(DEBUG_SYMBOLS_LINKER_FLAGS "${DEBUG_SYMBOLS_LINKER_FLAGS} /OPT:REF /OPT:ICF")
endif()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${DEBUG_SYMBOLS_LINKER_FLAGS}")
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Werror=return-type ")
if (APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-command-line-argument")
elseif(UNIX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wuninitialized -Winit-self -Wmaybe-uninitialized")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wuninitialized -Winit-self")
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-switch")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wmaybe-uninitialized")
endif()
endif()
endif()

View File

@@ -1,11 +1,24 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
include(CheckCXXCompilerFlag)
if (ENABLE_SANITIZER)
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fsanitize=address -fuse-ld=gold")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fuse-ld=gold")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
endif()
set(SANITIZER_COMPILER_FLAGS "-fsanitize=address")
CHECK_CXX_COMPILER_FLAG("-fsanitize-recover=address" SANITIZE_RECOVER_SUPPORTED)
if (SANITIZE_RECOVER_SUPPORTED)
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=address")
endif()
set(SANITIZER_LINKER_FLAGS "-fsanitize=address")
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
endif()
set(CMAKE_CC_FLAGS "${CMAKE_CC_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${SANITIZER_LINKER_FLAGS}")
endif()

View File

@@ -1,9 +1,9 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
if (UNIX OR APPLE)
if (UNIX OR APPLE AND ${CMAKE_BUILD_TYPE} STREQUAL "Release")
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fPIE -fPIC -Wformat -Wformat-security")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2")
@@ -16,21 +16,24 @@ if (UNIX OR APPLE)
else()
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fstack-protector-strong")
endif()
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -s -fvisibility=hidden")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -s -fvisibility=hidden")
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(CMAKE_CCXX_FLAGS "${CMAKE_CCXX_FLAGS} -fstack-protector-all")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -fvisibility=hidden")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fvisibility=hidden")
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -z noexecstack -z relro -z now")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -z noexecstack -z relro -z now")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wl,--strip-all -fvisibility=hidden")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wl,--strip-all -fvisibility=hidden")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CCXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CCXX_FLAGS}")
elseif (WIN32)
elseif (${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP /sdl")
if (${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MP /sdl")
endif()
endif()

View File

@@ -1,7 +1,9 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set(InferenceEngine_VERSION 1.4.0)
set(InferenceEngine_VERSION 1.6.0)
set(PACKAGE_VERSION ${InferenceEngine_VERSION})
set(PACKAGE_VERSION_EXACT False)

View File

@@ -1,6 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
# FindIE
# ------
#
@@ -17,6 +19,18 @@
# IE::inference_engine - The Inference Engine library
#
# message() wrapper that respects find_package's REQUIRED and QUIET modes:
#   * FATAL_ERROR level: fail hard only when InferenceEngine_FIND_REQUIRED is
#     set; otherwise downgrade to WARNING (unless QUIET). Because this is a
#     macro, the return() below exits the config file that invoked it, so
#     processing of InferenceEngineConfig.cmake stops after the diagnostic.
#   * Any other level: print as-is unless InferenceEngine_FIND_QUIETLY is set.
macro(ext_message TRACE_LEVEL)
    if (${TRACE_LEVEL} STREQUAL FATAL_ERROR)
        if(InferenceEngine_FIND_REQUIRED)
            message(FATAL_ERROR "${ARGN}")
        elseif(NOT InferenceEngine_FIND_QUIETLY)
            message(WARNING "${ARGN}")
        endif()
        # Abort the including config script (macro semantics of return()).
        return()
    elseif(NOT InferenceEngine_FIND_QUIETLY)
        message(${TRACE_LEVEL} "${ARGN}")
    endif ()
endmacro()
set(InferenceEngine_FOUND FALSE)
@@ -28,84 +42,39 @@ else()
if (WIN32)
set(_ARCH intel64)
else()
if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64")
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l")
set(_ARCH armv7l)
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64")
set(_ARCH intel64)
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "i386")
set(_ARCH ia32)
endif()
endif()
set(THREADING "@THREADING@")
# check whether setvars.sh is sourced
if(NOT IE_ROOT_DIR AND (DEFINED ENV{InferenceEngine_DIR} OR InferenceEngine_DIR OR DEFINED ENV{INTEL_CVSDK_DIR}))
if(NOT IE_ROOT_DIR AND (DEFINED ENV{InferenceEngine_DIR} OR InferenceEngine_DIR OR DEFINED ENV{INTEL_OPENVINO_DIR}))
if (EXISTS "${InferenceEngine_DIR}")
# InferenceEngine_DIR manually set via command line params
set(IE_ROOT_DIR "${InferenceEngine_DIR}/..")
elseif (EXISTS "$ENV{InferenceEngine_DIR}")
# InferenceEngine_DIR manually set via env
set(IE_ROOT_DIR "$ENV{InferenceEngine_DIR}/..")
elseif (EXISTS "$ENV{INTEL_CVSDK_DIR}/inference_engine")
elseif (EXISTS "$ENV{INTEL_OPENVINO_DIR}/inference_engine")
# if we installed DL SDK
set(IE_ROOT_DIR "$ENV{INTEL_CVSDK_DIR}/inference_engine")
elseif (EXISTS "$ENV{INTEL_CVSDK_DIR}/deployment_tools/inference_engine")
set(IE_ROOT_DIR "$ENV{INTEL_OPENVINO_DIR}/inference_engine")
elseif (EXISTS "$ENV{INTEL_OPENVINO_DIR}/deployment_tools/inference_engine")
# CV SDK is installed
set(IE_ROOT_DIR "$ENV{INTEL_CVSDK_DIR}/deployment_tools/inference_engine")
set(IE_ROOT_DIR "$ENV{INTEL_OPENVINO_DIR}/deployment_tools/inference_engine")
endif()
endif()
if(IE_ROOT_DIR)
if (WIN32)
set(_OS_PATH "")
else()
if (NOT EXISTS "/etc/lsb-release")
execute_process(COMMAND find /etc/ -maxdepth 1 -type f -name *-release -exec cat {} \;
OUTPUT_VARIABLE release_data RESULT_VARIABLE result)
set(name_regex "NAME=\"([^ \"\n]*).*\"\n")
set(version_regex "VERSION=\"([0-9]+(\\.[0-9]+)?)[^\n]*\"")
else()
#linux version detection using cat /etc/lsb-release
file(READ "/etc/lsb-release" release_data)
set(name_regex "DISTRIB_ID=([^ \n]*)\n")
set(version_regex "DISTRIB_RELEASE=([0-9]+(\\.[0-9]+)?)")
endif()
string(REGEX MATCH ${name_regex} name ${release_data})
set(os_name ${CMAKE_MATCH_1})
string(REGEX MATCH ${version_regex} version ${release_data})
set(os_name "${os_name} ${CMAKE_MATCH_1}")
if (NOT os_name)
if(InferenceEngine_FIND_REQUIRED)
message(FATAL_ERROR "Cannot detect OS via reading /etc/*-release:\n ${release_data}")
elseif(NOT InferenceEngine_FIND_QUIETLY)
message(WARNING "Cannot detect OS via reading /etc/*-release:\n ${release_data}")
endif()
return()
endif()
if (NOT InferenceEngine_FIND_QUIETLY)
message (STATUS "/etc/*-release distrib: ${os_name}")
endif()
if (${os_name} STREQUAL "Ubuntu 14.04")
set(_OS_PATH "ubuntu_14.04/")
elseif (${os_name} STREQUAL "Ubuntu 16.04")
set(_OS_PATH "ubuntu_16.04/")
elseif (${os_name} STREQUAL "CentOS 7")
set(_OS_PATH "centos_7.4/")
elseif (${os_name} STREQUAL "poky 2.0")
set(_OS_PATH "ubuntu_16.04/")
else()
if(InferenceEngine_FIND_REQUIRED)
message(FATAL_ERROR "${os_name} is not supported. List of supported OS: Ubuntu 14.04, Ubuntu 16.04, CentOS 7")
elseif(NOT InferenceEngine_FIND_QUIETLY)
message(WARNING "${os_name} is not supported. List of supported OS: Ubuntu 14.04, Ubuntu 16.04, CentOS 7")
endif()
return()
endif()
endif()
if(NOT IE_ROOT_DIR)
ext_message(FATAL_ERROR "inference_engine directory is not found")
endif()
if(IE_INCLUDE_DIR AND NOT "${IE_ROOT_DIR}/include" EQUAL "${IE_INCLUDE_DIR}")
unset(IE_INCLUDE_DIR CACHE)
endif()
@@ -114,32 +83,50 @@ else()
unset(IE_SRC_DIR CACHE)
endif()
if(IE_LIBRARY AND NOT "${IE_ROOT_DIR}/lib/${_OS_PATH}/${_ARCH}" EQUAL "${IE_LIBRARY}")
if(IE_LIBRARY AND NOT "${IE_ROOT_DIR}/lib/${_ARCH}" EQUAL "${IE_LIBRARY}")
unset(IE_LIBRARY CACHE)
endif()
set(_IE_ROOT_INCLUDE_DIR "${IE_ROOT_DIR}/include")
set(_IE_ROOT_SRC_DIR "${IE_ROOT_DIR}/src")
set(_IE_ROOT_LIBRARY "${IE_ROOT_DIR}/lib/${_OS_PATH}/${_ARCH}")
set(_IE_ROOT_LIBRARY "${IE_ROOT_DIR}/lib/${_ARCH}")
find_path(IE_INCLUDE_DIR inference_engine.hpp "${_IE_ROOT_INCLUDE_DIR}")
find_path(IE_SRC_DIR extension "${_IE_ROOT_SRC_DIR}")
set(IE_LIB_DIR "${_IE_ROOT_LIBRARY}")
set(IE_LIB_REL_DIR "${IE_LIB_DIR}/Release")
set(IE_LIB_DBG_DIR "${IE_LIB_DIR}/Debug")
set(IE_EXTERNAL_DIR "${IE_ROOT_DIR}/external")
include(FindPackageHandleStandardArgs)
if (WIN32)
find_library(IE_RELEASE_LIBRARY inference_engine "${_IE_ROOT_LIBRARY}/Release")
find_library(IE_DEBUG_LIBRARY inference_engine "${_IE_ROOT_LIBRARY}/Debug")
find_package_handle_standard_args( IE
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}")
find_library(IE_DEBUG_LIBRARY inference_engine@IE_DEBUG_POSTFIX_WIN@ "${IE_LIB_DBG_DIR}")
find_package_handle_standard_args( InferenceEngine
FOUND_VAR INFERENCEENGINE_FOUND
REQUIRED_VARS IE_RELEASE_LIBRARY IE_DEBUG_LIBRARY IE_INCLUDE_DIR
FAIL_MESSAGE "Inference Engine cannot be found at ${_IE_ROOT_LIBRARY}. Please consult InferenceEgnineConfig.cmake module's help page.")
elseif (APPLE)
find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}")
find_library(IE_DEBUG_LIBRARY inference_engine@IE_DEBUG_POSTFIX_MAC@ "${IE_LIB_DIR}")
find_package_handle_standard_args( InferenceEngine
FOUND_VAR INFERENCEENGINE_FOUND
REQUIRED_VARS IE_RELEASE_LIBRARY IE_DEBUG_LIBRARY IE_INCLUDE_DIR
FAIL_MESSAGE "Inference Engine cannot be found at ${_IE_ROOT_LIBRARY}. Please consult InferenceEgnineConfig.cmake module's help page.")
else()
find_library(IE_LIBRARY inference_engine "${_IE_ROOT_LIBRARY}")
find_package_handle_standard_args( IE
find_library(IE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}")
find_package_handle_standard_args( InferenceEngine
FOUND_VAR INFERENCEENGINE_FOUND
REQUIRED_VARS IE_LIBRARY IE_INCLUDE_DIR
FAIL_MESSAGE "Inference Engine cannot be found at ${_IE_ROOT_LIBRARY}. Please consult InferenceEgnineConfig.cmake module's help page.")
endif()
if(IE_FOUND)
if(INFERENCEENGINE_FOUND)
# to keep this line for successful execution in CMake 2.8
set(InferenceEngine_FOUND TRUE)
add_library(IE::inference_engine SHARED IMPORTED GLOBAL)
if (WIN32)
@@ -153,6 +140,12 @@ else()
MAP_IMPORTED_CONFIG_RELEASE Release
MAP_IMPORTED_CONFIG_RELWITHDEBINFO Release
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}")
elseif (APPLE)
set_target_properties(IE::inference_engine PROPERTIES
IMPORTED_LOCATION_RELEASE "${IE_RELEASE_LIBRARY}"
IMPORTED_LOCATION_DEBUG "${IE_DEBUG_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${IE_INCLUDE_DIR}")
target_link_libraries(IE::inference_engine INTERFACE ${CMAKE_DL_LIBS})
else()
set_target_properties(IE::inference_engine PROPERTIES
IMPORTED_LOCATION "${IE_LIBRARY}"
@@ -162,10 +155,10 @@ else()
set(InferenceEngine_INCLUDE_DIRS ${IE_INCLUDE_DIR})
set(InferenceEngine_LIBRARIES IE::inference_engine)
set(InferenceEngine_FOUND TRUE)
include("${IE_ROOT_DIR}/share/ie_parallel.cmake")
add_subdirectory(${IE_SRC_DIR}/extension EXCLUDE_FROM_ALL ie_cpu_extension)
add_library(IE::ie_cpu_extension ALIAS ie_cpu_extension)
endif()
endif()

View File

@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required(VERSION 2.8)
function (branchName VAR)
execute_process(
COMMAND git rev-parse --abbrev-ref HEAD

View File

@@ -1,42 +1,53 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Defines the CMake commands/policies
cmake_minimum_required( VERSION 2.8.5 )
cmake_minimum_required (VERSION 3.3)
# Set the project name
project( INFERENCE_ENGINE_DRIVER )
project (ie_python_api)
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR}/cmake)
option(COPY_IE_LIBS "Copy Inference Engine libs to package directory" ${WIN32})
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l")
set (ARCH armv7l)
elseif ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
set (ARCH intel64)
else()
set (ARCH ia32)
endif()
set (IE_DEFAULT_PATH computer_vision_sdk/deployment_tools/inference_engine/share)
find_package(InferenceEngine REQUIRED PATHS /opt/intel/${IE_DEFAULT_PATH} $ENV{HOME}/intel/${IE_DEFAULT_PATH})
# in case of independent python api build (out of Inference Engine root Cmake)
if (NOT(IE_MAIN_SOURCE_DIR))
if("${CMAKE_BUILD_TYPE}" STREQUAL "")
message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used")
set(CMAKE_BUILD_TYPE "Release")
endif()
message(STATUS "BUILD_CONFIGURATION: ${CMAKE_BUILD_TYPE}")
# Make the scripts available in the 'cmake' directory available for the
# 'include()' command, 'find_package()' command.
set( CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR}/cmake )
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/bin/${ARCH})
if(NOT(WIN32))
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE})
endif()
else()
if (UNIX OR APPLE)
# cython generated files requires public visibility. Force visibility required.
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fvisibility=default")
endif()
endif()
# Include the CMake script UseCython.cmake. This defines add_cython_module().
# Instruction for use can be found at the top of cmake/UseCython.cmake.
include( UseCython )
include (UseCython)
# With CMake, a clean separation can be made between the source tree and the
# build tree. When all source is compiled, as with pure C/C++, the source is
# no-longer needed in the build tree. However, with pure *.py source, the
# source is processed directly. To handle this, we reproduce the availability
# of the source files in the build tree.
add_custom_target( ReplicatePythonSourceTree ALL ${CMAKE_COMMAND} -P
${CMAKE_CURRENT_SOURCE_DIR}/cmake/ReplicatePythonSourceTree.cmake
${CMAKE_CURRENT_BINARY_DIR}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} )
if (PYTHONINTERP_FOUND)
set (PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR})
else()
message(FATAL_ERROR "Python Interpreter was not found!")
endif()
add_custom_target( CopyIeLibs ${CMAKE_COMMAND} -P
${CMAKE_CURRENT_SOURCE_DIR}/cmake/CopyIeLibs.cmake
${IE_ROOT_DIR}/bin/${_ARCH}/Release ${_IE_ROOT_LIBRARY}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ie_driver )
if(WIN32)
set (PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/${PYTHON_VERSION}/openvino)
else()
set (PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/${PYTHON_VERSION}/openvino)
endif()
include_directories( IE::inference_engine )
find_package (InferenceEngine REQUIRED)
# Process the CMakeLists.txt in the 'src' and 'bin' directory.
add_subdirectory( inference_engine )
set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory (src/openvino/inference_engine)

View File

@@ -7,33 +7,53 @@
## Prerequisites
Install the following Python modules:
- opencv-python
- numpy
- cython
## Building on Windows
```shellscript
mkdir build
cd build
set PATH=C:\Program Files\Python36\Scripts;%PATH%
cmake -G "Visual Studio 14 2015 Win64" -DInferenceEngine_DIR=..\..\..\build ^
-DPYTHON_EXECUTABLE="C:\Program Files\Python36\python.exe" ^
-DPYTHON_INCLUDE_DIR="C:\Program Files\Python36\include" ^
-DPYTHON_LIBRARY="C:\Program Files\Python36\libs\python36.lib" ..
2. Install Inference Engine Python API dependencies:
```bash
pip3 install -r requirements.txt
```
Then build generated solution INFERENCE_ENGINE_DRIVER.sln using Microsoft\* Visual Studio.
## Building on Linux
Build Inference Engine Python API alongside with the Inference Engine build.
You need to run Inference Engine build with the following flags:
```shellscript
cd <IE_ROOT>
mkdir -p build
cd build
cmake -DInferenceEngine_DIR=../../../build -DPYTHON_EXECUTABLE=`which python3.6` \
cmake -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=`which python3.6` \
-DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so \
-DPYTHON_INCLUDE_DIR=/usr/include/python3.6 ..
make -j16
```
Note: -DInferenceEngine_DIR parameter is needed to specify the folder with generated make files or Visual Studio solution used to build Inference Engine (see readme file in the inference-engine root folder).
## Building on Windows
You need to run Inference Engine build with the following flags:
```shellscript
cd <IE_ROOT>
mkdir build
cd build
set PATH=C:\Program Files\Python36\Scripts;%PATH%
cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
-DENABLE_PYTHON=ON ^
-DPYTHON_EXECUTABLE="C:\Program Files\Python36\python.exe" ^
-DPYTHON_INCLUDE_DIR="C:\Program Files\Python36\include" ^
-DPYTHON_LIBRARY="C:\Program Files\Python36\libs\python36.lib" ..
```
Then build generated solution INFERENCE_ENGINE_DRIVER.sln using Microsoft\* Visual Studio or run `cmake --build . --config Release` to build from the command line.
## Running sample
Before running the Python samples:
- add the folder with built `openvino` Python module (located at `inference-engine/bin/intel64/Release/lib/python_api/python3.6`) to the PYTHONPATH environment variable.
- add the folder with Inference Engine libraries to LD_LIBRARY_PATH variable on Linux (or PATH on Windows).
Example of command line to run classification sample:
```bash
python3 sample/classification_sample.py -m <path/to/xml> -i <path/to/input/image> -d CPU
```

View File

@@ -1,10 +0,0 @@
set(IE_WIN_LIBS ${CMAKE_ARGV3})
set(IE_LIBS ${CMAKE_ARGV4})
if (WIN32)
file( GLOB IE_LIBS "${IE_WIN_LIBS}/*.dll")
file( COPY ${IE_LIBS} DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})
else()
file( GLOB IE_LIBS "${IE_LIBS}/*.so")
file( COPY ${IE_LIBS} DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})
endif()

View File

@@ -1,10 +1,19 @@
# Find the Cython compiler.
# Copyright (C) 2018-2019 Intel Corporation
#
# This code sets the following variables:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# CYTHON_EXECUTABLE
# http://www.apache.org/licenses/LICENSE-2.0
#
# See also UseCython.cmake
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Following changes were done on top of original file:
# Add CYTHON_EXECUTABLE searching hints at lines 50 and 51
#=============================================================================
# Copyright 2011 Kitware, Inc.
@@ -21,7 +30,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
# Find the Cython compiler.
#
# This code sets the following variables:
#
# CYTHON_EXECUTABLE
#
# See also UseCython.cmake
# Use the Cython executable that lives next to the Python executable
# if it is a local installation.
find_package( PythonInterp )

View File

@@ -1,7 +0,0 @@
# Note: when executed in the build dir, then CMAKE_CURRENT_SOURCE_DIR is the
# build dir.
file( COPY setup.py inference_engine tests DESTINATION "${CMAKE_ARGV3}"
FILES_MATCHING PATTERN "*.py" )
file( COPY requirements.txt DESTINATION "${CMAKE_ARGV3}" )

View File

@@ -46,6 +46,23 @@
#
# See also FindCython.cmake
# Copyright (C) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Following changes were done on top of the original file:
# added PRIVATE linking mode for target_link_libraries call at lines 298 and 336
#=============================================================================
# Copyright 2011 Kitware, Inc.
#

View File

@@ -1,7 +1,7 @@
# Overview of Inference Engine Python* API
**NOTE:** It is a preview version of the Inference Engine Python\* API for evaluation purpose only.
Module structure and API itself may be changed in future releases.
> **NOTE:** It is a preview version of the Inference Engine Python\* API for evaluation purpose only.
> Module structure and API itself may be changed in future releases.
This API provides a simplified interface for Inference Engine functionality that allows to:
@@ -21,99 +21,101 @@ Supported Python* versions:
## Setting Up the Environment
To configure the environment for the Inference Engine Python\* API, run:
* On Ubuntu 16.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Ubuntu 16.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Windows 10: `call <INSTALL_DIR>\deployment_tools\inference_engine\python_api\setenv.bat`
The script automatically detects latest installed Python\* version and configures required environment if the version is supported.
If you want to use certain version of Python\*, set the environment variable `PYTHONPATH=<INSTALL_DIR>/deployment_tools/inference_engine/python_api/<desired_python_version>`
after running the environment configuration script.
## <a name="ienetlayer-class"></a>IENetLayer
This class stores main information about the layer and allow to modify some layer parameters
This class stores main information about the layer and allow to modify some layer parameters
### Class attributes:
* `name` - Name of the layer
* `name` - Name of the layer
* `type`- Layer type
* `precision` - Layer base operating precision. Provides getter and setter interfaces.
* `layout` - Returns the layout of shape of the layer.
* `shape` - Return the list of the shape of the layer.
* `parents` - Returns a list, which contains names of layers preceding this layer.
* `children` - Returns a list, which contains names of layers following this layer.
* `affinity` - Layer affinity set by user or a default affinity set by the `IEPlugin.set_initial_affinity()` method.
The affinity attribute provides getter and setter interfaces, so the layer affinity can be modified directly.
For example:
For example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="HETERO:FPGA,CPU")
>>> plugin.set_config({"TARGET_FALLBACK": "HETERO:FPGA,CPU"})
>>> plugin.set_initial_affinity(net)
>>> plugin.set_initial_affinity(net)
>>> for l in net.layers.values():
... if l.type == "Convolution":
... l.affinity = "CPU"
```
To correctly set affinity for the network, you must first initialize and properly configure the HETERO plugin.
`set_config({"TARGET_FALLBACK": "HETERO:FPGA,GPU"})` function configures the plugin fallback devices and their order.
`plugin.set_initial_affinity(net)` function sets affinity parameter of model layers according to its support
on specified devices.
After default affinity is set by the plugin, override the default values by setting affinity manually how it's
To correctly set affinity for the network, you must first initialize and properly configure the HETERO plugin.
`set_config({"TARGET_FALLBACK": "HETERO:FPGA,GPU"})` function configures the plugin fallback devices and their order.
`plugin.set_initial_affinity(net)` function sets affinity parameter of model layers according to its support
on specified devices.
After default affinity is set by the plugin, override the default values by setting affinity manually how it's
described in example above
To understand how default and non-default affinities are set:
To understand how default and non-default affinities are set:
1. Call `net.layers` function right after model loading and check that layer affinity parameter is empty.
2. Call `plugin.set_default_affinity(net)`.
3. Call `net.layers` and check layer affinity parameters to see how plugin set a default affinity
4. Set layer affinity how it's described above
5. Call `net.layers` again and check layer affinity parameters to see how it was changed after manual affinity
5. Call `net.layers` again and check layer affinity parameters to see how it was changed after manual affinity
setting
Please refer to `affinity_setting_demo.py` to see the full usage pipeline.
* `weights`- Dictionary with layer weights, biases or custom blobs if any
* `params` - Layer specific parameters. Provides getter and setter interfaces to get and modify layer parameters.
Please note that some modifications can be ignored and\or overwriten by target plugin (e.g. modification of
Please note that some modifications can be ignored and/or overwritten by the target plugin (e.g. modification of
convolution kernel size will be reflected in layer parameters but finally the plugin will ignore it and will
use initial kernel size)
use initial kernel size)
## <a name="ienetwork-class"></a>IENetwork
## <a name="ienetwork-class"></a>IENetwork
This class contains the information about the network model read from IR and allows you to manipulate with some model parameters such as
layers affinity and output layers.
### Class Constructor
There is no explicit class constructor. Use `from_ir` class method to read the Intermediate Representation (IR) and initialize a correct instance of the `IENetwork` class.
* `__init__(model: str, weights: str)`
* Parameters:
* model - Path to `.xml` file of the IR
* weights - Path to `.bin` file of the IR
### Class attributes:
* `name` - Name of the loaded network
* `inputs` - A dictionary that maps input layer names to <a name="inputinfo-class"></a>InputInfo objects.
* `inputs` - A dictionary that maps input layer names to <a name="inputinfo-class"></a>InputInfo objects.
For example, to get a shape of the input layer:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.inputs
{'data': <inference_engine.ie_api.InputInfo object at 0x7efe042dedd8>}
>>> net.inputs['data'].shape
[1, 3, 224, 224]
```
* `outputs` - A dictionary that maps output layer names to <a name="inputinfo-class"></a>OutputInfo objects
For example, to get a shape of the output layer:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.inputs
{'prob': <inference_engine.ie_api.OutputInfo object at 0x7efe03ab95d0>}
>>> net.outputs['prob'].shape
[1, 1000]
```
* `batch_size` - Batch size of the network. Provides getter and setter interfaces to get and modify the
* `batch_size` - Batch size of the network. Provides getter and setter interfaces to get and modify the
network batch size. For example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.batch_size
1
>>> net.batch_size = 4
@@ -122,125 +124,124 @@ There is no explicit class constructor. Use `from_ir` class method to read the I
>>> net.inputs['data'].shape
[4, 3, 224, 224]
```
* `layers` - Return dictionary that maps network layer names to <a name="ienetlayer-class"></a>`IENetLayer`
objects containing layer properties. For example, to list all network layers:
* `layers` - Return dictionary that maps network layer names to <a name="ienetlayer-class"></a>`IENetLayer`
objects containing layer properties in topological order. For example, to list all network layers:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.layers
{'conv0': <inference_engine.ie_api.IENetLayer object at 0x7f3a4c102370>
...
}
```
* `stats` - Returns `LayersStatsMap` object containing dictionary that maps network layer names to calibration statistics
represented by <a name="layerstats-class"></a> `LayerStats` objects.
`LayersStatsMap` class inherited from built-in python `dict` and overrides default `update()`method to allow
to set or modify layers calibration statistics.
```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.stats.update({
"conv1_2d" : LayerStats(min=(-25, -1, 0), max=(63, 124, 70)),
"conv2_2d" : LayerStats(min=(-5, -1, 0, 1, -7, 2), max=(63, 124, 70, 174, 99, 106)),
})
```
For more details about low precision inference please refer to "Low-Precision 8-bit Integer Inference"
section in Inference Engine Developers Guide documentation.
### Class Methods
* `from_ir(model: str, weights: str)`
* Description:
* `from_ir(model: str, weights: str)`
> **NOTE:** The function is deprecated. Please use `IENetwork()` class constructor to create valid instance of `IENetwork`
* Description:
The class method serves to read the model from the `.xml` and `.bin` files of the IR.
* Parameters:
* model - Path to `.xml` file of the IR
* weights - Path to `.bin` file of the IR
* Return value:
An instance of the `IENetwork` class
* Usage example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net
<inference_engine.ie_api.IENetwork object at 0x7fd7dbce54b0>
```
### Instance Methods
* `add_outputs(outputs)`:
* Description:
The method serves to mark any intermediate layer as output layer to retrieve the inference results
### Instance Methods
* `add_outputs(outputs)`:
* Description:
The method serves to mark any intermediate layer as output layer to retrieve the inference results
from the specified layers.
* Parameters:
* `outputs` - List of layer names to be set as model outputs. In case of setting one layer as output, string with one layer can be provided.
* Return value:
None
* Usage example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.add_outputs(["conv5_1/dwise", "conv2_1/expand"])
>>> net.outputs
['prob', 'conv5_1/dwise', 'conv2_1/expand']
```
**Note**
The last layers (nodes without successors in graph representation of the model) are set as output
by default. In the case above, `prob` layer is a default output and `conv5_1/dwise`, `conv2_1/expand` are user-defined
outputs.
> **NOTE**: The last layers (nodes without successors in graph representation of the model) are set as output
> by default. In the case above, `prob` layer is a default output and `conv5_1/dwise`, `conv2_1/expand` are user-defined
> outputs.
* `reshape(input_shapes: dict)`:
* Description:
* Description:
The method reshapes the network to change spatial dimensions, batch size, or any dimension.
**Note:**
Before using this method, make sure that the target shape is applicable for the network
Changing the network shape to an arbitrary value may lead to unpredictable behaviour.
> **Note:** Before using this method, make sure that the target shape is applicable for the network. Changing the network shape to an arbitrary value may lead to unpredictable behaviour.
* Parameters:
* `input_shapes` - The dictionary that maps input layer names to tuples with the target shape
* Return value:
None
* Return value:
None
* Usage example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> input_layer = next(iter(net.inputs))
>>> n, c, h, w = net.inputs[input_layer]
>>> net.reshape({input_layer: (n, c, h*2, w*2)})
```
```
* `serialize(path_to_xml, path_to_bin)`:
* Description:
The method serializes the network and stores it in files.
* Parameters:
* `path_to_xml` - path to a file, where a serialized model will be stored.
* `path_to_bin` - path to a file, where serialized weights will be stored.
* Return value:
None
* Usage example:
```py
>>> net = IENetwork(model=path_to_model, weights=path_to_weights)
>>> net.serialize(path_to_xml, path_to_bin)
```
## <a name="inputinfo-class"></a>InputInfo
## <a name="layerstats-class"></a>LayerStats
Layer calibration statistic container.
### Class Constructor
* `__init__(min: tuple = (), max: tuple = ())`
* Parameters:
* min - Tuple with per-channel minimum layer activation values
* max - Tuple with per-channel maximum layer activation values
## <a name="inputinfo-class"></a>InputInfo
This class contains the information about the network input layers
### Class attributes:
* `precision` - Precision of the input data provided by user. Provides setter and getter interfaces
* `precision` - Precision of the input data provided by user. Provides setter and getter interfaces
to get and modify input layer precision.
List of applicable precisions: FP32 FP16, I32, I16, I8, U32, U16
**Note**: Support of any calculation precision depends on the target plugin
> **NOTE**: Support of any calculation precision depends on the target plugin.
* `layout` - Layout of the input data provided by user. Provides setter and getter interfaces
to get and modify input layer layout.
to get and modify input layer layout.
List of applicable layouts: NCHW, NHWC, OIHW, C, CHW, HW, NC, CN, BLOCKED
* `shape` - input layer data shape
## <a name="outputinfo-class"></a>OutputInfo
## <a name="outputinfo-class"></a>OutputInfo
This class contains the information about the network input layers
@@ -248,142 +249,93 @@ This class contains the information about the network input layers
* `precision` - Precision of the output data. Provides setter and getter interfaces
to get and modify output layer precision.
* `layout` - Layout of the output data provided by user
* `shape` - Input layer data shape
## <a name="ieplugin-class"></a>IEPlugin Class
This class is the main plugin interface and serves to initialize and configure the plugin.
### Class Constructor
* `__init__(device: str, plugin_dirs=None)`
* Parameters:
* `device` - Target device name. Supported devices: CPU, GPU, FPGA, MYRIAD, HETERO
* `plugin_dirs` - List of paths to plugin directories
* `plugin_dirs` - List of paths to plugin directories
### Properties
* `device` - a name of the device that was specified to initialize IEPlugin
* `version` - a version of the plugin
* `version` - a version of the plugin
### Instance Methods
* ```load(network: IENetwork, num_requests: int=1, config=None)```
* Description:
Loads a network that was read from the IR to the plugin and creates an executable network from a network object.
You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware
* Description:
Loads a network that was read from the IR to the plugin and creates an executable network from a network object.
You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware
resources).
* Parameters:
* `network` - A valid IENetwork instance created by `IENetwork.from_ir()` method
* `num_requests` - A positive integer value of infer requests to be created. Number of infer requests may be limited
* `network` - A valid `IENetwork` instance
* `num_requests` - A positive integer value of infer requests to be created. Number of infer requests may be limited
by device capabilities.
* `config` - A dictionary of plugin configuration keys and their values
* Return value:
* Return value:
None
* Usage example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net
<inference_engine.ie_api.ExecutableNetwork object at 0x7f5140bbcd38>
```
* `set_initial_affinity(net: IENetwork)`
* Description:
Sets initial affinity for model layers according to the HETERO plugin logic. Applicable only if
Sets initial affinity for model layers according to the HETERO plugin logic. Applicable only if
IEPlugin was initialized for HETERO device.
* Parameters:
* `net` - A valid instance of IENetwork
* Return value:
None
* Usage example:
See `affinity` attribute of the `IENetLayer` class.
* `add_cpu_extension(extension_path: str)`
* Description:
Loads extensions library to the plugin. Applicable only for CPU device and HETERO device with CPU
* Parameters:
* `extension_path` - A full path to CPU extensions library
* Return value:
None
* `net` - A valid instance of IENetwork
* Return value:
None
* Usage example:
See `affinity` attribute of the `IENetLayer` class.
* `add_cpu_extension(extension_path: str)`
* Description:
Loads extensions library to the plugin. Applicable only for CPU device and HETERO device with CPU
* Parameters:
* `extension_path` - A full path to CPU extensions library
* Return value:
None
* Usage example:
```py
>>> plugin = IEPlugin(device="CPU")
>>> plugin.add_cpu_extension(ext_lib_path)
```
```
* `set_config(config: dict)`
* Description:
Sets a configuration for the plugin. Refer to `SetConfig()` in Inference Engine C++ documentation for acceptable
keys and values list.
* Parameters:
* `config` - A dictionary of keys and values of acceptable configuration parameters
* Return value:
None
* Usage examples:
See `set_affinity` method of the `IENetwork` class.
* `get_supported_layers(net: IENetwork)`
* Description:
Returns the set of layers supported by the plugin. Please note that in case of CPU plugin support of
a layer may depend on extensions loaded by the `add_cpu_extension()` method
Sets a configuration for the plugin. Refer to `SetConfig()` in Inference Engine C++ documentation for acceptable
keys and values list.
* Parameters:
* `net` - A valid instance of IENetwork
* `config` - A dictionary of keys and values of acceptable configuration parameters
* Return value:
None
* Usage examples:
See `set_affinity` method of the `IENetwork` class.
* `get_supported_layers(net: IENetwork)`
* Description:
Returns the set of layers supported by the plugin. Please note that in case of CPU plugin support of
a layer may depend on extensions loaded by the `add_cpu_extension()` method
* Parameters:
* `net` - A valid instance of IENetwork
* Return value:
Set of layers supported by the plugin
* Usage example:
See `affinity` attribute of the `IENetLayer` class.
* Usage example:
See `affinity` attribute of the `IENetLayer` class.
## <a name="executablenetwork"></a>ExecutableNetwork Class
This class represents a network instance loaded to plugin and ready for inference.
This class represents a network instance loaded to plugin and ready for inference.
### Class Constructor
@@ -392,39 +344,30 @@ There is no explicit class constructor. To make a valid instance of `ExecutableN
### Class attributes
* `requests` - A tuple of InferRequest instances
* Usage example:
* Usage example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=3)
>>> exec_net.requests
(<inference_engine.ie_api.InferRequest object at 0x7f66f56c57e0>,
<inference_engine.ie_api.InferRequest object at 0x7f66f56c58b8>,
(<inference_engine.ie_api.InferRequest object at 0x7f66f56c57e0>,
<inference_engine.ie_api.InferRequest object at 0x7f66f56c58b8>,
<inference_engine.ie_api.InferRequest object at 0x7f66f56c5900>)
```
### Instance Methods
* `infer(inputs=None)`
* Description:
Starts synchronous inference for the first infer request of the executable network and returns output data.
Wraps `infer()` method of the `InferRequest` class
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value:
A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer
* Usage example:
```py
>>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> res = exec_net.infer({'data': img})
@@ -436,35 +379,26 @@ There is no explicit class constructor. To make a valid instance of `ExecutableN
......
]])}
```
For illustration of input data preparation, please see samples (for example, `classification_sample.py`).
For illustration of input data preparation, please see samples (for example, `classification_sample.py`).
* `start_async(request_id, inputs=None)`
* Description:
Starts asynchronous inference for specified infer request.
Wraps `async_infer()` method of the `InferRequest` class
* Parameters:
* `request_id` - Index of infer request to start inference
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value:
A handler of specified infer request, which is an instance of the `InferRequest` class.
* Usage example:
```py
>>> infer_request_handle = exec_net.start_async(request_id=0, inputs={input_blob: image})
>>> infer_status = infer_request_handle.wait()
>>> res = infer_request_handle.outputs[out_blob]
```
For more details about infer requests processing, see `classification_sample_async.py` (simplified case) and
For more details about infer requests processing, see `classification_sample_async.py` (simplified case) and
`object_detection_demo_ssd_async.py` (real asynchronous use case) samples.
## <a name="inferrequest"></a>InferRequest Class
This class provides an interface to infer requests of `ExecutableNetwork` and serves to handle infer requests execution
@@ -472,140 +406,111 @@ and to set and get output data.
### Class Constructor
There is no explicit class constructor. To make a valid `InferRequest` instance, use `load()` method of the `IEPlugin`
class with specified number of requests to get `ExecutableNetwork` instance which stores infer requests.
There is no explicit class constructor. To make a valid `InferRequest` instance, use `load()` method of the `IEPlugin`
class with specified number of requests to get `ExecutableNetwork` instance which stores infer requests.
### Class attributes
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* `outputs` - A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer
* Usage example:
```py
>>> exec_net.requests[0].inputs['data'][:] = image
>>> exec_net.requests[0].infer()
>>> res = exec_net.requests[0].outputs['prob']
>>> np.flip(np.sort(np.squeeze(res)),0)
>>> np.flip(np.sort(np.squeeze(res)),0)
array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
2.26027006e-03, 2.12283316e-03 ...])
```
```
### Instance Methods
It is not recommended to run inference directly on `InferRequest` instance.
To run inference, please use simplified methods `infer()` and `start_async()` of `ExecutableNetwork`.
It is not recommended to run inference directly on `InferRequest` instance.
To run inference, please use simplified methods `infer()` and `start_async()` of `ExecutableNetwork`.
* `infer(inputs=None)`
* Description:
Starts synchronous inference of the infer request and fill outputs array
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value:
None
* Usage example:
* Description:
Starts synchronous inference of the infer request and fill outputs array
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value:
None
* Usage example:
```py
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net.requests[0].infer({input_blob: image})
>>> res = exec_net.requests[0].outputs['prob']
>>> np.flip(np.sort(np.squeeze(res)),0)
>>> np.flip(np.sort(np.squeeze(res)),0)
array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
2.26027006e-03, 2.12283316e-03 ...])
```
2.26027006e-03, 2.12283316e-03 ...])
```
* `async_infer(inputs=None)`
* Description:
Starts asynchronous inference of the infer request and fill outputs array
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value:
None
* Usage example:
* Description:
Starts asynchronous inference of the infer request and fill outputs array
* Parameters:
* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* Return value:
None
* Usage example:
```py
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net.requests[0].async_infer({input_blob: image})
>>> exec_net.requests[0].wait()
>>> res = exec_net.requests[0].outputs['prob']
>>> np.flip(np.sort(np.squeeze(res)),0)
>>> np.flip(np.sort(np.squeeze(res)),0)
array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
2.26027006e-03, 2.12283316e-03 ...])
```
2.26027006e-03, 2.12283316e-03 ...])
```
* `wait(timeout=-1)`
* Description:
Waits for the result to become available. Blocks until specified timeout elapses or the result
becomes available, whichever comes first.
**Note:**
There are special values of the timeout parameter:
* 0 - Immediately returns the inference status. It does not block or interrupt execution.
* Description:
Waits for the result to become available. Blocks until specified timeout elapses or the result
becomes available, whichever comes first.
> **NOTE:** There are special values of the timeout parameter:
* 0 - Immediately returns the inference status. It does not block or interrupt execution.
To find statuses meaning, please refer to InferenceEngine::StatusCode in Inference Engine C++ documentation
* -1 - Waits until inference result becomes available (default value)
* Parameters:
* `timeout` - Time to wait in milliseconds or special (0, -1) cases described above.
* `timeout` - Time to wait in milliseconds or special (0, -1) cases described above.
If not specified, `timeout` value is set to -1 by default.
* Usage example:
See `async_infer()` method of the `InferRequest` class.
* Usage example:
See `async_infer()` method of the `InferRequest` class.
* `get_perf_counts()`
* Description:
Queries performance measures per layer to get feedback on what is the most time-consuming layer.
**Note**:
Performance counters data and format depends on the plugin
Queries performance measures per layer to get feedback of what is the most time consuming layer.
> **NOTE**: Performance counters data and format depends on the plugin
* Parameters:
None
* Usage example:
* Usage example:
```py
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net.requests[0].infer({input_blob: image})
>>> exec_net.requests[0].get_perf_counts()
{'Conv2D': {'exec_type': 'jit_avx2_1x1',
'real_time': 154,
'cpu_time': 154,
'status': 'EXECUTED',
{'Conv2D': {'exec_type': 'jit_avx2_1x1',
'real_time': 154,
'cpu_time': 154,
'status': 'EXECUTED',
'layer_type': 'Convolution'},
'Relu6': {'exec_type': 'undef',
'real_time': 0,
'cpu_time': 0,
'status': 'NOT_RUN',
'Relu6': {'exec_type': 'undef',
'real_time': 0,
'cpu_time': 0,
'status': 'NOT_RUN',
'layer_type': 'Clamp'}
...
}
```
* `set_batch(size)`
* Description:
Sets new batch size for certain infer request when dynamic batching is enabled in executable network that created this request.
> **NOTE:** Support of dynamic batch size depends on the target plugin.
* Parameters:
* `batch` - new batch size to be used by all the following inference calls for this request.
* Usage example:
```py
>>> plugin.set_config({"DYN_BATCH_ENABLED": "YES"})
>>> exec_net = plugin.load(network=net)
>>> exec_net.requests[0].set_batch(inputs_count)
```
Please refer to `dynamic_batch_demo.py` to see the full usage example.

View File

@@ -1,69 +0,0 @@
# Copyright (C) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# If the pyx file is a C++ file, we should specify that here.
# Build the ie_api Cython extension module that wraps the Inference Engine C++ API.
# Fixes: legacy uppercase SET(), legacy `endif (UNIX)` argument form, and removal
# of a large block of dead commented-out packaging code.
set(CMAKE_INCLUDE_CURRENT_DIR ON)

# When the Inference Engine libraries are copied next to the built module,
# make the module resolve them from its own directory ($ORIGIN) at runtime.
if(COPY_IE_LIBS AND UNIX)
    set(CMAKE_SKIP_BUILD_RPATH FALSE)
    set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
    set(CMAKE_INSTALL_RPATH "$ORIGIN")
    set(CMAKE_INSTALL_RPATH_USE_LINK_PATH FALSE)
endif()

# The .pyx sources wrap C++ code, so tell Cython to generate C++ output.
set_source_files_properties(
    ie_api_impl_defs.pxd
    ie_api_impl.hpp
    ie_api_impl.cpp
    ie_api.pyx
    ie_api.pxd
    PROPERTIES CYTHON_IS_CXX TRUE
)

cython_add_module(
    ie_api
    ie_api_impl_defs.pxd
    ie_api_impl.hpp
    ie_api_impl.cpp
    ie_api.pyx
)

target_link_libraries(ie_api PRIVATE IE::inference_engine)
set_target_properties(ie_api PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)

View File

@@ -1,3 +0,0 @@
"""OpenVINO Inference Engine Python API package.

Re-exports the public classes implemented by the `ie_api` Cython extension.
"""
from .ie_api import *
# Version string reported by the underlying Inference Engine build.
__version__ = get_version()
# Public API surface of the package.
__all__ = ['IENetwork', "IEPlugin", "IENetReader"]

View File

@@ -1,383 +0,0 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#include "ie_api_impl.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp"
// Maps Python-facing precision names to Inference Engine precision enums.
std::map <std::string,InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
{"FP16", InferenceEngine::Precision::FP16},
{"Q78", InferenceEngine::Precision::Q78},
{"I32", InferenceEngine::Precision::I32},
{"I16", InferenceEngine::Precision::I16},
{"I8", InferenceEngine::Precision::I8},
{"U16", InferenceEngine::Precision::U16},
{"U8", InferenceEngine::Precision::U8}};
// Maps Python-facing layout names to Inference Engine layout enums.
std::map <std::string,InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
{"NCHW", InferenceEngine::Layout::NCHW},
{"NHWC", InferenceEngine::Layout::NHWC},
{"OIHW", InferenceEngine::Layout::OIHW},
{"C", InferenceEngine::Layout::C},
{"CHW", InferenceEngine::Layout::CHW},
{"HW", InferenceEngine::Layout::HW},
{"NC", InferenceEngine::Layout::NC},
{"CN", InferenceEngine::Layout::CN},
{"BLOCKED", InferenceEngine::Layout::BLOCKED}};
// Stringizes its argument (appears unused in this file).
#define stringify( name ) # name
// Evaluates an IE API call and throws on any non-OK status.
// NOTE(review): relies on a `InferenceEngine::ResponseDesc response` variable
// being declared in the calling scope for the error message.
// NOTE(review): the trailing backslash on the closing brace continues the macro
// onto the following source line — presumably a blank line followed in the
// original file; verify when reusing.
#define IE_CHECK_CALL(expr) { \
auto ret = (expr); \
if (ret != InferenceEngine::StatusCode::OK) { \
THROW_IE_EXCEPTION << response.msg; \
} \
} \
// Parses an IR model (.xml topology plus .bin weights) from disk and wraps the
// resulting CNNNetwork, its name and batch size into an IENetwork value.
InferenceEnginePython::IENetwork InferenceEnginePython::IENetReader::read(std::string const &model,
                                                                          std::string const &weights)
{
    InferenceEngine::CNNNetReader reader;
    reader.ReadNetwork(model);
    reader.ReadWeights(weights);
    const std::string &parsed_name = reader.getName();
    InferenceEngine::CNNNetwork parsed_network = reader.getNetwork();
    std::size_t parsed_batch = parsed_network.getBatchSize();
    // Aggregate-initializes IENetwork {actual, name, batch_size}.
    return {parsed_network, parsed_name, parsed_batch};
}
// Collects metadata (name, type, precision, params, affinity) for every layer
// reachable from the network inputs, keyed by layer name. Traversal is a
// recursive depth-first walk over each layer's output edges.
std::map<std::string, InferenceEnginePython::IENetLayer> InferenceEnginePython::IENetwork::getLayers()
{
std::map<std::string, InferenceEnginePython::IENetLayer> result;
// Set of layer names already processed (name kept as in original: "visisted").
std::unordered_set<std::string> visisted;
const InferenceEngine::InputsDataMap &networkInputs = actual.getInputsInfo();
using CNNLayerPtrCref = const InferenceEngine::CNNLayerPtr &;
// Recursive DFS: descend into unvisited consumers first, then record this layer.
std::function<void(CNNLayerPtrCref)> DFS = [&](CNNLayerPtrCref layer) {
InferenceEnginePython::IENetLayer layer_info;
/* Assumes no cycles in graph */
for (auto &od : layer->outData)
{
for (auto nl : od->getInputTo())
{
// Skip consumers already recorded on another path.
auto i = visisted.find(nl.second->name);
if (i != visisted.end())
{
continue;
}
DFS(nl.second);
}
}
// Mark visited only after all children have been walked; a root reached a
// second time via the outer loop would be re-processed (harmless map overwrite).
visisted.emplace(layer->name);
layer_info.layer_ptr = layer;
layer_info.name = layer->name;
layer_info.type = layer->type;
std::string precision = layer->precision.name();
layer_info.precision = precision;
layer_info.params = layer->params;
layer_info.affinity = layer->affinity;
result[layer->name] = layer_info;
};
// Seed the traversal with the first real layers fed by each network input.
std::set<InferenceEngine::CNNLayerPtr> inputs;
for (auto input : networkInputs) {
for (auto l : input.second->getInputData()->inputTo) {
inputs.insert(l.second);
}
}
for (auto &layer : inputs)
{
DFS(layer);
}
return result;
}
// Returns per-input metadata (dims, precision, layout) keyed by input name.
// Precision/layout strings are found by reverse lookup in the global maps.
std::map<std::string, InferenceEnginePython::InputInfo> InferenceEnginePython::IENetwork::getInputs(){
std::map<std::string, InferenceEnginePython::InputInfo> inputs;
const InferenceEngine::InputsDataMap &inputsInfo = actual.getInputsInfo();
for (auto & in : inputsInfo){
InferenceEnginePython::InputInfo info;
info.actual = *in.second;
const InferenceEngine::TensorDesc &inputTensorDesc = in.second->getTensorDesc();
info.dims = inputTensorDesc.getDims();
// Reverse lookup: enum value -> human-readable name.
for (auto it : precision_map )
if (it.second == in.second->getPrecision())
info.precision = it.first;
for (auto it : layout_map )
if (it.second == in.second->getLayout())
info.layout = it.first;
inputs[in.first] = info;
}
return inputs;
}
// Returns per-output metadata (dims, precision, layout) keyed by output name.
// Mirrors getInputs() above.
std::map<std::string, InferenceEnginePython::OutputInfo> InferenceEnginePython::IENetwork::getOutputs(){
std::map<std::string, InferenceEnginePython::OutputInfo> outputs;
const InferenceEngine::OutputsDataMap &outputsInfo = actual.getOutputsInfo();
for (auto & out : outputsInfo){
InferenceEnginePython::OutputInfo info;
info.actual = out.second;
// Variable name retained from the input version; this is the output tensor desc.
const InferenceEngine::TensorDesc &inputTensorDesc = out.second->getTensorDesc();
info.dims = inputTensorDesc.getDims();
for (auto it : precision_map )
if (it.second == out.second->getPrecision())
info.precision = it.first;
for (auto it : layout_map )
if (it.second == out.second->getLayout())
info.layout = it.first;
outputs[out.first] = info;
}
return outputs;
}
// Marks the listed layers as additional network outputs with the given
// precision. Layers that are already outputs, or that have more than one
// output blob, are skipped (the latter with a console warning).
void InferenceEnginePython::IENetwork::addOutputs(const std::vector<std::string> & out_layers, const std::string &precision)
{
for (auto && l : out_layers)
{
// Already an output: nothing to do.
InferenceEngine::OutputsDataMap outputsDataMap = actual.getOutputsInfo();
if (outputsDataMap.find(l) != outputsDataMap.end())
{
continue;
}
InferenceEngine::CNNLayerPtr cnnLayer = actual.getLayerByName(l.c_str());
std::vector<InferenceEngine::DataPtr> outData = cnnLayer->outData;
// Only single-output layers can be promoted to network outputs here.
if (outData.size() != 1) {
std::cout << "Layer " << l << " has " << outData.size() << " output blobs and can not be set as output." << std::endl;
continue;
}
actual.addOutput(l);
// Re-query the outputs map so the freshly added entry can be configured.
InferenceEngine::OutputsDataMap outputsDataMapUpd = actual.getOutputsInfo();
outputsDataMapUpd[l]->setPrecision(precision_map[precision]);
}
}
// Sets the network-wide batch size.
void InferenceEnginePython::IENetwork::setBatch(const size_t size)
{
actual.setBatchSize(size);
}
// Reshapes network inputs to the given per-input dimension vectors.
void InferenceEnginePython::IENetwork::reshape(const std::map<std::string, std::vector<size_t>> & input_shapes){
actual.reshape(input_shapes);
}
// Sets the input precision from its string name (must be a key of precision_map;
// an unknown name would insert a default-constructed entry — not validated here).
void InferenceEnginePython::InputInfo::setPrecision(std::string precision){
actual.setPrecision(precision_map[precision]);
}
// Sets the input layout from its string name (must be a key of layout_map).
void InferenceEnginePython::InputInfo::setLayout(std::string layout){
actual.setLayout(layout_map[layout]);
}
// Sets the output precision from its string name (must be a key of precision_map).
void InferenceEnginePython::OutputInfo::setPrecision(std::string precision){
actual->setPrecision(precision_map[precision]);
}
// Loads the plugin for the given device via the dispatcher and records its
// version string as "<major>.<minor>.<build>".
InferenceEnginePython::IEPlugin::IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs)
{
InferenceEngine::PluginDispatcher dispatcher{plugin_dirs};
actual = dispatcher.getPluginByDevice(device);
// GetVersion is expected to point pluginVersion at plugin-owned version data
// (presumably; pluginVersion is uninitialized until that call — verify against
// the IInferencePlugin contract).
const InferenceEngine::Version *pluginVersion;
actual->GetVersion(pluginVersion);
version = std::to_string(pluginVersion->apiVersion.major) + ".";
version += std::to_string(pluginVersion->apiVersion.minor) + ".";
version += pluginVersion->buildNumber;
device_name = device;
}
// Asks the HETERO plugin to assign default per-layer affinities for the network.
// Throws (via IE_CHECK_CALL) if the underlying call fails; also presumably fails
// if the loaded plugin is not a HETERO plugin — verify HeteroPluginPtr behavior.
void InferenceEnginePython::IEPlugin::setInitialAffinity(InferenceEnginePython::IENetwork &net)
{
InferenceEngine::HeteroPluginPtr hetero_plugin(actual);
InferenceEngine::ResponseDesc response;
auto &network = net.actual;
IE_CHECK_CALL(hetero_plugin->SetAffinity(network, {}, &response));
}
// Returns the set of layer names the plugin reports as supported for this network.
std::set<std::string> InferenceEnginePython::IEPlugin::queryNetwork(InferenceEnginePython::IENetwork &net)
{
InferenceEngine::CNNNetwork &network = net.actual;
InferenceEngine::QueryNetworkResult queryRes;
actual->QueryNetwork(network, queryRes);
return queryRes.supportedLayers;
}
// Writes the affinity string directly onto the wrapped CNNLayer.
void InferenceEnginePython::IENetLayer::setAffinity(const std::string & target_affinity){
layer_ptr->affinity = target_affinity;
}
// Replaces the layer's parameter map wholesale.
void InferenceEnginePython::IENetLayer::setParams(const std::map<std::string, std::string> & params_map){
layer_ptr->params = params_map;
}
// Gathers the layer's blobs into a name -> blob map. For weightable layers the
// dedicated _weights/_biases members take precedence over same-named entries in
// the generic blobs map; for all other layers every blob is returned as-is.
std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::IENetLayer::getWeights(){
auto w_layer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(layer_ptr);
// IF current layer is weightable gather weights and biases from casted WeightableLayer and all other blobs
// considered as custom and gathered from blobs field pf CNNLayer.
std::map<std::string, InferenceEngine::Blob::Ptr> weights;
if (w_layer != nullptr){
if (w_layer->_weights != nullptr){
weights["weights"] = w_layer->_weights;
}
if (w_layer->_biases != nullptr){
weights["biases"] = w_layer->_biases;
}
for (auto it : w_layer->blobs){
// Skip entries already covered by the dedicated members above.
if (it.first == "weights" || it.first == "biases"){
continue;
}
weights[it.first] = it.second;
}
}
// Otherwise all layer's blobs are considered as custom and gathered from CNNLayer
else {
std::map<std::string, InferenceEngine::Blob::Ptr> map_placeholder;
weights = map_placeholder; // If layer has no blobs it should not be missed from weights map
for (auto it : layer_ptr->blobs){
weights[it.first] = it.second;
}
}
return weights;
}
// Sets the layer precision from its string name (must be a key of precision_map).
void InferenceEnginePython::IENetLayer::setPrecision(std::string precision){
layer_ptr->precision = precision_map[precision];
}
// Loads a CPU extension shared library from the given path and registers it
// with the plugin. Throws (via IE_CHECK_CALL) if registration fails.
void InferenceEnginePython::IEPlugin::addCpuExtension(const std::string &extension_path)
{
InferenceEngine::ResponseDesc response;
auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(extension_path);
// NOTE(review): this cast looks redundant — extension_ptr should already hold
// an IExtension; harmless as written.
auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
IE_CHECK_CALL(actual->AddExtension(extension, &response))
}
// Compiles the network for this plugin and prepares `num_requests` infer
// requests, pre-fetching the input/output blob handles for each request.
// Throws (via IE_CHECK_CALL) if network loading or request creation fails.
std::unique_ptr<InferenceEnginePython::IEExecNetwork>
InferenceEnginePython::IEPlugin::load(InferenceEnginePython::IENetwork &net,
int num_requests,
const std::map<std::string, std::string> &config)
{
InferenceEngine::ResponseDesc response;
auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(net.name, num_requests);
IE_CHECK_CALL(actual->LoadNetwork(exec_network->actual, net.actual, config, &response))
const InferenceEngine::InputsDataMap &inputs_info = net.actual.getInputsInfo();
const InferenceEngine::OutputsDataMap &outputs_info = net.actual.getOutputsInfo();
// NOTE(review): size_t vs int comparison; a negative num_requests would be
// caught only because the vector above was sized with it — verify upstream checks.
for (size_t i = 0; i < num_requests; ++i) {
InferRequestWrap &infer_request = exec_network->infer_requests[i];
IE_CHECK_CALL(exec_network->actual->CreateInferRequest(infer_request.request_ptr, &response))
// Cache blob pointers so Python-side accessors avoid per-call lookups.
for (const auto& input : inputs_info) {
infer_request.inputs[input.first] = nullptr;
infer_request.request_ptr->GetBlob(input.first.c_str(), infer_request.inputs[input.first], &response);
}
for (const auto& output : outputs_info) {
infer_request.request_ptr->GetBlob(output.first.c_str(), infer_request.outputs[output.first], &response);
}
}
return exec_network;
}
// Applies a key/value configuration map to the plugin; throws on failure.
void InferenceEnginePython::IEPlugin::setConfig(const std::map<std::string, std::string> & config) {
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->SetConfig(config, &response))
}
// Pre-sizes the infer request pool; the requests themselves are created and
// wired up later in IEPlugin::load().
InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string &name, size_t num_requests) :
infer_requests(num_requests), name(name)
{
}
void InferenceEnginePython::IEExecNetwork::infer()
{
InferenceEngine::ResponseDesc response;
InferRequestWrap &request = infer_requests[0];
request.request_ptr->Infer(&response);
}
// Returns the cached input blob by name; throws std::out_of_range if unknown.
InferenceEngine::Blob::Ptr &InferenceEnginePython::InferRequestWrap::getInputBlob(const std::string &blob_name)
{
return inputs.at(blob_name);
}
// Returns the cached output blob by name; throws std::out_of_range if unknown.
InferenceEngine::Blob::Ptr &InferenceEnginePython::InferRequestWrap::getOutputBlob(const std::string &blob_name)
{
return outputs.at(blob_name);
}
// Returns the names of all input blobs, in map iteration (sorted-key) order.
std::vector<std::string> InferenceEnginePython::InferRequestWrap::getInputsList() {
std::vector<std::string> inputs_list;
inputs_list.reserve(inputs.size());
std::transform(inputs.begin(), inputs.end(), std::back_inserter(inputs_list), [] (InferenceEngine::BlobMap::value_type it) -> std::string {
return it.first;
});
return inputs_list;
}
// Returns the names of all output blobs, in map iteration (sorted-key) order.
// Fix: the capacity reservation was sized from `inputs` (copy-paste error from
// getInputsList); it now uses `outputs.size()`. Results were unaffected, but
// the wrong hint could force an avoidable reallocation.
std::vector<std::string> InferenceEnginePython::InferRequestWrap::getOutputsList() {
std::vector<std::string> outputs_list;
outputs_list.reserve(outputs.size());
std::transform(outputs.begin(), outputs.end(), std::back_inserter(outputs_list), [] (InferenceEngine::BlobMap::value_type it) -> std::string {
return it.first;
});
return outputs_list;
}
// Blocking inference on this request; throws (via IE_CHECK_CALL) on failure.
void InferenceEnginePython::InferRequestWrap::infer() {
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(request_ptr->Infer(&response));
}
// Starts asynchronous inference; throws (via IE_CHECK_CALL) on failure.
// Completion must be observed via wait().
void InferenceEnginePython::InferRequestWrap::infer_async() {
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(request_ptr->StartAsync(&response));
}
// Waits up to `timeout` ms for the async result (special values per the IE API:
// presumably 0 = status-only probe, -1 = wait forever — mirrors the docs above).
// Returns the raw InferenceEngine::StatusCode as an int; does not throw.
int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) {
InferenceEngine::ResponseDesc responseDesc;
InferenceEngine::StatusCode code = request_ptr->Wait(timeout, &responseDesc);
return static_cast<int >(code);
}
// Fetches per-layer profiling data from the request and converts each entry to
// the Python-facing ProfileInfo shape, mapping the status enum to a string.
// The GetPerformanceCounts return status is not checked here.
std::map<std::string, InferenceEnginePython::ProfileInfo> InferenceEnginePython::InferRequestWrap::getPerformanceCounts(){
std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts;
InferenceEngine::ResponseDesc response;
request_ptr->GetPerformanceCounts(perf_counts, &response);
std::map<std::string, InferenceEnginePython::ProfileInfo> perf_map;
for (auto it : perf_counts){
InferenceEnginePython::ProfileInfo profile_info;
// Translate the status enum into the string exposed to Python.
switch (it.second.status) {
case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
profile_info.status = "EXECUTED";
break;
case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN:
profile_info.status = "NOT_RUN";
break;
case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT:
profile_info.status = "OPTIMIZED_OUT";
break;
default:
profile_info.status = "UNKNOWN";
}
profile_info.exec_type = it.second.exec_type;
profile_info.layer_type = it.second.layer_type;
// Times are copied as reported (microsecond fields per the IE member names).
profile_info.cpu_time = it.second.cpu_uSec;
profile_info.real_time = it.second.realTime_uSec;
perf_map[it.first] = profile_info;
}
return perf_map;
}
std::string InferenceEnginePython::get_version() {
auto version = InferenceEngine::GetInferenceEngineVersion();
std::string version_str = std::to_string(version->apiVersion.major) + ".";
version_str += std::to_string(version->apiVersion.minor) + ".";
version_str += version->buildNumber;
return version_str;
}

View File

@@ -1,129 +0,0 @@
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
#ifndef INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP
#define INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP
#include <string>
#include <inference_engine.hpp>
#include <iterator>
#include <iostream>
#include <algorithm>
#include <sstream>
#include "ie_extension.h"
// C++ bridge types exposed to Python through the ie_api Cython wrapper.
namespace InferenceEnginePython {
// Snapshot of one network layer plus a live pointer for in-place mutation.
struct IENetLayer {
InferenceEngine::CNNLayerPtr layer_ptr;
std::string name;
std::string type;
std::string precision;
std::string affinity;
std::map<std::string, std::string> params;
void setAffinity(const std::string & target_affinity);
void setParams(const std::map<std::string, std::string> & params_map);
// Returns the layer's blobs ("weights"/"biases" plus any custom blobs) by name.
std::map<std::string, InferenceEngine::Blob::Ptr> getWeights();
void setPrecision(std::string precision);
};
// Snapshot of a network input: dims plus precision/layout as string names.
struct InputInfo{
InferenceEngine::InputInfo actual;
std::vector<size_t> dims;
std::string precision;
std::string layout;
void setPrecision(std::string precision);
void setLayout(std::string layout);
};
// Snapshot of a network output: dims plus precision/layout as string names.
struct OutputInfo{
InferenceEngine::DataPtr actual;
std::vector<size_t> dims;
std::string precision;
std::string layout;
void setPrecision(std::string precision);
};
// Per-layer profiling record surfaced by InferRequestWrap::getPerformanceCounts().
struct ProfileInfo {
std::string status;
std::string exec_type;
std::string layer_type;
// Times copied from the IE microsecond counters.
long long real_time;
long long cpu_time;
unsigned execution_index;
};
// Wraps an InferenceEngine::CNNNetwork together with its name and batch size.
struct IENetwork {
InferenceEngine::CNNNetwork actual;
std::string name;
std::size_t batch_size;
void setBatch(const size_t size);
// Promotes the listed single-output layers to network outputs.
void addOutputs(const std::vector<std::string> &out_layers, const std::string &precision);
std::map<std::string, InferenceEnginePython::IENetLayer> getLayers();
std::map<std::string, InferenceEnginePython::InputInfo> getInputs();
std::map<std::string, InferenceEnginePython::OutputInfo> getOutputs();
void reshape(const std::map<std::string, std::vector<size_t>> & input_shapes);
};
// Stateless reader: parses an IR model (.xml + .bin) into an IENetwork.
struct IENetReader {
static IENetwork read(std::string const &model, std::string const &weights);
std::vector<std::pair<std::string, std::string>> getLayers();
};
// One infer request plus cached input/output blob handles (filled by IEPlugin::load).
struct InferRequestWrap {
InferenceEngine::IInferRequest::Ptr request_ptr;
InferenceEngine::BlobMap inputs;
InferenceEngine::BlobMap outputs;
void infer();
void infer_async();
// Returns the raw StatusCode as int; timeout in ms (0 = probe, -1 = forever).
int wait(int64_t timeout);
InferenceEngine::Blob::Ptr &getInputBlob(const std::string &blob_name);
InferenceEngine::Blob::Ptr &getOutputBlob(const std::string &blob_name);
std::vector<std::string> getInputsList();
std::vector<std::string> getOutputsList();
std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
};
// A compiled network plus its pool of infer requests.
struct IEExecNetwork {
InferenceEngine::IExecutableNetwork::Ptr actual;
std::vector<InferRequestWrap> infer_requests;
IEExecNetwork(const std::string &name, size_t num_requests);
std::string name;
// NOTE(review): next_req_index and async are not used in the visible .cpp —
// presumably driven from the Cython layer; confirm before removing.
int next_req_index = 0;
bool async;
// Blocking inference on the first request; see .cpp for semantics.
void infer();
};
// Wraps one device plugin; loads networks and applies configuration.
struct IEPlugin {
std::unique_ptr<InferenceEnginePython::IEExecNetwork> load(InferenceEnginePython::IENetwork &net,
int num_requests,
const std::map<std::string,std::string> &config);
std::string device_name;
// Plugin version formatted "<major>.<minor>.<build>".
std::string version;
void setConfig(const std::map<std::string, std::string> &);
void addCpuExtension(const std::string &extension_path);
void setInitialAffinity(InferenceEnginePython::IENetwork &net);
IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs);
IEPlugin() = default;
std::set<std::string> queryNetwork(InferenceEnginePython::IENetwork &net);
InferenceEngine::InferenceEnginePluginPtr actual;
};
// Reinterprets a blob's raw buffer as T* (no type or size checking).
template<class T>
T* get_buffer(InferenceEngine::Blob& blob) {
return blob.buffer().as<T *>();
}
// Local C++11 stand-in for std::make_unique (which is C++14).
template<class T, class... Args>
std::unique_ptr<T> make_unique(Args&&... args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
// Returns the IE library version as "<major>.<minor>.<build>".
std::string get_version();
}; // InferenceEnginePython
#endif //INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP

View File

@@ -0,0 +1,116 @@
# Benchmark Application Python* Demo
This topic demonstrates how to run the Benchmark Application demo, which performs inference using convolutional networks.
## How It Works
> **NOTE:** To achieve benchmark results similar to the official published results, set CPU frequency to 2.9GHz and GPU frequency to 1GHz.
Upon the start-up, the application reads command-line parameters and loads a network and images to the Inference Engine plugin. The number of infer requests and execution approach depend on a mode defined with the `-api` command-line parameter.
> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
### Synchronous API
For synchronous mode, the primary metric is latency. The application creates one infer request and executes the `Infer` method. A number of executions is defined by one of the two values:
* Number of iterations defined with the `-niter` command-line argument
* Predefined duration if `-niter` is skipped. Predefined duration value depends on device.
During the execution, the application collects two types of metrics:
* Latency for each infer request executed with `Infer` method
* Duration of all executions
Reported latency value is calculated as mean value of all collected latencies. Reported throughput value is a derivative from reported latency and additionally depends on batch size.
### Asynchronous API
For asynchronous mode, the primary metric is throughput in frames per second (FPS). The application creates a certain number of infer requests and executes the `StartAsync` method. A number of infer is specified with the `-nireq` command-line parameter. A number of executions is defined by one of the two values:
* Number of iterations defined with the `-niter` command-line argument
* Predefined duration if `-niter` is skipped. Predefined duration value depends on device.
The infer requests are executed asynchronously. `Wait` method is used to wait for previous execution to complete. The application measures all infer requests executions and reports the throughput metric based on batch size and total execution duration.
## Running
Running the application with the `-h` or `--help`' option yields the following usage message:
Running the application with the `-h` or `--help` option yields the following usage message:
```python3 benchmark_app.py -h```
The command yields the following usage message:
```
usage: benchmark_app.py [-h] -i PATH_TO_IMAGES -m PATH_TO_MODEL
[-c PATH_TO_CLDNN_CONFIG] [-l PATH_TO_EXTENSION]
[-api {sync,async}] [-d TARGET_DEVICE]
[-niter NUMBER_ITERATIONS]
[-nireq NUMBER_INFER_REQUESTS]
[-nthreads NUMBER_THREADS] [-b BATCH_SIZE]
[-pin {YES,NO}]
Options:
-h, --help Show this help message and exit.
-i PATH_TO_IMAGES, --path_to_images PATH_TO_IMAGES
Required. Path to a folder with images or to image
files.
-m PATH_TO_MODEL, --path_to_model PATH_TO_MODEL
Required. Path to an .xml file with a trained model.
-c PATH_TO_CLDNN_CONFIG, --path_to_cldnn_config PATH_TO_CLDNN_CONFIG
Optional. Required for GPU custom kernels. Absolute
path to an .xml file with the kernels description.
-l PATH_TO_EXTENSION, --path_to_extension PATH_TO_EXTENSION
Optional. Required for CPU custom layers. Absolute
path to a shared library with the kernels implementation.
-api {sync,async}, --api_type {sync,async}
Optional. Enable using sync/async API. Default value
is sync
-d TARGET_DEVICE, --target_device TARGET_DEVICE
Optional. Specify a target device to infer on: CPU,
GPU, FPGA, HDDL or MYRIAD. Use "-d HETERO:<comma
separated devices list>" format to specify HETERO
plugin. The application looks for a suitable plugin
for the specified device.
-niter NUMBER_ITERATIONS, --number_iterations NUMBER_ITERATIONS
Optional. Number of iterations. If not specified, the
number of iterations is calculated depending on a
device.
-nireq NUMBER_INFER_REQUESTS, --number_infer_requests NUMBER_INFER_REQUESTS
Optional. Number of infer requests (default value is
2).
-nthreads NUMBER_THREADS, --number_threads NUMBER_THREADS
Number of threads to use for inference on the CPU
(including Hetero cases).
-b BATCH_SIZE, --batch_size BATCH_SIZE
Optional. Batch size value. If not specified, the
batch size value is determined from IR
-pin {YES,NO}, --infer_threads_pinning {YES,NO}
Optional. Enable ("YES" is default value) or disable
("NO") CPU threads pinning for CPU-involved inference.
```
Running the application with the empty list of options yields the usage message given above and an error message.
To run the demo, you can use public or pre-trained models. To download the pre-trained models, use the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or go to [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the demo with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
For example, to do inference on an image using a trained network with multiple outputs on CPU, run the following command:
```
python3 benchmark_app.py -i <path_to_image>/inputImage.bmp -m <path_to_model>/multiple-output.xml -d CPU
```
## Demo Output
Application output depends on a used API. For synchronous API, the application outputs latency and throughput:
```
[ INFO ] Start inference synchronously (10 s duration)
[BENCHMARK RESULT] Latency is 15.5520 msec
[BENCHMARK RESULT] Throughput is 1286.0082 FPS
```
For asynchronous API, the application outputs only throughput:
```
[ INFO ] Start inference asynchronously (10 s duration, 8 inference requests in parallel)
[BENCHMARK RESULT] Throughput is 1444.2591 FPS
```
## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
* [Model Optimizer](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader)

View File

@@ -1,5 +1,5 @@
"""
Copyright (c) 2018 Intel Corporation
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,11 +14,5 @@
limitations under the License.
"""
from mo.front.common.partial_infer.elemental import copy_shape_infer
def tf_softmax_ext(pb):
return {
'type': 'SoftMax',
'infer': copy_shape_infer
}
from .benchmark import main
from .utils.constants import HELP_MESSAGES

View File

@@ -0,0 +1,199 @@
"""
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from statistics import median
from openvino.inference_engine import IENetwork, IEPlugin
from .utils.benchmark_utils import *
def main(args=None):
    """Run the benchmark: load a network, feed it images and measure latency/throughput.

    :param args: pre-parsed argparse.Namespace; when None, options are read from sys.argv.
    Results are printed to stdout; any failure is logged (not propagated).
    """
    try:
        if args is None:
            args = parse_args()

        validate_args(args)

        # --------------------------------- 1. Load Plugin for inference engine ---------------------------------
        # NOTE(review): was module-level logging.info(); use the named logger for consistency
        logger.info("Loading plugin")
        plugin = IEPlugin(args.target_device)

        config = dict()
        if CPU_DEVICE_NAME in args.target_device:
            if args.path_to_extension:
                plugin.add_cpu_extension(args.path_to_extension)
            # limit threading for CPU portion of inference
            if args.number_threads is not None:
                config.update({'CPU_THREADS_NUM': str(args.number_threads)})
            # pin threads for CPU portion of inference
            config.update({'CPU_BIND_THREAD': args.infer_threads_pinning})
            # for pure CPU execution, more throughput-oriented execution via streams
            if args.api_type == 'async' and CPU_DEVICE_NAME in args.target_device:
                config.update({'CPU_THROUGHPUT_STREAMS': str(args.number_infer_requests)})
        elif GPU_DEVICE_NAME in args.target_device:
            if args.path_to_cldnn_config:
                config.update({'CONFIG_FILE': args.path_to_cldnn_config})
                # fixed message grammar ("extensions is" -> "extension is")
                logger.info("GPU extension is loaded {}".format(args.path_to_cldnn_config))
        elif MYRIAD_DEVICE_NAME in args.target_device:
            config.update({'LOG_LEVEL': 'LOG_INFO'})
            config.update({'VPU_LOG_LEVEL': 'LOG_INFO'})

        plugin.set_config(config)

        logger.info("Device is {}".format(plugin.device))
        logger.info("Plugin version is {}".format(plugin.version))

        # --------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ---------------------
        logger.info("Loading network files")

        xml_filename = os.path.abspath(args.path_to_model)
        head, tail = os.path.splitext(xml_filename)
        # the weights file is assumed to sit next to the .xml with the same stem
        bin_filename = os.path.abspath(head + BIN_EXTENSION)

        ie_network = IENetwork(xml_filename, bin_filename)

        input_info = ie_network.inputs

        if len(input_info) == 0:
            raise AttributeError('No inputs info is provided')
        elif len(input_info) != 1:
            raise AttributeError("only one input layer network is supported")

        # -------------------------------------- 3. Change network batch_size -------------------------------------
        batch_size = ie_network.batch_size
        key = list(input_info.keys()).pop()
        precision = input_info[key].precision

        if args.batch_size and args.batch_size != ie_network.batch_size:
            # deepcopy input_info
            shape = input_info[key].shape
            # We support models having only one input layers
            if input_info[key].layout != LAYOUT_TYPE:
                raise Exception('Unsupported model for batch size changing in automatic mode')
            shape[BATCH_SIZE_ELEM] = args.batch_size
            ie_network.reshape({key: shape})

            input_info = ie_network.inputs

            batch_size = args.batch_size

        logger_message = "Network batch size was changed to: " if args.batch_size is not None else "Network batch size: "
        logger_message += " {}, precision: {}".format(batch_size, precision)
        logger.info(logger_message)

        # ------------------------------------- 4. Loading model to the plugin -------------------------------------
        logger.info("Loading model to the plugin")
        exe_network = plugin.load(ie_network, args.number_infer_requests)

        # ------------------------------------ 5. Performance measurements stuff -----------------------------------
        inputs = get_images(os.path.abspath(args.path_to_images), batch_size)

        if batch_size < len(inputs):
            # logger.warn() is a deprecated alias of warning(); also fixed typo "less then"
            logger.warning("Network batch size {} is less than images count {}"
                           ", some input files will be ignored".format(batch_size, len(inputs)))

        input_images = {key: fill_blob_with_image(inputs, input_info[key].shape)}

        times = list()
        duration = 0

        # with no explicit iteration count, measure for a device-dependent wall-clock duration
        if args.number_iterations is None:
            duration = get_duration_in_secs(args.target_device)

        if args.api_type == 'sync':

            # warming up - out of scope
            exe_network.infer(input_images)

            if args.number_iterations is not None:
                logger.info(
                    "Start inference synchronously ({}) sync inference executions".format(args.number_iterations))
                for iteration in range(args.number_iterations):
                    sync_infer_request(exe_network, times, input_images)

            else:
                logger.info("Start inference synchronously ({} s duration)".format(duration))
                start_time = datetime.now()
                current_time = start_time
                while (current_time - start_time).total_seconds() < duration:
                    current_time = sync_infer_request(exe_network, times, input_images)

            # median latency is reported to be robust against warm-up/GC outliers
            times.sort()
            latency = median(times)
            fps = batch_size / latency

            print("[BENCHMARK RESULT] Latency is {:.4f} msec".format(latency * 1e3))
            print("[BENCHMARK RESULT] Throughput is {:.4f} FPS".format(fps))
        else:
            infer_requests = exe_network.requests

            if args.number_iterations is not None:
                logger.info("Start inference asynchronously ({}"
                            " async inference executions, {} "
                            " inference requests in parallel".format(args.number_iterations,
                                                                     args.number_infer_requests))
            else:
                logger.info("Start inference asynchronously ({} s duration, "
                            "{} inference requests in parallel)".format(duration, args.number_infer_requests))

            # round-robin over the request pool: start request N, wait on request N-1
            current_inference = 0
            required_inference_requests_were_executed = False
            previous_inference = 1 - args.number_infer_requests
            step = 0
            steps_count = args.number_infer_requests - 1
            if args.number_iterations is not None:
                steps_count += args.number_iterations

            # warming up - out of scope
            infer_requests[0].async_infer(input_images)
            infer_requests[0].wait()

            start_time = datetime.now()
            while not required_inference_requests_were_executed or step < steps_count or \
                    args.number_iterations is None and (datetime.now() - start_time).total_seconds() < duration:
                exe_network.start_async(current_inference, input_images)

                if previous_inference >= 0:
                    status = infer_requests[previous_inference].wait()
                    # Bug fix: compare the status code with "!=", not the identity test "is not"
                    if status != 0:
                        raise Exception("Infer request not completed successfully")

                current_inference += 1
                if current_inference >= args.number_infer_requests:
                    current_inference = 0
                    required_inference_requests_were_executed = True

                previous_inference += 1
                if previous_inference >= args.number_infer_requests:
                    previous_inference = 0

                step += 1

            # wait the latest inference executions
            for not_completed_index in range(args.number_infer_requests):
                if infer_requests[not_completed_index].wait(0) != 0:
                    infer_requests[not_completed_index].wait()

            total_duration = (datetime.now() - start_time).total_seconds()
            fps = batch_size * step / total_duration

            print("[BENCHMARK RESULT] Throughput is {:.4f} FPS".format(fps))

        del exe_network
        del plugin

    except Exception as e:
        logger.exception(e)

View File

@@ -1,5 +1,5 @@
"""
Copyright (c) 2018 Intel Corporation
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,15 +13,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.partial_infer.elemental import copy_shape_infer
def softmax_ext(pb_layer, pb_model):
    """Extract SoftMax layer attributes from a Caffe protobuf layer definition.

    Reads the axis from the layer's softmax_param; the output shape equals the
    input shape, so the generic copy-shape inference function is attached.
    """
    softmax_param = pb_layer.softmax_param
    attrs = dict()
    attrs['type'] = 'SoftMax'
    attrs['axis'] = softmax_param.axis
    attrs['infer'] = copy_shape_infer
    return attrs

View File

@@ -0,0 +1,124 @@
"""
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import argparse
import os
import cv2
import numpy as np
import sys
from glob import glob
from random import choice
from datetime import datetime
from fnmatch import fnmatch
from .constants import *
logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger('BenchmarkApp')
def validate_args(args):
    """Validate parsed command-line arguments, raising on invalid values.

    :param args: namespace with number_iterations, number_infer_requests and
                 path_to_model attributes.
    :raises Exception: when a numeric option is negative or the model path is
                       not an .xml file.
    """
    if args.number_iterations is not None and args.number_iterations < 0:
        raise Exception("Number of iterations should be positive (invalid -niter option value)")
    if args.number_infer_requests < 0:
        raise Exception("Number of inference requests should be positive (invalid -nireq option value)")
    if not fnmatch(args.path_to_model, XML_EXTENSION_PATTERN):
        # Bug fix: the placeholder was never substituted, so the literal
        # message "Path {} is not xml file." was raised
        raise Exception('Path {} is not xml file.'.format(args.path_to_model))
def parse_args():
    """Build the benchmark application's command-line interface and parse sys.argv.

    All help strings come from the shared HELP_MESSAGES table; -h is registered
    manually because add_help=False is needed for the custom option group.
    """
    parser = argparse.ArgumentParser(add_help=False)
    options = parser.add_argument_group('Options')
    options.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                         help=HELP_MESSAGES["HELP"])
    options.add_argument('-i', '--path_to_images', type=str, required=True,
                         help=HELP_MESSAGES['IMAGE_MESSAGE'])
    options.add_argument('-m', '--path_to_model', type=str, required=True,
                         help=HELP_MESSAGES['MODEL_MESSAGE'])
    options.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
                         help=HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    options.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
                         help=HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    options.add_argument('-api', '--api_type', type=str, required=False, default='async',
                         choices=['sync', 'async'], help=HELP_MESSAGES['API_MESSAGE'])
    options.add_argument('-d', '--target_device', type=str, required=False, default="CPU",
                         help=HELP_MESSAGES['TARGET_DEVICE_MESSAGE'])
    options.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
                         help=HELP_MESSAGES['ITERATIONS_COUNT_MESSAGE'])
    options.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=2,
                         help=HELP_MESSAGES['INFER_REQUESTS_COUNT_MESSAGE'])
    options.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
                         help=HELP_MESSAGES['INFER_NUM_THREADS_MESSAGE'])
    options.add_argument('-b', '--batch_size', type=int, required=False, default=None,
                         help=HELP_MESSAGES['BATCH_SIZE_MESSAGE'])
    options.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES',
                         choices=['YES', 'NO'],
                         help=HELP_MESSAGES['INFER_THREADS_PINNING_MESSAGE'])
    return parser.parse_args()
def get_images(path_to_images, batch_size):
    """Collect image paths to fill one network batch.

    A single file path is repeated batch_size times. For a directory, all files
    with a recognized image extension are gathered; if fewer than batch_size are
    found, the list is padded with random re-picks from the found images.

    :raises Exception: when a directory contains no recognizable images.
    """
    if os.path.isfile(path_to_images):
        selected = list()
        while len(selected) != batch_size:
            selected.append(path_to_images)
        return selected

    pattern = os.path.join(path_to_images, '*')
    selected = [candidate for candidate in glob(pattern, recursive=True)
                if candidate.rsplit('.').pop().upper() in IMAGE_EXTENSIONS]
    if len(selected) == 0:
        raise Exception("No images found in {}".format(path_to_images))
    # pad with random re-picks when there are fewer images than the batch needs
    while len(selected) < batch_size:
        selected.append(choice(selected))
    return selected
def get_duration_in_secs(target_device):
    """Pick the measurement duration (seconds) for the requested device.

    The longest duration among all known device-name substrings found in
    ``target_device`` wins (covers HETERO:... strings); an unknown device falls
    back to the conservative UNKNOWN default with a warning.
    """
    duration = 0
    for device in DEVICE_DURATION_IN_SECS:
        if device in target_device:
            duration = max(duration, DEVICE_DURATION_IN_SECS[device])

    if duration == 0:
        duration = DEVICE_DURATION_IN_SECS[UNKNOWN_DEVICE_TYPE]
        # Fix: logger.warn() is a deprecated alias; warning() is the supported API
        logger.warning("Default duration {} seconds for unknown device {} is used".format(duration, target_device))

    return duration
def fill_blob_with_image(images_path, shape):
    """Load images into a single NCHW input blob of the given shape.

    :param images_path: list of image file paths, at least shape[0] entries.
    :param shape: target blob shape (batch, channels, height, width).
    :return: numpy ndarray of that shape; HWC images are transposed to CHW.
    """
    images = np.ndarray(shape)
    for item in range(shape[0]):
        image = cv2.imread(images_path[item])

        new_im_size = tuple(shape[2:])  # (height, width) from the NCHW shape
        if image.shape[:-1] != new_im_size:
            # Fix: logger.warn() is deprecated; also corrected message grammar
            logger.warning("Image {} is resized from ({}) to ({})".format(images_path[item],
                                                                          image.shape[:-1], new_im_size))
            # Bug fix: cv2.resize takes dsize as (width, height); passing the
            # (height, width) tuple directly produced a transposed size for
            # non-square inputs and broke the assignment below
            image = cv2.resize(image, (new_im_size[1], new_im_size[0]))

        image = image.transpose((2, 0, 1))
        images[item] = image
    return images
def sync_infer_request(exe_network, times, images):
    """Run one synchronous inference and record its duration.

    Appends the elapsed seconds to ``times`` and returns the completion
    timestamp so the caller can drive a wall-clock measurement loop.
    """
    started = datetime.now()
    exe_network.infer(images)
    finished = datetime.now()
    times.append((finished - started).total_seconds())
    return finished

View File

@@ -0,0 +1,65 @@
"""
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Help strings for the benchmark application's command-line options,
# referenced by key from the argument parsers.
HELP_MESSAGES = {
    'HELP': "Show this help message and exit.",
    'IMAGE_MESSAGE': "Required. Path to a folder with images or to image files.",
    'MULTI_INPUT_MESSAGE': "Optional. Path to multi input file containing.",
    'MODEL_MESSAGE': "Required. Path to an .xml file with a trained model.",
    'PLUGIN_PATH_MESSAGE': "Optional. Path to a plugin folder.",
    'API_MESSAGE': "Optional. Enable using sync/async API. Default value is sync",
    'TARGET_DEVICE_MESSAGE': "Optional. Specify a target device to infer on: CPU, GPU, FPGA, HDDL or MYRIAD. "
                             "Use \"-d HETERO:<comma separated devices list>\" format to specify HETERO plugin. "
                             "The application looks for a suitable plugin for the specified device.",
    'ITERATIONS_COUNT_MESSAGE': "Optional. Number of iterations. "
                                "If not specified, the number of iterations is calculated depending on a device.",
    'INFER_REQUESTS_COUNT_MESSAGE': "Optional. Number of infer requests (default value is 2).",
    'INFER_NUM_THREADS_MESSAGE': "Number of threads to use for inference on the CPU "
                                 "(including Hetero cases).",
    'CUSTOM_CPU_LIBRARY_MESSAGE': "Optional. Required for CPU custom layers. "
                                  "Absolute path to a shared library with the kernels implementations.",
    'CUSTOM_GPU_LIBRARY_MESSAGE': "Optional. Required for GPU custom kernels. Absolute path to an .xml file with the "
                                  "kernels description.",
    'BATCH_SIZE_MESSAGE': "Optional. Batch size value. If not specified, the batch size value is determined from IR",
    'INFER_THREADS_PINNING_MESSAGE': "Optional. Enable (\"YES\" is default value) or disable (\"NO\")"
                                     "CPU threads pinning for CPU-involved inference."
}
# Default measurement duration (seconds) per target device, used when the
# -niter option is not given (see get_duration_in_secs).
DEVICE_DURATION_IN_SECS = {
    "CPU": 60,
    "GPU": 60,
    "VPU": 60,
    "MYRIAD": 60,
    "FPGA": 120,
    "HDDL": 60,
    "UNKNOWN": 120
}
# Upper-cased file extensions accepted as input images (compared case-insensitively).
IMAGE_EXTENSIONS = ['JPEG', 'JPG', 'PNG', 'BMP']
# Device-name substrings matched against the -d option to pick
# device-specific plugin configuration.
MYRIAD_DEVICE_NAME = "MYRIAD"
CPU_DEVICE_NAME = "CPU"
GPU_DEVICE_NAME = "GPU"
# Fallback key into DEVICE_DURATION_IN_SECS for unrecognized devices.
UNKNOWN_DEVICE_TYPE = "UNKNOWN"
# Index of the batch dimension in an NCHW input shape.
BATCH_SIZE_ELEM = 0
# Only inputs with this layout support automatic batch-size changing.
LAYOUT_TYPE = 'NCHW'
# IR model file extensions; the .bin weights path is derived from the .xml path.
XML_EXTENSION = ".xml"
BIN_EXTENSION = ".bin"
# fnmatch pattern used to validate the -m model path.
XML_EXTENSION_PATTERN = '*' + XML_EXTENSION

View File

@@ -0,0 +1,37 @@
import benchmark
from argparse import ArgumentParser, SUPPRESS
def parse_args():
    """Build the command-line interface for the benchmark entry-point script.

    Mirrors the option set of the benchmark package, pulling every help string
    from its shared HELP_MESSAGES table.
    """
    parser = ArgumentParser(add_help=False)
    options = parser.add_argument_group('Options')
    options.add_argument('-h', '--help', action='help', default=SUPPRESS,
                         help=benchmark.HELP_MESSAGES["HELP"])
    options.add_argument('-i', '--path_to_images', type=str, required=True,
                         help=benchmark.HELP_MESSAGES['IMAGE_MESSAGE'])
    options.add_argument('-m', '--path_to_model', type=str, required=True,
                         help=benchmark.HELP_MESSAGES['MODEL_MESSAGE'])
    options.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
                         help=benchmark.HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    options.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
                         help=benchmark.HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    options.add_argument('-api', '--api_type', type=str, required=False, default='async',
                         choices=['sync', 'async'], help=benchmark.HELP_MESSAGES['API_MESSAGE'])
    options.add_argument('-d', '--target_device', type=str, required=False, default="CPU",
                         help=benchmark.HELP_MESSAGES['TARGET_DEVICE_MESSAGE'])
    options.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
                         help=benchmark.HELP_MESSAGES['ITERATIONS_COUNT_MESSAGE'])
    options.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=2,
                         help=benchmark.HELP_MESSAGES['INFER_REQUESTS_COUNT_MESSAGE'])
    options.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
                         help=benchmark.HELP_MESSAGES['INFER_NUM_THREADS_MESSAGE'])
    options.add_argument('-b', '--batch_size', type=int, required=False, default=None,
                         help=benchmark.HELP_MESSAGES['BATCH_SIZE_MESSAGE'])
    options.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES',
                         choices=['YES', 'NO'],
                         help=benchmark.HELP_MESSAGES['INFER_THREADS_PINNING_MESSAGE'])
    return parser.parse_args()


if __name__ == "__main__":
    # Parse CLI options and hand them to the benchmark runner.
    benchmark.main(parse_args())

View File

@@ -0,0 +1,79 @@
# Image Classification Python* Sample
This topic demonstrates how to run the Image Classification sample application, which performs
inference using image classification networks such as AlexNet and GoogLeNet.
### How It Works
Upon the start-up, the sample application reads command line parameters and loads a network and an image to the Inference
Engine plugin. When inference is done, the application creates an
output image and outputs data to the standard output stream.
> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
## Running
Running the application with the `-h` option yields the usage message:
```
python3 classification_sample.py -h
```
The command yields the following usage message:
```
usage: classification_sample.py [-h] -m MODEL -i INPUT [INPUT ...]
[-l CPU_EXTENSION] [-pp PLUGIN_DIR]
[-d DEVICE] [--labels LABELS] [-nt NUMBER_TOP]
[-ni NUMBER_ITER] [-pc]
Options:
-h, --help Show this help message and exit.
-m MODEL, --model MODEL
Required. Path to an .xml file with a trained model.
-i INPUT [INPUT ...], --input INPUT [INPUT ...]
Required. Path to a folder with images or path to an
image files
-l CPU_EXTENSION, --cpu_extension CPU_EXTENSION
Optional. Required for CPU custom layers. MKLDNN (CPU)-targeted custom layers.
Absolute path to a shared library with the kernels
implementations.
-pp PLUGIN_DIR, --plugin_dir PLUGIN_DIR
Optional. Path to a plugin folder
-d DEVICE, --device DEVICE
Optional. Specify the target device to infer on; CPU,
GPU, FPGA, HDDL or MYRIAD is acceptable. The sample
will look for a suitable plugin for device specified.
Default value is CPU
--labels LABELS Optional. Path to a labels mapping file
-nt NUMBER_TOP, --number_top NUMBER_TOP
Optional. Number of top results
-ni NUMBER_ITER, --number_iter NUMBER_ITER
Optional. Number of inference iterations
-pc, --perf_counts Optional. Report performance counters
```
Running the application with the empty list of options yields the usage message given above.
To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
For example, to perform inference of an AlexNet model (previously converted to the Inference Engine format) on CPU, use the following command:
```
python3 classification_sample.py -i <path_to_image>/cat.bmp -m <path_to_model>/alexnet_fp32.xml
```
### Sample Output
By default the application outputs top-10 inference results.
Add the `-nt` option to the previous command to modify the number of top output results.
For example, to get the top-5 results on GPU, run the following command:
```
python3 classification_sample.py -i <path_to_image>/cat.bmp -m <path_to_model>/alexnet_fp32.xml -nt 5 -d GPU
```
## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
* [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
@@ -26,22 +26,29 @@ from openvino.inference_engine import IENetwork, IEPlugin
def build_argparser():
parser = ArgumentParser()
parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
type=str, nargs="+")
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
"impl.", type=str, default=None)
parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device specified (CPU by default)", default="CPU",
type=str)
parser.add_argument("--labels", help="Labels mapping file", default=None, type=str)
parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True,
type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
required=True,
type=str, nargs="+")
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. "
"MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the"
" kernels implementations.", type=str, default=None)
args.add_argument("-pp", "--plugin_dir", help="Optional. Path to a plugin folder", type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: is "
"acceptable. The sample will look for a suitable plugin for device specified. Default "
"value is CPU",
default="CPU", type=str)
args.add_argument("--labels", help="Optional. Path to a labels mapping file", default=None, type=str)
args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
args.add_argument("-ni", "--number_iter", help="Optional. Number of inference iterations", default=1, type=int)
args.add_argument("-pc", "--perf_counts", help="Optional. Report performance counters", default=False,
action="store_true")
return parser
@@ -58,7 +65,7 @@ def main():
plugin.add_cpu_extension(args.cpu_extension)
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork.from_ir(model=model_xml, weights=model_bin)
net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
@@ -93,7 +100,6 @@ def main():
# Loading model to the plugin
log.info("Loading model to the plugin")
exec_net = plugin.load(network=net)
del net
# Start sync inference
log.info("Starting inference ({} iterations)".format(args.number_iter))
@@ -101,15 +107,15 @@ def main():
for i in range(args.number_iter):
t0 = time()
res = exec_net.infer(inputs={input_blob: images})
infer_time.append((time()-t0)*1000)
infer_time.append((time() - t0) * 1000)
log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
if args.perf_counts:
perf_counts = exec_net.requests[0].get_perf_counts()
log.info("Performance counters:")
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
for layer, stats in perf_counts.items():
print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
# Processing output blob
log.info("Processing output blob")
@@ -120,18 +126,25 @@ def main():
labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
else:
labels_map = None
classid_str = "classid"
probability_str = "probability"
for i, probs in enumerate(res):
probs = np.squeeze(probs)
top_ind = np.argsort(probs)[-args.number_top:][::-1]
print("Image {}\n".format(args.input[i]))
print(classid_str, probability_str)
print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
for id in top_ind:
det_label = labels_map[id] if labels_map else "#{}".format(id)
print("{:.7f} label {}".format(probs[id], det_label))
det_label = labels_map[id] if labels_map else "{}".format(id)
label_length = len(det_label)
space_num_before = (len(classid_str) - label_length) // 2
space_num_after = len(classid_str) - (space_num_before + label_length) + 2
space_num_before_prob = (len(probability_str) - len(str(probs[id]))) // 2
print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
' ' * space_num_after, ' ' * space_num_before_prob,
probs[id]))
print("\n")
del exec_net
del plugin
if __name__ == '__main__':
sys.exit(main() or 0)

View File

@@ -0,0 +1,89 @@
# Image Classification Python* Sample Async
This sample demonstrates how to build and execute inference in pipelined mode on example of classifications networks.
The pipelined mode can increase the throughput of picture processing. The latency of a single inference will be the same as for synchronous execution.
<br>
The throughput increases due to the following reasons:
* Some plugins have heterogeneity inside themselves: data transferring, execution on remote device, pre-processing and post-processing on the host.
* Using of explicit heterogeneous plugin with execution of different parts of network on different devices, for example HETERO:CPU,GPU.
When two or more devices process one image, creating several infer requests and starting asynchronous inference allow for using devices in the most efficient way.
If two devices are involved in execution, the most optimal value for `-nireq` option is 2.
To process infer requests more efficiently, Classification Sample Async uses the round-robin algorithm. It starts execution of the current infer request and switches to waiting for results of the previous one. After the waiting is finished, it switches infer requests and repeats the procedure.
Another aspect required for good throughput is the number of iterations. Only with a large number of iterations can you emulate the real application workload and get good performance.
The batch mode is independent of the pipelined mode. Pipelined mode works efficiently with any batch size.
### How It Works
Upon the start-up, the sample application reads command line parameters and loads a network and an image to the Inference
Engine plugin.
Then application creates several infer requests pointed in `-nireq` parameter and loads images for inference.
Then in a loop it starts inference for the current infer request and switches to waiting for the previous one. When results are ready, it swaps infer requests.
When inference is done, the application outputs data to the standard output stream.
> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
## Running
Running the application with the <code>-h</code> option yields the following usage message:
```
python3 classification_sample_async.py -h
```
The command yields the following usage message:
```
usage: classification_sample_async.py [-h] -m MODEL -i INPUT [INPUT ...]
[-l CPU_EXTENSION] [-pp PLUGIN_DIR]
[-d DEVICE] [--labels LABELS]
[-nt NUMBER_TOP] [-ni NUMBER_ITER] [-pc]
Options:
-h, --help Show this help message and exit.
-m MODEL, --model MODEL
Required. Path to an .xml file with a trained model.
-i INPUT [INPUT ...], --input INPUT [INPUT ...]
Required. Path to a folder with images or path to an
image files
-l CPU_EXTENSION, --cpu_extension CPU_EXTENSION
Optional. Required for CPU custom layers. Absolute
path to a shared library with the kernels
implementations.
-pp PLUGIN_DIR, --plugin_dir PLUGIN_DIR
Optional. Path to a plugin folder
-d DEVICE, --device DEVICE
Optional. Specify the target device to infer on; CPU,
GPU, FPGA, HDDL or MYRIAD is acceptable. The sample
will look for a suitable plugin for device specified.
Default value is CPU
--labels LABELS Optional. Labels mapping file
-nt NUMBER_TOP, --number_top NUMBER_TOP
Optional. Number of top results
-ni NUMBER_ITER, --number_iter NUMBER_ITER
Optional. Number of inference iterations
-pc, --perf_counts Optional. Report performance counters
```
Running the application with the empty list of options yields the usage message given above and an error message.
To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
You can do inference on an image using a trained AlexNet network on FPGA with fallback to CPU using the following command:
```
python3 classification_sample_async.py -i <path_to_image>/cat.bmp -m <path_to_model>/alexnet_fp32.xml -nt 5 -d HETERO:FPGA,CPU -nireq 2 -ni 200
```
### Sample Output
By default, the application outputs top-10 inference results for each infer request.
It also provides throughput value measured in frames per seconds.
## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
@@ -26,22 +26,26 @@ from openvino.inference_engine import IENetwork, IEPlugin
def build_argparser():
parser = ArgumentParser()
parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
type=str, nargs="+")
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
"impl.", type=str, default=None)
parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device specified (CPU by default)", default="CPU",
type=str)
parser.add_argument("--labels", help="Labels mapping file", default=None, type=str)
parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
required=True, type=str, nargs="+")
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. Absolute path to a shared library with the"
" kernels implementations.", type=str, default=None)
args.add_argument("-pp", "--plugin_dir", help="Optional. Path to a plugin folder", type=str, default=None)
args.add_argument("-d", "--device",
help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "
"acceptable. The sample will look for a suitable plugin for device specified. Default value is CPU",
default="CPU", type=str)
args.add_argument("--labels", help="Optional. Labels mapping file", default=None, type=str)
args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
args.add_argument("-ni", "--number_iter", help="Optional. Number of inference iterations", default=1, type=int)
args.add_argument("-pc", "--perf_counts", help="Optional. Report performance counters",
default=False, action="store_true")
return parser
@@ -58,7 +62,7 @@ def main():
plugin.add_cpu_extension(args.cpu_extension)
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork.from_ir(model=model_xml, weights=model_bin)
net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
@@ -92,7 +96,6 @@ def main():
# Loading model to the plugin
log.info("Loading model to the plugin")
exec_net = plugin.load(network=net)
del net
# Start sync inference
log.info("Starting inference ({} iterations)".format(args.number_iter))
@@ -106,10 +109,10 @@ def main():
if args.perf_counts:
perf_counts = infer_request_handle.get_perf_counts()
log.info("Performance counters:")
print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
for layer, stats in perf_counts.items():
print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
# Processing output blob
log.info("Processing output blob")
res = infer_request_handle.outputs[out_blob]
@@ -119,18 +122,25 @@ def main():
labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
else:
labels_map = None
classid_str = "classid"
probability_str = "probability"
for i, probs in enumerate(res):
probs = np.squeeze(probs)
top_ind = np.argsort(probs)[-args.number_top:][::-1]
print("Image {}\n".format(args.input[i]))
print(classid_str, probability_str)
print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
for id in top_ind:
det_label = labels_map[id] if labels_map else "#{}".format(id)
print("{:.7f} {}".format(probs[id], det_label))
det_label = labels_map[id] if labels_map else "{}".format(id)
label_length = len(det_label)
space_num_before = (7 - label_length) // 2
space_num_after = 7 - (space_num_before + label_length) + 2
space_num_before_prob = (11 - len(str(probs[id]))) // 2
print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
' ' * space_num_after, ' ' * space_num_before_prob,
probs[id]))
print("\n")
del exec_net
del plugin
if __name__ == '__main__':
sys.exit(main() or 0)

View File

@@ -1,49 +0,0 @@
# This README demonstrates use of all GreenGrass samples
# GreenGrass Classification Sample
This topic demonstrates how to build and run the GreenGrass Image Classification sample application, which does inference using image classification networks like AlexNet and GoogLeNet on Intel® Processors, Intel® HD Graphics and Intel® FPGA.
## Running
1. Modify the "accelerator" parameter inside the sample to deploy the sample on any accelerator option of your choice(CPU/GPU/FPGA)
For CPU, please specify "CPU"
For GPU, please specify "GPU"
For FPGA, please specify "HETERO:FPGA,CPU"
2. Enable the option(s) on how output is displayed/consumed
3. Now follow the instructions listed in the Greengrass-FaaS-User-Guide.pdf to create the lambda and deploy on edge device using Greengrass
### Outputs
The application publishes top-10 results on AWS IoT Cloud every second by default. For other output consumption options, please refer to Greengrass-FaaS-User-Guide.pdf
### How it works
Upon deployment,the sample application loads a network and an image to the Inference Engine plugin. When inference is done, the application publishes results to AWS IoT Cloud
=====================================================================================================
# GreenGrass Object Detection Sample SSD
This topic demonstrates how to run the GreenGrass Object Detection SSD sample application, which does inference using object detection networks like Squeezenet-SSD on Intel® Processors, Intel® HD Graphics and Intel® FPGA.
## Running
1. Modify the "accelerator" parameter inside the sample to deploy the sample on any accelerator option of your choice(CPU/GPU/FPGA)
For CPU, please specify "CPU"
For GPU, please specify "GPU"
For FPGA, please specify "HETERO:FPGA,CPU"
2. Enable the option(s) on how output is displayed/consumed
3. Set the variable is_async_mode to 'True' for Asynchronous execution and 'False' for Synchronous execution
4. Now follow the instructions listed in the Greengrass-FaaS-User-Guide.pdf to create the lambda and deploy on edge device using Greengrass
### Outputs
The application publishes detection outputs such as class label, class confidence, and bounding box coordinates on AWS IoT Cloud every second. For other output consumption options, please refer to Greengrass-FaaS-User-Guide.pdf
### How it works
Upon deployment,the sample application loads a network and an image to the Inference Engine plugin. When inference is done, the application publishes results to AWS IoT Cloud

View File

@@ -1,176 +0,0 @@
"""
BSD 3-clause "New" or "Revised" license
Copyright (C) 2018 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import cv2
import numpy as np
import greengrasssdk
import boto3
import timeit
import datetime
import json
from collections import OrderedDict
from openvino.inference_engine import IENetwork, IEPlugin
# Specify the delta in seconds between each report
reporting_interval = 1.0
# Parameters for IoT Cloud
enable_iot_cloud_output = True
# Parameters for Kinesis
enable_kinesis_output = False
kinesis_stream_name = ""
kinesis_partition_key = ""
# NOTE(review): kinesis_region is never read below -- the Kinesis client is
# created with a hardcoded "us-west-2" region; confirm which is intended.
kinesis_region = ""
# Parameters for S3
enable_s3_jpeg_output = False
s3_bucket_name = ""
# Parameters for jpeg output on local disk
enable_local_jpeg_output = False
# Create a Greengrass Core SDK client for publishing messages to AWS Cloud
client = greengrasssdk.client("iot-data")
# Create an S3 client for uploading files to S3 (only when S3 output is enabled)
if enable_s3_jpeg_output:
    s3_client = boto3.client("s3")
# Create a Kinesis client for putting records to streams (only when enabled)
if enable_kinesis_output:
    kinesis_client = boto3.client("kinesis", "us-west-2")
# Read environment variables set by Lambda function configuration.
# All values default to None when unset, except the last two.
PARAM_MODEL_XML = os.environ.get("PARAM_MODEL_XML")
PARAM_INPUT_SOURCE = os.environ.get("PARAM_INPUT_SOURCE")
PARAM_DEVICE = os.environ.get("PARAM_DEVICE")
PARAM_OUTPUT_DIRECTORY = os.environ.get("PARAM_OUTPUT_DIRECTORY")
PARAM_CPU_EXTENSION_PATH = os.environ.get("PARAM_CPU_EXTENSION_PATH")
PARAM_LABELMAP_FILE = os.environ.get("PARAM_LABELMAP_FILE")
PARAM_TOPIC_NAME = os.environ.get("PARAM_TOPIC_NAME", "intel/faas/classification")
PARAM_NUM_TOP_RESULTS = int(os.environ.get("PARAM_NUM_TOP_RESULTS", "10"))
def report(res_json, frame):
    """Publish one inference result to every enabled output sink.

    res_json: JSON-serializable mapping with the latest inference results.
    frame: the corresponding video frame (image array) used for JPEG outputs.

    Side effects only (no return value): publishes to AWS IoT, puts a Kinesis
    record, uploads a JPEG to S3 and/or writes a JPEG locally, depending on
    the module-level enable_* switches.
    """
    now = datetime.datetime.now()
    # Timestamp-based key/file name; spaces replaced so it is path/key safe.
    date_prefix = str(now).replace(" ", "_")
    if enable_iot_cloud_output:
        data = json.dumps(res_json)
        client.publish(topic=PARAM_TOPIC_NAME, payload=data)
    if enable_kinesis_output:
        kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json), PartitionKey=kinesis_partition_key)
    if enable_s3_jpeg_output:
        temp_image = os.path.join(PARAM_OUTPUT_DIRECTORY, "inference_result.jpeg")
        cv2.imwrite(temp_image, frame)
        # Fix: read the encoded JPEG in binary mode. Text mode decodes bytes
        # and raises/corrupts on non-UTF-8 image data under Python 3.
        with open(temp_image, "rb") as file:
            image_contents = file.read()
        s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
    if enable_local_jpeg_output:
        cv2.imwrite(os.path.join(PARAM_OUTPUT_DIRECTORY, date_prefix + ".jpeg"), frame)
def greengrass_classification_sample_run():
    """Run the classification loop: load the IR model onto the plugin, read
    frames from the configured input source, run synchronous inference on
    each frame, and publish the top-N class candidates at most once per
    reporting_interval. Runs until the capture source is exhausted.

    All configuration comes from the module-level PARAM_* environment values.
    No return value.
    """
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
    # The IR weights file is expected next to the .xml with the same basename.
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
    # Read IR
    net = IENetwork.from_ir(model=PARAM_MODEL_XML, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    # NOTE(review): assumes the input blob describes an NCHW layout -- confirm
    # for the model in use.
    n, c, h, w = net.inputs[input_blob]
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    # The IENetwork object is no longer needed once loaded onto the plugin.
    del net
    client.publish(topic=PARAM_TOPIC_NAME, payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    inf_seconds = 0.0  # accumulated pure-inference time since the last report
    frame_count = 0    # frames processed since the last report
    res_json = []
    labeldata = None
    # Optional mapping from class index (as string) to human-readable label.
    if PARAM_LABELMAP_FILE is not None:
        with open(PARAM_LABELMAP_FILE) as labelmap_file:
            labeldata = json.load(labelmap_file)
    while (cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        frameid = cap.get(cv2.CAP_PROP_POS_FRAMES)
        # NOTE(review): initial_w/initial_h are unused in this sample.
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Start synchronous inference
        inf_start_time = timeit.default_timer()
        res = exec_net.infer(inputs={input_blob: in_frame})
        inf_seconds += timeit.default_timer() - inf_start_time
        # Indices of the PARAM_NUM_TOP_RESULTS highest scores, best first.
        top_ind = np.argsort(res[out_blob], axis=1)[0, -PARAM_NUM_TOP_RESULTS:][::-1]
        # Parse detection results of the current request
        res_json = OrderedDict()
        res_json["Candidates"] = OrderedDict()
        frame_timestamp = datetime.datetime.now()
        for i in top_ind:
            classlabel = labeldata[str(i)] if labeldata else str(i)
            res_json["Candidates"][classlabel] = round(res[out_blob][0, i], 2)
        frame_count += 1
        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            res_json["timestamp"] = frame_timestamp.isoformat()
            res_json["frame_id"] = int(frameid)
            res_json["inference_fps"] = frame_count / inf_seconds
            start_time = timeit.default_timer()
            report(res_json, frame)
            frame_count = 0
            inf_seconds = 0.0
    client.publish(topic=PARAM_TOPIC_NAME, payload="End of the input, exiting...")
    del exec_net
    del plugin
# Start the long-running inference loop as soon as the Lambda container loads.
greengrass_classification_sample_run()


def function_handler(event, context):
    """Greengrass invocation hook: publishes a fixed marker payload."""
    marker_payload = 'HANDLER_CALLED!'
    client.publish(topic=PARAM_TOPIC_NAME, payload=marker_payload)

View File

@@ -1,179 +0,0 @@
"""
BSD 3-clause "New" or "Revised" license
Copyright (C) 2018 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import cv2
import numpy as np
import greengrasssdk
import boto3
import timeit
import datetime
import json
from collections import OrderedDict
from openvino.inference_engine import IENetwork, IEPlugin
# Specify the delta in seconds between each report
reporting_interval = 1.0
# Parameters for IoT Cloud
enable_iot_cloud_output = True
# Parameters for Kinesis
enable_kinesis_output = False
kinesis_stream_name = ""
kinesis_partition_key = ""
# NOTE(review): kinesis_region is never read below -- the Kinesis client is
# created with a hardcoded "us-west-2" region; confirm which is intended.
kinesis_region = ""
# Parameters for S3
enable_s3_jpeg_output = False
s3_bucket_name = "ssd_test"
# Parameters for jpeg output on local disk
enable_local_jpeg_output = False
# Create a Greengrass Core SDK client for publishing messages to AWS Cloud
client = greengrasssdk.client("iot-data")
# Create an S3 client for uploading files to S3 (only when S3 output is enabled)
if enable_s3_jpeg_output:
    s3_client = boto3.client("s3")
# Create a Kinesis client for putting records to streams (only when enabled)
if enable_kinesis_output:
    kinesis_client = boto3.client("kinesis", "us-west-2")
# Read environment variables set by Lambda function configuration.
# All values default to None when unset, except PARAM_TOPIC_NAME.
PARAM_MODEL_XML = os.environ.get("PARAM_MODEL_XML")
PARAM_INPUT_SOURCE = os.environ.get("PARAM_INPUT_SOURCE")
PARAM_DEVICE = os.environ.get("PARAM_DEVICE")
PARAM_OUTPUT_DIRECTORY = os.environ.get("PARAM_OUTPUT_DIRECTORY")
PARAM_CPU_EXTENSION_PATH = os.environ.get("PARAM_CPU_EXTENSION_PATH")
PARAM_LABELMAP_FILE = os.environ.get("PARAM_LABELMAP_FILE")
PARAM_TOPIC_NAME = os.environ.get("PARAM_TOPIC_NAME", "intel/faas/ssd")
def report(res_json, frame):
    """Publish one detection result to every enabled output sink.

    res_json: JSON-serializable mapping with the latest detection results.
    frame: the corresponding video frame (image array) used for JPEG outputs.

    Side effects only (no return value): publishes to AWS IoT, puts a Kinesis
    record, uploads a JPEG to S3 and/or writes a JPEG locally, depending on
    the module-level enable_* switches.
    """
    now = datetime.datetime.now()
    # Timestamp-based key/file name; spaces replaced so it is path/key safe.
    date_prefix = str(now).replace(" ", "_")
    if enable_iot_cloud_output:
        data = json.dumps(res_json)
        client.publish(topic=PARAM_TOPIC_NAME, payload=data)
    if enable_kinesis_output:
        kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json), PartitionKey=kinesis_partition_key)
    if enable_s3_jpeg_output:
        temp_image = os.path.join(PARAM_OUTPUT_DIRECTORY, "inference_result.jpeg")
        cv2.imwrite(temp_image, frame)
        # Fix: read the encoded JPEG in binary mode. Text mode decodes bytes
        # and raises/corrupts on non-UTF-8 image data under Python 3.
        with open(temp_image, "rb") as file:
            image_contents = file.read()
        s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
    if enable_local_jpeg_output:
        cv2.imwrite(os.path.join(PARAM_OUTPUT_DIRECTORY, date_prefix + ".jpeg"), frame)
def greengrass_object_detection_sample_ssd_run():
    """Run the SSD object-detection loop: load the IR model onto the plugin,
    read frames from the configured input source, run synchronous inference
    per frame, draw boxes for detections with confidence above 0.5, and
    publish results at most once per reporting_interval. Runs until the
    capture source is exhausted. No return value.
    """
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
    # The IR weights file is expected next to the .xml with the same basename.
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
    # Read IR
    net = IENetwork.from_ir(model=PARAM_MODEL_XML, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    # NOTE(review): assumes the input blob describes an NCHW layout -- confirm
    # for the model in use.
    n, c, h, w = net.inputs[input_blob]
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    # The IENetwork object is no longer needed once loaded onto the plugin.
    del net
    client.publish(topic=PARAM_TOPIC_NAME, payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    inf_seconds = 0.0  # accumulated pure-inference time since the last report
    frame_count = 0    # frames processed since the last report
    labeldata = None
    # Optional mapping from class id (as string) to human-readable label.
    if PARAM_LABELMAP_FILE is not None:
        with open(PARAM_LABELMAP_FILE) as labelmap_file:
            labeldata = json.load(labelmap_file)
    while (cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        frameid = cap.get(cv2.CAP_PROP_POS_FRAMES)
        # Original capture dimensions, used to scale normalized box coords.
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Start synchronous inference
        inf_start_time = timeit.default_timer()
        res = exec_net.infer(inputs={input_blob: in_frame})
        inf_seconds += timeit.default_timer() - inf_start_time
        # Parse detection results of the current request.
        # Each detection row is used as obj[1]=label, obj[2]=confidence,
        # obj[3:7]=normalized (xmin, ymin, xmax, ymax).
        res_json = OrderedDict()
        frame_timestamp = datetime.datetime.now()
        object_id = 0
        for obj in res[out_blob][0][0]:
            if obj[2] > 0.5:
                xmin = int(obj[3] * initial_w)
                ymin = int(obj[4] * initial_h)
                xmax = int(obj[5] * initial_w)
                ymax = int(obj[6] * initial_h)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
                obj_id = "Object" + str(object_id)
                classlabel = labeldata[str(int(obj[1]))] if labeldata else ""
                res_json[obj_id] = {"label": int(obj[1]), "class": classlabel, "confidence": round(obj[2], 2), "xmin": round(obj[3], 2), "ymin": round(obj[4], 2), "xmax": round(obj[5], 2), "ymax": round(obj[6], 2)}
                object_id += 1
        frame_count += 1
        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            res_json["timestamp"] = frame_timestamp.isoformat()
            res_json["frame_id"] = int(frameid)
            res_json["inference_fps"] = frame_count / inf_seconds
            start_time = timeit.default_timer()
            report(res_json, frame)
            frame_count = 0
            inf_seconds = 0.0
    client.publish(topic=PARAM_TOPIC_NAME, payload="End of the input, exiting...")
    del exec_net
    del plugin
# Start the long-running detection loop as soon as the Lambda container loads.
greengrass_object_detection_sample_ssd_run()


def function_handler(event, context):
    """Greengrass invocation hook: publishes a fixed marker payload."""
    marker_payload = 'HANDLER_CALLED!'
    client.publish(topic=PARAM_TOPIC_NAME, payload=marker_payload)

View File

@@ -1,154 +0,0 @@
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
import cv2
import numpy as np
import logging as log
from time import time
from openvino.inference_engine import IENetwork, IEPlugin
# Palette of 21 color triples used to paint per-pixel class IDs in the output
# segmentation map (class indices above 20 are clamped to the last entry).
# NOTE(review): output images are written with cv2.imwrite, so these are
# presumably BGR triples -- confirm against the model's class ordering.
classes_color_map = [
    (150, 150, 150),
    (58, 55, 169),
    (211, 51, 17),
    (157, 80, 44),
    (23, 95, 189),
    (210, 133, 34),
    (76, 226, 202),
    (101, 138, 127),
    (223, 91, 182),
    (80, 128, 113),
    (235, 155, 55),
    (44, 151, 243),
    (159, 80, 170),
    (239, 208, 44),
    (128, 50, 51),
    (82, 141, 193),
    (9, 107, 10),
    (223, 90, 142),
    (50, 248, 83),
    (178, 101, 130),
    (71, 30, 204)
]
def build_argparser():
    """Build the command-line parser for the segmentation sample.

    Required: -m/--model (IR .xml path) and -i/--input (one or more images).
    Optional: CPU extension library, plugin directory, target device
    (default CPU), top-result count (default 10), iteration count
    (default 1) and a performance-counters switch (default off).

    Returns the configured argparse.ArgumentParser (not yet parsed).
    """
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
    parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
                        type=str, nargs="+")
    # Fix: added the missing space in the user-facing help text
    # ("layers.Absolute" -> "layers. Absolute").
    parser.add_argument("-l", "--cpu_extension",
                        help="MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the kernels "
                             "impl.", type=str, default=None)
    parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
    parser.add_argument("-d", "--device",
                        help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                             "will look for a suitable plugin for device specified (CPU by default)", default="CPU",
                        type=str)
    parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
    parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
    parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
    return parser
def main():
    """Entry point for the semantic segmentation sample: parse CLI args, load
    the IR network, run synchronous inference on a batch of input images and
    write one colorized class map per image to out_<batch>.bmp next to this
    script. Exits with status 1 if unsupported layers are found on CPU.
    """
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    # The IR weights file is expected next to the .xml with the same basename.
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    # On CPU, verify every layer is supported before loading the model.
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # One batch slot per input image.
    net.batch_size = len(args.input)
    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        images[i] = image
    log.info("Batch size is {}".format(n))
    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net
    # Start sync inference
    # NOTE(review): if --number_iter is 0, `res` is never assigned and the
    # output-processing step below raises NameError -- confirm the intended
    # minimum iteration count.
    log.info("Starting inference ({} iterations)".format(args.number_iter))
    infer_time = []
    for i in range(args.number_iter):
        t0 = time()
        res = exec_net.infer(inputs={input_blob: images})
        infer_time.append((time() - t0) * 1000)
    log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
    if args.perf_counts:
        perf_counts = exec_net.requests[0].get_perf_counts()
        log.info("Performance counters:")
        print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
        for layer, stats in perf_counts.items():
            print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
                                                               stats['status'], stats['real_time']))
    # Processing output blob
    log.info("Processing output blob")
    res = res[out_blob]
    # Paint each pixel with the palette color of its most likely class.
    # NOTE(review): np.int is deprecated/removed in modern NumPy; this line
    # would need np.int_ or plain int on a NumPy upgrade.
    for batch, data in enumerate(res):
        classes_map = np.zeros(shape=(h, w, c), dtype=np.int)
        for i in range(h):
            for j in range(w):
                if len(data[:, i, j]) == 1:
                    # Single-channel output: the value itself is the class id.
                    pixel_class = int(data[:, i, j])
                else:
                    # Multi-channel output: pick the class with the max score.
                    pixel_class = np.argmax(data[:, i, j])
                classes_map[i, j, :] = classes_color_map[min(pixel_class, 20)]
        out_img = os.path.join(os.path.dirname(__file__), "out_{}.bmp".format(batch))
        cv2.imwrite(out_img, classes_map)
        log.info("Result image was saved to {}".format(out_img))
    del exec_net
    del plugin
if __name__ == '__main__':
    sys.exit(main() or 0)

View File

@@ -0,0 +1,74 @@
# Neural Style Transfer Python* Sample
This topic demonstrates how to run the Neural Style Transfer sample application, which performs
inference of style transfer models.
> **NOTE**: The OpenVINO™ toolkit does not include a pre-trained model to run the Neural Style Transfer sample. A public model from the [Zhaw's Neural Style Transfer repository](https://github.com/zhaw/neural_style) can be used. Read the [Converting a Style Transfer Model from MXNet*](./docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md) topic from the [Model Optimizer Developer Guide](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) to learn about how to get the trained model and how to convert it to the Inference Engine format (\*.xml + \*.bin).
## How It Works
> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
## Running
Running the application with the <code>-h</code> option yields the following usage message:
```
python3 style_transfer_sample.py --help
```
The command yields the following usage message:
```
usage: style_transfer_sample.py [-h] -m MODEL -i INPUT [INPUT ...]
[-l CPU_EXTENSION] [-pp PLUGIN_DIR]
[-d DEVICE] [-nt NUMBER_TOP] [-ni NUMBER_ITER]
[--mean_val_r MEAN_VAL_R]
[--mean_val_g MEAN_VAL_G]
[--mean_val_b MEAN_VAL_B] [-pc]
Options:
-h, --help Show this help message and exit.
-m MODEL, --model MODEL
Path to an .xml file with a trained model.
-i INPUT [INPUT ...], --input INPUT [INPUT ...]
Path to a folder with images or path to an image files
-l CPU_EXTENSION, --cpu_extension CPU_EXTENSION
                        Optional. Required for CPU custom layers. Absolute
                        path to a shared library with the kernels
                        implementations
-pp PLUGIN_DIR, --plugin_dir PLUGIN_DIR
Path to a plugin folder
-d DEVICE, --device DEVICE
Specify the target device to infer on; CPU, GPU, FPGA,
HDDL or MYRIAD is acceptable. Sample will look for a
suitable plugin for device specified. Default value is CPU
-nt NUMBER_TOP, --number_top NUMBER_TOP
Number of top results
-ni NUMBER_ITER, --number_iter NUMBER_ITER
Number of inference iterations
--mean_val_r MEAN_VAL_R, -mean_val_r MEAN_VAL_R
                        Mean value of red channel for mean value subtraction in
postprocessing
--mean_val_g MEAN_VAL_G, -mean_val_g MEAN_VAL_G
                        Mean value of green channel for mean value subtraction
in postprocessing
--mean_val_b MEAN_VAL_B, -mean_val_b MEAN_VAL_B
                        Mean value of blue channel for mean value subtraction
in postprocessing
-pc, --perf_counts Report performance counters
```
Running the application with the empty list of options yields the usage message given above and an error message.
To perform inference on an image using a trained model of NST network on Intel® CPUs, use the following command:
```
python3 style_transfer_sample.py -i <path_to_image>/cat.bmp -m <path_to_model>/1_decoder_FP32.xml
```
### Demo Output
The application outputs an image (`out1.bmp`) or a sequence of images (`out1.bmp`, ..., `out<N>.bmp`) which are redrawn in style of the style transfer model used for sample.
## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
@@ -26,31 +26,33 @@ from openvino.inference_engine import IENetwork, IEPlugin
def build_argparser():
parser = ArgumentParser()
parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
type=str, nargs="+")
parser.add_argument("-l", "--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
"impl.", type=str, default=None)
parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
parser.add_argument("-d", "--device",
help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device specified (CPU by default)", default="CPU",
type=str)
parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
parser.add_argument("--mean_val_r", "-mean_val_r",
help="Mean value of red chanel for mean value subtraction in postprocessing ", default=0,
type=float)
parser.add_argument("--mean_val_g", "-mean_val_g",
help="Mean value of green chanel for mean value subtraction in postprocessing ", default=0,
type=float)
parser.add_argument("--mean_val_b", "-mean_val_b",
help="Mean value of blue chanel for mean value subtraction in postprocessing ", default=0,
type=float)
parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
args.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
type=str, nargs="+")
args.add_argument("-l", "--cpu_extension",
help="Optional. Required for CPU custom layers. "
"Absolute MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the "
"kernels implementations", type=str, default=None)
args.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
args.add_argument("-d", "--device",
help="Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device specified. Default value is CPU", default="CPU",
type=str)
args.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
args.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
args.add_argument("--mean_val_r", "-mean_val_r",
help="Mean value of red chanel for mean value subtraction in postprocessing ", default=0,
type=float)
args.add_argument("--mean_val_g", "-mean_val_g",
help="Mean value of green chanel for mean value subtraction in postprocessing ", default=0,
type=float)
args.add_argument("--mean_val_b", "-mean_val_b",
help="Mean value of blue chanel for mean value subtraction in postprocessing ", default=0,
type=float)
args.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
return parser
@@ -67,7 +69,7 @@ def main():
plugin.add_cpu_extension(args.cpu_extension)
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = IENetwork.from_ir(model=model_xml, weights=model_bin)
net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
@@ -102,7 +104,6 @@ def main():
# Loading model to the plugin
log.info("Loading model to the plugin")
exec_net = plugin.load(network=net)
del net
# Start sync inference
log.info("Starting inference ({} iterations)".format(args.number_iter))
@@ -117,8 +118,8 @@ def main():
log.info("Performance counters:")
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
for layer, stats in perf_counts.items():
print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
# Processing output blob
log.info("Processing output blob")
res = res[out_blob]
@@ -134,8 +135,6 @@ def main():
out_img = os.path.join(os.path.dirname(__file__), "out_{}.bmp".format(batch))
cv2.imwrite(out_img, data)
log.info("Result image was saved to {}".format(out_img))
del exec_net
del plugin
if __name__ == '__main__':

View File

@@ -1,21 +0,0 @@
background
aeroplane
bicycle
bird
boat
bottle
bus
car
cat
chair
cow
diningtable
dog
horse
motorbike
person
pottedplant
sheep
sofa
train
tvmonitor

View File

@@ -51,8 +51,8 @@ def parse_command_line_options(cls):
base_init_options(self)
def run(self):
global INFERENCE_ENGINE_DIR
global BUNDLE_INFERENCE_ENGINE
global INFERENCE_ENGINE_DIR
global BUNDLE_INFERENCE_ENGINE
if self.copy_ie_libs:
BUNDLE_INFERENCE_ENGINE = True
@@ -167,12 +167,12 @@ except ImportError:
c_sources = [
PACKAGE / 'ie_driver.cpp',
PACKAGE / 'ie_driver.hpp',
PACKAGE / 'ie_api_impl.cpp',
PACKAGE / 'ie_api_impl.hpp',
PACKAGE / 'c_ie_driver.pxd',
PACKAGE / 'ie_driver.pyx',
PACKAGE / 'ie_driver.pxd',
PACKAGE / 'ie_api_impl_defs.pxd',
PACKAGE / 'ie_api.pyx',
PACKAGE / 'ie_api.pxd',
]
extensions = [
@@ -187,16 +187,14 @@ cmdclass = {
}
setup(
name="inference_engine",
version='0.1.1',
name="src",
version='1.0',
description='Python inference for Inference Engine',
packages=find_packages(exclude=['tests']),
package_data={PACKAGE_NAME: ['*.so', '*.dll', '*dylib*', '*.pyd']},
include_package_data=True,
ext_modules=extensions,
cmdclass=cmdclass,
author='', author_email='',
tests_require=['pytest'],
install_requires=list(requirements),
zip_safe=False,
)

View File

@@ -0,0 +1,32 @@
# Builds the "ie_api" Cython extension module for the Inference Engine Python bridge.
# The .pyx sources generate C++ code, so they are flagged CYTHON_IS_CXX below.
set(CMAKE_INCLUDE_CURRENT_DIR ON)

set(TARGET_NAME "ie_api")

# Place the built module (and any archives) into the Python bridge output tree.
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})

# NOTE: globbing is kept for compatibility with the existing layout; newly added
# sources require a CMake re-run to be picked up.
file(GLOB SOURCE
        ${CMAKE_CURRENT_SOURCE_DIR}/*.pyx
        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
        )

set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX TRUE)

## Compatibility with python 2.7 which has deprecated "register" specifier
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    add_definitions("-Wno-register")
endif()

cython_add_module(${TARGET_NAME} ${SOURCE})
set_target_properties(${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})

# Copy the package __init__.py files and requirements.txt next to the built module.
# VERBATIM ensures platform-independent argument escaping.
add_custom_command(TARGET ${TARGET_NAME}
        POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/inference_engine/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py
        COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/requirements.txt ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../../requirements.txt
        COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../__init__.py
        VERBATIM
)

View File

@@ -0,0 +1,3 @@
# Public entry point of the openvino.inference_engine package: re-exports the
# names defined by the compiled Cython bindings (ie_api).
from .ie_api import *
# Version string reported by the underlying Inference Engine build
# (get_version comes from the ie_api star import above).
__version__ = get_version()
__all__ = ['IENetwork', "IEPlugin", "IENetReader"]

View File

@@ -1,8 +1,3 @@
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
from .cimport ie_api_impl_defs as C
from .ie_api_impl_defs cimport Blob, TensorDesc
@@ -24,24 +19,23 @@ cdef class BlobBuffer:
cdef class InferRequest:
cdef C.InferRequestWrap *impl
cpdef BlobBuffer _get_input_buffer(self, const string & blob_name)
cpdef BlobBuffer _get_output_buffer(self, const string & blob_name)
cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name)
cpdef infer(self, inputs = ?)
cpdef async_infer(self, inputs = ?)
cpdef wait(self, timeout = ?)
cpdef get_perf_counts(self)
cdef public:
_inputs, _outputs
_inputs_list, _outputs_list
cdef class IENetwork:
cdef C.IENetwork impl
cdef class ExecutableNetwork:
cdef unique_ptr[C.IEExecNetwork] impl
cdef C.IEPlugin plugin_impl
cdef public:
_requests, async, _request_iterator
_requests, inputs, outputs
cdef class IEPlugin:
cdef C.IEPlugin impl
@@ -51,9 +45,6 @@ cdef class IEPlugin:
cpdef void set_initial_affinity(self, IENetwork network) except *
cpdef set get_supported_layers(self, IENetwork net)
cdef class IENetReader:
cdef C.IENetReader impl
cdef class IENetLayer:
cdef C.IENetLayer impl
@@ -61,4 +52,7 @@ cdef class InputInfo:
cdef C.InputInfo impl
cdef class OutputInfo:
cdef C.OutputInfo impl
cdef C.OutputInfo impl
cdef class LayersStatsMap(dict):
cdef C.IENetwork net_impl

View File

@@ -1,20 +1,18 @@
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#distutils: language=c++
from cython.operator cimport dereference as deref
from .cimport ie_api_impl_defs as C
from .ie_api_impl_defs cimport Blob, TensorDesc, SizeVector, Precision
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp.pair cimport pair
from libcpp.map cimport map
from libcpp.memory cimport unique_ptr
from libc.stdint cimport int64_t
import os
import numpy as np
from copy import deepcopy
import warnings
from collections import OrderedDict
cdef extern from "<utility>" namespace "std" nogil:
cdef unique_ptr[C.IEExecNetwork] move(unique_ptr[C.IEExecNetwork])
@@ -34,8 +32,8 @@ cdef dict_to_c_map(py_dict):
return c_map
supported_precisions = ["FP32", "FP16", "Q78", "I32", "I16", "I8", "U32", "U16"]
supported_layouts = ["NCHW", "NHWC", "OIHW", "C", "CHW", "HW", "NC", "CN", "BLOCKED"]
known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO']
supported_layouts = ["NCHW", "NHWC", "OIHW", "C", "CHW", "HW", "NC", "CN", "BLOCKED", "NCDHW"]
known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL']
def get_version():
return C.get_version().decode()
@@ -68,7 +66,23 @@ cdef class IENetLayer:
@property
def params(self):
return {k.decode(): v.decode() for k, v in self.impl.params}
@property
def parents(self):
cdef vector[string] c_parents = self.impl.parents
parents = []
return [parent.decode() for parent in c_parents]
@property
def children(self):
cdef vector[string] c_children = self.impl.children
children = []
return [child.decode() for child in c_children]
@property
def shape(self):
string_shape = self.impl.shape.decode()
return [int(i) for i in string_shape.split(' ')]
@property
def layout(self):
return self.impl.layout.decode()
@affinity.setter
def affinity(self, target_affinity):
self.impl.setAffinity(target_affinity.encode())
@@ -80,7 +94,6 @@ cdef class IENetLayer:
def precision(self, precision: str):
self.impl.setPrecision(precision.upper().encode())
cdef class InputInfo:
@property
def precision(self):
@@ -105,7 +118,6 @@ cdef class InputInfo:
"Unsupported layout {}! List of supported layouts: {}".format(layout, supported_layouts))
self.impl.setLayout(layout.encode())
cdef class OutputInfo:
@property
def precision(self):
@@ -122,20 +134,18 @@ cdef class OutputInfo:
raise AttributeError(
"Unsupported precision {}! List of supported precisions: {}".format(precision, supported_precisions))
self.impl.setPrecision(precision.encode())
# @layout.setter
# def layout(self, layout):
# self.impl.setLayout(layout.encode())
cdef class ExecutableNetwork:
def __init__(self):
self._requests = []
self.inputs = []
self.outputs = []
def infer(self, inputs=None):
current_request = self.requests[0]
current_request.infer(inputs)
return deepcopy(current_request.outputs)
def start_async(self, request_id, inputs=None):
if request_id not in list(range(len(self.requests))):
raise ValueError("Incorrect request_id specified!")
@@ -145,21 +155,25 @@ cdef class ExecutableNetwork:
@property
def requests(self):
return self._requests
requests = []
for i in range(deref(self.impl).infer_requests.size()):
infer_request = InferRequest()
infer_request.impl = &(deref(self.impl).infer_requests[i])
infer_request._inputs_list = self.inputs
infer_request._outputs_list = self.outputs
requests.append(infer_request)
return requests
cdef class InferRequest:
def __init__(self):
self._inputs = {}
self._outputs = {}
self._inputs_list = []
self._outputs_list = []
cpdef BlobBuffer _get_input_buffer(self, const string & blob_name):
cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name):
cdef BlobBuffer buffer = BlobBuffer()
buffer.reset(deref(self.impl).getInputBlob(blob_name))
return buffer
cpdef BlobBuffer _get_output_buffer(self, const string & blob_name):
cdef BlobBuffer buffer = BlobBuffer()
buffer.reset(deref(self.impl).getOutputBlob(blob_name))
cdef Blob.Ptr blob_ptr
deref(self.impl).getBlobPtr(blob_name, blob_ptr)
buffer.reset(blob_ptr)
return buffer
cpdef infer(self, inputs=None):
@@ -192,17 +206,71 @@ cdef class InferRequest:
@property
def inputs(self):
return self._inputs
inputs = {}
for input in self._inputs_list:
inputs[input] = self._get_blob_buffer(input.encode()).to_numpy()
return inputs
@property
def outputs(self):
return self._outputs
outputs = {}
for output in self._outputs_list:
outputs[output] = self._get_blob_buffer(output.encode()).to_numpy()
return deepcopy(outputs)
@property
def latency(self):
return self.impl.exec_time
def set_batch(self, size):
if size <= 0:
raise ValueError("Batch size should be positive integer number but {} specified".format(size))
deref(self.impl).setBatch(size)
def _fill_inputs(self, inputs):
for k, v in inputs.items():
self._inputs[k][:] = v
assert k in self._inputs_list, "No input with name {} found in network".format(k)
self.inputs[k][:] = v
class LayerStats:
def __init__(self, min: tuple = (), max: tuple = ()):
self._min = min
self._max = max
@property
def min(self):
return self._min
@property
def max(self):
return self._max
cdef class LayersStatsMap(dict):
def update(self, other=None, **kwargs):
super(LayersStatsMap, self).update(other, **kwargs)
cdef map[string, map[string, vector[float]]] c_stats_map
cdef map[string, vector[float]] c_node_stats
for k, v in self.items():
c_node_stats["min".encode()] = v.min
c_node_stats["max".encode()] = v.max
c_stats_map[k.encode()] = c_node_stats
self.net_impl.setStats(c_stats_map)
cdef class IENetwork:
def __cinit__(self, model: str="", weights: str=""):
cdef string model_
cdef string weights_
if model and weights:
if not os.path.isfile(model):
raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
if not os.path.isfile(weights):
raise Exception("Path to the weights {} doesn't exists or it's a directory".format(weights))
model_ = model.encode()
weights_ = weights.encode()
self.impl = C.IENetwork(model_, weights_)
else:
self.impl = C.IENetwork()
@property
def name(self):
name = bytes(self.impl.name)
@@ -213,7 +281,7 @@ cdef class IENetwork:
cdef map[string, C.InputInfo] c_inputs = self.impl.getInputs()
inputs = {}
cdef InputInfo in_info
for input in c_inputs:
for input in c_inputs:
in_info = InputInfo()
in_info.impl = input.second
inputs[input.first.decode()] = in_info
@@ -224,7 +292,7 @@ cdef class IENetwork:
cdef map[string, C.OutputInfo] c_outputs = self.impl.getOutputs()
outputs = {}
cdef OutputInfo out_info
for out in c_outputs:
for out in c_outputs:
out_info = OutputInfo()
out_info.impl = out.second
outputs[out.first.decode()] = out_info
@@ -243,23 +311,37 @@ cdef class IENetwork:
@property
def layers(self):
cdef map[string, C.IENetLayer] c_layers = <map[string, C.IENetLayer]> self.impl.getLayers()
layers = {}
cdef vector[pair[string, C.IENetLayer]] c_layers = self.impl.getLayers()
layers = OrderedDict()
cdef IENetLayer net_l = IENetLayer()
for l in c_layers:
net_l = IENetLayer()
net_l.impl = l.second
layers[l.first.decode()] = net_l
return layers
@property
def stats(self):
cdef map[string, map[string, vector[float]]] c_stats_map = self.impl.getStats()
py_stats_map = LayersStatsMap()
py_stats_map.net_impl = self.impl
for it in c_stats_map:
stats_map = LayersStatsMap()
py_stats_map[it.first.decode()] = LayerStats(min=tuple(it.second["min".encode()]),
max=tuple(it.second["max".encode()]))
return py_stats_map
@classmethod
def from_ir(cls, model: str, weights: str):
warnings.filterwarnings("always",category=DeprecationWarning)
warnings.warn("from_ir() method of IENetwork is deprecated. "
"Please use IENetwork class constructor to create valid IENetwork instance",
DeprecationWarning)
if not os.path.isfile(model):
raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
if not os.path.isfile(weights):
raise Exception("Path to the weights {} doesn't exists or it's a directory".format(weights))
net_reader = IENetReader()
return net_reader.read(model, weights)
cdef IENetwork net = IENetwork(model, weights)
return net
# TODO: Use enum with precision type instead of srting parameter when python2 support will not be required.
def add_outputs(self, outputs, precision="FP32"):
@@ -273,11 +355,14 @@ cdef class IENetwork:
_outputs.push_back(l.encode())
self.impl.addOutputs(_outputs, precision.upper().encode())
def serialize(self, path_to_xml, path_to_bin):
self.impl.serialize(path_to_xml.encode(), path_to_bin.encode())
def reshape(self, input_shapes: dict):
cdef map[string, vector[size_t]] c_input_shapes;
cdef vector[size_t] c_shape
net_inputs = self.inputs
for input, shape in input_shapes.items():
c_shape = []
if input not in net_inputs:
raise AttributeError("Specified {} layer not in network inputs {}! ".format(input, net_inputs))
for v in shape:
@@ -312,36 +397,15 @@ cdef class IEPlugin:
raise ValueError(
"Incorrect number of requests specified: {}. Expected positive integer number.".format(num_requests))
cdef ExecutableNetwork exec_net = ExecutableNetwork()
cdef vector[string] inputs_list
cdef vector[string] outputs_list
cdef map[string, string] c_config
if config:
for k, v in config.items():
c_config[to_std_string(k)] = to_std_string(v)
exec_net.plugin_impl = self.impl
exec_net.impl = move(self.impl.load(network.impl, num_requests, c_config))
requests = []
for i in range(deref(exec_net.impl).infer_requests.size()):
infer_request = InferRequest()
infer_request.impl = &(deref(exec_net.impl).infer_requests[i])
inputs_list = infer_request.impl.getInputsList()
outputs_list = infer_request.impl.getOutputsList()
for input_b in inputs_list:
input_s = input_b.decode()
infer_request._inputs[input_s] = infer_request._get_input_buffer(input_b).to_numpy()
for output_b in outputs_list:
output_s = output_b.decode()
infer_request._outputs[output_s] = infer_request._get_output_buffer(output_b).to_numpy()
# create blob buffers
requests.append(infer_request)
exec_net._requests = tuple(requests)
exec_net.inputs = network.inputs.keys()
exec_net.outputs = list(network.outputs.keys())
return exec_net
cpdef void set_initial_affinity(self, IENetwork net) except *:
@@ -374,11 +438,6 @@ cdef class IEPlugin:
c_config[to_std_string(k)] = to_std_string(v)
self.impl.setConfig(c_config)
cdef class IENetReader:
def read(self, model: str, weights: str) -> IENetwork:
cdef IENetwork net = IENetwork()
net.impl = self.impl.read(model.encode(), weights.encode())
return net
cdef class BlobBuffer:
"""Copy-less accessor for Inference Engine Blob"""

View File

@@ -0,0 +1,415 @@
// Copyright (C) 2018-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ie_api_impl.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp"
#include "details/ie_cnn_network_tools.h"
// Lookup table translating the string precision names exposed to the Python
// bindings into the corresponding InferenceEngine::Precision enum values.
std::map<std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
                                                                   {"FP16", InferenceEngine::Precision::FP16},
                                                                   {"Q78", InferenceEngine::Precision::Q78},
                                                                   {"I32", InferenceEngine::Precision::I32},
                                                                   {"I16", InferenceEngine::Precision::I16},
                                                                   {"I8", InferenceEngine::Precision::I8},
                                                                   {"U16", InferenceEngine::Precision::U16},
                                                                   {"U8", InferenceEngine::Precision::U8}};

// Lookup table translating the string layout names used by the Python bindings
// into the corresponding InferenceEngine::Layout enum values.
std::map<std::string, InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
                                                             {"NCHW", InferenceEngine::Layout::NCHW},
                                                             {"NHWC", InferenceEngine::Layout::NHWC},
                                                             {"OIHW", InferenceEngine::Layout::OIHW},
                                                             {"C", InferenceEngine::Layout::C},
                                                             {"CHW", InferenceEngine::Layout::CHW},
                                                             {"HW", InferenceEngine::Layout::HW},
                                                             {"NC", InferenceEngine::Layout::NC},
                                                             {"CN", InferenceEngine::Layout::CN},
                                                             {"NCDHW", InferenceEngine::Layout::NCDHW},
                                                             {"BLOCKED", InferenceEngine::Layout::BLOCKED}};

#define stringify(name) # name

// Evaluates an Inference Engine call and throws (with the message the plugin
// wrote into `response`) on any status other than OK.
// NOTE: relies on a local InferenceEngine::ResponseDesc named `response`
// being in scope at every call site.
#define IE_CHECK_CALL(expr) { \
    auto ret = (expr); \
    if (ret != InferenceEngine::StatusCode::OK) { \
        THROW_IE_EXCEPTION << response.msg; \
    } \
} \
// Reads an IR model (.xml topology + .bin weights) from disk and caches the
// network name and current batch size for quick access from the Python side.
InferenceEnginePython::IENetwork::IENetwork(const std::string &model, const std::string &weights) {
    InferenceEngine::CNNNetReader net_reader;
    net_reader.ReadNetwork(model);
    net_reader.ReadWeights(weights);
    name = net_reader.getName();
    actual = net_reader.getNetwork();
    batch_size = actual.getBatchSize();
}

// Serializes the wrapped network back to IR files at the given xml/bin paths.
void InferenceEnginePython::IENetwork::serialize(const std::string &path_to_xml, const std::string &path_to_bin) {
    actual.serialize(path_to_xml, path_to_bin);
}
// Returns (name, layer-info) pairs for every layer of the network in
// topological order. For each layer the wrapper records name, type, precision,
// params and affinity, the names of parent and child layers, and the layout
// plus dims of the layer's first output blob.
const std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>>
InferenceEnginePython::IENetwork::getLayers() {
    std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>> result;
    std::vector<InferenceEngine::CNNLayerPtr> sorted_layers = InferenceEngine::details::CNNNetSortTopologically(actual);
    for (const auto &layer : sorted_layers) {
        InferenceEnginePython::IENetLayer layer_info;
        layer_info.layer_ptr = layer;
        layer_info.network_ptr = actual;
        layer_info.name = layer->name;
        layer_info.type = layer->type;
        layer_info.precision = layer->precision.name();
        layer_info.params = layer->params;
        layer_info.affinity = layer->affinity;
        // Parents: producers of this layer's input data. insData holds weak
        // pointers; expired entries are silently skipped.
        std::vector<std::string> parents;
        for (const auto &i : layer->insData) {
            auto data = i.lock();
            if (data) {
                parents.emplace_back(data->getName());
            }
        }
        layer_info.parents = parents;
        // Children: every layer that consumes any of this layer's output blobs.
        std::vector<std::string> children;
        for (const auto &data : layer->outData) {
            auto inputTo = data->getInputTo();
            for (auto layer_iter : inputTo) {
                InferenceEngine::CNNLayerPtr layer_in_data = layer_iter.second;
                if (!layer_in_data) {
                    THROW_IE_EXCEPTION << "Layer which takes data " << data->name << " is nullptr";
                }
                children.emplace_back(layer_in_data->name);
            }
        }
        layer_info.children = children;
        // NOTE(review): assumes every layer has at least one output blob;
        // outData[0] would be invalid otherwise — confirm upstream guarantees this.
        const InferenceEngine::TensorDesc &inputTensorDesc = layer->outData[0]->getTensorDesc();
        // Reverse-map the layout enum back to its string name.
        for (const auto &it : layout_map) {
            if (it.second == inputTensorDesc.getLayout()) {
                layer_info.layout = it.first;
            }
        }
        // Encode dims as a space-separated string; the Python side splits it
        // back into a list of ints.
        auto dims = inputTensorDesc.getDims();
        std::string string_dims = "";
        for (const auto &it : dims) {
            string_dims += std::to_string(it) + " ";
        }
        string_dims = string_dims.substr(0, string_dims.size() - 1);  // drop trailing space
        layer_info.shape = string_dims;
        result.emplace_back(std::make_pair(layer->name, layer_info));
    }
    return result;
}
// Builds a name -> InputInfo map for the network inputs, with dims plus the
// string names of precision and layout resolved via the global lookup tables.
const std::map<std::string, InferenceEnginePython::InputInfo> InferenceEnginePython::IENetwork::getInputs() {
    std::map<std::string, InferenceEnginePython::InputInfo> inputs;
    const InferenceEngine::InputsDataMap &inputsInfo = actual.getInputsInfo();
    for (auto &in : inputsInfo) {
        InferenceEnginePython::InputInfo info;
        info.actual = *in.second;  // copies the InputInfo object
        const InferenceEngine::TensorDesc &inputTensorDesc = in.second->getTensorDesc();
        info.dims = inputTensorDesc.getDims();
        // Reverse-map the precision/layout enums back to their string names.
        for (auto it : precision_map)
            if (it.second == in.second->getPrecision())
                info.precision = it.first;
        for (auto it : layout_map)
            if (it.second == in.second->getLayout())
                info.layout = it.first;
        inputs[in.first] = info;
    }
    return inputs;
}

// Builds a name -> OutputInfo map for the network outputs; mirrors getInputs()
// except that the output data pointer is stored (not copied).
const std::map<std::string, InferenceEnginePython::OutputInfo> InferenceEnginePython::IENetwork::getOutputs() {
    std::map<std::string, InferenceEnginePython::OutputInfo> outputs;
    const InferenceEngine::OutputsDataMap &outputsInfo = actual.getOutputsInfo();
    for (auto &out : outputsInfo) {
        InferenceEnginePython::OutputInfo info;
        info.actual = out.second;
        const InferenceEngine::TensorDesc &inputTensorDesc = out.second->getTensorDesc();
        info.dims = inputTensorDesc.getDims();
        for (auto it : precision_map)
            if (it.second == out.second->getPrecision())
                info.precision = it.first;
        for (auto it : layout_map)
            if (it.second == out.second->getLayout())
                info.layout = it.first;
        outputs[out.first] = info;
    }
    return outputs;
}
// Marks the given layers as network outputs with the requested precision.
// Layers that are already outputs are skipped; layers with more than one
// output blob are skipped with a message on stdout.
void
InferenceEnginePython::IENetwork::addOutputs(const std::vector<std::string> &out_layers, const std::string &precision) {
    for (auto &&l : out_layers) {
        InferenceEngine::OutputsDataMap outputsDataMap = actual.getOutputsInfo();
        if (outputsDataMap.find(l) != outputsDataMap.end()) {
            continue;  // already an output
        }
        InferenceEngine::CNNLayerPtr cnnLayer = actual.getLayerByName(l.c_str());
        std::vector<InferenceEngine::DataPtr> outData = cnnLayer->outData;
        if (outData.size() != 1) {
            std::cout << "Layer " << l << " has " << outData.size() << " output blobs and can not be set as output."
                      << std::endl;
            continue;
        }
        actual.addOutput(l);
        // Re-fetch the outputs map so the newly added entry is visible,
        // then apply the requested precision to it.
        InferenceEngine::OutputsDataMap outputsDataMapUpd = actual.getOutputsInfo();
        outputsDataMapUpd[l]->setPrecision(precision_map[precision]);
    }
}

// Sets the network-wide batch size.
void InferenceEnginePython::IENetwork::setBatch(const size_t size) {
    actual.setBatchSize(size);
}

// Reshapes network inputs to the given name -> shape mapping.
void InferenceEnginePython::IENetwork::reshape(const std::map<std::string, std::vector<size_t>> &input_shapes) {
    actual.reshape(input_shapes);
}
// Returns per-layer statistics as {layer name -> {"min"/"max" -> float vector}},
// converted from the ICNNNetworkStats node-stats representation.
const std::map<std::string, std::map<std::string, std::vector<float>>> InferenceEnginePython::IENetwork::getStats() {
    InferenceEngine::ICNNNetworkStats *pstats = nullptr;
    InferenceEngine::ResponseDesc response;
    // Cast to the ICNNNetwork interface to reach getStats().
    IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) actual).getStats(&pstats, &response));
    auto statsMap = pstats->getNodesStats();
    std::map<std::string, std::map<std::string, std::vector<float>>> map;
    for (const auto &it : statsMap) {
        std::map<std::string, std::vector<float>> stats;
        stats.emplace("min", it.second->_minOutputs);
        stats.emplace("max", it.second->_maxOutputs);
        map.emplace(it.first, stats);
    }
    return map;
}

// Replaces the network's per-layer statistics with the given
// {layer name -> {"min"/"max" -> float vector}} mapping.
// NOTE(review): it.second.at("min"/"max") throws if either key is missing —
// callers are expected to supply both; confirm at the Python layer.
void
InferenceEnginePython::IENetwork::setStats(
        const std::map<std::string, std::map<std::string, std::vector<float>>> &stats) {
    InferenceEngine::ICNNNetworkStats *pstats = nullptr;
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) actual).getStats(&pstats, &response));
    std::map<std::string, InferenceEngine::NetworkNodeStatsPtr> newNetNodesStats;
    for (const auto &it : stats) {
        InferenceEngine::NetworkNodeStatsPtr nodeStats = InferenceEngine::NetworkNodeStatsPtr(
                new InferenceEngine::NetworkNodeStats());
        newNetNodesStats.emplace(it.first, nodeStats);
        nodeStats->_minOutputs = it.second.at("min");
        nodeStats->_maxOutputs = it.second.at("max");
    }
    pstats->setNodesStats(newNetNodesStats);
}
// Sets the input precision from its string name (resolved via precision_map).
void InferenceEnginePython::InputInfo::setPrecision(std::string precision) {
    actual.setPrecision(precision_map[precision]);
}

// Sets the input layout from its string name (resolved via layout_map).
void InferenceEnginePython::InputInfo::setLayout(std::string layout) {
    actual.setLayout(layout_map[layout]);
}

// Sets the output precision from its string name (resolved via precision_map).
void InferenceEnginePython::OutputInfo::setPrecision(std::string precision) {
    actual->setPrecision(precision_map[precision]);
}
// Loads the plugin for the requested device via the dispatcher and composes a
// human-readable version string "major.minor.buildNumber".
InferenceEnginePython::IEPlugin::IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs) {
    InferenceEngine::PluginDispatcher dispatcher{plugin_dirs};
    actual = dispatcher.getPluginByDevice(device);
    const InferenceEngine::Version *pluginVersion;
    actual->GetVersion(pluginVersion);  // fills the pointer with plugin version info
    version = std::to_string(pluginVersion->apiVersion.major) + ".";
    version += std::to_string(pluginVersion->apiVersion.minor) + ".";
    version += pluginVersion->buildNumber;
    device_name = device;
}

// Asks the HETERO plugin to assign default per-layer affinities for the
// network (empty config). Only meaningful when the wrapped plugin is HETERO.
void InferenceEnginePython::IEPlugin::setInitialAffinity(const InferenceEnginePython::IENetwork &net) {
    InferenceEngine::HeteroPluginPtr hetero_plugin(actual);
    InferenceEngine::ResponseDesc response;
    auto &network = net.actual;
    IE_CHECK_CALL(hetero_plugin->SetAffinity(network, {}, &response));
}

// Returns the set of layer names the plugin reports as supported for this network.
std::set<std::string> InferenceEnginePython::IEPlugin::queryNetwork(const InferenceEnginePython::IENetwork &net) {
    const InferenceEngine::CNNNetwork &network = net.actual;
    InferenceEngine::QueryNetworkResult queryRes;
    actual->QueryNetwork(network, queryRes);
    return queryRes.supportedLayers;
}
// Sets the layer's affinity string (e.g. a target device for HETERO execution).
void InferenceEnginePython::IENetLayer::setAffinity(const std::string &target_affinity) {
    layer_ptr->affinity = target_affinity;
}

// Replaces the layer's parameter map wholesale.
void InferenceEnginePython::IENetLayer::setParams(const std::map<std::string, std::string> &params_map) {
    layer_ptr->params = params_map;
}

// Collects the layer's blobs into a name -> Blob map.
std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::IENetLayer::getWeights() {
    auto w_layer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(layer_ptr);
    // IF current layer is weightable gather weights and biases from casted WeightableLayer and all other blobs
    // considered as custom and gathered from blobs field pf CNNLayer.
    std::map<std::string, InferenceEngine::Blob::Ptr> weights;
    if (w_layer != nullptr) {
        if (w_layer->_weights != nullptr) {
            weights["weights"] = w_layer->_weights;
        }
        if (w_layer->_biases != nullptr) {
            weights["biases"] = w_layer->_biases;
        }
        // Remaining blobs are custom; "weights"/"biases" were already taken
        // from the dedicated WeightableLayer fields above.
        for (auto it : w_layer->blobs) {
            if (it.first == "weights" || it.first == "biases") {
                continue;
            }
            weights[it.first] = it.second;
        }
    } else {
        // Otherwise all layer's blobs are considered as custom and gathered from CNNLayer
        std::map<std::string, InferenceEngine::Blob::Ptr> map_placeholder;
        weights = map_placeholder;  // If layer has no blobs it should not be missed from weights map
        for (auto it : layer_ptr->blobs) {
            weights[it.first] = it.second;
        }
    }
    return weights;
}

// Sets the layer's precision from its string name (resolved via precision_map).
void InferenceEnginePython::IENetLayer::setPrecision(std::string precision) {
    layer_ptr->precision = precision_map[precision];
}
// Loads a CPU extension shared library (custom layer kernels) and registers it
// with the plugin.
void InferenceEnginePython::IEPlugin::addCpuExtension(const std::string &extension_path) {
    InferenceEngine::ResponseDesc response;
    auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(extension_path);
    auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
    IE_CHECK_CALL(actual->AddExtension(extension, &response))
}
// Loads the network onto the plugin with the given configuration and
// pre-creates `num_requests` infer requests inside the returned executable
// network wrapper. Throws (via IE_CHECK_CALL) on any plugin error.
std::unique_ptr<InferenceEnginePython::IEExecNetwork>
InferenceEnginePython::IEPlugin::load(const InferenceEnginePython::IENetwork &net,
                                      int num_requests,
                                      const std::map<std::string, std::string> &config) {
    InferenceEngine::ResponseDesc response;
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(net.name,
                                                                                                 num_requests);
    IE_CHECK_CALL(actual->LoadNetwork(exec_network->actual, net.actual, config, &response))

    // Create one infer request per pre-allocated slot. Bounding the loop by
    // the container size (== num_requests, set by the IEExecNetwork ctor)
    // avoids the signed/unsigned comparison between size_t and int.
    for (size_t i = 0; i < exec_network->infer_requests.size(); ++i) {
        InferRequestWrap &infer_request = exec_network->infer_requests[i];
        IE_CHECK_CALL(exec_network->actual->CreateInferRequest(infer_request.request_ptr, &response))
    }

    return exec_network;
}
// Applies a key/value configuration map to the plugin.
void InferenceEnginePython::IEPlugin::setConfig(const std::map<std::string, std::string> &config) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(actual->SetConfig(config, &response))
}
// Pre-sizes the infer request pool to num_requests and stores the network name.
InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string &name, size_t num_requests) :
        infer_requests(num_requests), name(name) {
}

// Runs a synchronous inference on the first (index 0) infer request.
void InferenceEnginePython::IEExecNetwork::infer() {
    InferRequestWrap &request = infer_requests[0];
    request.infer();
}

// Fetches the blob with the given name from the underlying infer request into blob_ptr.
void InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string &blob_name, InferenceEngine::Blob::Ptr &blob_ptr)
{
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->GetBlob(blob_name.c_str(), blob_ptr, &response));
}

// Sets a dynamic batch size for this individual infer request.
void InferenceEnginePython::InferRequestWrap::setBatch(int size) {
    InferenceEngine::ResponseDesc response;
    IE_CHECK_CALL(request_ptr->SetBatch(size, &response));
}
// Completion callback for asynchronous inference: computes the elapsed time
// since InferRequestWrap::infer_async() stamped start_time and stores it (in
// milliseconds) on the wrapper retrieved via the request's user data.
void latency_callback(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) {
    if (code != InferenceEngine::StatusCode::OK) {
        THROW_IE_EXCEPTION << "Async Infer Request failed with status code " << code;
    }
    // Fix: initialize the pointer and verify GetUserData actually populated it;
    // previously a failed call would leave requestWrap dangling and dereference it.
    InferenceEnginePython::InferRequestWrap *requestWrap = nullptr;
    InferenceEngine::ResponseDesc dsc;
    request->GetUserData(reinterpret_cast<void **>(&requestWrap), &dsc);
    if (requestWrap == nullptr) {
        THROW_IE_EXCEPTION << "Failed to retrieve user data from infer request";
    }
    auto end_time = Time::now();
    auto execTime = std::chrono::duration_cast<ns>(end_time - requestWrap->start_time);
    // nanoseconds -> milliseconds
    requestWrap->exec_time = static_cast<double>(execTime.count()) * 0.000001;
}
void InferenceEnginePython::InferRequestWrap::infer() {
InferenceEngine::ResponseDesc response;
start_time = Time::now();
IE_CHECK_CALL(request_ptr->Infer(&response));
auto end_time = Time::now();
auto execTime = std::chrono::duration_cast<ns>(end_time - start_time);
exec_time = static_cast<double>(execTime.count()) * 0.000001;
}
void InferenceEnginePython::InferRequestWrap::infer_async() {
    // Non-blocking inference. latency_callback() later computes exec_time from
    // the start_time stamped here, finding this wrapper through the user-data
    // pointer stored below — so SetUserData must precede StartAsync.
    InferenceEngine::ResponseDesc response;
    start_time = Time::now();
    IE_CHECK_CALL(request_ptr->SetUserData(this, &response));
    // NOTE(review): the status code of SetCompletionCallback is ignored here,
    // unlike every other engine call in this file — confirm this is intentional.
    request_ptr->SetCompletionCallback(latency_callback);
    IE_CHECK_CALL(request_ptr->StartAsync(&response));
}
int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) {
InferenceEngine::ResponseDesc responseDesc;
InferenceEngine::StatusCode code = request_ptr->Wait(timeout, &responseDesc);
return static_cast<int >(code);
}
std::map<std::string, InferenceEnginePython::ProfileInfo>
InferenceEnginePython::InferRequestWrap::getPerformanceCounts() {
    // Converts the engine's per-layer profiling records into the Python-facing
    // ProfileInfo representation, keyed by layer name.
    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts;
    InferenceEngine::ResponseDesc response;
    request_ptr->GetPerformanceCounts(perf_counts, &response);
    std::map<std::string, InferenceEnginePython::ProfileInfo> perf_map;
    // Iterate by const reference to avoid copying every (name, record) pair.
    for (const auto &it : perf_counts) {
        InferenceEnginePython::ProfileInfo profile_info;
        switch (it.second.status) {
            case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
                profile_info.status = "EXECUTED";
                break;
            case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN:
                profile_info.status = "NOT_RUN";
                break;
            case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT:
                profile_info.status = "OPTIMIZED_OUT";
                break;
            default:
                profile_info.status = "UNKNOWN";
        }
        profile_info.exec_type = it.second.exec_type;
        profile_info.layer_type = it.second.layer_type;
        profile_info.cpu_time = it.second.cpu_uSec;
        profile_info.real_time = it.second.realTime_uSec;
        // Fix: execution_index was never copied, leaving the ProfileInfo field
        // uninitialized (garbage) in every returned record.
        profile_info.execution_index = it.second.execution_index;
        perf_map[it.first] = profile_info;
    }
    return perf_map;
}
std::string InferenceEnginePython::get_version() {
    // Compose "<major>.<minor>.<buildNumber>" from the engine's version info.
    auto version = InferenceEngine::GetInferenceEngineVersion();
    std::string result = std::to_string(version->apiVersion.major);
    result += "." + std::to_string(version->apiVersion.minor) + ".";
    result += version->buildNumber;
    return result;
}

View File

@@ -0,0 +1,180 @@
// Copyright (C) 2018-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ie_extension.h>
#include <iterator>
#include <string>
#include <utility>
#include <map>
#include <vector>
#include <set>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <chrono>
#include "inference_engine.hpp"
// Clock and duration aliases used for measuring inference execution time.
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;
namespace InferenceEnginePython {
// Python-facing wrapper around one network layer: cached string metadata plus
// setters that write through to the underlying CNNLayer.
struct IENetLayer {
    InferenceEngine::CNNLayerPtr layer_ptr;   // the wrapped layer
    InferenceEngine::CNNNetwork network_ptr;  // network the layer belongs to
    std::string name;
    std::string type;
    std::string precision;
    std::string shape;
    std::string layout;
    std::vector<std::string> children;  // names of downstream layers
    std::vector<std::string> parents;   // names of upstream layers
    std::string affinity;
    std::map<std::string, std::string> params;
    // Assigns a device-affinity string to the layer.
    void setAffinity(const std::string &target_affinity);
    // Replaces the layer's parameter map.
    void setParams(const std::map<std::string, std::string> &params_map);
    // Returns the layer's blobs (e.g. weights/biases) keyed by name.
    std::map<std::string, InferenceEngine::Blob::Ptr> getWeights();
    // Sets the layer precision from its string name (looked up in precision_map).
    void setPrecision(std::string precision);
};
// Describes one network input along with setters that change its precision
// and layout on the underlying descriptor.
struct InputInfo {
    InferenceEngine::InputInfo actual;  // underlying engine input descriptor
    std::vector<size_t> dims;
    std::string precision;
    std::string layout;
    void setPrecision(std::string precision);
    void setLayout(std::string layout);
};
// Describes one network output; only the precision is settable.
struct OutputInfo {
    InferenceEngine::DataPtr actual;  // underlying engine data descriptor
    std::vector<size_t> dims;
    std::string precision;
    std::string layout;
    void setPrecision(std::string precision);
};
// Per-layer profiling record mirrored from InferenceEngineProfileInfo
// (see InferRequestWrap::getPerformanceCounts).
struct ProfileInfo {
    std::string status;  // "EXECUTED", "NOT_RUN", "OPTIMIZED_OUT" or "UNKNOWN"
    std::string exec_type;
    std::string layer_type;
    int64_t real_time;  // wall-clock time, microseconds
    int64_t cpu_time;   // CPU time, microseconds
    unsigned execution_index;
};
// Owns a CNNNetwork and exposes inspection, reshaping and serialization helpers.
struct IENetwork {
    InferenceEngine::CNNNetwork actual;
    std::string name;
    std::size_t batch_size;
    void setBatch(const size_t size);
    // Registers additional output layers with the given precision name.
    void addOutputs(const std::vector<std::string> &out_layers, const std::string &precision);
    // Returns (layer name, wrapper) pairs for all layers.
    const std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>> getLayers();
    const std::map<std::string, InferenceEnginePython::InputInfo> getInputs();
    const std::map<std::string, InferenceEnginePython::OutputInfo> getOutputs();
    // Re-shapes the network for new input dimensions (name -> dims).
    void reshape(const std::map<std::string, std::vector<size_t>> &input_shapes);
    // Writes the network to IR files (topology XML + weights BIN).
    void serialize(const std::string &path_to_xml, const std::string &path_to_bin);
    // Per-layer statistics: layer name -> statistic name -> values.
    void setStats(const std::map<std::string, std::map<std::string, std::vector<float>>> &stats);
    const std::map<std::string, std::map<std::string, std::vector<float>>> getStats();
    // Reads a network from IR files (model XML path + weights file path).
    IENetwork(const std::string &model, const std::string &weights);
    IENetwork() = default;
};
// Wraps a single IInferRequest and tracks its execution latency.
struct InferRequestWrap {
    InferenceEngine::IInferRequest::Ptr request_ptr;
    Time::time_point start_time;  // stamped when inference is started
    double exec_time;             // last inference duration, milliseconds
    // Synchronous inference; sets exec_time on return.
    void infer();
    // Asynchronous inference; exec_time is set by the completion callback.
    void infer_async();
    // Waits for completion; returns the engine StatusCode as int.
    int wait(int64_t timeout);
    // Fetches the named blob of this request into blob_ptr.
    void getBlobPtr(const std::string &blob_name, InferenceEngine::Blob::Ptr &blob_ptr);
    void setBatch(int size);
    std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
};
// Executable (compiled) network together with its pool of inference requests.
struct IEExecNetwork {
    InferenceEngine::IExecutableNetwork::Ptr actual;
    // Sized at construction; the requests are created by IEPlugin::load().
    std::vector<InferRequestWrap> infer_requests;
    std::string name;
    IEExecNetwork(const std::string &name, size_t num_requests);
    // Runs the first request of the pool synchronously.
    void infer();
};
// Device plugin wrapper: compiles networks, manages configuration and extensions.
struct IEPlugin {
    // Compiles the network on this device and creates num_requests infer requests.
    std::unique_ptr<InferenceEnginePython::IEExecNetwork> load(const InferenceEnginePython::IENetwork &net,
                                                               int num_requests,
                                                               const std::map<std::string, std::string> &config);
    std::string device_name;
    std::string version;
    void setConfig(const std::map<std::string, std::string> &);
    // Loads a CPU extension shared library into the plugin.
    void addCpuExtension(const std::string &extension_path);
    void setInitialAffinity(const InferenceEnginePython::IENetwork &net);
    IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs);
    IEPlugin() = default;
    // NOTE(review): presumed to return the names of layers this plugin supports
    // (QueryNetwork semantics) — confirm against the engine documentation.
    std::set<std::string> queryNetwork(const InferenceEnginePython::IENetwork &net);
    InferenceEngine::InferenceEnginePluginPtr actual;
};
// Reinterprets a blob's raw buffer as an array of T; no type checking is done.
template<class T>
T *get_buffer(InferenceEngine::Blob &blob) {
    return blob.buffer().as<T *>();
}

// Pre-C++14 stand-in for std::make_unique.
template<class T, class... Args>
std::unique_ptr<T> make_unique(Args &&... args) {
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

// Returns the engine version string "<major>.<minor>.<buildNumber>".
std::string get_version();
}; // namespace InferenceEnginePython

View File

@@ -1,8 +1,3 @@
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
from libc.stddef cimport size_t
from libcpp.string cimport string
from libcpp.vector cimport vector
@@ -10,7 +5,6 @@ from libcpp.map cimport map
from libcpp.set cimport set
from libcpp.pair cimport pair
from libcpp.memory cimport unique_ptr, shared_ptr
from libcpp cimport bool
from libc.stdint cimport int64_t
@@ -28,7 +22,7 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
size_t element_size() const
cdef cppclass Precision:
const char* name() const
const char*name() const
cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
@@ -37,9 +31,11 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
string type
string precision
string affinity
string shape
string layout
vector[string] children
vector[string] parents
map[string, string] params
# map[string, BlobInfo] blob_info
# map[string, Blob.Ptr] weights;
void setAffinity(const string & target_affinity) except +
void setParams(const map[string, string] & params_map) except +
map[string, Blob.Ptr] getWeights() except +
@@ -49,15 +45,14 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
vector[size_t] dims
string precision
string layout
void setPrecision(string precision)
void setLayout(string layout)
void setPrecision(string precision) except +
void setLayout(string layout) except +
cdef cppclass OutputInfo:
vector[size_t] dims
string precision
string layout
void setPrecision(string precision)
void setPrecision(string precision) except +
cdef cppclass ProfileInfo:
string status
@@ -68,51 +63,51 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
unsigned int execution_index
cdef cppclass WeightsInfo:
Blob.Ptr &weights;
Blob.Ptr &biases;
Blob.Ptr & weights;
Blob.Ptr & biases;
map[string, Blob.Ptr] custom_blobs;
cdef cppclass IEExecNetwork:
vector[InferRequestWrap] infer_requests
cdef cppclass IENetwork:
IENetwork() except +
IENetwork(const string &, const string &) except +
string name
size_t batch_size
map[string, vector[size_t]] inputs
map[string, IENetLayer] getLayers() except +
const vector[pair[string, IENetLayer]] getLayers() except +
map[string, InputInfo] getInputs() except +
map[string, OutputInfo] getOutputs() except +
void addOutputs(vector[string] &, string &) except +
void setAffinity(map[string, string] &types_affinity_map, map[string, string] &layers_affinity_map) except +
void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except +
void setBatch(size_t size) except +
void setLayerParams(map[string, map[string, string]] params_map) except +
void serialize(const string& path_to_xml, const string& path_to_bin) except +
void reshape(map[string, vector[size_t]] input_shapes) except +
void setStats(map[string, map[string, vector[float]]] & stats) except +
map[string, map[string, vector[float]]] getStats() except +
cdef cppclass IEPlugin:
IEPlugin() except +
IEPlugin(const string &, const vector[string] &) except +
unique_ptr[IEExecNetwork] load(IENetwork & net, int num_requests, const map[string, string]& config) except +
void addCpuExtension(const string &) except +
void setConfig(const map[string, string]&) except +
void setConfig(const map[string, string] &) except +
void setInitialAffinity(IENetwork & net) except +
set[string] queryNetwork(const IENetwork &net) except +
set[string] queryNetwork(const IENetwork & net) except +
string device_name
string version
cdef cppclass IENetReader:
IENetwork read(const string &, const string &) except +
cdef cppclass InferRequestWrap:
vector[string] getInputsList() except +
vector[string] getOutputsList() except +
Blob.Ptr& getOutputBlob(const string &blob_name) except +
Blob.Ptr& getInputBlob(const string &blob_name) except +
double exec_time;
void getBlobPtr(const string &blob_name, Blob.Ptr &blob_ptr) except +
map[string, ProfileInfo] getPerformanceCounts() except +
void infer() except +
void infer_async() except +
int wait(int64_t timeout) except +
void setBatch(int size) except +
cdef T* get_buffer[T](Blob &)
cdef T*get_buffer[T](Blob &)
cdef string get_version()

View File

@@ -0,0 +1,88 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ArgMax layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(ArgMaxLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit ArgMaxLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit ArgMaxLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ArgMaxLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    ArgMaxLayer& setName(const std::string& name);
    /**
     * @brief Returns port with shapes for the layer
     * @return Port with shapes
     */
    const Port& getPort() const;
    /**
     * @brief Sets port shapes for the layer
     * @param port Port with shapes
     * @return reference to layer builder
     */
    ArgMaxLayer& setPort(const Port& port);
    /**
     * @brief Returns axis
     * @return Axis
     */
    int getAxis() const;
    /**
     * @brief Sets axis
     * @param axis Axis
     * @return reference to layer builder
     */
    ArgMaxLayer& setAxis(int axis);
    /**
     * @brief Returns top K
     * @return Top K
     */
    size_t getTopK() const;
    /**
     * @brief Sets top K
     * @param topK Top K
     * @return reference to layer builder
     */
    ArgMaxLayer& setTopK(size_t topK);
    /**
     * @brief Returns output maximum value
     * @return Output maximum value
     */
    size_t getOutMaxVal() const;
    /**
     * @brief Sets output maximum value
     * @param size Maximum value
     * @return reference to layer builder
     */
    ArgMaxLayer& setOutMaxVal(size_t size);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,67 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for BatchNormalization layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit BatchNormalizationLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit BatchNormalizationLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit BatchNormalizationLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    BatchNormalizationLayer& setName(const std::string& name);
    /**
     * @brief Returns port with shapes for the layer
     * @return Port with shapes
     */
    const Port& getPort() const;
    /**
     * @brief Sets port shapes for the layer
     * @param port Port with shapes
     * @return reference to layer builder
     */
    BatchNormalizationLayer& setPort(const Port &port);
    /**
     * @brief Returns epsilon
     * @return Epsilon
     */
    float getEpsilon() const;
    /**
     * @brief Sets epsilon
     * @param eps Epsilon
     * @return reference to layer builder
     */
    BatchNormalizationLayer& setEpsilon(float eps);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,77 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Clamp layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(ClampLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit ClampLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit ClampLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ClampLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    ClampLayer& setName(const std::string& name);
    /**
     * @brief Returns port with shapes for the layer
     * @return Port with shapes
     */
    const Port& getPort() const;
    /**
     * @brief Sets port shapes for the layer
     * @param port Port with shapes
     * @return reference to layer builder
     */
    ClampLayer& setPort(const Port& port);
    /**
     * @brief Returns minimum value of the clamp range
     * @return minimum value
     */
    float getMinValue() const;
    /**
     * @brief Sets minimum value of the clamp range
     * @param minValue Minimum value
     * @return reference to layer builder
     */
    ClampLayer& setMinValue(float minValue);
    /**
     * @brief Returns maximum value of the clamp range
     * @return Maximum value
     */
    float getMaxValue() const;
    /**
     * @brief Sets maximum value of the clamp range
     * @param maxValue Maximum value
     * @return reference to layer builder
     */
    ClampLayer& setMaxValue(float maxValue);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,81 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Concat layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit ConcatLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit ConcatLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ConcatLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    ConcatLayer& setName(const std::string& name);
    /**
     * @brief Returns vector with input ports
     * @return vector with ports
     */
    const std::vector<Port>& getInputPorts() const;
    /**
     * @brief Sets input ports
     * @param ports Vector of input ports
     * @return reference to layer builder
     */
    ConcatLayer& setInputPorts(const std::vector<Port>& ports);
    /**
     * @brief Returns output port
     * @return Output port
     */
    const Port& getOutputPort() const;
    /**
     * @brief Sets output port
     * @param port Output port
     * @return reference to layer builder
     */
    ConcatLayer& setOutputPort(const Port& port);
    /**
     * @brief Returns axis
     * @return Axis
     */
    size_t getAxis() const;
    /**
     * @brief Sets axis
     * @param axis Axis
     * @return reference to layer builder
     */
    ConcatLayer& setAxis(size_t axis);

private:
    // NOTE(review): not referenced by the public interface shown here — confirm
    // whether this member duplicates the axis stored in the layer parameters.
    size_t axis;
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,68 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Const layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(ConstLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit ConstLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit ConstLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ConstLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    ConstLayer& setName(const std::string& name);
    /**
     * @brief Returns port with shapes for the layer
     * @return Port with shapes
     */
    const Port& getPort() const;
    /**
     * @brief Sets port shapes for the layer
     * @param port Port with shapes
     * @return reference to layer builder
     */
    ConstLayer& setPort(const Port& port);
    /**
     * @brief Sets constant data
     * @param data constant blob with data
     * @return reference to layer builder
     */
    ConstLayer& setData(const Blob::CPtr& data);
    /**
     * @brief Returns constant data
     * @return constant blob with data
     */
    const Blob::CPtr& getData() const;
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,144 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Convolution layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit ConvolutionLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit ConvolutionLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ConvolutionLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    ConvolutionLayer& setName(const std::string& name);
    /**
     * @brief Returns input port
     * @return Input port
     */
    const Port& getInputPort() const;
    /**
     * @brief Sets input port
     * @param port Input port
     * @return reference to layer builder
     */
    ConvolutionLayer& setInputPort(const Port& port);
    /**
     * @brief Returns output port
     * @return Output port
     */
    const Port& getOutputPort() const;
    /**
     * @brief Sets output port
     * @param port Output port
     * @return reference to layer builder
     */
    ConvolutionLayer& setOutputPort(const Port& port);
    /**
     * @brief Returns kernel size
     * @return Kernel size
     */
    const std::vector<size_t> getKernel() const;
    /**
     * @brief Sets kernel size
     * @param kernel Kernel size
     * @return reference to layer builder
     */
    ConvolutionLayer& setKernel(const std::vector<size_t>& kernel);
    /**
     * @brief Returns vector of strides
     * @return vector of strides
     */
    const std::vector<size_t> getStrides() const;
    /**
     * @brief Sets strides
     * @param strides vector of strides
     * @return reference to layer builder
     */
    ConvolutionLayer& setStrides(const std::vector<size_t>& strides);
    /**
     * @brief Returns dilations
     * @return vector of dilations
     */
    const std::vector<size_t> getDilation() const;
    /**
     * @brief Sets dilations
     * @param dilation Vector of dilations
     * @return reference to layer builder
     */
    ConvolutionLayer& setDilation(const std::vector<size_t>& dilation);
    /**
     * @brief Returns begin paddings
     * @return vector of paddings
     */
    const std::vector<size_t> getPaddingsBegin() const;
    /**
     * @brief Sets begin paddings
     * @param paddings Vector of paddings
     * @return reference to layer builder
     */
    ConvolutionLayer& setPaddingsBegin(const std::vector<size_t>& paddings);
    /**
     * @brief Return end paddings
     * @return Vector of paddings
     */
    const std::vector<size_t> getPaddingsEnd() const;
    /**
     * @brief Sets end paddings
     * @param paddings Vector of paddings
     * @return reference to layer builder
     */
    ConvolutionLayer& setPaddingsEnd(const std::vector<size_t>& paddings);
    /**
     * @brief Returns group
     * @return Group
     */
    size_t getGroup() const;
    /**
     * @brief Sets group
     * @param group Group
     * @return reference to layer builder
     */
    ConvolutionLayer& setGroup(size_t group);
    /**
     * @brief Return output depth
     * @return Output depth
     */
    size_t getOutDepth() const;
    /**
     * @brief Sets output depth
     * @param outDepth Output depth
     * @return reference to layer builder
     */
    ConvolutionLayer& setOutDepth(size_t outDepth);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,89 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Crop layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(CropLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit CropLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit CropLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit CropLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    CropLayer& setName(const std::string& name);
    /**
     * @brief Returns input ports
     * @return Vector of input ports
     */
    const std::vector<Port>& getInputPorts() const;
    /**
     * @brief Sets input ports
     * @param ports Vector of input ports
     * @return reference to layer builder
     */
    CropLayer& setInputPorts(const std::vector<Port>& ports);
    /**
     * @brief Return output port
     * @return Output port
     */
    const Port& getOutputPort() const;
    /**
     * @brief Sets output port
     * @param port Output port
     * @return reference to layer builder
     */
    CropLayer& setOutputPort(const Port& port);
    /**
     * @brief Returns axis
     * @return Vector of axis
     */
    const std::vector<size_t> getAxis() const;
    /**
     * @brief Sets axis
     * @param axis Vector of axis
     * @return reference to layer builder
     */
    CropLayer& setAxis(const std::vector<size_t>& axis);
    /**
     * @brief Returns offsets
     * @return Vector of offsets
     */
    const std::vector<size_t> getOffset() const;
    /**
     * @brief Sets offsets
     * @param offsets Vector of offsets
     * @return reference to layer builder
     */
    CropLayer& setOffset(const std::vector<size_t>& offsets);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,79 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for CTCGreedyDecoder layer
 * @note All setters return a reference to the builder, so calls can be chained.
 */
class INFERENCE_ENGINE_API_CLASS(CTCGreedyDecoderLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit CTCGreedyDecoderLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit CTCGreedyDecoderLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit CTCGreedyDecoderLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    CTCGreedyDecoderLayer& setName(const std::string& name);
    /**
     * @brief Returns input ports
     * @return Vector of input ports
     */
    const std::vector<Port>& getInputPorts() const;
    /**
     * @brief Sets input ports
     * @param ports Vector of input ports
     * @return reference to layer builder
     */
    CTCGreedyDecoderLayer& setInputPorts(const std::vector<Port>& ports);
    /**
     * @brief Returns output port
     * @return Output port
     */
    const Port& getOutputPort() const;
    /**
     * @brief Sets output port
     * @param port Output port
     * @return reference to layer builder
     */
    CTCGreedyDecoderLayer& setOutputPort(const Port& port);
    /**
     * @brief Returns CTCMergeRepeated
     * @return true if merge repeated
     */
    bool getCTCMergeRepeated() const;
    /**
     * @brief Sets CTCMergeRepeated
     * @param flag bool value
     * @return reference to layer builder
     */
    CTCGreedyDecoderLayer& setCTCMergeRepeated(bool flag);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,37 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_convolution_layer.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Deconvolution layer
 * @note Inherits all parameter accessors (kernel, strides, paddings, ...) from
 *       ConvolutionLayer; only the constructors differ.
 */
class INFERENCE_ENGINE_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit DeconvolutionLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer pointer to generic builder
     */
    explicit DeconvolutionLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit DeconvolutionLayer(const Layer::CPtr& layer);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,188 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for DetectionOutput layer
*/
class INFERENCE_ENGINE_API_CLASS(DetectionOutputLayer): public LayerDecorator {
public:
/**
* @brief The constructor creates a builder with the name
* @param name Layer name
*/
explicit DetectionOutputLayer(const std::string& name = "");
/**
* @brief The constructor creates a builder from generic builder
* @param layer pointer to generic builder
*/
explicit DetectionOutputLayer(const Layer::Ptr& layer);
/**
* @brief The constructor creates a builder from generic builder
* @param layer constant pointer to generic builder
*/
explicit DetectionOutputLayer(const Layer::CPtr& layer);
/**
* @brief Sets the name for the layer
* @param name Layer name
* @return reference to layer builder
*/
DetectionOutputLayer& setName(const std::string& name);
/**
* @brief Returns output port
* @return Output port
*/
const Port& getOutputPort() const;
/**
* @brief Sets output port
* @param port Output port
* @return reference to layer builder
*/
DetectionOutputLayer& setOutputPort(const Port& port);
/**
* @brief Returns input ports
* @return Vector of input ports
*/
const std::vector<Port>& getInputPorts() const;
/**
* @brief Sets input ports
* @param ports Vector of input ports
* @return reference to layer builder
*/
DetectionOutputLayer& setInputPorts(const std::vector<Port>& ports);
/**
* @brief Returns number of classes
* @return Number of classes
*/
size_t getNumClasses() const;
/**
* @brief Sets number of classes to be predict
* @param num Number of classes
* @return reference to layer builder
*/
DetectionOutputLayer& setNumClasses(size_t num);
/**
* @brief Returns background label ID
* @return Background ID
*/
int getBackgroudLabelId() const;
/**
* @brief Sets background label ID
* @param labelId Background ID if there is no background class, set it to -1.
* @return reference to layer builder
*/
DetectionOutputLayer& setBackgroudLabelId(int labelId);
/**
* @brief Returns maximum number of results to be kept on NMS stage
* @return Top K
*/
int getTopK() const;
/**
* @brief Sets maximum number of results to be kept on NMS stage
* @param topK Top K
* @return reference to layer builder
*/
DetectionOutputLayer& setTopK(int topK);
/**
* @brief Returns number of total boxes to be kept per image after NMS step
* @return Keep top K
*/
int getKeepTopK() const;
/**
* @brief Sets number of total boxes to be kept per image after NMS step
* @param topK Keep top K
* @return reference to layer builder
*/
DetectionOutputLayer& setKeepTopK(int topK);
/**
* @brief Returns number of oriented classes
* @return Number of oriented classes
*/
int getNumOrientClasses() const;
/**
* @brief Sets number of oriented classes
* @param numClasses Number of classes
* @return reference to layer builder
*/
DetectionOutputLayer& setNumOrientClasses(int numClasses);
/**
* @brief Returns type of coding method for bounding boxes
* @return String with code type
*/
std::string getCodeType() const;
/**
* @brief Sets type of coding method for bounding boxes
* @param type Type
* @return reference to layer builder
*/
DetectionOutputLayer& setCodeType(std::string type);
/**
* @brief Returns interpolate orientation
* @return Interpolate orientation
*/
int getInterpolateOrientation() const;
/**
* @brief Sets interpolate orientation
* @param orient Orientation
* @return reference to layer builder
*/
DetectionOutputLayer& setInterpolateOrientation(int orient);
/**
* @brief Returns threshold to be used in NMS stage
* @return Threshold
*/
float getNMSThreshold() const;
/**
* @brief Sets threshold to be used in NMS stage
* @param threshold NMS threshold
* @return reference to layer builder
*/
DetectionOutputLayer& setNMSThreshold(float threshold);
/**
* @brief Returns confidence threshold
* @return Threshold
*/
float getConfidenceThreshold() const;
/**
* @brief Sets confidence threshold
* @param threshold Threshold
* @return reference to layer builder
*/
DetectionOutputLayer& setConfidenceThreshold(float threshold);
/**
* @brief Returns share location
* @return true if bounding boxes are shared among different classes
*/
bool getShareLocation() const;
/**
* @brief Sets share location
* @param flag true if bounding boxes are shared among different classes
* @return reference to layer builder
*/
DetectionOutputLayer& setShareLocation(bool flag);
/**
* @brief Returns encoded settings
* @return true if variance is encoded in target
*/
bool getVariantEncodedInTarget() const;
/**
* @brief Sets encoded settings
* @param flag true if variance is encoded in target
* @return reference to layer builder
*/
DetectionOutputLayer& setVariantEncodedInTarget(bool flag);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,105 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Eltwise layer
 */
class INFERENCE_ENGINE_API_CLASS(EltwiseLayer): public LayerDecorator {
public:
/**
 * @brief The enum defines all Eltwise types
 */
enum EltwiseType {
SUM = 1,  //!< Element-wise sum
MAX,      //!< Element-wise maximum
MUL,      //!< Element-wise multiplication
SUB,      //!< Element-wise subtraction
DIV,      //!< Element-wise division
MIN,      //!< Element-wise minimum
SQUARED_DIFF  //!< Element-wise squared difference
};
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit EltwiseLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit EltwiseLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit EltwiseLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
EltwiseLayer& setName(const std::string& name);
/**
 * @brief Returns input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports Vector of input ports
 * @return reference to layer builder
 */
EltwiseLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
EltwiseLayer& setOutputPort(const Port& port);
/**
 * @brief Returns eltwise type
 * @return Eltwise type
 */
EltwiseType getEltwiseType() const;
/**
 * @brief Sets eltwise type
 * @param type Eltwise type
 * @return reference to layer builder
 */
EltwiseLayer& setEltwiseType(EltwiseType type);
/**
 * @brief Returns eltwise scales
 * @return Vector of scales (returned by value; a copy of the stored scales)
 */
const std::vector<float> getScales() const;
/**
 * @brief Sets eltwise scales
 * @param scales Vector of scales
 * @return reference to layer builder
 */
EltwiseLayer& setScales(const std::vector<float>& scales);
private:
// Cached eltwise operation type; kept in sync with setEltwiseType()
EltwiseType type;
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,67 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for ELU (Exponential Linear Unit) layer
 */
class INFERENCE_ENGINE_API_CLASS(ELULayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit ELULayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit ELULayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit ELULayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
ELULayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
ELULayer& setPort(const Port& port);
/**
 * @brief Returns the alpha coefficient of the ELU activation
 * @return Alpha coefficient
 */
float getAlpha() const;
/**
 * @brief Sets the alpha coefficient of the ELU activation
 * @param alpha Alpha coefficient
 * @return reference to layer builder
 */
ELULayer& setAlpha(float alpha);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,77 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for FullyConnected layer
 */
class INFERENCE_ENGINE_API_CLASS(FullyConnectedLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit FullyConnectedLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit FullyConnectedLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit FullyConnectedLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
FullyConnectedLayer& setName(const std::string& name);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
FullyConnectedLayer& setInputPort(const Port& port);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
FullyConnectedLayer& setOutputPort(const Port& port);
/**
 * @brief Returns output size (number of output neurons)
 * @return Output size
 */
size_t getOutputNum() const;
/**
 * @brief Sets output size (number of output neurons)
 * @param outNum Output size
 * @return reference to layer builder
 */
FullyConnectedLayer& setOutputNum(size_t outNum);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,66 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for GRN (Global Response Normalization) layer
 */
class INFERENCE_ENGINE_API_CLASS(GRNLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit GRNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit GRNLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit GRNLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
GRNLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
GRNLayer& setPort(const Port& port);
/**
 * @brief Returns beta
 * @return Beta
 */
float getBeta() const;
/**
 * @brief Sets beta
 * @param beta Beta
 * @return reference to layer builder
 */
GRNLayer& setBeta(float beta);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,87 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for GRUSequence layer
 */
class INFERENCE_ENGINE_API_CLASS(GRUSequenceLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit GRUSequenceLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit GRUSequenceLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit GRUSequenceLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
GRUSequenceLayer& setName(const std::string& name);
/**
 * @brief Returns input ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports for the layer
 * @param ports vector of input ports
 * @return reference to layer builder
 */
GRUSequenceLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getOutputPorts() const;
/**
 * @brief Sets output ports for the layer
 * @param ports vector of output ports
 * @return reference to layer builder
 */
GRUSequenceLayer& setOutputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns hidden state size
 * @return Hidden size
 */
int getHiddenSize() const;
/**
 * @brief Sets hidden state size
 * @param size Hidden size
 * @return reference to layer builder
 */
GRUSequenceLayer& setHiddenSize(int size);
/**
 * @brief Returns sequence dimension flag
 * @return Sequence dimension flag
 */
bool getSequenceDim() const;
/**
 * @brief Sets sequence dimension flag
 * @param flag Sequence dimension flag
 * @return reference to layer builder
 * @deprecated Misspelled name kept for backward compatibility; use setSequenceDim
 */
GRUSequenceLayer& setSqquenceDim(bool flag);
/**
 * @brief Sets sequence dimension flag (correctly spelled alias for setSqquenceDim)
 * @param flag Sequence dimension flag
 * @return reference to layer builder
 */
GRUSequenceLayer& setSequenceDim(bool flag) { return setSqquenceDim(flag); }
/**
 * @brief Returns activation names
 * @return Vector of activation names
 */
const std::vector<std::string>& getActivations() const;
/**
 * @brief Sets activation names
 * @param activations Vector of activation names
 * @return reference to layer builder
 */
GRUSequenceLayer& setActivations(const std::vector<std::string>& activations);
/**
 * @brief Returns alpha parameters of the activations
 * @return Vector of alpha parameters
 */
const std::vector<float>& getActivationsAlpha() const;
/**
 * @brief Sets alpha parameters of the activations
 * @param activations Vector of alpha parameters
 * @return reference to layer builder
 */
GRUSequenceLayer& setActivationsAlpha(const std::vector<float>& activations);
/**
 * @brief Returns beta parameters of the activations
 * @return Vector of beta parameters
 */
const std::vector<float>& getActivationsBeta() const;
/**
 * @brief Sets beta parameters of the activations
 * @param activations Vector of beta parameters
 * @return reference to layer builder
 */
GRUSequenceLayer& setActivationsBeta(const std::vector<float>& activations);
/**
 * @brief Returns clip value
 * @return Clip value
 */
float getClip() const;
/**
 * @brief Sets clip value
 * @param clip Clip value
 * @return reference to layer builder
 */
GRUSequenceLayer& setClip(float clip);
/**
 * @brief Returns linear-before-reset flag
 * @return Linear-before-reset flag
 */
bool getLinearBeforeReset() const;
/**
 * @brief Sets linear-before-reset flag
 * @param flag Linear-before-reset flag
 * @return reference to layer builder
 */
GRUSequenceLayer& setLinearBeforeReset(bool flag);
/**
 * @brief Returns direction
 * @return Direction string
 */
const std::string& getDirection() const;
/**
 * @brief Sets direction
 * @param direction Direction string
 * @return reference to layer builder
 */
GRUSequenceLayer& setDirection(const std::string& direction);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,55 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Input layer
 */
class INFERENCE_ENGINE_API_CLASS(InputLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit InputLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit InputLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit InputLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
InputLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
InputLayer& setPort(const Port &port);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,201 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <details/caseless.hpp>
#include <ie_parameter.hpp>
#include <ie_network.hpp>
#include <ie_blob.h>
#include <string>
#include <vector>
#include <memory>
#include <map>
namespace InferenceEngine {
namespace Builder {
class Layer;
/**
 * @brief This structure implements a holder for layer validators
 */
struct ValidatorsHolder {
/**
 * @brief Caseless map connecting a layer type name with its validator callback.
 * The callback receives the layer being validated and a flag enabling partial validation.
 */
details::caseless_map<std::string, std::function<void(const std::shared_ptr<const Layer>&, bool)>> validators;
};
/**
 * @brief This class implements a builder for IE Layer
 */
class INFERENCE_ENGINE_API_CLASS(Layer): public ILayer,
public std::enable_shared_from_this<Layer> {
public:
/**
 * @brief A shared pointer to the Layer builder
 */
using Ptr = std::shared_ptr<Layer>;
/**
 * @brief A shared pointer to the constant Layer builder
 */
using CPtr = std::shared_ptr<const Layer>;
/**
 * @brief The constructor creates a Layer builder with layer type and layer name
 * @param type Layer type
 * @param name Layer name
 */
explicit Layer(const std::string& type, const std::string& name = "");
/**
 * @brief The constructor creates a Layer builder from shared pointer to constant ILayer
 * @param layer shared pointer to constant ILayer
 */
explicit Layer(const ILayer::CPtr& layer);
/**
 * @brief The constructor creates a Layer builder with layer ID and layer builder
 * @param id Layer ID
 * @param layer layer builder
 */
Layer(idx_t id, const Layer& layer);
/**
 * @brief Compares the given Layer builder with the current one.
 * Only parameters are compared; ID, name, type and ports are ignored.
 * @param rhs Layer builder to compare with
 * @return true if the given Layer builder is equal to the current one, false - otherwise
 */
bool operator==(const Layer& rhs) const {
return params == rhs.params;
}
/**
 * @brief Returns layer ID
 * @return Layer ID
 */
idx_t getId() const noexcept override;
/**
 * @brief Returns a constant reference to layer name
 * @return Layer name
 */
const std::string& getName() const noexcept override;
/**
 * @brief Sets layer name
 * @param name Layer name
 * @return Reference to Layer builder
 */
Layer& setName(const std::string& name);
/**
 * @brief Returns a constant reference to layer type
 * @return Layer type
 */
const std::string& getType() const noexcept override;
/**
 * @brief Sets layer type
 * @param type Layer type
 * @return Reference to Layer builder
 */
Layer& setType(const std::string& type);
/**
 * @brief Returns constant map of parameters
 * @return map of parameters
 */
const std::map<std::string, Parameter>& getParameters() const noexcept override;
/**
 * @brief Returns mutable map of parameters
 * @return map of parameters
 */
std::map<std::string, Parameter>& getParameters();
/**
 * @brief Sets parameters for layer
 * @param params constant map of parameters
 * @return Reference to Layer builder
 */
Layer& setParameters(const std::map<std::string, Parameter>& params);
/**
 * @brief Returns constant vector of input ports
 * @return Vector of input ports
 */
const std::vector<Port>& getInputPorts() const noexcept override;
/**
 * @brief Returns mutable vector of input ports
 * @return Vector of input ports
 */
std::vector<Port>& getInputPorts();
/**
 * @brief Sets input ports
 * @param ports vector of ports
 * @return Reference to Layer builder
 */
Layer& setInputPorts(const std::vector<Port> &ports);
/**
 * @brief Returns constant vector of output ports
 * @return Vector of output ports
 */
const std::vector<Port>& getOutputPorts() const noexcept override;
/**
 * @brief Returns mutable vector of output ports
 * @return Vector of output ports
 */
std::vector<Port>& getOutputPorts();
/**
 * @brief Sets output ports
 * @param ports vector of ports
 * @return Reference to Layer builder
 */
Layer& setOutputPorts(const std::vector<Port> &ports);
/**
 * @brief Validates the current builder and generates ILayer object
 * @return constant shared pointer to ILayer
 */
const ILayer::CPtr build() const;
/**
 * @brief Validates layer builder
 * @param partial if true, performs only partial validation
 */
void validate(bool partial = false) const;
/**
 * @brief Registers a new validator for type
 * @param type Layer type
 * @param validator Layer validator
 */
static void addValidator(const std::string& type, const std::function<void(const Layer::CPtr&, bool)>& validator);
private:
// Unique layer identifier within a network
idx_t id;
// Layer type name, e.g. "Convolution"
std::string type;
// Human-readable layer name
std::string name;
// Input port descriptors
std::vector<Port> inPorts;
// Output port descriptors
std::vector<Port> outPorts;
// Layer parameters keyed by parameter name
std::map<std::string, Parameter> params;
// Returns the singleton holder with all registered per-type validators
static std::shared_ptr<ValidatorsHolder> getValidatorsHolder();
};
/**
 * @brief This class registers layer validators
 */
class ValidatorRegisterBase {
public:
/**
 * @brief The constructor registers new layer validator
 * @param type Layer type
 * @param validator Layer validator
 */
explicit ValidatorRegisterBase(const std::string& type, const std::function<void(const Layer::CPtr&, bool)>& validator) {
InferenceEngine::Builder::Layer::addValidator(type, validator);
}
};
/**
 * @brief Registers a validator for the given layer type via a file-local static
 * ValidatorRegisterBase instance, so registration happens at static-initialization time
 */
#define REG_VALIDATOR_FOR(__type, __validator) \
static InferenceEngine::Builder::ValidatorRegisterBase _reg_##__type(#__type, __validator)
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,95 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_builder.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief This class defines the basic functional for layer builders
 */
class INFERENCE_ENGINE_API_CLASS(LayerDecorator) {
public:
/**
 * @brief The constructor creates layer builders with layer type and layer name
 * @param type Layer type
 * @param name Layer name
 */
LayerDecorator(const std::string& type, const std::string& name);
/**
 * @brief The constructor creates layer builders from reference to generic layer builder
 * @param layer pointer to generic layer builder
 */
explicit LayerDecorator(const Layer::Ptr& layer);
/**
 * @brief The constructor creates layer builders from reference to generic layer builder
 * @param layer constant pointer to generic layer builder
 */
explicit LayerDecorator(const Layer::CPtr& layer);
/**
 * @brief The copy constructor
 * @param rval Source builder
 */
LayerDecorator(const LayerDecorator& rval);
/**
 * @brief Copy operator for LayerDecorator
 * @param rval Source builder
 * @return Layer builder
 */
LayerDecorator& operator=(const LayerDecorator& rval);
/**
 * @brief Virtual destructor
 */
virtual ~LayerDecorator() = default;
/**
 * @brief The operator creates generic builder
 * @return Generic builder
 */
virtual operator Layer() const;
/**
 * @brief The operator creates generic builder
 * @return Pointer to generic builder
 */
virtual operator Layer::Ptr();
/**
 * @brief The operator creates generic builder
 * @return Constant pointer to generic builder
 */
virtual operator Layer::CPtr() const;
/**
 * @brief Returns layer type
 * @return Layer type
 */
const std::string& getType() const;
/**
 * @brief Returns layer name
 * @return Layer name
 */
const std::string& getName() const;
protected:
// Returns the mutable underlying generic layer builder
Layer::Ptr& getLayer();
// Returns the constant underlying generic layer builder
const Layer::CPtr getLayer() const;
// Throws if the underlying layer's type does not match the expected type
void checkType(const std::string& type) const;
// Constant view of the wrapped layer (set when constructed from a CPtr)
Layer::CPtr cLayer;
private:
// Mutable wrapped layer (set when constructed from a name or a Ptr)
Layer::Ptr layer;
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,99 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for LRN (Local Response Normalization) layer
 */
class INFERENCE_ENGINE_API_CLASS(LRNLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit LRNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit LRNLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit LRNLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
LRNLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
LRNLayer& setPort(const Port& port);
/**
 * @brief Returns side length of the region
 * @return Size
 */
size_t getSize() const;
/**
 * @brief Sets side length of the region
 * @param size Size
 * @return reference to layer builder
 */
LRNLayer& setSize(size_t size);
/**
 * @brief Returns scaling parameter for the normalizing sum
 * @return Scaling parameter
 */
float getAlpha() const;
/**
 * @brief Sets scaling parameter for the normalizing sum
 * @param alpha Scaling parameter
 * @return reference to layer builder
 */
LRNLayer& setAlpha(float alpha);
/**
 * @brief Returns exponent for the normalizing sum
 * @return Exponent
 */
float getBeta() const;
/**
 * @brief Sets exponent for the normalizing sum
 * @param beta Exponent
 * @return reference to layer builder
 */
LRNLayer& setBeta(float beta);
/**
 * @brief Returns bias for the normalizing sum
 * @return Bias
 */
float getBias() const;
/**
 * @brief Sets bias for the normalizing sum
 * @param bias Bias
 * @return reference to layer builder
 */
LRNLayer& setBias(float bias);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,87 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for LSTMSequence layer
 */
class INFERENCE_ENGINE_API_CLASS(LSTMSequenceLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit LSTMSequenceLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit LSTMSequenceLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit LSTMSequenceLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
LSTMSequenceLayer& setName(const std::string& name);
/**
 * @brief Returns input ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports for the layer
 * @param ports vector of input ports
 * @return reference to layer builder
 */
LSTMSequenceLayer& setInputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns output ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getOutputPorts() const;
/**
 * @brief Sets output ports for the layer
 * @param ports vector of output ports
 * @return reference to layer builder
 */
LSTMSequenceLayer& setOutputPorts(const std::vector<Port>& ports);
/**
 * @brief Returns hidden state size
 * @return Hidden size
 */
int getHiddenSize() const;
/**
 * @brief Sets hidden state size
 * @param size Hidden size
 * @return reference to layer builder
 */
LSTMSequenceLayer& setHiddenSize(int size);
/**
 * @brief Returns sequence dimension flag
 * @return Sequence dimension flag
 */
bool getSequenceDim() const;
/**
 * @brief Sets sequence dimension flag
 * @param flag Sequence dimension flag
 * @return reference to layer builder
 * @deprecated Misspelled name kept for backward compatibility; use setSequenceDim
 */
LSTMSequenceLayer& setSqquenceDim(bool flag);
/**
 * @brief Sets sequence dimension flag (correctly spelled alias for setSqquenceDim)
 * @param flag Sequence dimension flag
 * @return reference to layer builder
 */
LSTMSequenceLayer& setSequenceDim(bool flag) { return setSqquenceDim(flag); }
/**
 * @brief Returns activation names
 * @return Vector of activation names
 */
const std::vector<std::string>& getActivations() const;
/**
 * @brief Sets activation names
 * @param activations Vector of activation names
 * @return reference to layer builder
 */
LSTMSequenceLayer& setActivations(const std::vector<std::string>& activations);
/**
 * @brief Returns alpha parameters of the activations
 * @return Vector of alpha parameters
 */
const std::vector<float>& getActivationsAlpha() const;
/**
 * @brief Sets alpha parameters of the activations
 * @param activations Vector of alpha parameters
 * @return reference to layer builder
 */
LSTMSequenceLayer& setActivationsAlpha(const std::vector<float>& activations);
/**
 * @brief Returns beta parameters of the activations
 * @return Vector of beta parameters
 */
const std::vector<float>& getActivationsBeta() const;
/**
 * @brief Sets beta parameters of the activations
 * @param activations Vector of beta parameters
 * @return reference to layer builder
 */
LSTMSequenceLayer& setActivationsBeta(const std::vector<float>& activations);
/**
 * @brief Returns clip value
 * @return Clip value
 */
float getClip() const;
/**
 * @brief Sets clip value
 * @param clip Clip value
 * @return reference to layer builder
 */
LSTMSequenceLayer& setClip(float clip);
/**
 * @brief Returns input-forget flag
 * @return Input-forget flag
 */
bool getInputForget() const;
/**
 * @brief Sets input-forget flag
 * @param flag Input-forget flag
 * @return reference to layer builder
 */
LSTMSequenceLayer& setInputForget(bool flag);
/**
 * @brief Returns direction
 * @return Direction string
 */
const std::string& getDirection() const;
/**
 * @brief Sets direction
 * @param direction Direction string
 * @return reference to layer builder
 */
LSTMSequenceLayer& setDirection(const std::string& direction);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,99 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Memory layer
 */
class INFERENCE_ENGINE_API_CLASS(MemoryLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit MemoryLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit MemoryLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit MemoryLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
MemoryLayer& setName(const std::string& name);
/**
 * @brief Returns output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets output port
 * @param port Output port
 * @return reference to layer builder
 */
MemoryLayer& setOutputPort(const Port& port);
/**
 * @brief Returns input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets input port
 * @param port Input port
 * @return reference to layer builder
 */
MemoryLayer& setInputPort(const Port& port);
/**
 * @brief Returns memory ID
 * @return String with memory ID (returned by value)
 */
const std::string getId() const;
/**
 * @brief Sets memory ID
 * @param id Memory ID
 * @return reference to layer builder
 */
MemoryLayer& setId(const std::string& id);
/**
 * @brief Returns the index of memory layer
 * @return Index
 */
size_t getIndex() const;
/**
 * @brief Sets the index of memory layer
 * @param index Index equal 0 means this layer is output one.
 * @return reference to layer builder
 */
MemoryLayer& setIndex(size_t index);
/**
 * @brief Returns size of the group
 * @return Size of the group
 */
size_t getSize() const;
/**
 * @brief Sets size of the group
 * @param size Size if size equals 2 means this group is a pair (only 2 is supported).
 * @return reference to layer builder
 */
MemoryLayer& setSize(size_t size);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,88 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for MVN (Mean-Variance Normalization) layer
 */
class INFERENCE_ENGINE_API_CLASS(MVNLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit MVNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit MVNLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit MVNLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
MVNLayer& setName(const std::string& name);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
MVNLayer& setPort(const Port& port);
/**
 * @brief Returns across channels value
 * @return true if mean values are shared across channels
 */
bool getAcrossChannels() const;
/**
 * @brief Sets across channels
 * @param flag true if mean values are shared across channels
 * @return reference to layer builder
 */
MVNLayer& setAcrossChannels(bool flag);
/**
 * @brief Returns normalize variance
 * @return true if variance normalization is performed
 */
bool getNormalize() const;
/**
 * @brief Sets normalize variance
 * @param flag true if variance normalization is performed
 * @return reference to layer builder
 */
MVNLayer& setNormalize(bool flag);
/**
 * @brief Returns epsilon
 * @return Epsilon
 */
float getEpsilon() const;
/**
 * @brief Sets epsilon
 * @param eps Epsilon
 * @return reference to layer builder
 */
MVNLayer& setEpsilon(float eps);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,249 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_builder.hpp>
#include <ie_icnn_network.hpp>
#include <cpp/ie_cnn_network.h>
#include <ie_network.hpp>
#include <ie_context.hpp>
#include <ie_common.h>
#include <ie_blob.h>
#include <utility>
#include <memory>
#include <string>
#include <vector>
#include <map>
namespace InferenceEngine {
namespace Builder {
/**
* @brief This class implements a builder for IE Network
*/
class INFERENCE_ENGINE_API_CLASS(Network): public INetwork {
public:
/**
* @brief A shared pointer to the Network builder
*/
using Ptr = std::shared_ptr<Network>;
/**
* @brief An iterator for Network builder definition
*/
using iterator = details::INetworkIterator<Network, Layer>;
/**
* @brief Begin network iterator
* @return Network iterator
*/
iterator begin();
/**
* @brief Begin network iterator
* @return const INetwork iterator
*/
const_iterator begin() const noexcept override;
/**
* @brief End network iterator
* @return Network iterator
*/
iterator end();
/**
* @brief End network iterator
* @return const INetwork iterator
*/
const_iterator end() const noexcept override;
/**
* @brief Returns a number of layers in the network.
* @return Layers count
*/
size_t size() const noexcept override;
/**
* @brief The constructor creates a builder based on ICNNNetwork
*
* @param network constant reference to ICNNNetwork object
*/
explicit Network(const ICNNNetwork& network);
/**
* @brief The constructor creates a empty builder with network name
*
* @param name Network name
*/
explicit Network(const std::string& name);
/**
* @brief The constructor creates a builder based on INetwork
*
* @param network constant reference to INetwork object
*/
explicit Network(const INetwork& network);
/**
* @brief The constructor creates a builder based on ICNNNetwork with custom Context
*
* @param network constant reference to ICNNNetwork object
*/
Network(const Context& ieContext, const ICNNNetwork& network);
/**
* @brief The constructor creates a empty builder with network name and custom Context
*
* @param name Network name
*/
Network(const Context& ieContext, const std::string& name);
/**
* @brief The constructor creates a builder based on INetwork with custom Context
*
* @param network constant reference to INetwork object
*/
Network(const Context& ieContext, const INetwork& network);
/**
* @brief Adds new layer and connects it with previous layers
*
* @param inputs Vector with PortInfo objects from previous layers
* @param layer Layer builder for new layer
*
* @return Id of new builder for the current network
*/
idx_t addLayer(const std::vector<PortInfo>& inputs, const Layer& layer);
/**
* @brief Adds new layer
*
* @param layer Layer builder for new layer
*
* @return Id of new builder for the current network
*/
idx_t addLayer(const Layer& layer);
/**
* @brief Removes a layer by ID
*
* @param layerId Layer ID
*/
void removeLayer(idx_t layerId);
/**
* @brief Connects two layers
*
* @param input PortInfo object from previous layer
* @param output PortInfo object from next layer
*/
void connect(const PortInfo& input, const PortInfo& output);
/**
* @brief Removes connection from the network
*
* @param connection Connection
*/
void disconnect(const Connection& connection);
/**
* @brief Returns vector of layer builders
*
* @return Vector of layer builders
*/
std::vector<Layer::Ptr>& getLayers();
/**
* @brief Returns constant vector of layer builders
*
* @return constant vector of layer builders
*/
const std::vector<Layer::Ptr>& getLayers() const;
/**
* @brief Returns a constant smart pointer to a Layer interface.
* If the layer is missing, returns nullptr.
* @param id Id of the Layer
* @return Layer interface smart pointer
*/
const ILayer::CPtr getLayer(idx_t id) const noexcept override;
Layer::Ptr getLayer(idx_t layerId);
/**
* @brief Returns a constant vector of input layers.
* @return Vector of input layers
*/
const std::vector<ILayer::CPtr> getInputs() const noexcept override;
/**
* @brief Returns a vector of input layers.
* @return Vector of input layers
*/
std::vector<Layer::Ptr> getInputs();
/**
* @brief Returns a constant vector of output layers.
* @return Vector of output layers
*/
const std::vector<ILayer::CPtr> getOutputs() const noexcept override;
/**
* @brief Returns a vector of output layers.
* @return Vector of output layers
*/
std::vector<Layer::Ptr> getOutputs();
/**
* @brief Returns a constant vector of connections for specific layer.
* If the layer is missing, returns empty vector.
* @param layerId layer index
* @return Vector of connections
*/
const std::vector<Connection> getLayerConnections(idx_t layerId) const noexcept override;
/**
* @brief Returns a constant vector of all connections.
* @return Vector of connections
*/
const std::vector<Connection>& getConnections() const;
/**
* @brief Returns a network name.
* @return Network name
*/
const std::string& getName() const noexcept override;
/**
* @brief Returns a network context
* @return const reference to Context
*/
const Context& getContext() const noexcept override;
/**
* @brief Returns a network context
* @return reference to Context
*/
Context& getContext() noexcept;
/**
* @brief Builds and validates the network
*
* @return const shared pointer to INetwork
*/
const INetwork::CPtr build();
/**
* @brief Validates network
*
*/
void validate();
/**
* @brief The conversion operator builds the network
*
* @return const shared pointer to INetwork
*/
explicit operator const INetwork::CPtr();
private:
std::map<std::string, Parameter> parameters;
};
/**
* @brief This function converts INetwork to ICNNNetwork
*
* @param network constant shared pointer to INetwork object
* @return constant shared pointer to ICNNNetwork
*/
INFERENCE_ENGINE_API_CPP(const std::shared_ptr<ICNNNetwork>) convertToICNNNetwork(const INetwork::CPtr& network);
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,117 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Norm layer
 *        (a normalizing sum over a local region, configured by size, alpha, and beta)
 */
class INFERENCE_ENGINE_API_CLASS(NormLayer): public LayerDecorator {
public:
/**
 * @brief The enum defines all Norm types (the region over which the normalizing sum is taken)
 */
enum NormType {
WITHIN_CHANNEL = 0,  //!< The normalizing sum is computed within a single channel
ACROSS_CHANNELS = 1  //!< The normalizing sum is computed over adjacent channels
};
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit NormLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from a generic builder
 * @param layer pointer to generic builder
 */
explicit NormLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from a generic builder
 * @param layer constant pointer to generic builder
 */
explicit NormLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
NormLayer& setName(const std::string& name);
/**
 * @brief Returns the port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
NormLayer& setPort(const Port& port);
/**
 * @brief Returns the side length of the normalization region
 * @return Size
 */
size_t getSize() const;
/**
 * @brief Sets the side length of the normalization region
 * @param size Size
 * @return reference to layer builder
 */
NormLayer& setSize(size_t size);
/**
 * @brief Returns the scaling parameter (alpha) for the normalizing sum
 * @return Scaling parameter
 */
float getAlpha() const;
/**
 * @brief Sets the scaling parameter (alpha) for the normalizing sum
 * @param alpha Scaling parameter
 * @return reference to layer builder
 */
NormLayer& setAlpha(float alpha);
/**
 * @brief Returns the exponent (beta) for the normalizing sum
 * @return Exponent
 */
float getBeta() const;
/**
 * @brief Sets the exponent (beta) for the normalizing sum
 * @param beta Exponent
 * @return reference to layer builder
 */
NormLayer& setBeta(float beta);
/**
 * @brief Returns the region type as a boolean flag
 * @return true if the normalizing sum is performed over adjacent channels (ACROSS_CHANNELS),
 *         false if it is performed within a channel (WITHIN_CHANNEL)
 */
bool getAcrossMaps() const;
/**
 * @brief Sets the region type via a boolean flag
 * @param acrossMap true if the normalizing sum is performed over adjacent channels
 * @return reference to layer builder
 */
NormLayer& setAcrossMaps(bool acrossMap);
/**
 * @brief Returns the region type
 * @return Norm type
 */
NormType getRegion() const;
/**
 * @brief Sets the region type
 * @param type region type
 * @return reference to layer builder
 */
NormLayer& setRegion(NormType type);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,90 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Normalize layer
 */
class INFERENCE_ENGINE_API_CLASS(NormalizeLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit NormalizeLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from a generic builder
     * @param layer pointer to generic builder
     */
    explicit NormalizeLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from a generic builder
     * @param layer constant pointer to generic builder
     */
    explicit NormalizeLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    NormalizeLayer& setName(const std::string& name);
    /**
     * @brief Returns the port with shapes for the layer
     * @return Port with shapes
     */
    const Port& getPort() const;
    /**
     * @brief Sets port shapes for the layer
     * @param port Port with shapes
     * @return reference to layer builder
     */
    NormalizeLayer& setPort(const Port& port);
    /**
     * @brief Returns the channel shared flag
     * @return true if scale parameters are shared across channels
     */
    bool getChannelShared() const;
    /**
     * @brief Sets the channel shared flag
     * @param channelShared true if scale parameters are shared across channels
     * @return reference to layer builder
     */
    // NOTE(review): the parameter was previously named `acrossMap` — a copy-paste of
    // NormLayer::setAcrossMaps that contradicted the documented meaning. Renaming a
    // declaration-only parameter is backward compatible for all callers.
    NormalizeLayer& setChannelShared(bool channelShared);
    /**
     * @brief Returns the across maps flag
     * @return true if normalization is shared across channels
     */
    bool getAcrossMaps() const;
    /**
     * @brief Sets the across maps flag
     * @param acrossMap true if normalization is shared across channels
     * @return reference to layer builder
     */
    NormalizeLayer& setAcrossMaps(bool acrossMap);
    /**
     * @brief Returns epsilon
     * @return Epsilon
     */
    float getEpsilon() const;
    /**
     * @brief Sets epsilon
     * @param eps Epsilon (presumably a small stabilizing constant — confirm against the plugin implementation)
     * @return reference to layer builder
     */
    NormalizeLayer& setEpsilon(float eps);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,55 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Output layer
 */
class INFERENCE_ENGINE_API_CLASS(OutputLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
     * @param name Layer name
     */
    explicit OutputLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from a generic builder
     * @param layer pointer to generic builder
     */
    explicit OutputLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from a generic builder
     * @param layer constant pointer to generic builder
     */
    explicit OutputLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
     * @return reference to layer builder
     */
    OutputLayer& setName(const std::string& name);
    /**
     * @brief Returns the port with shapes for the layer
     * @return Port with shapes
     */
    const Port& getPort() const;
    /**
     * @brief Sets port shapes for the layer
     * @param port Port with shapes
     * @return reference to layer builder
     */
    // Reference declarator moved to `Port&` for consistency with every other
    // declaration in these builder headers (was `const Port &port`).
    OutputLayer& setPort(const Port& port);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,78 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Permute layer
 *        (reorders the dimensions of the input blob according to a given order)
 */
// NOTE(review): unlike the sibling builder headers, <ie_network.hpp> is not included
// here; the Port type presumably arrives via ie_layer_decorator.hpp — confirm.
class INFERENCE_ENGINE_API_CLASS(PermuteLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PermuteLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from a generic builder
 * @param layer pointer to generic builder
 */
explicit PermuteLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from a generic builder
 * @param layer constant pointer to generic builder
 */
explicit PermuteLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PermuteLayer& setName(const std::string& name);
/**
 * @brief Returns the input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets the input port
 * @param port Input port
 * @return reference to layer builder
 */
PermuteLayer& setInputPort(const Port& port);
/**
 * @brief Returns the output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets the output port
 * @param port Output port
 * @return reference to layer builder
 */
PermuteLayer& setOutputPort(const Port& port);
/**
 * @brief Returns the vector of dimension indexes defining the output blob layout
 * @return Order of dimensions for the output blob
 */
const std::vector<size_t> getOrder() const;
/**
 * @brief Sets the order of dimensions for the output blob
 * @param order dimension indexes for the output blob
 * @return reference to layer builder
 */
PermuteLayer& setOrder(const std::vector<size_t>& order);
};
} // namespace Builder
} // namespace InferenceEngine

View File

@@ -0,0 +1,169 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>
namespace InferenceEngine {
namespace Builder {
/**
 * @brief The class represents a builder for Pooling layer
 */
class INFERENCE_ENGINE_API_CLASS(PoolingLayer): public LayerDecorator {
public:
/**
 * @brief The enum defines available pooling types
 */
enum PoolingType {
MAX = 1,  //!< Maximum value within the kernel window
AVG = 2   //!< Average of the values within the kernel window
};
/**
 * @brief The enum defines available rounding types for computing output dimensions
 */
enum RoundingType {
CEIL = 1,   //!< Round output dimensions up
FLOOR = 2   //!< Round output dimensions down
};
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit PoolingLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from a generic builder
 * @param layer pointer to generic builder
 */
explicit PoolingLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from a generic builder
 * @param layer constant pointer to generic builder
 */
explicit PoolingLayer(const Layer::CPtr& layer);
/**
 * @brief The conversion operator creates a generic layer builder
 * @return Generic layer builder
 */
operator Layer() const override;
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
PoolingLayer& setName(const std::string& name);
/**
 * @brief Returns the input port
 * @return Input port
 */
const Port& getInputPort() const;
/**
 * @brief Sets the input port
 * @param port Input port
 * @return reference to layer builder
 */
PoolingLayer& setInputPort(const Port& port);
/**
 * @brief Returns the output port
 * @return Output port
 */
const Port& getOutputPort() const;
/**
 * @brief Sets the output port
 * @param port Output port
 * @return reference to layer builder
 */
PoolingLayer& setOutputPort(const Port& port);
/**
 * @brief Returns the kernel size
 * @return Kernel size
 */
const std::vector<size_t> getKernel() const;
/**
 * @brief Sets the kernel size
 * @param kernel Kernel size
 * @return reference to layer builder
 */
PoolingLayer& setKernel(const std::vector<size_t>& kernel);
/**
 * @brief Returns the vector of strides
 * @return vector of strides
 */
const std::vector<size_t> getStrides() const;
/**
 * @brief Sets strides
 * @param strides vector of strides
 * @return reference to layer builder
 */
PoolingLayer& setStrides(const std::vector<size_t>& strides);
/**
 * @brief Returns the begin paddings
 * @return vector of paddings
 */
const std::vector<size_t> getPaddingsBegin() const;
/**
 * @brief Sets the begin paddings
 * @param paddings Vector of paddings
 * @return reference to layer builder
 */
PoolingLayer& setPaddingsBegin(const std::vector<size_t>& paddings);
/**
 * @brief Returns the end paddings
 * @return Vector of paddings
 */
const std::vector<size_t> getPaddingsEnd() const;
/**
 * @brief Sets the end paddings
 * @param paddings Vector of paddings
 * @return reference to layer builder
 */
PoolingLayer& setPaddingsEnd(const std::vector<size_t>& paddings);
/**
 * @brief Returns the pooling type
 * @return Pooling type
 */
PoolingType getPoolingType() const;
/**
 * @brief Sets the pooling type
 * @param type Pooling type
 * @return reference to layer builder
 */
PoolingLayer& setPoolingType(PoolingType type);
/**
 * @brief Returns the rounding type
 * @return Rounding type
 */
RoundingType getRoundingType() const;
/**
 * @brief Sets the rounding type
 * @param type Rounding type
 * @return reference to layer builder
 */
PoolingLayer& setRoundingType(RoundingType type);
/**
 * @brief Returns the type of pooling strategy
 * @return true if zero-values in the padding are not used
 */
bool getExcludePad() const;
/**
 * @brief Sets the type of pooling strategy
 * @param exclude zero-values in the padding are not used if true
 * @return reference to layer builder
 */
PoolingLayer& setExcludePad(bool exclude);
private:
// Cached copies of the enum-valued parameters (the string-keyed layer
// parameter map cannot hold them directly in a typed form).
PoolingType type;
RoundingType roundingType;
};
} // namespace Builder
} // namespace InferenceEngine

Some files were not shown because too many files have changed in this diff Show More