Publishing 2019 R1 content

parent: 669bee86e5
commit: 72660e9a4d
@@ -1,5 +1,5 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[](https://github.com/opencv/dldt/releases/tag/2018_R5)
[](https://github.com/opencv/dldt/releases/tag/2019_R1)
[](LICENSE)

This toolkit allows developers to deploy pre-trained deep learning models through a high-level C++ Inference Engine API integrated with application logic.
@@ -1,13 +1,15 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 3.3)
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)

project(InferenceEngine)

set(DEV_BUILD TRUE)

include(CTest)

## WA for problem with gtest submodule. It cannot detect uint32 type.
## remove Gtest submodule and these two lines together
include (CheckTypeSize)

@@ -133,25 +135,28 @@ set (CMAKE_POSITION_INDEPENDENT_CODE ON)
include (sanitizer)

include(CheckCXXCompilerFlag)
if(UNIX)
    CHECK_CXX_COMPILER_FLAG("-fvisibility=hidden" COMPILER_SUPPORTS_VISIBILITY)
    if (COMPILER_SUPPORTS_VISIBILITY)
        # add_definitions(-fvisibility=hidden) TODO: should it be hidden? If so, define default visibility explicitly for each function
        add_definitions(-fvisibility=default)
    endif(COMPILER_SUPPORTS_VISIBILITY)
endif(UNIX)

include(cpplint)

add_subdirectory(src)
add_subdirectory(tests)
add_subdirectory(thirdparty)
if (ENABLE_SAMPLES_CORE)
set(InferenceEngine_DIR "${CMAKE_BINARY_DIR}")

#to be able to link
set (LIB_FOLDER ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)
add_subdirectory(samples)
endif()
#to be able to link
set (LIB_FOLDER ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)

# gflags and format_reader targets are kept inside of samples directory and
# they must be built even if samples build is disabled (required for tests and tools).
add_subdirectory(samples)

file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
add_cpplint_target(sample_cpplint
    FOR_SOURCES ${SAMPLES_SOURCES}
    EXCLUDE_PATTERNS "thirdparty/*" "pugixml/*")

if (ENABLE_PYTHON)
    add_subdirectory(ie_bridges/python)
endif()
endif()

add_cpplint_report_target()
@@ -16,8 +16,8 @@ Inference Engine plugins for Intel® FPGA and Intel® Movidius™ Neural Compute
## Build on Linux\* Systems

The software was validated on:
- Ubuntu\* 16.04 with default GCC\* 5.4.0
- CentOS\* 7.4 with default GCC\* 4.8.5
- Ubuntu\* 16.04 (64-bit) with default GCC\* 5.4.0
- CentOS\* 7.4 (64-bit) with default GCC\* 4.8.5
- [Intel® Graphics Compute Runtime for OpenCL™ Driver package 18.28.11080](https://github.com/intel/compute-runtime/releases/tag/18.28.11080).

### Software Requirements
@@ -45,11 +45,19 @@ The software was validated on:
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to OpenBLAS\* implementation, use `GEMM=OPENBLAS` option and `BLAS_INCLUDE_DIRS` and `BLAS_LIBRARIES` cmake options to specify path to OpenBLAS headers and library, for example use the following options on CentOS\*: `-DGEMM=OPENBLAS -DBLAS_INCLUDE_DIRS=/usr/include/openblas -DBLAS_LIBRARIES=/usr/lib64/libopenblas.so.0`
- To switch to optimized MKL-ML\* GEMM implementation, use `GEMM=MKL` and `MKLROOT` cmake options to specify path to unpacked MKL-ML with `include` and `lib` folders, for example use the following options: `-DGEMM=MKL -DMKLROOT=<path_to_MKL>`. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz)

- OpenMP threading is used by default. To build Inference Engine with TBB threading, set `-DTHREADING=TBB` option.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz)

- To build Python API wrapper, use -DENABLE_PYTHON=ON option. To specify exact Python version, use the following options: `-DPYTHON_EXECUTABLE=`which python3.6` -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.6`
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.

- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you already have installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.

- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
  -DPYTHON_EXECUTABLE=`which python3.7` \
  -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.7m.so \
  -DPYTHON_INCLUDE_DIR=/usr/include/python3.7
```

- To switch on/off the CPU and GPU plugins, use `cmake` options `-DENABLE_MKL_DNN=ON/OFF` and `-DENABLE_CLDNN=ON/OFF`.
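As a rough illustration, the options above can be combined into one configuration run. This is a sketch only; `<path_to_MKL>` is a placeholder for your unpacked MKL-ML directory, and the flag combination is an example rather than a validated recipe:

```sh
# Sketch: Release build with the MKL-ML GEMM backend, TBB threading and the Python API.
mkdir build && cd build
# Clear stale environment variables so the required TBB/OpenCV packages get downloaded.
unset TBBROOT OpenCV_DIR
cmake -DCMAKE_BUILD_TYPE=Release \
      -DGEMM=MKL -DMKLROOT=<path_to_MKL> \
      -DTHREADING=TBB \
      -DENABLE_PYTHON=ON \
      ..
make -j8
```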
@@ -74,7 +82,7 @@ You can use the following additional build options:
## Build on Windows\* Systems:

The software was validated on:
- Microsoft\* Windows\* 10 with Visual Studio 2017 and Intel® C++ Compiler 2018 Update 3
- Microsoft\* Windows\* 10 (64-bit) with Visual Studio 2017 and Intel® C++ Compiler 2018 Update 3
- [Intel® Graphics Driver for Windows* [24.20] driver package](https://downloadcenter.intel.com/download/27803/Graphics-Intel-Graphics-Driver-for-Windows-10?v=t).

### Software Requirements
@@ -107,25 +115,75 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^

- Internal JIT GEMM implementation is used by default.
- To switch to OpenBLAS GEMM implementation, use -DGEMM=OPENBLAS cmake option and specify path to OpenBLAS using `-DBLAS_INCLUDE_DIRS=<OPENBLAS_DIR>\include` and `-DBLAS_LIBRARIES=<OPENBLAS_DIR>\lib\libopenblas.dll.a` options. Prebuilt OpenBLAS\* package can be downloaded [here](https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download), mingw64* runtime dependencies [here](https://sourceforge.net/projects/openblas/files/v0.2.14/mingw64_dll.zip/download)
- To switch to optimized MKL-ML GEMM implementation, use `GEMM=MKL` and `MKLROOT` cmake options to specify path to unpacked MKL-ML with `include` and `lib` folders, for example use the following options: `-DGEMM=MKL -DMKLROOT=<path_to_MKL>`. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_win_2019.0.1.20180928.zip)
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_win_2019.0.1.20180928.zip)

- OpenMP threading is used by default. To build Inference Engine with TBB threading, set `-DTHREADING=TBB` option.
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.

- To build Python API wrapper, use -DENABLE_PYTHON=ON option. To specify exact Python version, use the following options: `-DPYTHON_EXECUTABLE="C:\Program Files\Python36\python.exe" -DPYTHON_INCLUDE_DIR="C:\Program Files\Python36\include" -DPYTHON_LIBRARY="C:\Program Files\Python36\libs\python36.lib"`.
- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you already have installed TBB or OpenCV packages configured in your environment, you may need to clean the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command, otherwise they won't be downloaded and the build may fail if incompatible versions were installed.

- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
  -DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" ^
  -DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" ^
  -DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include"
```
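As a rough illustration, the options above can be folded into a single configuration command. This is a sketch only; `<path_to_MKL>` is a placeholder, and the generator/toolset must match your installation:

```sh
cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
      -DGEMM=MKL -DMKLROOT=<path_to_MKL> ^
      -DENABLE_PYTHON=ON ^
      ..
```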
6. Build generated solution in Visual Studio 2017 or run `cmake --build . --config Release` to build from the command line.

7. Before running the samples, add paths to TBB and OpenCV binaries used for the build to the %PATH% environment variable. By default, TBB binaries are downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/lib` folder, OpenCV binaries - to the `<dldt_repo>/inference-engine/temp/opencv_4.1.0/bin` folder. See the sketch below.
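For example, in a `cmd.exe` session this could look like the following (replace `<dldt_repo>` with the path to your checkout):

```sh
set PATH=<dldt_repo>\inference-engine\temp\tbb\lib;%PATH%
set PATH=<dldt_repo>\inference-engine\temp\opencv_4.1.0\bin;%PATH%
```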
### Building Inference Engine with Ninja

```sh
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: clean TBBROOT value set by ipsxe-comp-vars.bat, required TBB package will be downloaded by dldt cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
```

Before running the samples on Microsoft\* Windows\*, please add path to OpenMP library (<dldt_repo>/inference-engine/temp/omp/lib) and OpenCV libraries (<dldt_repo>/inference-engine/temp/opencv_4.0.0/bin) to the %PATH% environment variable.

## Build on macOS\* Systems

The software was validated on:
- macOS\* 10.14, 64-bit

### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.9 or higher
- Clang\* compiler from Xcode\* 10.1
- Python\* 3.4 or higher for the Inference Engine Python API wrapper

### Build Steps
1. Clone submodules:
```sh
cd dldt/inference-engine
git submodule init
git submodule update --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the project root folder.
3. Create a build folder:
```sh
mkdir build
```
4. Inference Engine uses a CMake-based build system. In the created `build` directory, run `cmake` to fetch project dependencies and create Unix makefiles, then run `make` to build the project:
```sh
cmake -DCMAKE_BUILD_TYPE=Release ..
make -j16
```
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17.1/mklml_mac_2019.0.1.20180928.tgz)

- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.

- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
  -DPYTHON_EXECUTABLE=/Library/Frameworks/Python.framework/Versions/3.7/bin/python3.7 \
  -DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib \
  -DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m
```

---
\* Other names and brands may be claimed as the property of others.

@@ -1,12 +1,10 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

#module to locate GNA libraries

cmake_minimum_required(VERSION 2.8)

if (WIN32)
    set(GNA_PLATFORM_DIR win64)
    set(GNA_LIB_DIR x64)

@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -65,10 +65,6 @@ if (ENABLE_PROFILING_RAW)
    add_definitions(-DENABLE_PROFILING_RAW=1)
endif()

if (ENABLE_GTEST_PATCHES)
    add_definitions(-DENABLE_GTEST_PATCHES=1)
endif()

if (ENABLE_CLDNN)
    add_definitions(-DENABLE_CLDNN=1)
endif()

@@ -77,22 +73,14 @@ if (ENABLE_MKL_DNN)
    add_definitions(-DENABLE_MKL_DNN=1)
endif()

if (ENABLE_STRESS_UNIT_TESTS)
    add_definitions(-DENABLE_STRESS_UNIT_TESTS=1)
endif()

if (ENABLE_SEGMENTATION_TESTS)
    add_definitions(-DENABLE_SEGMENTATION_TESTS=1)
endif()

if (ENABLE_OBJECT_DETECTION_TESTS)
    add_definitions(-DENABLE_OBJECT_DETECTION_TESTS=1)
endif()

if (ENABLE_GNA)
    add_definitions(-DENABLE_GNA)
endif()

if (ENABLE_SAMPLES)
    set (ENABLE_SAMPLES_CORE ON)
endif()

if (DEVELOPMENT_PLUGIN_MODE)
    message (STATUS "Enabled development plugin mode")

@@ -112,5 +100,4 @@ if (VERBOSE_BUILD)
    set(CMAKE_VERBOSE_MAKEFILE ON)
endif()

print_enabled_features()
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

inference-engine/cmake/cpplint.cmake (new file, 162 lines)
@@ -0,0 +1,162 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

if(ENABLE_CPPLINT)
    find_package(PythonInterp 2.7 EXACT)

    if(NOT PYTHONINTERP_FOUND)
        message(WARNING "Python was not found (required for cpplint check)")
        set(ENABLE_CPPLINT OFF)
    endif()
endif()

if(ENABLE_CPPLINT)
    add_custom_target(cpplint_all ALL)
    set(CPPLINT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All cpplint output files")
endif()

function(add_cpplint_target TARGET_NAME)
    if(NOT ENABLE_CPPLINT)
        return()
    endif()

    set(options "")
    set(oneValueArgs "")
    set(multiValueArgs "FOR_TARGETS" "FOR_SOURCES" "EXCLUDE_PATTERNS")
    cmake_parse_arguments(CPPLINT "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    foreach(target IN LISTS CPPLINT_FOR_TARGETS)
        get_target_property(target_sources "${target}" SOURCES)
        list(APPEND CPPLINT_FOR_SOURCES ${target_sources})
    endforeach()
    list(REMOVE_DUPLICATES CPPLINT_FOR_SOURCES)

    set(all_output_files "")
    foreach(source_file IN LISTS CPPLINT_FOR_SOURCES)
        set(exclude FALSE)
        foreach(pattern IN LISTS CPPLINT_EXCLUDE_PATTERNS)
            if(source_file MATCHES "${pattern}")
                set(exclude TRUE)
                break()
            endif()
        endforeach()

        if(exclude)
            continue()
        endif()

        file(RELATIVE_PATH source_file_relative "${CMAKE_CURRENT_SOURCE_DIR}" "${source_file}")
        set(output_file "${CMAKE_CURRENT_BINARY_DIR}/cpplint/${source_file_relative}.cpplint")
        string(REPLACE ".." "__" output_file "${output_file}")
        get_filename_component(output_dir "${output_file}" DIRECTORY)
        file(MAKE_DIRECTORY "${output_dir}")

        add_custom_command(
            OUTPUT
                "${output_file}"
            COMMAND
                "${CMAKE_COMMAND}"
                -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
                -D "CPPLINT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py"
                -D "INPUT_FILE=${source_file}"
                -D "OUTPUT_FILE=${output_file}"
                -D "WORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}"
                -D "SKIP_RETURN_CODE=${ENABLE_CPPLINT_REPORT}"
                -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake"
            DEPENDS
                "${source_file}"
                "${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py"
                "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake"
            COMMENT
                "[cpplint] ${source_file}"
            VERBATIM)

        list(APPEND all_output_files "${output_file}")
    endforeach()

    set(CPPLINT_ALL_OUTPUT_FILES
        ${CPPLINT_ALL_OUTPUT_FILES} ${all_output_files}
        CACHE INTERNAL
        "All cpplint output files")

    add_custom_target(${TARGET_NAME} ALL
        DEPENDS ${all_output_files}
        COMMENT "[cpplint] ${TARGET_NAME}")

    if(CPPLINT_FOR_TARGETS)
        foreach(target IN LISTS CPPLINT_FOR_TARGETS)
            add_dependencies(${target} ${TARGET_NAME})
        endforeach()
    endif()

    add_dependencies(cpplint_all ${TARGET_NAME})
endfunction()

function(add_cpplint_report_target)
    if(NOT ENABLE_CPPLINT OR NOT ENABLE_CPPLINT_REPORT)
        return()
    endif()

    set(cpplint_output_file "${CMAKE_BINARY_DIR}/cpplint/final_output.cpplint")
    add_custom_command(
        OUTPUT
            "${cpplint_output_file}"
        COMMAND
            "${CMAKE_COMMAND}"
            -D "FINAL_OUTPUT_FILE=${cpplint_output_file}"
            -D "OUTPUT_FILES=${CPPLINT_ALL_OUTPUT_FILES}"
            -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake"
        DEPENDS
            ${CPPLINT_ALL_OUTPUT_FILES}
            "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake"
        COMMENT
            "[cpplint] Merge all output files"
        VERBATIM)

    set(cppcheck_output_file "${CMAKE_BINARY_DIR}/cpplint/cpplint-cppcheck-result.xml")
    add_custom_command(
        OUTPUT
            "${cppcheck_output_file}"
        COMMAND
            "${CMAKE_COMMAND}"
            -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
            -D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
            -D "INPUT_FILE=${cpplint_output_file}"
            -D "OUTPUT_FILE=${cppcheck_output_file}"
            -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake"
        DEPENDS
            ${cpplint_output_file}
            "${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py"
            "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake"
        COMMENT
            "[cpplint] Convert to cppcheck XML format"
        VERBATIM)

    set(report_dir "${IE_MAIN_SOURCE_DIR}/report/cpplint")
    set(html_output_file "${report_dir}/index.html")
    add_custom_command(
        OUTPUT
            "${html_output_file}"
        COMMAND
            "${CMAKE_COMMAND}"
            -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
            -D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
            -D "INPUT_FILE=${cppcheck_output_file}"
            -D "REPORT_DIR=${report_dir}"
            -D "SOURCE_DIR=${IE_MAIN_SOURCE_DIR}"
            -D "TITLE=${CMAKE_PROJECT_NAME}"
            -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake"
        DEPENDS
            "${cppcheck_output_file}"
            "${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py"
            "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake"
        COMMENT
            "[cpplint] Generate HTML report"
        VERBATIM)

    add_custom_target(cpplint_report
        DEPENDS "${html_output_file}"
        COMMENT "[cpplint] Generate report")
endfunction()
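Taken together with the `ENABLE_CPPLINT` and `ENABLE_CPPLINT_REPORT` options introduced in `options.cmake` further below, a cpplint pass could be driven as in this sketch (the target names come from the functions above):

```sh
# Run the style checks as part of the build; a failing check stops the build.
cmake -DENABLE_CPPLINT=ON ..
cmake --build . --target cpplint_all

# Alternatively, collect results into an HTML report instead of failing.
cmake -DENABLE_CPPLINT=ON -DENABLE_CPPLINT_REPORT=ON ..
cmake --build . --target cpplint_report
```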
inference-engine/cmake/cpplint_html.cmake (new file, 30 lines)
@@ -0,0 +1,30 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

if(EXISTS "${REPORT_DIR}")
    file(REMOVE_RECURSE "${REPORT_DIR}")
endif()

file(MAKE_DIRECTORY "${REPORT_DIR}")

execute_process(
    COMMAND
        "${PYTHON_EXECUTABLE}"
        "${CONVERT_SCRIPT}"
        "--file=${INPUT_FILE}"
        "--report-dir=${REPORT_DIR}"
        "--source-dir=${SOURCE_DIR}"
        "--title=${TITLE}")

# Change cppcheck things to cpplint

file(READ "${REPORT_DIR}/index.html" cur_file_content)

string(REPLACE "Cppcheck" "cpplint" cur_file_content ${cur_file_content})
string(REPLACE "a tool for static C/C++ code analysis" "an open source lint-like tool from Google" cur_file_content ${cur_file_content})
string(REPLACE "http://cppcheck.sourceforge.net" "http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py" cur_file_content ${cur_file_content})
string(REPLACE "IRC: <a href=\"irc://irc.freenode.net/cppcheck\">irc://irc.freenode.net/cppcheck</a>" " " cur_file_content ${cur_file_content})

file(WRITE "${REPORT_DIR}/index.html" "${cur_file_content}")
inference-engine/cmake/cpplint_merge.cmake (new file, 11 lines)
@@ -0,0 +1,11 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

file(WRITE "${FINAL_OUTPUT_FILE}" "")

foreach(output_file IN LISTS OUTPUT_FILES)
    file(READ "${output_file}" cur_file_content)
    file(APPEND "${FINAL_OUTPUT_FILE}" "${cur_file_content}\n")
endforeach()
inference-engine/cmake/cpplint_run.cmake (new file, 37 lines)
@@ -0,0 +1,37 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

file(REMOVE "${OUTPUT_FILE}")

execute_process(
    COMMAND
        "${PYTHON_EXECUTABLE}"
        "${CPPLINT_SCRIPT}"
        "--linelength=160"
        "--counting=detailed"
        "--filter=-readability/fn_size"
        "${INPUT_FILE}"
    WORKING_DIRECTORY "${WORKING_DIRECTORY}"
    RESULT_VARIABLE result
    OUTPUT_VARIABLE output
    ERROR_VARIABLE output)

# Display the cpplint output to console (to parse it from the IDE)
message("${output}")

# Store cpplint output to file (replace problematic symbols)
string(REPLACE "\"" "&quot\;" output ${output})
string(REPLACE "<" "&lt\;" output ${output})
string(REPLACE ">" "&gt\;" output ${output})
string(REPLACE "'" "&apos\;" output ${output})
string(REPLACE "&" "&amp\;" output ${output})
file(WRITE "${OUTPUT_FILE}" ${output})

if(NOT SKIP_RETURN_CODE)
    # Pass through the cpplint return code
    if(NOT result EQUAL 0)
        message(FATAL_ERROR "[cpplint] Code style check failed for : ${INPUT_FILE}")
    endif()
endif()
inference-engine/cmake/cpplint_to_cppcheck_xml.cmake (new file, 12 lines)
@@ -0,0 +1,12 @@
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

execute_process(
    COMMAND
        "${PYTHON_EXECUTABLE}"
        "${CONVERT_SCRIPT}"
    INPUT_FILE "${INPUT_FILE}"
    OUTPUT_FILE "${OUTPUT_FILE}"
    ERROR_FILE "${OUTPUT_FILE}")
@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)

function (debug_message)
    if (VERBOSE_BUILD)
        message(${ARGV})
@@ -1,9 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required(VERSION 2.8)
cmake_policy(SET CMP0054 NEW)

#features trigger supported by build system

@@ -14,7 +13,7 @@ include(debug)
include(dependency_solver)

#prepare temporary folder
if (DEFINED ENV{${DL_SDK_TEMP}})
if (DEFINED ENV{${DL_SDK_TEMP}} AND NOT $ENV{${DL_SDK_TEMP}} STREQUAL "")
    if (WIN32)
        string(REPLACE "\\" "\\\\" TEMP $ENV{${DL_SDK_TEMP}})
    else(WIN32)
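The tightened check above means an empty value is now treated the same as an unset variable. As a sketch, assuming the configured variable name is the default `DL_SDK_TEMP`, redirecting the temporary/dependency folder might look like:

```sh
# Point the dependency cache at a shared location before configuring.
export DL_SDK_TEMP=/path/to/shared/temp
cmake -DCMAKE_BUILD_TYPE=Release ..
```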
@@ -38,9 +37,6 @@ else()
    set(MODELS_BRANCH "master")
endif()

set(MODELS_PATH "${TEMP}/models")
debug_message(STATUS "MODELS_PATH=" ${MODELS_PATH})

## enable cblas_gemm from OpenBLAS package
if (GEMM STREQUAL "OPENBLAS")
    if(NOT BLAS_LIBRARIES OR NOT BLAS_INCLUDE_DIRS)

@@ -77,6 +73,12 @@ elseif(LINUX)
        TARGET_PATH "${TEMP}/omp"
        ENVIRONMENT "OMP"
        VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
else(APPLE)
    RESOLVE_DEPENDENCY(OMP
        ARCHIVE_MAC "iomp_20190130_mac.tgz"
        TARGET_PATH "${TEMP}/omp"
        ENVIRONMENT "OMP"
        VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
endif()
log_rpath_from_dir(OMP "${OMP}/lib")
debug_message(STATUS "intel_omp=" ${OMP})
@@ -96,6 +98,12 @@ elseif(LINUX)
        ARCHIVE_LIN "tbb2019_20181010_lin.tgz"
        TARGET_PATH "${TEMP}/tbb"
        ENVIRONMENT "TBBROOT")
else(APPLE)
    RESOLVE_DEPENDENCY(TBB
        ARCHIVE_MAC "tbb2019_20190130_mac.tgz"
        TARGET_PATH "${TEMP}/tbb"
        ENVIRONMENT "TBBROOT"
        VERSION_REGEX ".*_([a-z]*_([a-z0-9]+\\.)*[0-9]+).*")
endif()
log_rpath_from_dir(TBB "${TBB}/lib")
debug_message(STATUS "tbb=" ${TBB})
@@ -104,34 +112,51 @@ endif ()
if (ENABLE_OPENCV)
    if (WIN32)
        RESOLVE_DEPENDENCY(OPENCV
            ARCHIVE_WIN "opencv_4.0.1-0353.zip"
            TARGET_PATH "${TEMP}/opencv_4.0.0"
            ARCHIVE_WIN "opencv_4.1.0-0437.zip"
            TARGET_PATH "${TEMP}/opencv_4.1.0"
            ENVIRONMENT "OpenCV_DIR"
            VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
        log_rpath_from_dir(OPENCV "\\opencv_4.0.0\\bin")
        log_rpath_from_dir(OPENCV "\\opencv_4.1.0\\bin")
        set( ENV{OpenCV_DIR} ${OPENCV}/cmake )
    elseif(APPLE)
        RESOLVE_DEPENDENCY(OPENCV
            ARCHIVE_MAC "opencv_4.1.0-0437_osx.tar.xz"
            TARGET_PATH "${TEMP}/opencv_4.1.0_osx"
            ENVIRONMENT "OpenCV_DIR"
            VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
        log_rpath_from_dir(OPENCV "opencv_4.1.0_osx/lib")
        set( ENV{OpenCV_DIR} ${OPENCV}/cmake )
    elseif(LINUX)
        if (${LINUX_OS_NAME} STREQUAL "Ubuntu 16.04")
            RESOLVE_DEPENDENCY(OPENCV
                ARCHIVE_LIN "opencv_4.0.0-0305_ubuntu16.tgz"
                TARGET_PATH "${TEMP}/opencv_4.0.0_ubuntu"
                ARCHIVE_LIN "opencv_4.1.0-0437_ubuntu16.tar.xz"
                TARGET_PATH "${TEMP}/opencv_4.1.0_ubuntu16"
                ENVIRONMENT "OpenCV_DIR"
                VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
            log_rpath_from_dir(OPENCV "opencv_4.0.0_ubuntu/lib")
            log_rpath_from_dir(OPENCV "opencv_4.1.0_ubuntu16/lib")
        elseif (${LINUX_OS_NAME} STREQUAL "Ubuntu 18.04")
            RESOLVE_DEPENDENCY(OPENCV
                ARCHIVE_LIN "opencv_4.0.0-0305_ubuntu18.tgz"
                TARGET_PATH "${TEMP}/opencv_4.0.0_ubuntu18"
                ARCHIVE_LIN "opencv_4.1.0-0437_ubuntu18.tar.xz"
                TARGET_PATH "${TEMP}/opencv_4.1.0_ubuntu18"
                ENVIRONMENT "OpenCV_DIR"
                VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
            log_rpath_from_dir(OPENCV "opencv_4.0.0_ubuntu/lib")
            log_rpath_from_dir(OPENCV "opencv_4.1.0_ubuntu18/lib")
        elseif (${LINUX_OS_NAME} STREQUAL "CentOS 7")
            RESOLVE_DEPENDENCY(OPENCV
                ARCHIVE_LIN "opencv_4.0.0-0305_centos.tgz"
                TARGET_PATH "${TEMP}/opencv_4.0.0_centos"
                ARCHIVE_LIN "opencv_4.1.0-0437_centos7.tar.xz"
                TARGET_PATH "${TEMP}/opencv_4.1.0_centos"
                ENVIRONMENT "OpenCV_DIR"
                VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
            log_rpath_from_dir(OPENCV "opencv_4.0.0_centos/lib")
            log_rpath_from_dir(OPENCV "opencv_4.1.0_centos/lib")
        elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "armv7l" AND
                (${LINUX_OS_NAME} STREQUAL "Debian 9" OR
                 ${LINUX_OS_NAME} STREQUAL "Raspbian 9"))
            RESOLVE_DEPENDENCY(OPENCV
                ARCHIVE_LIN "opencv_4.1.0-0437_debian9arm.tar.xz"
                TARGET_PATH "${TEMP}/opencv_4.1.0_debian9arm"
                ENVIRONMENT "OpenCV_DIR"
                VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+).*")
            log_rpath_from_dir(OPENCV "opencv_4.1.0_debian9arm/lib")
        endif()
        set( ENV{OpenCV_DIR} ${OPENCV}/cmake )
    endif()
@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)

include ("download")

function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHIVE_WIN ARCHIVE_LIN ARCHIVE_MAC TARGET_PATH FOLDER ENVIRONMENT)

@@ -15,7 +13,7 @@ function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHI

if (NOT DEFINED HAS_ENV)
    if (ARCHIVE)
        #TODO: check wether this is platform specific binary with same name per or it is in common folder
        #TODO: check whether this is platform specific binary with same name per or it is in common folder
        DownloadAndExtract(${COMPONENT} ${ARCHIVE} ${TARGET_PATH} result_path ${FOLDER})
    else()
        DownloadAndExtractPlatformSpecific(${COMPONENT} ${ARCHIVE_UNIFIED} ${ARCHIVE_WIN} ${ARCHIVE_LIN} ${ARCHIVE_MAC} ${TARGET_PATH} result_path ${FOLDER})

@@ -130,11 +128,3 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
endif()

endfunction(RESOLVE_DEPENDENCY)

function (resolve_model_dependency network archive network_model_path)
    RESOLVE_DEPENDENCY(${network_model_path}
        ARCHIVE "models_archives/${archive}"
        TARGET_PATH "${MODELS_PATH}/${network}")
    string (REPLACE ${MODELS_PATH} "" relative_path ${${network_model_path}})
    set(${network_model_path} ".${relative_path}" PARENT_SCOPE)
endfunction()
@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)

function (Download from to fatal result output)

    if((NOT EXISTS "${to}"))
@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)

function (DownloadAndApply URL apply_to)

    if (EXISTS ${apply_to})
@@ -1,23 +1,22 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)
include (FindWget)

function (DownloadAndCheck from to fatal result)
    set(status_res "ON")
    set(output 1)

    get_filename_component(download_dir ${to} DIRECTORY)
    if (NOT EXISTS ${download_dir})
        file(MAKE_DIRECTORY ${download_dir})
    endif()

    if(NOT EXISTS "${to}")
        if (${from} MATCHES "(http:)|(https:)|(ftp:)")
            message(STATUS "Downloading from ${from} to ${to} ...")

            find_program(aria2c "aria2c")
            if (${aria2c} STREQUAL "aria2c-NOTFOUND")
                if (NOT ${WGET_FOUND})

@@ -48,9 +47,13 @@ function (DownloadAndCheck from to fatal result)
                    status_code: ${status_code}")
            endif()
        endif()
    else()
        message(STATUS "Copying from local folder ${from} to ${to} ... ")
        file(COPY ${from} DESTINATION ${download_dir})
    endif()
    endif()

    file(REMOVE ${to}.md5)
    set(${result} "${status_res}" PARENT_SCOPE)

endfunction(DownloadAndCheck)
@@ -1,9 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)
include ("extract")
include ("download_and_check")

@@ -120,12 +119,12 @@ function (DownloadOrExtractInternal URL archive_path unpacked_path folder fattal
if (ENABLE_UNSAFE_LOCATIONS)
    ExtractWithVersion(${URL} ${archive_path} ${unpacked_path} ${folder} result)
    if(NOT ${result})
        DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
    endif()
else()
    debug_message("archive found on FS : ${archive_path}, however we cannot check it's checksum and think that it is invalid")
    file(REMOVE_RECURSE "${archive_path}")
    DownloadAndExtractInternal(${URL} ${archive_path} ${unpacked_path} ${folder} ${fattal} result)
endif()

@@ -144,7 +143,11 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
set (status "ON")
set (on_master FALSE)

set (URL "https://download.01.org/openvinotoolkit/2018_R5/dldt/inference_engine/${RELATIVE_URL}")
if(DEFINED ENV{IE_PATH_TO_DEPS})
    set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
else()
    set(URL "https://download.01.org/opencv/2019/openvinotoolkit/R1/inference_engine/${RELATIVE_URL}")
endif()

#no message on recursive calls
if (${use_alternatives})
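The new branch above lets an `IE_PATH_TO_DEPS` environment variable override the default download location. A sketch of pointing it at an internal mirror (the URL is a placeholder, not a real endpoint):

```sh
export IE_PATH_TO_DEPS=https://mirror.example.com/inference_engine
cmake -DCMAKE_BUILD_TYPE=Release ..
```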
@@ -1,17 +1,15 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)

function (extract archive_path unpacked_path folder result)
    # Slurped from a generated extract-TARGET.cmake file.
    if (NOT EXISTS ${unpacked_path})
        get_filename_component(unpacked_dir ${unpacked_path} DIRECTORY)

        file(MAKE_DIRECTORY ${unpacked_path})

        message(STATUS "extracting...
            src='${archive_path}'
            dst='${unpacked_path}'")
@@ -1,11 +1,9 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 2.8)

include ("options")
include (options)

#this options are aimed to optimize build time on development system

@@ -21,8 +19,6 @@ ie_option (ENABLE_PROFILING_ITT "ITT tracing of IE and plugins internals" ON)

ie_option (ENABLE_PROFILING_RAW "Raw counters profiling (just values, no start/stop time or timeline)" OFF)

#

# "MKL-DNN library might use MKL-ML or OpenBLAS for gemm tasks: MKL|OPENBLAS|JIT"
if (NOT GEMM STREQUAL "MKL"
        AND NOT GEMM STREQUAL "OPENBLAS"

@@ -30,15 +26,17 @@ if (NOT GEMM STREQUAL "MKL"
    set (GEMM "JIT")
    message(STATUS "GEMM should be set to MKL, OPENBLAS or JIT. Default option is " ${GEMM})
endif()
set(GEMM "${GEMM}" CACHE STRING "Gemm implementation" FORCE)
list (APPEND IE_OPTIONS GEMM)

# "MKL-DNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ"
if (NOT THREADING STREQUAL "TBB"
        AND NOT THREADING STREQUAL "OMP"
        AND NOT THREADING STREQUAL "SEQ")
    set (THREADING "OMP")
    set (THREADING "TBB")
    message(STATUS "THREADING should be set to TBB, OMP or SEQ. Default option is " ${THREADING})
endif()
set(THREADING "${THREADING}" CACHE STRING "Threading" FORCE)
list (APPEND IE_OPTIONS THREADING)

# Enable postfixes for Debug/Release builds

@@ -53,7 +51,9 @@ else()
    set (IE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX_LIN})
    set (IE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX_LIN})
endif()
set(IE_DEBUG_POSTFIX "${IE_DEBUG_POSTFIX}" CACHE STRING "Debug postfix" FORCE)
list (APPEND IE_OPTIONS IE_DEBUG_POSTFIX)
set(IE_RELEASE_POSTFIX "${IE_RELEASE_POSTFIX}" CACHE STRING "Release postfix" FORCE)
list (APPEND IE_OPTIONS IE_RELEASE_POSTFIX)

ie_option (ENABLE_TESTS "unit and functional tests" OFF)

@@ -62,6 +62,7 @@ ie_option (ENABLE_GAPI_TESTS "unit tests for GAPI kernels" OFF)

ie_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF)

ie_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON)

ie_option (ENABLE_SAMPLES_CORE "console samples core library" ON)

@@ -93,6 +94,9 @@ ie_option (ENABLE_DEBUG_SYMBOLS "generates symbols for debugging" OFF)

ie_option (ENABLE_PYTHON "enables ie python bridge build" OFF)

ie_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF)
ie_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF)

#environment variables used

#name of environment variable stored path to temp directory"
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -94,7 +94,13 @@ function(set_ie_threading_interface_for TARGET_NAME)
            endif ()
        endif ()
    endif ()

    endif ()

    target_compile_definitions(${TARGET_NAME} PUBLIC -DIE_THREAD=${IE_THREAD_DEFINE})

    if (NOT THREADING STREQUAL "SEQ")
        find_package(Threads REQUIRED)
        target_link_libraries(${TARGET_NAME} PUBLIC ${CMAKE_THREAD_LIBS_INIT})
    endif()
endfunction(set_ie_threading_interface_for)
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required(VERSION 2.8)

if (UNIX)
    function(get_linux_name res_var)
        if (NOT EXISTS "/etc/lsb-release")

@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
|
||||
# Copyright (C) 2018 Intel Corporation
|
||||
# Copyright (C) 2018-2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@ -8,10 +8,13 @@ if (WIN32)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") #no asynchronous structured exception handling
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
|
||||
|
||||
|
||||
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Z7")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7")
|
||||
|
||||
if(ENABLE_DEBUG_SYMBOLS)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Zi")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Z7")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Z7")
|
||||
|
||||
set(DEBUG_SYMBOLS_LINKER_FLAGS "/DEBUG")
|
||||
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
|
||||
|
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -11,7 +11,11 @@ if (ENABLE_SANITIZER)
    if (SANITIZE_RECOVER_SUPPORTED)
        set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=address")
    endif()
    set(SANITIZER_LINKER_FLAGS "-fsanitize=address -fuse-ld=gold")

    set(SANITIZER_LINKER_FLAGS "-fsanitize=address")
    if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
        set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
    endif()

    set(CMAKE_CC_FLAGS "${CMAKE_CC_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -1,9 +1,9 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set(InferenceEngine_VERSION 1.5.0)
set(InferenceEngine_VERSION 1.6.0)
set(PACKAGE_VERSION ${InferenceEngine_VERSION})

set(PACKAGE_VERSION_EXACT False)
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -54,72 +54,27 @@ else()
set(THREADING "@THREADING@")

# check whether setvars.sh is sourced
if(NOT IE_ROOT_DIR AND (DEFINED ENV{InferenceEngine_DIR} OR InferenceEngine_DIR OR DEFINED ENV{INTEL_CVSDK_DIR}))
if(NOT IE_ROOT_DIR AND (DEFINED ENV{InferenceEngine_DIR} OR InferenceEngine_DIR OR DEFINED ENV{INTEL_OPENVINO_DIR}))
    if (EXISTS "${InferenceEngine_DIR}")
        # InferenceEngine_DIR manually set via command line params
        set(IE_ROOT_DIR "${InferenceEngine_DIR}/..")
    elseif (EXISTS "$ENV{InferenceEngine_DIR}")
        # InferenceEngine_DIR manually set via env
        set(IE_ROOT_DIR "$ENV{InferenceEngine_DIR}/..")
    elseif (EXISTS "$ENV{INTEL_CVSDK_DIR}/inference_engine")
    elseif (EXISTS "$ENV{INTEL_OPENVINO_DIR}/inference_engine")
        # if we installed DL SDK
        set(IE_ROOT_DIR "$ENV{INTEL_CVSDK_DIR}/inference_engine")
    elseif (EXISTS "$ENV{INTEL_CVSDK_DIR}/deployment_tools/inference_engine")
        set(IE_ROOT_DIR "$ENV{INTEL_OPENVINO_DIR}/inference_engine")
    elseif (EXISTS "$ENV{INTEL_OPENVINO_DIR}/deployment_tools/inference_engine")
        # CV SDK is installed
        set(IE_ROOT_DIR "$ENV{INTEL_CVSDK_DIR}/deployment_tools/inference_engine")
        set(IE_ROOT_DIR "$ENV{INTEL_OPENVINO_DIR}/deployment_tools/inference_engine")
    endif()
endif()

if(IE_ROOT_DIR)
if (WIN32)
    set(_OS_PATH "")
else()
    if (NOT EXISTS "/etc/lsb-release")
        execute_process(COMMAND find -L /etc/ -maxdepth 1 -type f -name *-release -exec cat {} \;
            OUTPUT_VARIABLE release_data RESULT_VARIABLE result)
        set(name_regex "NAME=\"([^ \"\n]*).*\"\n")
        set(version_regex "VERSION=\"([0-9]+(\\.[0-9]+)?)[^\n]*\"")
    else()
        #linux version detection using cat /etc/lsb-release
        file(READ "/etc/lsb-release" release_data)
        set(name_regex "DISTRIB_ID=([^ \n]*)\n")
        set(version_regex "DISTRIB_RELEASE=([0-9]+(\\.[0-9]+)?)")
    endif()

    string(REGEX MATCH ${name_regex} name ${release_data})
    set(os_name ${CMAKE_MATCH_1})

    string(REGEX MATCH ${version_regex} version ${release_data})
    set(os_name "${os_name} ${CMAKE_MATCH_1}")

    if (NOT os_name)
        ext_message(FATAL_ERROR "Cannot detect OS via reading /etc/*-release:\n ${release_data}")
    endif()

    if (NOT InferenceEngine_FIND_QUIETLY)
        message (STATUS "/etc/*-release distrib: ${os_name}")
    endif()

    if (${os_name} STREQUAL "Ubuntu 14.04")
        set(_OS_PATH "ubuntu_14.04/")
    elseif (${os_name} STREQUAL "Ubuntu 16.04")
        set(_OS_PATH "ubuntu_16.04/")
    elseif (${os_name} STREQUAL "Ubuntu 18.04")
        set(_OS_PATH "ubuntu_18.04/")
    elseif (${os_name} STREQUAL "CentOS 7")
        set(_OS_PATH "centos_7.4/")
    elseif (${os_name} STREQUAL "poky 2.0")
        set(_OS_PATH "ubuntu_16.04/")
    elseif (${os_name} STREQUAL "poky 2.5")
        set(_OS_PATH "ubuntu_18.04/")
    elseif (${os_name} STREQUAL "Raspbian 9")
        set(_OS_PATH "raspbian_9/")
    else()
        ext_message(FATAL_ERROR "${os_name} is not supported. List of supported OS: Ubuntu 14.04, Ubuntu 16.04, Ubuntu 18.04, CentOS 7, poky 2.0, poky 2.5, Raspbian 9")
    endif()
endif()
if(NOT IE_ROOT_DIR)
    ext_message(FATAL_ERROR "inference_engine directory is not found")
endif()

if(IE_INCLUDE_DIR AND NOT "${IE_ROOT_DIR}/include" EQUAL "${IE_INCLUDE_DIR}")
    unset(IE_INCLUDE_DIR CACHE)
endif()

@@ -128,13 +83,13 @@ else()
    unset(IE_SRC_DIR CACHE)
endif()

if(IE_LIBRARY AND NOT "${IE_ROOT_DIR}/lib/${_OS_PATH}/${_ARCH}" EQUAL "${IE_LIBRARY}")
if(IE_LIBRARY AND NOT "${IE_ROOT_DIR}/lib/${_ARCH}" EQUAL "${IE_LIBRARY}")
    unset(IE_LIBRARY CACHE)
endif()

set(_IE_ROOT_INCLUDE_DIR "${IE_ROOT_DIR}/include")
set(_IE_ROOT_SRC_DIR "${IE_ROOT_DIR}/src")
set(_IE_ROOT_LIBRARY "${IE_ROOT_DIR}/lib/${_OS_PATH}/${_ARCH}")
set(_IE_ROOT_LIBRARY "${IE_ROOT_DIR}/lib/${_ARCH}")

find_path(IE_INCLUDE_DIR inference_engine.hpp "${_IE_ROOT_INCLUDE_DIR}")
find_path(IE_SRC_DIR extension "${_IE_ROOT_SRC_DIR}")
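Per the lookup above, `find_package(InferenceEngine)` can now be satisfied either through `InferenceEngine_DIR` or through an install that sets `INTEL_OPENVINO_DIR`. A sketch, with illustrative paths only (not the documented install layout):

```sh
# Point directly at a dldt build tree (the top-level CMakeLists.txt sets
# InferenceEngine_DIR to the build directory):
cmake -DInferenceEngine_DIR=/path/to/dldt/inference-engine/build ..

# Or rely on an installed package root:
export INTEL_OPENVINO_DIR=/opt/intel/openvino
cmake ..
```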
@@ -1,10 +1,8 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required(VERSION 2.8)

function (branchName VAR)
    execute_process(
        COMMAND git rev-parse --abbrev-ref HEAD
@@ -26,6 +26,11 @@ if (NOT(IE_MAIN_SOURCE_DIR))
    if(NOT(WIN32))
        set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE})
    endif()
else()
    if (UNIX OR APPLE)
        # cython generated files requires public visibility. Force visibility required.
        set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -fvisibility=default")
    endif()
endif()

include (UseCython)

@@ -45,5 +50,4 @@ endif()
find_package (InferenceEngine REQUIRED)

set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory (src/openvino/inference_engine)
add_subdirectory (src/openvino/inference_engine/dnn_builder)
add_subdirectory (src/openvino/inference_engine)
@@ -1,4 +1,4 @@
# Copyright (c) 2016 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -46,7 +46,7 @@
#
# See also FindCython.cmake

# Copyright (c) 2016 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -1,7 +1,7 @@
# Overview of Inference Engine Python* API

**NOTE:** It is a preview version of the Inference Engine Python\* API for evaluation purpose only.
Module structure and API itself may be changed in future releases.
> **NOTE:** It is a preview version of the Inference Engine Python\* API for evaluation purpose only.
> Module structure and API itself may be changed in future releases.

This API provides a simplified interface for Inference Engine functionality that allows to:
@@ -21,24 +21,24 @@ Supported Python* versions:
## Setting Up the Environment

To configure the environment for the Inference Engine Python\* API, run:
* On Ubuntu 16.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Windows 10: `call <INSTALL_DIR>\deployment_tools\inference_engine\python_api\setenv.bat`

The script automatically detects latest installed Python\* version and configures required environment if the version is supported.
If you want to use certain version of Python\*, set the environment variable `PYTHONPATH=<INSTALL_DIR>/deployment_tools/inference_engine/python_api/<desired_python_version>`
after running the environment configuration script.

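A sketch of the Linux flow described above (`<INSTALL_DIR>` is the placeholder used throughout this document, and the pinned Python version is only an example):

```sh
source <INSTALL_DIR>/bin/setupvars.sh
# Optionally pin a specific bundled Python version afterwards:
export PYTHONPATH=<INSTALL_DIR>/deployment_tools/inference_engine/python_api/python3.6
# Quick smoke test that the module resolves:
python3 -c "from openvino.inference_engine import IENetwork, IEPlugin"
```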
## <a name="ienetlayer-class"></a>IENetLayer
This class stores main information about the layer and allows you to modify some layer parameters
### Class attributes:

* `name` - Name of the layer
* `type`- Layer type
* `precision` - Layer base operating precision. Provides getter and setter interfaces.
* `layout` - Returns the layout of shape of the layer.
* `shape` - Return the list of the shape of the layer.
* `parents` - Returns a list, which contains names of layers preceding this layer.
* `children` - Returns a list, which contains names of layers following this layer.
* `affinity` - Layer affinity set by user or a default affinity set by the `IEPlugin.set_initial_affinity()` method.
  The affinity attribute provides getter and setter interfaces, so the layer affinity can be modified directly.
  For example:
@@ -46,39 +46,39 @@ This class stores main information about the layer and allow to modify some laye
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="HETERO:FPGA,CPU")
>>> plugin.set_config({"TARGET_FALLBACK": "HETERO:FPGA,CPU"})
>>> plugin.set_initial_affinity(net)
>>> for l in net.layers.values():
...     if l.type == "Convolution":
...         l.affinity = "CPU"

```

To correctly set affinity for the network, you must first initialize and properly configure the HETERO plugin.
`set_config({"TARGET_FALLBACK": "HETERO:FPGA,GPU"})` function configures the plugin fallback devices and their order.
`plugin.set_initial_affinity(net)` function sets affinity parameter of model layers according to its support
on specified devices.

After default affinity is set by the plugin, override the default values by setting affinity manually how it's
described in example above

To understand how default and non-default affinities are set:

1. Call `net.layers` function right after model loading and check that layer affinity parameter is empty.
2. Call `plugin.set_default_affinity(net)`.
3. Call `net.layers` and check layer affinity parameters to see how plugin set a default affinity
4. Set layer affinity how it's described above
5. Call `net.layers` again and check layer affinity parameters to see how it was changed after manual affinity
setting

Please refer to `affinity_setting_demo.py` to see the full usage pipeline.

* `weights`- Dictionary with layer weights, biases or custom blobs if any
* `params` - Layer specific parameters. Provides getter and setter interfaces to get and modify layer parameters.
  Please note that some modifications can be ignored and/or overwritten by target plugin (e.g. modification of
  convolution kernel size will be reflected in layer parameters but finally the plugin will ignore it and will
  use initial kernel size)

## <a name="ienetwork-class"></a>IENetwork

This class contains the information about the network model read from IR and allows you to manipulate with some model parameters such as
layers affinity and output layers.
@ -86,18 +86,15 @@ layers affinity and output layers.
|
||||
### Class Constructor
|
||||
|
||||
* `__init__(model: str, weights: str)`
|
||||
|
||||
* Parameters:
|
||||
|
||||
* model - Path to `.xml` file of the IR
|
||||
* weights - Path to `.bin` file of the IR
|
||||
|
||||
### Class attributes:
|
||||
|
||||
* `name` - Name of the loaded network
|
||||
* `inputs` - A dictionary that maps input layer names to <a name="inputinfo-class"></a>InputInfo objects.
|
||||
* `inputs` - A dictionary that maps input layer names to <a name="inputinfo-class"></a>InputInfo objects.
|
||||
For example, to get a shape of the input layer:
|
||||
|
||||
```py
|
||||
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
|
||||
>>> net.inputs
|
||||
@ -105,10 +102,8 @@ layers affinity and output layers.
|
||||
>>> net.inputs['data'].shape
|
||||
[1, 3, 224, 224]
|
||||
```
|
||||
|
||||
* `outputs` - A dictionary that maps output layer names to <a name="inputinfo-class"></a>OutputInfo objects
|
||||
For example, to get a shape of the output layer:
|
||||
|
||||
```py
|
||||
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
|
||||
>>> net.inputs
|
||||
@ -116,10 +111,9 @@ layers affinity and output layers.
|
||||
>>> net.outputs['prob'].shape
|
||||
[1, 1000]
|
||||
```
|
||||
|
||||
* `batch_size` - Batch size of the network. Provides getter and setter interfaces to get and modify the
|
||||
|
||||
* `batch_size` - Batch size of the network. Provides getter and setter interfaces to get and modify the
|
||||
network batch size. For example:
|
||||
|
||||
```py
|
||||
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
|
||||
>>> net.batch_size
|
||||
@ -130,10 +124,8 @@ layers affinity and output layers.
|
||||
>>> net.inputs['data'].shape
|
||||
[4, 3, 224, 224]
|
||||
```
|

* `layers` - Returns a dictionary that maps network layer names to <a name="ienetlayer-class"></a>`IENetLayer`
  objects containing layer properties in topological order. For example, to list all network layers:

  ```py
  >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
  >>> net.layers
  ...
  }
  ```

* `stats` - Returns a `LayersStatsMap` object containing a dictionary that maps network layer names to calibration statistics
  represented by <a name="layerstats-class"></a>`LayerStats` objects.
  The `LayersStatsMap` class is inherited from the built-in Python `dict` and overrides the default `update()` method to allow
  setting or modifying layer calibration statistics.

  ```py
  >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
  >>> net.stats.update({
          "conv2_2d" : LayerStats(min=(-5, -1, 0, 1, -7, 2), max=(63, 124, 70, 174, 99, 106)),
      })
  ```

  For more details about low-precision inference, please refer to the "Low-Precision 8-bit Integer Inference"
  section in the Inference Engine Developer Guide documentation.

### Class Methods

* `from_ir(model: str, weights: str)`

    > **NOTE:** The function is deprecated. Please use the `IENetwork()` class constructor to create a valid instance of `IENetwork`.

    * Description:

        The class method serves to read the model from the `.xml` and `.bin` files of the IR.

    * Parameters:

        * `model` - Path to `.xml` file of the IR
        * `weights` - Path to `.bin` file of the IR

    * Return value:

        An instance of the `IENetwork` class

    * Usage example:

    ```py
    >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
    >>> net
    <inference_engine.ie_api.IENetwork object at 0x7fd7dbce54b0>
    ```

### Instance Methods

* `add_outputs(outputs)`:

    * Description:

        The method serves to mark any intermediate layer as an output layer to retrieve the inference results
        from the specified layers.

    * Parameters:

        * `outputs` - List of layer names to be set as model outputs. In case of setting a single layer as output, a string with the layer name can be provided.

    * Return value:

        None

    * Usage example:

    ```py
    >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
    >>> net.add_outputs(["conv5_1/dwise", "conv2_1/expand"])
    >>> net.outputs
    ['prob', 'conv5_1/dwise', 'conv2_1/expand']
    ```

    > **NOTE**: The last layers (nodes without successors in graph representation of the model) are set as output
    > by default. In the case above, `prob` layer is a default output and `conv5_1/dwise`, `conv2_1/expand` are user-defined
    > outputs.

* `reshape(input_shapes: dict)`:

    * Description:

        The method reshapes the network to change spatial dimensions, batch size, or any dimension.

    > **NOTE:** Before using this method, make sure that the target shape is applicable to the network. Changing the network shape to an arbitrary value may lead to unpredictable behaviour.

    * Parameters:

        * `input_shapes` - A dictionary that maps input layer names to tuples with the target shape

    * Return value:

        None

    * Usage example:

    ```py
    >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
    >>> input_layer = next(iter(net.inputs))
    >>> n, c, h, w = net.inputs[input_layer].shape
    >>> net.reshape({input_layer: (n, c, h*2, w*2)})
    ```

* `serialize(path_to_xml, path_to_bin)`:

    * Description:

        The method serializes the network and stores it in files.

    * Parameters:

        * `path_to_xml` - Path to a file, where a serialized model will be stored
        * `path_to_bin` - Path to a file, where serialized weights will be stored

    * Return value:

        None

    * Usage example:

    ```py
    >>> net = IENetwork(model=path_to_model, weights=path_to_weights)
    >>> net.serialize(path_to_xml, path_to_bin)
    ```

## <a name="layerstats-class"></a>LayerStats

Layer calibration statistic container.

### Class Constructor

* `__init__(min: tuple = (), max: tuple = ())`

    * Parameters:

        * `min` - Tuple with per-channel minimum layer activation values
        * `max` - Tuple with per-channel maximum layer activation values

## <a name="inputinfo-class"></a>InputInfo

This class contains the information about the network input layers.

### Class attributes:

* `precision` - Precision of the input data provided by the user. Provides setter and getter interfaces
  to get and modify input layer precision.

  List of applicable precisions: FP32, FP16, I32, I16, I8, U32, U16

  > **NOTE**: Support of any calculation precision depends on the target plugin.

* `layout` - Layout of the input data provided by the user. Provides setter and getter interfaces
  to get and modify input layer layout.

  List of applicable layouts: NCHW, NHWC, OIHW, C, CHW, HW, NC, CN, BLOCKED

* `shape` - Input layer data shape
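
A short sketch of using these attributes; the input name `data` and FP16 support on the target plugin are assumptions:

```py
>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.inputs['data'].precision
'FP32'
>>> net.inputs['data'].precision = 'FP16'  # assumes the target plugin supports FP16
>>> net.inputs['data'].layout
'NCHW'
>>> net.inputs['data'].shape
[1, 3, 224, 224]
```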

## <a name="outputinfo-class"></a>OutputInfo

This class contains the information about the network output layers.

### Class attributes:

* `precision` - Precision of the output data. Provides setter and getter interfaces
  to get and modify output layer precision.

* `layout` - Layout of the output data provided by user

* `shape` - Output layer data shape

## <a name="ieplugin-class"></a>IEPlugin Class

This class is the main plugin interface and serves to initialize and configure the plugin.

### Class Constructor

* `__init__(device: str, plugin_dirs=None)`

    * Parameters:

        * `device` - Target device name. Supported devices: CPU, GPU, FPGA, MYRIAD, HETERO
        * `plugin_dirs` - List of paths to plugin directories

### Properties

* `device` - A name of the device that was specified to initialize IEPlugin
* `version` - A version of the plugin
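A quick, illustrative check of these properties:

```py
>>> plugin = IEPlugin(device="CPU")
>>> plugin.device
'CPU'
>>> plugin.version  # the version string depends on the installed package
```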

### Instance Methods

* `load(network: IENetwork, num_requests: int=1, config=None)`

    * Description:

        Loads a network that was read from the IR to the plugin and creates an executable network from a network object.
        You can create as many networks as you need and use them simultaneously (up to the limitation of the hardware
        resources).

    * Parameters:

        * `network` - A valid `IENetwork` instance
        * `num_requests` - A positive integer value of infer requests to be created. Number of infer requests may be limited
          by device capabilities.
        * `config` - A dictionary of plugin configuration keys and their values

    * Return value:

        An instance of the `ExecutableNetwork` class

    * Usage example:

    ```py
    >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
    >>> plugin = IEPlugin(device="CPU")
    >>> exec_net = plugin.load(network=net, num_requests=2)
    >>> exec_net
    <inference_engine.ie_api.ExecutableNetwork object at 0x7f5140bbcd38>
    ```

* `set_initial_affinity(net: IENetwork)`

    * Description:

        Sets initial affinity for model layers according to the HETERO plugin logic. Applicable only if
        `IEPlugin` was initialized for a HETERO device.

    * Parameters:

        * `net` - A valid instance of `IENetwork`

    * Return value:

        None

    * Usage example:

        See the `affinity` attribute of the `IENetLayer` class.

* `add_cpu_extension(extension_path: str)`

    * Description:

        Loads extensions library to the plugin. Applicable only for a CPU device and a HETERO device with CPU.

    * Parameters:

        * `extension_path` - A full path to CPU extensions library

    * Return value:

        None

    * Usage example:

    ```py
    >>> plugin = IEPlugin(device="CPU")
    >>> plugin.add_cpu_extension(ext_lib_path)
    ```

* `set_config(config: dict)`

    * Description:

        Sets a configuration for the plugin. Refer to `SetConfig()` in the Inference Engine C++ documentation for the list of acceptable
        keys and values.

    * Parameters:

        * `config` - A dictionary of keys and values of acceptable configuration parameters

    * Return value:

        None

    * Usage examples:

        See the `set_affinity` method of the `IENetwork` class.
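
    As an extra illustration, a minimal sketch that reuses the `DYN_BATCH_ENABLED` key shown in the `set_batch()` example later in this document; other acceptable keys and values depend on the plugin:

    ```py
    >>> plugin = IEPlugin(device="CPU")
    >>> plugin.set_config({"DYN_BATCH_ENABLED": "YES"})  # key borrowed from the set_batch() example below
    ```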

* `get_supported_layers(net: IENetwork)`

    * Description:

        Returns the set of layers supported by the plugin. Please note that in the case of the CPU plugin, support of
        a layer may depend on an extension loaded by the `add_cpu_extension()` method.

    * Parameters:

        * `net` - A valid instance of `IENetwork`

    * Return value:

        Set of layers supported by the plugin

    * Usage example:

        See the `affinity` attribute of the `IENetLayer` class.
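
    For illustration, a hypothetical sketch of a common pattern built on this method: checking which layers the plugin can run and pinning the rest to a CPU fallback via the `affinity` attribute. The device string and model paths are assumptions:

    ```py
    >>> plugin = IEPlugin(device="HETERO:FPGA,CPU")
    >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
    >>> supported = plugin.get_supported_layers(net)
    >>> for name, layer in net.layers.items():
    ...     if name not in supported:
    ...         layer.affinity = "CPU"  # fall back to CPU for unsupported layers
    ```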

## <a name="executablenetwork"></a>ExecutableNetwork Class

This class represents a network instance loaded to plugin and ready for inference.

### Class Constructor

There is no explicit class constructor. To make a valid instance of `ExecutableNetwork`, use the `load()` method of the `IEPlugin` class.

### Class attributes

* `requests` - A tuple of `InferRequest` instances

    * Usage example:

    ```py
    >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
    >>> plugin = IEPlugin(device="CPU")
    >>> exec_net = plugin.load(network=net, num_requests=3)
    >>> exec_net.requests
    (<inference_engine.ie_api.InferRequest object at 0x7f66f56c57e0>,
    <inference_engine.ie_api.InferRequest object at 0x7f66f56c58b8>,
    <inference_engine.ie_api.InferRequest object at 0x7f66f56c5900>)
    ```

### Instance Methods

* `infer(inputs=None)`

    * Description:

        Starts synchronous inference for the first infer request of the executable network and returns output data.
        Wraps the `infer()` method of the `InferRequest` class.

    * Parameters:

        * `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer

    * Return value:

        A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer

    * Usage example:

    ```py
    >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
    >>> plugin = IEPlugin(device="CPU")
    ......
    ]])}
    ```

    For illustration of input data preparation, please see samples (for example, `classification_sample.py`).

* `start_async(request_id, inputs=None)`

    * Description:

        Starts asynchronous inference for the specified infer request.
        Wraps the `async_infer()` method of the `InferRequest` class.

    * Parameters:

        * `request_id` - Index of the infer request to start inference
        * `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer

    * Return value:

        A handler of the specified infer request, which is an instance of the `InferRequest` class.

    * Usage example:

    ```py
    >>> infer_request_handle = exec_net.start_async(request_id=0, inputs={input_blob: image})
    >>> infer_status = infer_request_handle.wait()
    >>> res = infer_request_handle.outputs[out_blob]
    ```

    For more details about infer requests processing, see `classification_sample_async.py` (simplified case) and
    `object_detection_demo_ssd_async.py` (real asynchronous use case) samples.

## <a name="inferrequest"></a>InferRequest Class

This class provides an interface to infer requests of `ExecutableNetwork` and serves to handle infer requests execution
and to set and get output data.

### Class Constructor

There is no explicit class constructor. To make a valid `InferRequest` instance, use the `load()` method of the `IEPlugin`
class with a specified number of requests to get an `ExecutableNetwork` instance, which stores infer requests.

### Class attributes

* `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer
* `outputs` - A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer

    * Usage example:

    ```py
    >>> exec_net.requests[0].inputs['data'][:] = image
    >>> exec_net.requests[0].infer()
    >>> res = exec_net.requests[0].outputs['prob']
    >>> np.flip(np.sort(np.squeeze(res)),0)
    array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
           5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
           2.26027006e-03, 2.12283316e-03 ...])
    ```

### Instance Methods

It is not recommended to run inference directly on an `InferRequest` instance.
To run inference, please use the simplified `infer()` and `start_async()` methods of `ExecutableNetwork`.

* `infer(inputs=None)`

    * Description:

        Starts synchronous inference of the infer request and fills the outputs array.

    * Parameters:

        * `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer

    * Return value:

        None

    * Usage example:

    ```py
    >>> exec_net = plugin.load(network=net, num_requests=2)
    >>> exec_net.requests[0].infer({input_blob: image})
    >>> res = exec_net.requests[0].outputs['prob']
    >>> np.flip(np.sort(np.squeeze(res)),0)
    array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
           5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
           2.26027006e-03, 2.12283316e-03 ...])
    ```

* `async_infer(inputs=None)`

    * Description:

        Starts asynchronous inference of the infer request and fills the outputs array.

    * Parameters:

        * `inputs` - A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with input data for the layer

    * Return value:

        None

    * Usage example:

    ```py
    >>> exec_net = plugin.load(network=net, num_requests=2)
    >>> exec_net.requests[0].async_infer({input_blob: image})
    >>> exec_net.requests[0].wait()
    >>> res = exec_net.requests[0].outputs['prob']
    >>> np.flip(np.sort(np.squeeze(res)),0)
    array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
           5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
           2.26027006e-03, 2.12283316e-03 ...])
    ```

* `wait(timeout=-1)`

    * Description:

        Waits for the result to become available. Blocks until the specified timeout elapses or the result
        becomes available, whichever comes first.

    > **NOTE:** There are special values of the timeout parameter:

    * 0 - Immediately returns the inference status. It does not block or interrupt execution.
      To find the meaning of statuses, please refer to InferenceEngine::StatusCode in the Inference Engine C++ documentation.

    * -1 - Waits until the inference result becomes available (default value)

    * Parameters:

        * `timeout` - Time to wait in milliseconds or the special (0, -1) cases described above.
          If not specified, `timeout` value is set to -1 by default.

    * Usage example:

        See the `async_infer()` method of the `InferRequest` class.
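
    In addition, a small sketch of non-blocking polling with the special `timeout` value `0`; comparing the returned status with `0` assumes that `InferenceEngine::StatusCode` reports `OK` as `0`:

    ```py
    >>> exec_net.requests[0].async_infer({input_blob: image})
    >>> status = exec_net.requests[0].wait(0)  # returns the status immediately
    >>> if status == 0:                        # 0 is assumed to correspond to StatusCode.OK
    ...     res = exec_net.requests[0].outputs['prob']
    ```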

* `get_perf_counts()`

    * Description:

        Queries performance measures per layer to get feedback on what is the most time-consuming layer.

    > **NOTE**: Performance counters data and format depend on the plugin.

    * Parameters:

        None

    * Usage example:

    ```py
    >>> exec_net = plugin.load(network=net, num_requests=2)
    >>> exec_net.requests[0].infer({input_blob: image})
    >>> exec_net.requests[0].get_perf_counts()
    {'Conv2D': {'exec_type': 'jit_avx2_1x1',
                'real_time': 154,
                'cpu_time': 154,
                'status': 'EXECUTED',
                'layer_type': 'Convolution'},
     'Relu6':  {'exec_type': 'undef',
                'real_time': 0,
                'cpu_time': 0,
                'status': 'NOT_RUN',
                'layer_type': 'Clamp'}
     ...
    }
    ```

* `set_batch(size)`

    * Description:

        Sets a new batch size for a certain infer request when dynamic batching is enabled in the executable network that created this request.

    > **NOTE:** Support of dynamic batch size depends on the target plugin.

    * Parameters:

        * `batch` - New batch size to be used by all the following inference calls for this request

    * Usage example:

    ```py
    >>> plugin.set_config({"DYN_BATCH_ENABLED": "YES"})
    >>> exec_net.requests[0].set_batch(inputs_count)
    ```

    Please refer to `dynamic_batch_demo.py` to see the full usage example.
@ -1,4 +1,4 @@

# Benchmark Application Python* Demo

This topic demonstrates how to run the Benchmark Application demo, which performs inference using convolutional networks.

Upon the start-up, the application reads command-line parameters and loads a network and images to the Inference Engine plugin. The number of infer requests and execution approach depend on a mode defined with the `-api` command-line parameter.

> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).

### Synchronous API
For synchronous mode, the primary metric is latency. The application creates one infer request and executes the `Infer` method. The number of executions is defined by one of the two values:

## Running

Running the application with the `-h` or `--help` option yields the following usage message:
```
python3 benchmark_app.py -h
```
The command yields the following usage message:
```
usage: benchmark_app.py [-h] -i PATH_TO_IMAGES -m PATH_TO_MODEL
                        [-c PATH_TO_CLDNN_CONFIG] [-l PATH_TO_EXTENSION]
                        [-api {sync,async}] [-d TARGET_DEVICE]
                        [-niter NUMBER_ITERATIONS]
                        [-nireq NUMBER_INFER_REQUESTS]
                        [-nthreads NUMBER_THREADS] [-b BATCH_SIZE]
                        [-pin {YES,NO}]

Options:
  -h, --help            Show this help message and exit.
  -i PATH_TO_IMAGES, --path_to_images PATH_TO_IMAGES
                        Required. Path to a folder with images or to image
                        files.
  -m PATH_TO_MODEL, --path_to_model PATH_TO_MODEL
                        Required. Path to an .xml file with a trained model.
  -c PATH_TO_CLDNN_CONFIG, --path_to_cldnn_config PATH_TO_CLDNN_CONFIG
                        Optional. Required for GPU custom kernels. Absolute
                        path to an .xml file with the kernels description.
  -l PATH_TO_EXTENSION, --path_to_extension PATH_TO_EXTENSION
                        Optional. Required for CPU custom layers. Absolute
                        path to a shared library with the kernels
                        implementations.
  -api {sync,async}, --api_type {sync,async}
                        Optional. Enable using sync/async API. Default value
                        is sync
  -d TARGET_DEVICE, --target_device TARGET_DEVICE
                        Optional. Specify a target device to infer on: CPU,
                        GPU, FPGA, HDDL or MYRIAD. Use "-d HETERO:<comma
                        separated devices list>" format to specify HETERO
                        plugin. The application looks for a suitable plugin
                        for the specified device.
  -niter NUMBER_ITERATIONS, --number_iterations NUMBER_ITERATIONS
                        Optional. Number of iterations. If not specified, the
                        number of iterations is calculated depending on a
                        device.
  -nireq NUMBER_INFER_REQUESTS, --number_infer_requests NUMBER_INFER_REQUESTS
                        Optional. Number of infer requests (default value is
                        2).
  -nthreads NUMBER_THREADS, --number_threads NUMBER_THREADS
                        Number of threads to use for inference on the CPU
                        (including Hetero cases).
  -b BATCH_SIZE, --batch_size BATCH_SIZE
                        Optional. Batch size value. If not specified, the
                        batch size value is determined from IR
  -pin {YES,NO}, --infer_threads_pinning {YES,NO}
                        Optional. Enable ("YES" is default value) or disable
                        ("NO") CPU threads pinning for CPU-involved inference.
```

Running the application with the empty list of options yields the usage message given above and an error message.

To run the demo, you can use public or pre-trained models. To download the pre-trained models, use the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or go to [https://download.01.org/opencv/](https://download.01.org/opencv/).

> **NOTE**: Before running the demo with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).

For example, to do inference on an image using a trained network with multiple outputs on CPU, run the following command:

```
python3 benchmark_app.py -i <path_to_image>/inputImage.bmp -m <path_to_model>/multiple-output.xml -d CPU
```

## Demo Output

For asynchronous API, the application outputs only throughput.

## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
* [Model Optimizer](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader)
@ -1,5 +1,5 @@
"""
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -13,11 +13,6 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
from .benchmark import main
from .utils.constants import HELP_MESSAGES
@ -1,6 +1,5 @@
"""
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,7 +17,7 @@
from statistics import median
from openvino.inference_engine import IENetwork, IEPlugin

from .utils.benchmark_utils import *

def main(args=None):
    try:
@ -198,7 +197,3 @@ def main(args=None):

    except Exception as e:
        logging.exception(e)
@ -0,0 +1,15 @@
"""
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
@ -1,5 +1,5 @@
"""
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ from random import choice
from datetime import datetime
from fnmatch import fnmatch

from .constants import *

logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger('BenchmarkApp')
@ -42,27 +42,29 @@ def validate_args(args):

def parse_args():
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help=HELP_MESSAGES["HELP"])
    args.add_argument('-i', '--path_to_images', type=str, required=True, help=HELP_MESSAGES['IMAGE_MESSAGE'])
    args.add_argument('-m', '--path_to_model', type=str, required=True, help=HELP_MESSAGES['MODEL_MESSAGE'])
    args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
                      help=HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
                      help=HELP_MESSAGES['CUSTOM_CPU_LIBRARY_MESSAGE'])
    args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
                      help=HELP_MESSAGES['API_MESSAGE'])
    args.add_argument('-d', '--target_device', type=str, required=False, default="CPU",
                      help=HELP_MESSAGES['TARGET_DEVICE_MESSAGE'])
    args.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
                      help=HELP_MESSAGES['ITERATIONS_COUNT_MESSAGE'])
    args.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=2,
                      help=HELP_MESSAGES['INFER_REQUESTS_COUNT_MESSAGE'])
    args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
                      help=HELP_MESSAGES['INFER_NUM_THREADS_MESSAGE'])
    args.add_argument('-b', '--batch_size', type=int, required=False, default=None,
                      help=HELP_MESSAGES['BATCH_SIZE_MESSAGE'])
    args.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES',
                      choices=['YES', 'NO'], help=HELP_MESSAGES['INFER_THREADS_PINNING_MESSAGE'])
    return parser.parse_args()
@ -1,5 +1,5 @@
"""
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -15,22 +15,24 @@
"""

HELP_MESSAGES = {
    'HELP': "Show this help message and exit.",
    'IMAGE_MESSAGE': "Required. Path to a folder with images or to image files.",
    'MULTI_INPUT_MESSAGE': "Optional. Path to a multi input file.",
    'MODEL_MESSAGE': "Required. Path to an .xml file with a trained model.",
    'PLUGIN_PATH_MESSAGE': "Optional. Path to a plugin folder.",
    'API_MESSAGE': "Optional. Enable using sync/async API. Default value is sync",
    'TARGET_DEVICE_MESSAGE': "Optional. Specify a target device to infer on: CPU, GPU, FPGA, HDDL or MYRIAD. "
                             "Use \"-d HETERO:<comma separated devices list>\" format to specify HETERO plugin. "
                             "The application looks for a suitable plugin for the specified device.",
    'ITERATIONS_COUNT_MESSAGE': "Optional. Number of iterations. "
                                "If not specified, the number of iterations is calculated depending on a device.",
    'INFER_REQUESTS_COUNT_MESSAGE': "Optional. Number of infer requests (default value is 2).",
    'INFER_NUM_THREADS_MESSAGE': "Number of threads to use for inference on the CPU "
                                 "(including Hetero cases).",
    'CUSTOM_CPU_LIBRARY_MESSAGE': "Optional. Required for CPU custom layers. "
                                  "Absolute path to a shared library with the kernels implementations.",
    'CUSTOM_GPU_LIBRARY_MESSAGE': "Optional. Required for GPU custom kernels. Absolute path to an .xml file with the "
                                  "kernels description.",
    'BATCH_SIZE_MESSAGE': "Optional. Batch size value. If not specified, the batch size value is determined from IR",
    'INFER_THREADS_PINNING_MESSAGE': "Optional. Enable (\"YES\" is default value) or disable (\"NO\") "
                                     "CPU threads pinning for CPU-involved inference."
@ -0,0 +1,37 @@
import benchmark

from argparse import ArgumentParser, SUPPRESS


def parse_args():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help=benchmark.HELP_MESSAGES["HELP"])
    args.add_argument('-i', '--path_to_images', type=str, required=True,
                      help=benchmark.HELP_MESSAGES['IMAGE_MESSAGE'])
    args.add_argument('-m', '--path_to_model', type=str, required=True,
                      help=benchmark.HELP_MESSAGES['MODEL_MESSAGE'])
    args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
                      help=benchmark.HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
                      help=benchmark.HELP_MESSAGES['CUSTOM_CPU_LIBRARY_MESSAGE'])
    args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
                      help=benchmark.HELP_MESSAGES['API_MESSAGE'])
    args.add_argument('-d', '--target_device', type=str, required=False, default="CPU",
                      help=benchmark.HELP_MESSAGES['TARGET_DEVICE_MESSAGE'])
    args.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
                      help=benchmark.HELP_MESSAGES['ITERATIONS_COUNT_MESSAGE'])
    args.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=2,
                      help=benchmark.HELP_MESSAGES['INFER_REQUESTS_COUNT_MESSAGE'])
    args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
                      help=benchmark.HELP_MESSAGES['INFER_NUM_THREADS_MESSAGE'])
    args.add_argument('-b', '--batch_size', type=int, required=False, default=None,
                      help=benchmark.HELP_MESSAGES['BATCH_SIZE_MESSAGE'])
    args.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES',
                      choices=['YES', 'NO'], help=benchmark.HELP_MESSAGES['INFER_THREADS_PINNING_MESSAGE'])
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    benchmark.main(args)
@ -0,0 +1,79 @@
# Image Classification Python* Sample

This topic demonstrates how to run the Image Classification sample application, which performs
inference using image classification networks such as AlexNet and GoogLeNet.

### How It Works

Upon the start-up, the sample application reads command line parameters and loads a network and an image to the Inference
Engine plugin. When inference is done, the application creates an
output image and outputs data to the standard output stream.

> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).

## Running

Running the application with the `-h` option yields the usage message:
```
python3 classification_sample.py -h
```
The command yields the following usage message:
```
usage: classification_sample.py [-h] -m MODEL -i INPUT [INPUT ...]
                                [-l CPU_EXTENSION] [-pp PLUGIN_DIR]
                                [-d DEVICE] [--labels LABELS] [-nt NUMBER_TOP]
                                [-ni NUMBER_ITER] [-pc]

Options:
  -h, --help            Show this help message and exit.
  -m MODEL, --model MODEL
                        Required. Path to an .xml file with a trained model.
  -i INPUT [INPUT ...], --input INPUT [INPUT ...]
                        Required. Path to a folder with images or path to an
                        image files
  -l CPU_EXTENSION, --cpu_extension CPU_EXTENSION
                        Optional. Required for CPU custom layers. MKLDNN (CPU)-targeted custom layers.
                        Absolute path to a shared library with the kernels
                        implementations.
  -pp PLUGIN_DIR, --plugin_dir PLUGIN_DIR
                        Optional. Path to a plugin folder
  -d DEVICE, --device DEVICE
                        Optional. Specify the target device to infer on; CPU,
                        GPU, FPGA, HDDL or MYRIAD is acceptable. The sample
                        will look for a suitable plugin for device specified.
                        Default value is CPU
  --labels LABELS       Optional. Path to a labels mapping file
  -nt NUMBER_TOP, --number_top NUMBER_TOP
                        Optional. Number of top results
  -ni NUMBER_ITER, --number_iter NUMBER_ITER
                        Optional. Number of inference iterations
  -pc, --perf_counts    Optional. Report performance counters
```

Running the application with the empty list of options yields the usage message given above.

To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).

> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).

For example, to perform inference of an AlexNet model (previously converted to the Inference Engine format) on CPU, use the following command:

```
python3 classification_sample.py -i <path_to_image>/cat.bmp -m <path_to_model>/alexnet_fp32.xml
```

### Sample Output

By default the application outputs top-10 inference results.
Add the `-nt` option to the previous command to modify the number of top output results.
For example, to get the top-5 results on GPU, run the following command:
```
python3 classification_sample.py -i <path_to_image>/cat.bmp -m <path_to_model>/alexnet_fp32.xml -nt 5 -d GPU
```

## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
* [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader)
@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
@ -26,22 +26,29 @@ from openvino.inference_engine import IENetwork, IEPlugin


def build_argparser():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True,
                      type=str)
    args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
                      required=True, type=str, nargs="+")
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. "
                           "MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the"
                           " kernels implementations.", type=str, default=None)
    args.add_argument("-pp", "--plugin_dir", help="Optional. Path to a plugin folder", type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: is "
                           "acceptable. The sample will look for a suitable plugin for device specified. Default "
                           "value is CPU",
                      default="CPU", type=str)
    args.add_argument("--labels", help="Optional. Path to a labels mapping file", default=None, type=str)
    args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
    args.add_argument("-ni", "--number_iter", help="Optional. Number of inference iterations", default=1, type=int)
    args.add_argument("-pc", "--perf_counts", help="Optional. Report performance counters", default=False,
                      action="store_true")

    return parser
@ -93,7 +100,6 @@ def main():
    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
@ -101,7 +107,7 @@ def main():
    for i in range(args.number_iter):
        t0 = time()
        res = exec_net.infer(inputs={input_blob: images})
        infer_time.append((time() - t0) * 1000)
    log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
    if args.perf_counts:
        perf_counts = exec_net.requests[0].get_perf_counts()
@ -120,18 +126,25 @@ def main():
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = None
    classid_str = "classid"
    probability_str = "probability"
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        print("Image {}\n".format(args.input[i]))
        print(classid_str, probability_str)
        print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
        for id in top_ind:
            det_label = labels_map[id] if labels_map else "{}".format(id)
            label_length = len(det_label)
            space_num_before = (len(classid_str) - label_length) // 2
            space_num_after = len(classid_str) - (space_num_before + label_length) + 2
            space_num_before_prob = (len(probability_str) - len(str(probs[id]))) // 2
            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
                                          ' ' * space_num_after, ' ' * space_num_before_prob,
                                          probs[id]))
        print("\n")

    del exec_net
    del plugin


if __name__ == '__main__':
    sys.exit(main() or 0)
@ -0,0 +1,89 @@
# Image Classification Python* Sample Async

This sample demonstrates how to build and execute inference in pipelined mode on the example of classification networks.

The pipelined mode might increase the throughput of the pictures. The latency of one inference will be the same as for synchronous execution.
<br>
The throughput increases due to the following reasons:
* Some plugins have heterogeneity inside themselves: data transferring, execution on a remote device, pre-processing and post-processing on the host.
* Using of an explicit heterogeneous plugin with execution of different parts of the network on different devices, for example HETERO:CPU,GPU.

When two or more devices process one image, creating several infer requests and starting asynchronous inference allows for using the devices in the most efficient way.
If two devices are involved in execution, the most optimal value for the `-nireq` option is 2.
To process infer requests more efficiently, Classification Sample Async uses a round-robin algorithm: it starts execution of the current infer request and switches to waiting for results of the previous one. After the waiting is over, it swaps the infer requests and repeats the procedure.
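A minimal sketch of this round-robin scheme under stated assumptions: two infer requests, `images` as a list of prepared input arrays, and `input_blob`/`out_blob` already resolved; the real sample differs in details:

```py
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> current, previous = 0, 1
>>> started = [False, False]
>>> for image in images:
...     exec_net.start_async(request_id=current, inputs={input_blob: image})
...     started[current] = True
...     if started[previous]:
...         exec_net.requests[previous].wait()                   # wait for the previous frame
...         res = exec_net.requests[previous].outputs[out_blob]  # collect its result
...     current, previous = previous, current                    # swap request roles
```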
Another required aspect of good throughput is the number of iterations. Only with a big number of iterations you can emulate the real application work and get good performance.

The batch mode is an attribute independent of the pipelined mode. Pipelined mode works efficiently with any batch size.
|
||||
|
||||
### How It Works
|
||||
|
||||
Upon the start-up, the sample application reads command line parameters and loads a network and an image to the Inference
|
||||
Engine plugin.
|
||||
Then application creates several infer requests pointed in `-nireq` parameter and loads images for inference.
|
||||
|
||||
Then in a loop it starts inference for the current infer request and switches to waiting for the previous one. When results are ready, it swaps infer requests.
|
||||
|
||||
When inference is done, the application outputs data to the standard output stream.
|
||||
|
||||
> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
|
||||
|
||||
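
If you choose to rearrange the channels in the application itself, the change amounts to reversing the channel axis of the loaded image. A minimal sketch, assuming an OpenCV-loaded HWC array (the file name is a placeholder):
```
import cv2

image = cv2.imread("cat.bmp")  # OpenCV loads images in BGR, HWC layout
image_rgb = image[:, :, ::-1]  # reverse the channel axis: BGR -> RGB
```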
## Running

Run the application with the <code>-h</code> option to see the usage message:
```
python3 classification_sample_async.py -h
```
The command yields the following usage message:
```
usage: classification_sample_async.py [-h] -m MODEL -i INPUT [INPUT ...]
                                      [-l CPU_EXTENSION] [-pp PLUGIN_DIR]
                                      [-d DEVICE] [--labels LABELS]
                                      [-nt NUMBER_TOP] [-ni NUMBER_ITER] [-pc]

Options:
  -h, --help            Show this help message and exit.
  -m MODEL, --model MODEL
                        Required. Path to an .xml file with a trained model.
  -i INPUT [INPUT ...], --input INPUT [INPUT ...]
                        Required. Path to a folder with images or path to
                        image files
  -l CPU_EXTENSION, --cpu_extension CPU_EXTENSION
                        Optional. Required for CPU custom layers. Absolute
                        path to a shared library with the kernels
                        implementations.
  -pp PLUGIN_DIR, --plugin_dir PLUGIN_DIR
                        Optional. Path to a plugin folder
  -d DEVICE, --device DEVICE
                        Optional. Specify the target device to infer on; CPU,
                        GPU, FPGA, HDDL or MYRIAD is acceptable. The sample
                        will look for a suitable plugin for device specified.
                        Default value is CPU
  --labels LABELS       Optional. Labels mapping file
  -nt NUMBER_TOP, --number_top NUMBER_TOP
                        Optional. Number of top results
  -ni NUMBER_ITER, --number_iter NUMBER_ITER
                        Optional. Number of inference iterations
  -pc, --perf_counts    Optional. Report performance counters

```

Running the application with an empty list of options yields the usage message given above and an error message.

To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](https://github.com/opencv/open_model_zoo/tree/2018/model_downloader) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).

> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).


You can do inference on an image using a trained AlexNet network on FPGA with a fallback to CPU using the following command:
```
python3 classification_sample_async.py -i <path_to_image>/cat.bmp -m <path_to_model>/alexnet_fp32.xml -nt 5 -d HETERO:FPGA,CPU -nireq 2 -ni 200
```

### Sample Output

By default, the application outputs the top-10 inference results for each infer request.
It also provides the throughput value measured in frames per second.
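
As a rough illustration, the throughput figure is simply the number of processed frames divided by the wall-clock time of the whole inference loop; a sketch with assumed values (not the sample's exact code):
```
from time import time

number_iter, batch = 200, 1  # assumed values, matching the -ni example above

t0 = time()
for _ in range(number_iter):
    pass  # placeholder for one round-robin start/wait iteration
total_seconds = time() - t0
fps = number_iter * batch / total_seconds  # frames per second
print("Throughput: {:.2f} fps".format(fps))
```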

## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
@ -26,22 +26,26 @@ from openvino.inference_engine import IENetwork, IEPlugin


def build_argparser():
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
    parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
                        type=str, nargs="+")
    parser.add_argument("-l", "--cpu_extension",
                        help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
                             "impl.", type=str, default=None)
    parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
    parser.add_argument("-d", "--device",
                        help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                             "will look for a suitable plugin for device specified (CPU by default)", default="CPU",
                        type=str)
    parser.add_argument("--labels", help="Labels mapping file", default=None, type=str)
    parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
    parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
    parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
                      required=True, type=str)
    args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to image files",
                      required=True, type=str, nargs="+")
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. Absolute path to a shared library with the"
                           " kernels implementations.", type=str, default=None)
    args.add_argument("-pp", "--plugin_dir", help="Optional. Path to a plugin folder", type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is "
                           "acceptable. The sample will look for a suitable plugin for device specified. Default value is CPU",
                      default="CPU", type=str)
    args.add_argument("--labels", help="Optional. Labels mapping file", default=None, type=str)
    args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
    args.add_argument("-ni", "--number_iter", help="Optional. Number of inference iterations", default=1, type=int)
    args.add_argument("-pc", "--perf_counts", help="Optional. Report performance counters",
                      default=False, action="store_true")

    return parser

@ -92,7 +96,6 @@ def main():
    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
@ -119,18 +122,25 @@ def main():
            labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
    else:
        labels_map = None
    classid_str = "classid"
    probability_str = "probability"
    for i, probs in enumerate(res):
        probs = np.squeeze(probs)
        top_ind = np.argsort(probs)[-args.number_top:][::-1]
        print("Image {}\n".format(args.input[i]))
        print(classid_str, probability_str)
        print("{} {}".format('-' * len(classid_str), '-' * len(probability_str)))
        for id in top_ind:
            det_label = labels_map[id] if labels_map else "#{}".format(id)
            print("{:.7f} {}".format(probs[id], det_label))
            det_label = labels_map[id] if labels_map else "{}".format(id)
            label_length = len(det_label)
            space_num_before = (7 - label_length) // 2
            space_num_after = 7 - (space_num_before + label_length) + 2
            space_num_before_prob = (11 - len(str(probs[id]))) // 2
            print("{}{}{}{}{:.7f}".format(' ' * space_num_before, det_label,
                                          ' ' * space_num_after, ' ' * space_num_before_prob,
                                          probs[id]))
        print("\n")

    del exec_net
    del plugin


if __name__ == '__main__':
    sys.exit(main() or 0)
Binary file not shown.
@ -1,49 +0,0 @@
# This README demonstrates use of all GreenGrass samples

# GreenGrass Classification Sample

This topic demonstrates how to build and run the GreenGrass Image Classification sample application, which does inference using image classification networks like AlexNet and GoogLeNet on on Intel® Processors, Intel® HD Graphics and Intel® FPGA.

## Running

1. Modify the "accelerator" parameter inside the sample to deploy the sample on any accelerator option of your choice(CPU/GPU/FPGA)
   For CPU, please specify "CPU"
   For GPU, please specify "GPU"
   For FPGA, please specify "HETERO:FPGA,CPU"
2. Enable the option(s) on how output is displayed/consumed
3. Now follow the instructions listed in the Greengrass-FaaS-User-Guide.pdf to create the lambda and deploy on edge device using Greengrass

### Outputs

The application publishes top-10 results on AWS IoT Cloud every second by default. For other output consumption options, please refer to Greengrass-FaaS-User-Guide.pdf

### How it works

Upon deployment,the sample application loads a network and an image to the Inference Engine plugin. When inference is done, the application publishes results to AWS IoT Cloud

=====================================================================================================

# GreenGrass Object Detection Sample SSD

This topic demonstrates how to run the GreenGrass Object Detection SSD sample application, which does inference using object detection networks like Squeezenet-SSD on Intel® Processors, Intel® HD Graphics and Intel® FPGA.

## Running

1. Modify the "accelerator" parameter inside the sample to deploy the sample on any accelerator option of your choice(CPU/GPU/FPGA)
   For CPU, please specify "CPU"
   For GPU, please specify "GPU"
   For FPGA, please specify "HETERO:FPGA,CPU"
2. Enable the option(s) on how output is displayed/consumed
3. Set the variable is_async_mode to 'True' for Asynchronous execution and 'False' for Synchronous execution
3. Now follow the instructions listed in the Greengrass-FaaS-User-Guide.pdf to create the lambda and deploy on edge device using Greengrass

### Outputs

The application publishes detection outputs such as class label, class confidence, and bounding box coordinates on AWS IoT Cloud every second. For other output consumption options, please refer to Greengrass-FaaS-User-Guide.pdf

### How it works

Upon deployment,the sample application loads a network and an image to the Inference Engine plugin. When inference is done, the application publishes results to AWS IoT Cloud
@ -1,180 +0,0 @@
"""
BSD 3-clause "New" or "Revised" license

Copyright (C) 2018 Intel Corporation.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import sys
import os
import cv2
import numpy as np
import greengrasssdk
import boto3
import timeit
import datetime
import json
from collections import OrderedDict

from openvino.inference_engine import IENetwork, IEPlugin

# Specify the delta in seconds between each report
reporting_interval = 1.0

# Parameters for IoT Cloud
enable_iot_cloud_output = True

# Parameters for Kinesis
enable_kinesis_output = False
kinesis_stream_name = ""
kinesis_partition_key = ""
kinesis_region = ""

# Parameters for S3
enable_s3_jpeg_output = False
s3_bucket_name = ""

# Parameters for jpeg output on local disk
enable_local_jpeg_output = False

# Create a Greengrass Core SDK client for publishing messages to AWS Cloud
client = greengrasssdk.client("iot-data")

# Create an S3 client for uploading files to S3
if enable_s3_jpeg_output:
    s3_client = boto3.client("s3")

# Create a Kinesis client for putting records to streams
if enable_kinesis_output:
    kinesis_client = boto3.client("kinesis", "us-west-2")

# Read environment variables set by Lambda function configuration
PARAM_MODEL_XML = os.environ.get("PARAM_MODEL_XML")
PARAM_INPUT_SOURCE = os.environ.get("PARAM_INPUT_SOURCE")
PARAM_DEVICE = os.environ.get("PARAM_DEVICE")
PARAM_OUTPUT_DIRECTORY = os.environ.get("PARAM_OUTPUT_DIRECTORY")
PARAM_CPU_EXTENSION_PATH = os.environ.get("PARAM_CPU_EXTENSION_PATH")
PARAM_LABELMAP_FILE = os.environ.get("PARAM_LABELMAP_FILE")
PARAM_TOPIC_NAME = os.environ.get("PARAM_TOPIC_NAME", "intel/faas/classification")
PARAM_NUM_TOP_RESULTS = int(os.environ.get("PARAM_NUM_TOP_RESULTS", "10"))


def report(res_json, frame):
    now = datetime.datetime.now()
    date_prefix = str(now).replace(" ", "_")
    if enable_iot_cloud_output:
        data = json.dumps(res_json)
        client.publish(topic=PARAM_TOPIC_NAME, payload=data)
    if enable_kinesis_output:
        kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json),
                                  PartitionKey=kinesis_partition_key)
    if enable_s3_jpeg_output:
        temp_image = os.path.join(PARAM_OUTPUT_DIRECTORY, "inference_result.jpeg")
        cv2.imwrite(temp_image, frame)
        with open(temp_image) as file:
            image_contents = file.read()
        s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
    if enable_local_jpeg_output:
        cv2.imwrite(os.path.join(PARAM_OUTPUT_DIRECTORY, date_prefix + ".jpeg"), frame)


def greengrass_classification_sample_run():
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
    # Read IR
    net = IENetwork(model=PARAM_MODEL_XML, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob]
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    del net
    client.publish(topic=PARAM_TOPIC_NAME, payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    inf_seconds = 0.0
    frame_count = 0
    res_json = []
    labeldata = None
    if PARAM_LABELMAP_FILE is not None:
        with open(PARAM_LABELMAP_FILE) as labelmap_file:
            labeldata = json.load(labelmap_file)

    while (cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        frameid = cap.get(cv2.CAP_PROP_POS_FRAMES)
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Start synchronous inference
        inf_start_time = timeit.default_timer()
        res = exec_net.infer(inputs={input_blob: in_frame})
        inf_seconds += timeit.default_timer() - inf_start_time
        top_ind = np.argsort(res[out_blob], axis=1)[0, -PARAM_NUM_TOP_RESULTS:][::-1]
        # Parse detection results of the current request
        res_json = OrderedDict()
        res_json["Candidates"] = OrderedDict()
        frame_timestamp = datetime.datetime.now()

        for i in top_ind:
            classlabel = labeldata[str(i)] if labeldata else str(i)
            res_json["Candidates"][classlabel] = round(res[out_blob][0, i], 2)

        frame_count += 1
        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            res_json["timestamp"] = frame_timestamp.isoformat()
            res_json["frame_id"] = int(frameid)
            res_json["inference_fps"] = frame_count / inf_seconds
            start_time = timeit.default_timer()
            report(res_json, frame)
            frame_count = 0
            inf_seconds = 0.0

    client.publish(topic=PARAM_TOPIC_NAME, payload="End of the input, exiting...")
    del exec_net
    del plugin


greengrass_classification_sample_run()


def function_handler(event, context):
    client.publish(topic=PARAM_TOPIC_NAME, payload='HANDLER_CALLED!')
    return
@ -1,184 +0,0 @@
"""
BSD 3-clause "New" or "Revised" license

Copyright (C) 2018 Intel Corporation.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import sys
import os
import cv2
import numpy as np
import greengrasssdk
import boto3
import timeit
import datetime
import json
from collections import OrderedDict

from openvino.inference_engine import IENetwork, IEPlugin

# Specify the delta in seconds between each report
reporting_interval = 1.0

# Parameters for IoT Cloud
enable_iot_cloud_output = True

# Parameters for Kinesis
enable_kinesis_output = False
kinesis_stream_name = ""
kinesis_partition_key = ""
kinesis_region = ""

# Parameters for S3
enable_s3_jpeg_output = False
s3_bucket_name = "ssd_test"

# Parameters for jpeg output on local disk
enable_local_jpeg_output = False

# Create a Greengrass Core SDK client for publishing messages to AWS Cloud
client = greengrasssdk.client("iot-data")

# Create an S3 client for uploading files to S3
if enable_s3_jpeg_output:
    s3_client = boto3.client("s3")

# Create a Kinesis client for putting records to streams
if enable_kinesis_output:
    kinesis_client = boto3.client("kinesis", "us-west-2")

# Read environment variables set by Lambda function configuration
PARAM_MODEL_XML = os.environ.get("PARAM_MODEL_XML")
PARAM_INPUT_SOURCE = os.environ.get("PARAM_INPUT_SOURCE")
PARAM_DEVICE = os.environ.get("PARAM_DEVICE")
PARAM_OUTPUT_DIRECTORY = os.environ.get("PARAM_OUTPUT_DIRECTORY")
PARAM_CPU_EXTENSION_PATH = os.environ.get("PARAM_CPU_EXTENSION_PATH")
PARAM_LABELMAP_FILE = os.environ.get("PARAM_LABELMAP_FILE")
PARAM_TOPIC_NAME = os.environ.get("PARAM_TOPIC_NAME", "intel/faas/ssd")


def report(res_json, frame):
    now = datetime.datetime.now()
    date_prefix = str(now).replace(" ", "_")
    if enable_iot_cloud_output:
        data = json.dumps(res_json)
        client.publish(topic=PARAM_TOPIC_NAME, payload=data)
    if enable_kinesis_output:
        kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json),
                                  PartitionKey=kinesis_partition_key)
    if enable_s3_jpeg_output:
        temp_image = os.path.join(PARAM_OUTPUT_DIRECTORY, "inference_result.jpeg")
        cv2.imwrite(temp_image, frame)
        with open(temp_image) as file:
            image_contents = file.read()
        s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
    if enable_local_jpeg_output:
        cv2.imwrite(os.path.join(PARAM_OUTPUT_DIRECTORY, date_prefix + ".jpeg"), frame)


def greengrass_object_detection_sample_ssd_run():
    client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
    model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=PARAM_DEVICE, plugin_dirs="")
    if "CPU" in PARAM_DEVICE:
        plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
    # Read IR
    net = IENetwork(model=PARAM_MODEL_XML, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob]
    cap = cv2.VideoCapture(PARAM_INPUT_SOURCE)
    exec_net = plugin.load(network=net)
    del net
    client.publish(topic=PARAM_TOPIC_NAME, payload="Starting inference on %s" % PARAM_INPUT_SOURCE)
    start_time = timeit.default_timer()
    inf_seconds = 0.0
    frame_count = 0
    labeldata = None
    if PARAM_LABELMAP_FILE is not None:
        with open(PARAM_LABELMAP_FILE) as labelmap_file:
            labeldata = json.load(labelmap_file)

    while (cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        frameid = cap.get(cv2.CAP_PROP_POS_FRAMES)
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        # Start synchronous inference
        inf_start_time = timeit.default_timer()
        res = exec_net.infer(inputs={input_blob: in_frame})
        inf_seconds += timeit.default_timer() - inf_start_time
        # Parse detection results of the current request
        res_json = OrderedDict()
        frame_timestamp = datetime.datetime.now()
        object_id = 0
        for obj in res[out_blob][0][0]:
            if obj[2] > 0.5:
                xmin = int(obj[3] * initial_w)
                ymin = int(obj[4] * initial_h)
                xmax = int(obj[5] * initial_w)
                ymax = int(obj[6] * initial_h)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
                obj_id = "Object" + str(object_id)
                classlabel = labeldata[str(int(obj[1]))] if labeldata else ""
                res_json[obj_id] = {"label": int(obj[1]), "class": classlabel, "confidence": round(obj[2], 2), "xmin": round(
                    obj[3], 2), "ymin": round(obj[4], 2), "xmax": round(obj[5], 2), "ymax": round(obj[6], 2)}
                object_id += 1
        frame_count += 1
        # Measure elapsed seconds since the last report
        seconds_elapsed = timeit.default_timer() - start_time
        if seconds_elapsed >= reporting_interval:
            res_json["timestamp"] = frame_timestamp.isoformat()
            res_json["frame_id"] = int(frameid)
            res_json["inference_fps"] = frame_count / inf_seconds
            start_time = timeit.default_timer()
            report(res_json, frame)
            frame_count = 0
            inf_seconds = 0.0

    client.publish(topic=PARAM_TOPIC_NAME, payload="End of the input, exiting...")
    del exec_net
    del plugin


greengrass_object_detection_sample_ssd_run()


def function_handler(event, context):
    client.publish(topic=PARAM_TOPIC_NAME, payload='HANDLER_CALLED!')
    return
@ -1,463 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This notebook demonstrates the worklflow of a simple image classification task.\n",
    "We will go through all the pipeline steps: downloading the model, generating the Intermediate Representation (IR) using the Model Optimizer, running inference in Python, and parsing and interpretating the output results.\n",
    "\n",
    "To demonstrate the scenario, we will use the pre-trained SquezeNet V1.1 Caffe\\* model. SqueezeNet is a pretty accurate and at the same time lightweight network. For more information about the model, please visit <a href=\"https://github.com/DeepScale/SqueezeNet/\">GitHub</a> page and refer to original <a href=\"https://arxiv.org/abs/1602.07360\">SqueezeNet paper</a>.\n",
    "\n",
    "Follow the steps to perform image classification with the SquezeNet V1.1 model:"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**1. Download the model files:** "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%bash\n",
    "echo \"Downloading deploy.protxt ...\"\n",
    "if [ -f deploy.prototxt ]; then \n",
    "    echo \"deploy.protxt file already exists. Downloading skipped\"\n",
    "else\n",
    "    wget https://raw.githubusercontent.com/DeepScale/SqueezeNet/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/deploy.prototxt -q\n",
    "    echo \"Finished!\"\n",
    "fi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%bash\n",
    "! echo \"Downloading squeezenet_v1.1.caffemodel ...\"\n",
    "if [ -f squeezenet_v1.1.caffemodel ]; then\n",
    "    echo \"squeezenet_v1.1.caffemodel file already exists. Download skipped\"\n",
    "else\n",
    "    wget https://github.com/DeepScale/SqueezeNet/raw/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel -q\n",
    "    echo \"Finished!\"\n",
    "fi"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Run the following command to see the model files:**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!ls -la"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* `deploy.prototxt` contains the network toplogy description in text format. \n",
    "* `squeezenet_v1.1.caffemodel` contains weights for all network layers"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**2. Optimize and convert the model from intial Caffe representation to the IR representation, which is required for scoring the model using Inference Engine. To convert and optimize the model, use the Model Optimizer command line tool.**\n",
    "\n",
    "To locate Model Optimizer scripts, specify the path to the Model Optimizer root directory in the `MO_ROOT` variable in the cell bellow and then run it (If you use the installed OpenVINO™ package, you can find the Model Optimizer in `<INSTALLATION_ROOT_DIR>/deployment_tools/model_optimizer`)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%bash\n",
    "MO_ROOT=/localdisk/repos/model-optimizer-tensorflow/\n",
    "echo $MO_ROOT\n",
    "python3 $MO_ROOT/mo.py --input_model squeezenet_v1.1.caffemodel --input_proto deploy.prototxt"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**3. Now, you have the SqueezeNet model converted to the IR, and you can infer it.**\n",
    "\n",
    "a. First, import required modules:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from openvino.inference_engine import IENetwork, IEPlugin\n",
    "import numpy as np\n",
    "import cv2\n",
    "import logging as log\n",
    "from time import time\n",
    "import sys\n",
    "import glob\n",
    "import os\n",
    "from matplotlib import pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "b. Initialize required constants:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure logging format\n",
    "log.basicConfig(format=\"[ %(levelname)s ] %(message)s\", level=log.INFO, stream=sys.stdout)\n",
    "\n",
    "# Path to IR model files\n",
    "MODEL_XML = \"./squeezenet_v1.1.xml\"\n",
    "MODEL_BIN = \"./squeezenet_v1.1.bin\"\n",
    "\n",
    "# Target device to run inference\n",
    "TARGET_DEVICE = \"CPU\"\n",
    "\n",
    "# Folder with input images for the model\n",
    "IMAGES_FOLDER = \"./images\"\n",
    "\n",
    "# File containing information about classes names \n",
    "LABELS_FILE = \"./image_net_synset.txt\"\n",
    "\n",
    "# Number of top prediction results to parse\n",
    "NTOP = 5\n",
    "\n",
    "# Required batch size - number of images which will be processed in parallel\n",
    "BATCH = 4"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "c. Create a plugin instance for the specified target device \n",
    "d. Read the IR files and create an `IENEtwork` instance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plugin = IEPlugin(TARGET_DEVICE)\n",
    "net = IENetwork(model=MODEL_XML, weights=MODEL_BIN)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "e. Set the network batch size to the constatns specified above. \n",
    "\n",
    "Batch size is an \"amount\" of input data that will be infered in parallel. In this cases it is a number of images, which will be classified in parallel. \n",
    "\n",
    "You can set the network batch size using one of the following options:\n",
    "1. On the IR generation stage, run the Model Optimizer with `-b` command line option. For example, to generate the IR with batch size equal to 4, add `-b 4` to Model Optimizer command line options. By default, it takes the batch size from the original network in framework representation (usually, it is equal to 1, but in this case, the original Caffe model is provided with the batch size equal to 10). \n",
    "2. Use Inference Engine after reading IR. We will use this option.\n",
    "\n",
    "To set the batch size with the Inference Engine:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "log.info(\"Current network batch size is {}, will be changed to {}\".format(net.batch_size, BATCH))\n",
    "net.batch_size = BATCH"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "f. After setting batch size, you can get required information about network input layers.\n",
    "To preprocess input images, you need to know input layer shape.\n",
    "\n",
    "`inputs` property of `IENetwork` returns the dicitonary with input layer names and `InputInfo` objects, which contain information about an input layer including its shape.\n",
    "\n",
    "SqueezeNet is a single-input toplogy, so to get the input layer name and its shape, you can get the first item from the `inputs` dictionary:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "input_layer = next(iter(net.inputs))\n",
    "n,c,h,w = net.inputs[input_layer].shape\n",
    "layout = net.inputs[input_layer].layout\n",
    "log.info(\"Network input layer {} has shape {} and layout {}\".format(input_layer, (n,c,h,w), layout))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "So what do the shape and layout mean? \n",
    "Layout will helps to interprete the shape dimsesnions meaning. \n",
    "\n",
    "`NCHW` input layer layout means:\n",
    "* the fisrt dimension of an input data is a batch of **N** images processed in parallel \n",
    "* the second dimension is a numnber of **C**hannels expected in the input images\n",
    "* the third and the forth are a spatial dimensions - **H**eight and **W**idth of an input image\n",
    "\n",
    "Our shapes means that the network expects four 3-channel images running in parallel with size 227x227."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "g. Read and preprocess input images.\n",
    "\n",
    "For it, go to `IMAGES_FOLDER`, find all `.bmp` files, and take four images for inference:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "search_pattern = os.path.join(IMAGES_FOLDER, \"*.bmp\")\n",
    "images = glob.glob(search_pattern)[:BATCH]\n",
    "log.info(\"Input images:\\n {}\".format(\"\\n\".join(images)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now you can read and preprocess the image files and create an array with input blob data.\n",
    "\n",
    "For preprocessing, you must do the following:\n",
    "1. Resize the images to fit the HxW input dimenstions.\n",
    "2. Transpose the HWC layout.\n",
    "\n",
    "Transposing is tricky and not really obvious.\n",
    "As you alredy saw above, the network has the `NCHW` layout, so each input image should be in `CHW` format. But by deafult, OpenCV\\* reads images in the `HWC` format. That is why you have to swap the axes using the `numpy.transpose()` function:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "input_data = np.ndarray(shape=(n, c, h, w))\n",
    "orig_images = [] # Will be used to show image in notebook\n",
    "for i, img in enumerate(images):\n",
    "    image = cv2.imread(img)\n",
    "    orig_images.append(image)\n",
    "    if image.shape[:-1] != (h, w):\n",
    "        log.warning(\"Image {} is resized from {} to {}\".format(img, image.shape[:-1], (h, w)))\n",
    "        image = cv2.resize(image, (w, h))\n",
    "    image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n",
    "    input_data[i] = image"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "i. Infer the model model to classify input images:\n",
    "\n",
    "1. Load the `IENetwork` object to the plugin to create `ExectuableNEtwork` object. \n",
    "2. Start inference using the `infer()` function specifying dictionary with input layer name and prepared data as an argument for the function. \n",
    "3. Measure inference time in miliseconds and calculate throughput metric in frames-per-second (FPS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "exec_net = plugin.load(net)\n",
    "t0 = time()\n",
    "res_map = exec_net.infer({input_layer: input_data})\n",
    "inf_time = (time() - t0) * 1000 \n",
    "fps = BATCH * inf_time \n",
    "log.info(\"Inference time: {} ms.\".format(inf_time))\n",
    "log.info(\"Throughput: {} fps.\".format(fps))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**4. After the inference, you need to parse and interpretate the inference results.**\n",
    "\n",
    "First, you need to see the shape of the network output layer. It can be done in similar way as for the inputs, but here you need to call `outputs` property of `IENetwork` object:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "output_layer = next(iter(net.outputs))\n",
    "n,c,h,w = net.outputs[output_layer].shape\n",
    "layout = net.outputs[output_layer].layout\n",
    "log.info(\"Network output layer {} has shape {} and layout {}\".format(output_layer, (n,c,h,w), layout))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "It is not a common case for classification netowrks to have output layer with *NCHW* layout. Usually, it is just *NC*. However, in this case, the last two dimensions are just a feature of the network and do not have much sense. Ignore them as you will remove them on the final parsing stage. \n",
    "\n",
    "What are the first and second dimensions of the output layer? \n",
    "* The first dimension is a batch. We precoessed four images, and the prediction result for a particular image is stored in the first dimension of the output array. For example, prediction results for the third image is `res[2]` (since numeration starts from 0).\n",
    "* The second dimension is an array with normalized probabilities (from 0 to 1) for each class. This network is trained using the <a href=\"http://image-net.org/index\">ImageNet</a> dataset with 1000 classes. Each `n`-th value in the output data for a certain image represent the probability of the image belonging to the `n`-th class. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To parse the output results:\n",
    "\n",
    "a. Read the `LABELS_FILE`, which maps the class ID to human-readable class names:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open(LABELS_FILE, 'r') as f:\n",
    "    labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "b. Parse the output array with prediction results. The parsing algorith is the following:\n",
    "0. Squeeze the last two \"extra\" dimensions of the output data.\n",
    "1. Iterate over all batches.\n",
    "2. Sort the probabilities vector descendingly to get `NTOP` classes with the highest probabilities (by default, the `numpy.argsort` sorts the data in the ascending order, but using the array slicing `[::-1]`, you can reverse the data order).\n",
    "3. Map the `NTOP` probabilities to the corresponding labeles in `labeles_map`.\n",
    "\n",
    "For the vizualization, you also need to store top-1 class and probability."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "top1_res = [] # will be used for the visualization\n",
    "res = np.squeeze(res_map[output_layer])\n",
    "log.info(\"Top {} results: \".format(NTOP))\n",
    "for i, probs in enumerate(res):\n",
    "    top_ind = np.argsort(probs)[-NTOP:][::-1]\n",
    "    print(\"Image {}\".format(images[i]))\n",
    "    top1_ind = top_ind[0]\n",
    "    top1_res.append((labels_map[top1_ind], probs[top1_ind]))\n",
    "    for id in top_ind:\n",
    "        print(\"label: {} probability: {:.2f}% \".format(labels_map[id], probs[id] * 100))\n",
    "    print(\"\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The code above prints the results as plain text. \n",
    "You can also use OpenCV\\* to visualize the results using the `orig_images` and `top1_res` variables, which you created during images reading and results parsing:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.clf()\n",
    "for i, img in enumerate(orig_images):\n",
    "    label_str = \"{}\".format(top1_res[i][0].split(',')[0])\n",
    "    prob_str = \"{:.2f}%\".format(top1_res[i][1])\n",
    "    cv2.putText(img, label_str, (5, 15), cv2.FONT_HERSHEY_COMPLEX, 0.6, (220,100,10), 1)\n",
    "    cv2.putText(img, prob_str, (5, 35), cv2.FONT_HERSHEY_COMPLEX, 0.6, (220,100,10), 1)\n",
    "    plt.figure()\n",
    "    plt.axis(\"off\")\n",
    "    \n",
    "    # We have to convert colors, because matplotlib expects an image in RGB color format \n",
    "    # but by default, the OpenCV read images in BRG format\n",
    "    im_to_show = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
    "    plt.imshow(im_to_show)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
File diff suppressed because it is too large
@ -0,0 +1,74 @@
# Neural Style Transfer Python* Sample

This topic demonstrates how to run the Neural Style Transfer sample application, which performs
inference of style transfer models.

> **NOTE**: The OpenVINO™ toolkit does not include a pre-trained model to run the Neural Style Transfer sample. A public model from the [Zhaw's Neural Style Transfer repository](https://github.com/zhaw/neural_style) can be used. Read the [Converting a Style Transfer Model from MXNet*](./docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md) topic from the [Model Optimizer Developer Guide](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) to learn about how to get the trained model and how to convert it to the Inference Engine format (\*.xml + \*.bin).

## How It Works

> **NOTE**: By default, Inference Engine samples and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with the `--reverse_input_channels` argument specified. For more information about the argument, refer to the **When to Specify Input Shapes** section of [Converting a Model Using General Conversion Parameters](./docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).

## Running

Run the application with the <code>-h</code> option to see the usage message:
```
python3 style_transfer_sample.py --help
```
The command yields the following usage message:
```
usage: style_transfer_sample.py [-h] -m MODEL -i INPUT [INPUT ...]
                                [-l CPU_EXTENSION] [-pp PLUGIN_DIR]
                                [-d DEVICE] [-nt NUMBER_TOP] [-ni NUMBER_ITER]
                                [--mean_val_r MEAN_VAL_R]
                                [--mean_val_g MEAN_VAL_G]
                                [--mean_val_b MEAN_VAL_B] [-pc]

Options:
  -h, --help            Show this help message and exit.
  -m MODEL, --model MODEL
                        Path to an .xml file with a trained model.
  -i INPUT [INPUT ...], --input INPUT [INPUT ...]
                        Path to a folder with images or path to image files
  -l CPU_EXTENSION, --cpu_extension CPU_EXTENSION
                        Optional. Required for CPU custom layers. Absolute
                        path to a shared library with the kernels
                        implementations
  -pp PLUGIN_DIR, --plugin_dir PLUGIN_DIR
                        Path to a plugin folder
  -d DEVICE, --device DEVICE
                        Specify the target device to infer on; CPU, GPU, FPGA,
                        HDDL or MYRIAD is acceptable. Sample will look for a
                        suitable plugin for device specified. Default value is CPU
  -nt NUMBER_TOP, --number_top NUMBER_TOP
                        Number of top results
  -ni NUMBER_ITER, --number_iter NUMBER_ITER
                        Number of inference iterations
  --mean_val_r MEAN_VAL_R, -mean_val_r MEAN_VAL_R
                        Mean value of red channel for mean value subtraction
                        in postprocessing
  --mean_val_g MEAN_VAL_G, -mean_val_g MEAN_VAL_G
                        Mean value of green channel for mean value subtraction
                        in postprocessing
  --mean_val_b MEAN_VAL_B, -mean_val_b MEAN_VAL_B
                        Mean value of blue channel for mean value subtraction
                        in postprocessing
  -pc, --perf_counts    Report performance counters

```

Running the application with an empty list of options yields the usage message given above and an error message.

To perform inference on an image using a trained NST model on Intel® CPUs, use the following command:
```
python3 style_transfer_sample.py -i <path_to_image>/cat.bmp -m <path_to_model>/1_decoder_FP32.xml
```

### Demo Output

The application outputs an image (`out1.bmp`) or a sequence of images (`out1.bmp`, ..., `out<N>.bmp`) which are redrawn in the style of the style transfer model used for the sample.
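
Before each output image is written, the specified mean values are added back to the network output. A minimal sketch of that postprocessing step, with placeholder names and an assumed BGR, NCHW float output (not the sample's exact code):
```
import numpy as np

res = np.zeros((1, 3, 224, 224), dtype=np.float32)  # placeholder output blob (N, C, H, W)
mean_val_b, mean_val_g, mean_val_r = 104.0, 117.0, 123.0  # illustrative means

data = res[0]
data[0] += mean_val_b  # add the per-channel means back (BGR order assumed)
data[1] += mean_val_g
data[2] += mean_val_r
data = np.clip(data, 0, 255)
image = data.transpose((1, 2, 0)).astype(np.uint8)  # CHW -> HWC, ready for cv2.imwrite
```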

## See Also
* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
@ -1,6 +1,6 @@
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Copyright (C) 2018-2019 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
@ -26,30 +26,33 @@ from openvino.inference_engine import IENetwork, IEPlugin


def build_argparser():
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
    parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
                        type=str, nargs="+")
    parser.add_argument("-l", "--cpu_extension",
                        help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
                             "impl.", type=str, default=None)
    parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
    parser.add_argument("-d", "--device",
                        help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                             "will look for a suitable plugin for device specified (CPU by default)", default="CPU",
                        type=str)
    parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
    parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
    parser.add_argument("--mean_val_r", "-mean_val_r",
                        help="Mean value of red chanel for mean value subtraction in postprocessing ", default=0,
                        type=float)
    parser.add_argument("--mean_val_g", "-mean_val_g",
                        help="Mean value of green chanel for mean value subtraction in postprocessing ", default=0,
                        type=float)
    parser.add_argument("--mean_val_b", "-mean_val_b",
                        help="Mean value of blue chanel for mean value subtraction in postprocessing ", default=0,
                        type=float)
    parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
    args.add_argument("-i", "--input", help="Path to a folder with images or path to image files", required=True,
                      type=str, nargs="+")
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. Absolute path to a shared library with the "
                           "kernels implementations", type=str, default=None)
    args.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. Sample "
                           "will look for a suitable plugin for device specified. Default value is CPU", default="CPU",
                      type=str)
    args.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
    args.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
    args.add_argument("--mean_val_r", "-mean_val_r",
                      help="Mean value of red channel for mean value subtraction in postprocessing", default=0,
                      type=float)
    args.add_argument("--mean_val_g", "-mean_val_g",
                      help="Mean value of green channel for mean value subtraction in postprocessing", default=0,
                      type=float)
    args.add_argument("--mean_val_b", "-mean_val_b",
                      help="Mean value of blue channel for mean value subtraction in postprocessing", default=0,
                      type=float)
    args.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")

    return parser

@ -101,7 +104,6 @@ def main():
    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    del net

    # Start sync inference
    log.info("Starting inference ({} iterations)".format(args.number_iter))
@ -133,8 +135,6 @@ def main():
        out_img = os.path.join(os.path.dirname(__file__), "out_{}.bmp".format(batch))
        cv2.imwrite(out_img, data)
        log.info("Result image was saved to {}".format(out_img))
    del exec_net
    del plugin


if __name__ == '__main__':
@ -1,21 +0,0 @@
background
aeroplane
bicycle
bird
boat
bottle
bus
car
cat
chair
cow
diningtable
dog
horse
motorbike
person
pottedplant
sheep
sofa
train
tvmonitor
@ -167,12 +167,12 @@ except ImportError:


c_sources = [
    PACKAGE / 'ie_driver.cpp',
    PACKAGE / 'ie_driver.hpp',
    PACKAGE / 'ie_api_impl.cpp',
    PACKAGE / 'ie_api_impl.hpp',

    PACKAGE / 'c_ie_driver.pxd',
    PACKAGE / 'ie_driver.pyx',
    PACKAGE / 'ie_driver.pxd',
    PACKAGE / 'ie_api_impl_defs.pxd',
    PACKAGE / 'ie_api.pyx',
    PACKAGE / 'ie_api.pxd',
]

extensions = [
@ -5,24 +5,20 @@ set (TARGET_NAME "ie_api")
set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine)
set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})

set_source_files_properties(
    ie_api_impl_defs.pxd
    ie_api_impl.hpp
    ie_api_impl.cpp
    ie_api.pyx
    ie_api.pxd
file(GLOB SOURCE
    ${CMAKE_CURRENT_SOURCE_DIR}/*.pyx
    ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
)

    PROPERTIES CYTHON_IS_CXX TRUE
set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX TRUE
)

cython_add_module (
    ${TARGET_NAME}
## Compatibility with python 2.7 which has deprecated "register" specifier
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    add_definitions("-Wno-register")
endif()

    ie_api_impl_defs.pxd
    ie_api_impl.hpp
    ie_api_impl.cpp
    ie_api.pyx
)
cython_add_module (${TARGET_NAME} ${SOURCE})

set_target_properties (${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
target_link_libraries (${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
@ -1,37 +0,0 @@
# If the pyx file is a C++ file, we should specify that here.
set(CMAKE_INCLUDE_CURRENT_DIR ON)

set(TARGET_NAME "dnn_builder")

set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine/${TARGET_NAME})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})

set_source_files_properties(
    dnn_builder_defs.pxd
    dnn_builder_impl.hpp
    dnn_builder_impl.cpp
    dnn_builder.pyx
    dnn_builder.pxd

    PROPERTIES CYTHON_IS_CXX TRUE
)

cython_add_module(
    ${TARGET_NAME}

    dnn_builder_impl_defs.pxd
    dnn_builder_impl.hpp
    dnn_builder_impl.cpp
    dnn_builder.pyx
)

set_target_properties (${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
add_dependencies (${TARGET_NAME} ie_api)
target_include_directories (${TARGET_NAME} PRIVATE ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/inference_engine )
target_link_libraries (${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})

# perform copy
ADD_CUSTOM_COMMAND (TARGET ${TARGET_NAME}
    POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/inference_engine/${TARGET_NAME}/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
)
@ -1,2 +0,0 @@
from .dnn_builder import *
__all__ = ["NetworkBuilder", "LayerBuilder"]
@ -1,26 +0,0 @@
from . cimport dnn_builder_impl_defs as C
from libcpp.memory cimport shared_ptr

cdef class NetworkBuilder:
    cdef C.NetworkBuilder impl

cdef class INetwork:
    cdef C.INetwork impl

cdef class ILayer:
    cdef C.ILayer impl

cdef class Port:
    cdef C.Port impl

cdef class PortInfo:
    cdef C.PortInfo impl

cdef class Connection:
    cdef C.Connection impl

cdef class LayerBuilder:
    cdef C.LayerBuilder impl

cdef class LayerConstantData(dict):
    cdef shared_ptr[C.LayerBuilder] impl
@ -1,423 +0,0 @@
# #distutils: language=c++
#from cython.operator cimport dereference as deref
from libcpp.vector cimport vector
from libcpp.map cimport map
from libcpp.string cimport string
from ..ie_api cimport IENetwork, BlobBuffer
from . cimport dnn_builder_impl_defs as C
from .dnn_builder_impl_defs cimport Blob
import numpy as np


np_precision_map = {
    "float32": "FP32",
    "float16": "FP16",
    "int32": "I32",
    "int16": "I16",
    "uint16": "U16",
    "int8": "I8",
    "uint8": "U8",
}
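The map above is what ties numpy dtypes to Inference Engine precision strings. A minimal plain-Python sketch of the lookup (the dict is copied from the module; everything else is illustrative):

import numpy as np

np_precision_map = {"float32": "FP32", "float16": "FP16", "int32": "I32",
                    "int16": "I16", "uint16": "U16", "int8": "I8", "uint8": "U8"}

weights = np.zeros((16, 3, 3, 3), dtype=np.float32)
ie_precision = np_precision_map.get(str(weights.dtype))
if ie_precision is None:
    raise BufferError("Unsupported precision {}".format(weights.dtype))
# ie_precision == "FP32" is what allocateBlob() receives on the C++ side.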
cdef class NetworkBuilder:
    def __cinit__(self, name=None, IENetwork ie_net=None):
        if name is not None and ie_net is not None:
            raise AttributeError("Both name and ie_net arguments are defined")
        elif name is not None:
            self.impl = C.NetworkBuilder(name.encode())
        elif ie_net is not None:
            self.impl = C.NetworkBuilder().from_ie_network(ie_net.impl)

    def build(self):
        cdef INetwork i_net = INetwork()
        i_net.impl = self.impl.build()
        return i_net

    def get_layer(self, id: int):
        cdef LayerBuilder py_layer = LayerBuilder()
        py_layer.impl = self.impl.getLayer(id)
        return py_layer

    @property
    def layers(self):
        cdef vector[C.LayerBuilder] c_layers = self.impl.getLayers()
        cdef LayerBuilder py_layer
        py_layers = {}
        for l in c_layers:
            py_layer = LayerBuilder()
            py_layer.impl = l
            py_layers[l.getName().decode()] = py_layer
        return py_layers

    def remove_layer(self, LayerBuilder layer):
        self.impl.removeLayer(layer.impl)

    def get_layer_connection(self, LayerBuilder layer):
        cdef vector[C.Connection] c_connections = self.impl.getLayerConnections(layer.impl)
        cdef Connection connection
        connections = []
        for con in c_connections:
            connection = Connection()
            connection.impl = con
            connections.append(connection)
        return connections

    def disconnect(self, Connection connection):
        self.impl.disconnect(connection.impl)

    def connect(self, PortInfo input, PortInfo output):
        self.impl.connect(input.impl, output.impl)

    def add_layer(self, LayerBuilder layer, input_ports: list = None):
        cdef vector[C.PortInfo] c_ports
        cdef PortInfo c_port
        if not input_ports:
            return self.impl.addLayer(layer.impl)
        else:
            for p in input_ports:
                c_port = PortInfo(p.layer_id, p.port_id)
                c_ports.push_back(c_port.impl)
            return self.impl.addAndConnectLayer(c_ports, layer.impl)

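Taken together, NetworkBuilder supported programmatic graph assembly from Python before its removal in this commit. A hedged usage sketch (method names come from the class above; the layer types are assumptions):

# Hypothetical driver for the removed dnn_builder API.
builder = NetworkBuilder("toy_net")
data_id = builder.add_layer(LayerBuilder("Input", "data"))
relu_id = builder.add_layer(LayerBuilder("ReLU", "relu1"),
                            input_ports=[PortInfo(data_id, 0)])
i_net = builder.build()            # immutable INetwork view
ie_net = i_net.to_ie_network()     # back to a loadable IENetwork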
cdef class INetwork:
    def __iter__(self):
        cdef ILayer layer
        layers = []
        cdef vector[C.ILayer] c_layers = self.impl.layers
        for l in c_layers:
            layer = ILayer()
            layer.impl = l
            layers.append(layer)
        return iter(layers)

    @property
    def layers(self):
        cdef ILayer layer
        layers = {}
        cdef vector[C.ILayer] c_layers = self.impl.layers
        for l in c_layers:
            layer = ILayer()
            layer.impl = l
            layers[l.name.decode()] = layer
        return layers

    @property
    def inputs(self):
        cdef ILayer layer
        layers = {}
        cdef vector[C.ILayer] c_layers = self.impl.inputs
        for l in c_layers:
            layer = ILayer()
            layer.impl = l
            layers[l.name.decode()] = layer
        return layers

    @property
    def outputs(self):
        cdef ILayer layer
        layers = {}
        cdef vector[C.ILayer] c_layers = self.impl.outputs
        for l in c_layers:
            layer = ILayer()
            layer.impl = l
            layers[l.name.decode()] = layer
        return layers

    @property
    def name(self):
        return self.impl.name.decode()

    @property
    def size(self):
        return self.impl.size

    def get_layer_connection(self, layer: ILayer):
        cdef Connection connection
        connections = []
        cdef vector[C.Connection] c_connections = self.impl.getLayerConnections(layer.id)
        for con in c_connections:
            connection = Connection()
            connection.impl = con
            connections.append(connection)
        return connections

    def to_ie_network(self):
        cdef IENetwork net = IENetwork()
        net.impl = self.impl.to_ie_network()
        return net

cdef class ILayer:
    @property
    def name(self):
        return self.impl.name.decode()

    @property
    def id(self):
        return self.impl.id

    @property
    def type(self):
        return self.impl.type.decode()

    @property
    def params(self):
        return {k.decode(): v.decode() for k, v in self.impl.parameters}

    @property
    def input_ports(self):
        cdef Port port
        cdef vector[C.Port] c_ports = self.impl.in_ports
        ports = []
        for p in c_ports:
            port = Port()
            port.impl = p
            ports.append(port)
        return ports

    @property
    def output_ports(self):
        cdef Port port
        cdef vector[C.Port] c_ports = self.impl.out_ports
        ports = []
        for p in c_ports:
            port = Port()
            port.impl = p
            ports.append(port)
        return ports

    @property
    def constant_data(self):
        cdef map[string, Blob.Ptr] c_constant_data
        c_constant_data = self.impl.constant_data
        constant_data = {}
        cdef BlobBuffer weights_buffer
        for weights in c_constant_data:
            weights_buffer = BlobBuffer()
            weights_buffer.reset(weights.second)
            constant_data[weights.first.decode()] = weights_buffer.to_numpy()
        return constant_data


cdef class Port:
    def __cinit__(self, shape: list = []):
        cdef vector[size_t] c_shape
        for d in shape:
            c_shape.push_back(d)
        self.impl = C.Port(c_shape)

    @property
    def shape(self):
        return self.impl.shape


cdef class PortInfo:
    def __cinit__(self, layer_id: int = -1, port_id: int = -1):
        if layer_id != -1 and port_id != -1:
            self.impl = C.PortInfo(layer_id, port_id)
        else:
            self.impl = C.PortInfo()

    @property
    def layer_id(self):
        return self.impl.layer_id

    @property
    def port_id(self):
        return self.impl.port_id

    def __eq__(self, other):
        return self.layer_id == other.layer_id and self.port_id == other.port_id

    def __ne__(self, other):
        # Negate equality; the original "and" of per-field inequalities was a bug.
        return not self.__eq__(other)

cdef class Connection:
    def __cinit__(self, PortInfo input = None, PortInfo output = None):
        if input and output:
            self.impl = C.Connection(input.impl, output.impl)
        else:
            self.impl = C.Connection()

    @property
    def _from(self):
        cdef PortInfo port_info = PortInfo()
        port_info.impl = self.impl._from
        return port_info

    @property
    def to(self):
        cdef PortInfo port_info = PortInfo()
        port_info.impl = self.impl.to
        return port_info

    def __eq__(self, other):
        return self._from == other._from and self.to == other.to

    def __ne__(self, other):
        # Negate equality; the original "and" of per-field inequalities was a bug.
        return not self.__eq__(other)


def check_constant_data(data):
    for k, v in data.items():
        if isinstance(v, list):
            if not all([isinstance(x, type(v[0])) for x in v]):
                raise TypeError("Elements of list for key {} have different data types! "
                                "Please specify list of 'int' or 'float' values.".format(k))
            if isinstance(v[0], float):
                dtype = np.float32
            elif isinstance(v[0], int):
                dtype = np.int32
            else:
                raise TypeError("Unsupported precision of the data for key {}! Given {} but 'float' or 'int' "
                                "precision expected".format(k, type(v[0])))
            data[k] = np.asanyarray(v, dtype=dtype)
        elif isinstance(v, np.ndarray):
            pass
        else:
            raise TypeError("Unsupported data type for key '{}'. {} given but 'list' or 'numpy.ndarray' "
                            "expected".format(k, type(v)))
    return data


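A short sketch of what check_constant_data accepts and returns (behaviour inferred from the helper above):

# Lists become numpy arrays; ndarrays pass through unchanged.
data = check_constant_data({"weights": [0.1, 0.2, 0.3],          # -> np.float32 array
                            "biases": np.zeros(3, np.float32)})  # unchanged
assert data["weights"].dtype == np.float32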
# TODO: Fix LayerBuilder object copying - pass by reference
# cdef class LayerConstantData(dict):
#     def update(self, other=None, **kwargs):
#         if other:
#             other = check_constant_data(other)
#         cdef vector[size_t] dims
#         cdef Blob.Ptr blob_ptr
#         cdef BlobBuffer buffer
#         for k, v in other.items():
#             if k in self.keys() and (v.shape == self[k].shape and v.dtype == self[k].dtype):
#                 print("Reuse blob for {}\n".format(k))
#                 self[k][:] = v
#             else:
#                 for dim in v.shape:
#                     dims.push_back(dim)
#                 ie_precision = np_precision_map.get(str(v.dtype), None)
#                 if not ie_precision:
#                     raise BufferError("Unsupported precision of the data for key {}! Given {} but one of the {} precisions expected".
#                                       format(k, str(v.dtype), ", ".join(np_precision_map.keys())))
#                 blob_ptr = deref(self.impl).allocateBlob(dims, ie_precision.encode())
#                 buffer = BlobBuffer()
#                 buffer.reset(blob_ptr)
#                 np_buffer = buffer.to_numpy()
#                 np_buffer[:] = v
#                 deref(self.impl).addConstantData(k.encode(), blob_ptr)

cdef class LayerBuilder:
    def __cinit__(self, type: str = None, name: str = None):
        if name and type:
            self.impl = C.LayerBuilder(name.encode(), type.encode())
        else:
            self.impl = C.LayerBuilder()

    @property
    def id(self):
        return self.impl.id

    @property
    def name(self):
        return self.impl.getName().decode()

    @name.setter
    def name(self, name: str):
        self.impl.setName(name.encode())

    @property
    def type(self):
        return self.impl.getType().decode()

    @type.setter
    def type(self, type: str):
        self.impl.setType(type.encode())

    @property
    def input_ports(self):
        cdef Port port
        cdef vector[C.Port] c_ports = self.impl.getInputPorts()
        py_ports = []
        for p in c_ports:
            port = Port()
            port.impl = p
            py_ports.append(port)
        return py_ports

    @input_ports.setter
    def input_ports(self, ports: list):
        cdef vector[C.Port] c_ports
        cdef Port c_port
        for p in ports:
            c_port = Port(p.shape)
            c_ports.push_back(c_port.impl)
        self.impl.setInputPorts(c_ports)

    @property
    def output_ports(self):
        cdef Port port
        cdef vector[C.Port] c_ports = self.impl.getOutputPorts()
        py_ports = []
        for p in c_ports:
            port = Port()
            port.impl = p
            py_ports.append(port)
        return py_ports

    @output_ports.setter
    def output_ports(self, ports: list):
        cdef vector[C.Port] c_ports
        cdef Port c_port
        for p in ports:
            c_port = Port(p.shape)
            c_ports.push_back(c_port.impl)
        self.impl.setOutputPorts(c_ports)

    @property
    def params(self):
        return {k.decode(): v.decode() for k, v in self.impl.getParameters()}

    @params.setter
    def params(self, params_map: dict):
        cdef map[string, string] c_params_map
        for k, v in params_map.items():
            c_params_map[k.encode()] = str(v).encode()
        self.impl.setParameters(c_params_map)

    def build(self):
        cdef ILayer layer = ILayer()
        layer.impl = self.impl.build()
        return layer

    @property
    def constant_data(self):
        cdef map[string, Blob.Ptr] c_constant_data
        c_constant_data = self.impl.getConstantData()
        constant_data = {}
        # TODO: Fix LayerBuilder object copying - pass by reference
        # constant_data = LayerConstantData()
        # constant_data.impl = make_shared[C.LayerBuilder](self.impl)
        cdef BlobBuffer weights_buffer
        for weights in c_constant_data:
            weights_buffer = BlobBuffer()
            weights_buffer.reset(weights.second)
            constant_data[weights.first.decode()] = weights_buffer.to_numpy()
        return constant_data

    @constant_data.setter
    def constant_data(self, data: dict):
        cdef vector[size_t] dims
        cdef map[string, Blob.Ptr] c_constant_data
        cdef Blob.Ptr blob_ptr
        cdef BlobBuffer buffer
        data = check_constant_data(data)
        for k, v in data.items():
            dims.clear()  # reset per key; the original accumulated shapes across keys
            for dim in v.shape:
                dims.push_back(dim)
            ie_precision = np_precision_map.get(str(v.dtype), None)
            if not ie_precision:
                raise BufferError("Unsupported precision of the data for key {}! Given {} but one of the {} "
                                  "precisions expected".format(k, str(v.dtype), ", ".join(np_precision_map.keys())))
            blob_ptr = self.impl.allocateBlob(dims, ie_precision.encode())
            buffer = BlobBuffer()
            buffer.reset(blob_ptr)
            np_buffer = buffer.to_numpy()
            np_buffer[:] = v
            c_constant_data[k.encode()] = blob_ptr

        self.impl.setConstantData(c_constant_data)

# TODO: Implement get/setGraph when it is supported
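For reference, a hedged sketch of how the builder properties above were meant to be driven (all attribute names come from this class; the parameter key is an assumption):

relu = LayerBuilder("ReLU", "relu1")
relu.params = {"negative_slope": 0.0}            # values are stringified for C++
relu.input_ports = [Port([1, 3, 224, 224])]
relu.output_ports = [Port([1, 3, 224, 224])]
ilayer = relu.build()                            # immutable ILayer snapshot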
@ -1,330 +0,0 @@
// Copyright (c) 2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dnn_builder_impl.hpp"

// using namespace InferenceEnginePython;
// using namespace std;

std::map<std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
                                                                   {"FP16", InferenceEngine::Precision::FP16},
                                                                   {"Q78",  InferenceEngine::Precision::Q78},
                                                                   {"I32",  InferenceEngine::Precision::I32},
                                                                   {"I16",  InferenceEngine::Precision::I16},
                                                                   {"I8",   InferenceEngine::Precision::I8},
                                                                   {"U16",  InferenceEngine::Precision::U16},
                                                                   {"U8",   InferenceEngine::Precision::U8}};
InferenceEnginePython::ILayer buildILayer(InferenceEngine::ILayer::CPtr it) {
    std::vector<InferenceEnginePython::Port> in_ports;
    std::vector<InferenceEnginePython::Port> out_ports;
    for (const auto &port : it->getInputPorts()) {
        in_ports.push_back(InferenceEnginePython::Port(port.shape()));
    }
    for (const auto &port : it->getOutputPorts()) {
        out_ports.push_back(InferenceEnginePython::Port(port.shape()));
    }

    std::map<std::string, std::string> params_map;
    for (const auto &params : it->getParameters()->getParameters()) {
        params_map.emplace(params.first, params.second);
    }
    std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
    for (const auto &data : it->getParameters()->getConstantData()) {
        data_map.emplace(data.first, std::const_pointer_cast<InferenceEngine::Blob>(data.second));
    }
    return {it,
            it->getName(),
            it->getId(),
            it->getType(),
            params_map,
            data_map,
            in_ports,
            out_ports,
    };
}
// NetworkBuilder
InferenceEnginePython::NetworkBuilder::NetworkBuilder(const std::string &name) {
    // TODO( ): std::move or instance in heap? Please check in other places.
    InferenceEngine::Builder::Network network(name);
    network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
}

InferenceEnginePython::NetworkBuilder InferenceEnginePython::NetworkBuilder::from_ie_network(
        const InferenceEnginePython::IENetwork &icnn_net) {
    InferenceEngine::Builder::Network network((InferenceEngine::ICNNNetwork &) icnn_net.actual);
    NetworkBuilder net_builder = NetworkBuilder();
    net_builder.network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
    return net_builder;
}

InferenceEnginePython::INetwork InferenceEnginePython::NetworkBuilder::build() {
    InferenceEngine::INetwork::Ptr i_net = network_ptr->build();
    std::vector<ILayer> layers;
    for (const auto &it : *i_net) {
        layers.push_back(buildILayer(it));
    }
    std::vector<ILayer> inputs;
    for (const auto &it : i_net->getInputs()) {
        inputs.push_back(buildILayer(it));
    }
    std::vector<ILayer> outputs;
    for (const auto &it : i_net->getOutputs()) {  // was getInputs(); outputs must come from getOutputs()
        outputs.push_back(buildILayer(it));
    }
    return {i_net,             // INetwork ptr
            i_net->getName(),  // name
            i_net->size(),     // Number of layers
            layers,
            inputs,
            outputs
    };
}
std::vector<InferenceEnginePython::LayerBuilder> InferenceEnginePython::NetworkBuilder::getLayers() {
    std::vector<LayerBuilder> layers;
    for (const auto &it : network_ptr->getLayers()) {
        LayerBuilder layer;
        layer.actual = it;
        layer.id = it.getId();
        layers.push_back(layer);
    }
    return layers;
}

InferenceEnginePython::LayerBuilder InferenceEnginePython::NetworkBuilder::getLayer(size_t layer_id) {
    LayerBuilder layer;
    InferenceEngine::Builder::Layer ie_layer = network_ptr->getLayer(layer_id);
    layer.actual = ie_layer;
    layer.id = ie_layer.getId();
    return layer;
}

void InferenceEnginePython::NetworkBuilder::removeLayer(const LayerBuilder &layer) {
    network_ptr->removeLayer(layer.id);
}

const std::vector<InferenceEnginePython::Connection> InferenceEnginePython::NetworkBuilder::getLayerConnections(
        const LayerBuilder &layer) {
    std::vector<InferenceEngine::Connection> ie_connections = network_ptr->getLayerConnections(layer.id);
    std::vector<Connection> connections;
    for (auto const &it : ie_connections) {
        PortInfo input(it.from().layerId(), it.from().portId());
        PortInfo output(it.to().layerId(), it.to().portId());
        connections.push_back(Connection(input, output));
    }
    return connections;
}

void InferenceEnginePython::NetworkBuilder::disconnect(const Connection &connection) {
    network_ptr->disconnect(connection.actual);
}

void InferenceEnginePython::NetworkBuilder::connect(const PortInfo &input, const PortInfo &output) {
    network_ptr->connect(input.actual, output.actual);
}

size_t InferenceEnginePython::NetworkBuilder::addLayer(const LayerBuilder &layer) {
    return network_ptr->addLayer(layer.actual);
}

size_t InferenceEnginePython::NetworkBuilder::addAndConnectLayer(const std::vector<PortInfo> &input,
                                                                 const LayerBuilder &layer) {
    std::vector<InferenceEngine::PortInfo> ie_ports;
    for (const auto &it : input) {
        ie_ports.push_back(it.actual);
    }
    return network_ptr->addLayer(ie_ports, layer.actual);
}
// NetworkBuilder end

// Port
InferenceEnginePython::Port::Port(const std::vector<size_t> &shapes) {
    actual = InferenceEngine::Port(shapes);
    shape = actual.shape();
}

InferenceEnginePython::PortInfo::PortInfo(size_t layer_id, size_t port_id) : PortInfo() {
    this->actual = InferenceEngine::PortInfo(layer_id, port_id);
    this->layer_id = layer_id;
    this->port_id = port_id;
}
// Port end

// INetwork
std::vector<InferenceEnginePython::Connection> InferenceEnginePython::INetwork::getLayerConnections(size_t layer_id) {
    std::vector<Connection> connections;
    for (const auto &it : actual->getLayerConnections(layer_id)) {
        PortInfo input = PortInfo(it.from().layerId(), it.from().portId());
        PortInfo output = PortInfo(it.to().layerId(), it.to().portId());
        connections.push_back(Connection(input, output));
    }
    return connections;
}

InferenceEnginePython::IENetwork InferenceEnginePython::INetwork::to_ie_network() {
    std::shared_ptr<InferenceEngine::ICNNNetwork> icnn_net = InferenceEngine::Builder::convertToICNNNetwork(actual);
    InferenceEngine::CNNNetwork cnn_net(icnn_net);
    IENetwork ie_net = IENetwork();
    ie_net.actual = cnn_net;
    ie_net.name = name;
    ie_net.batch_size = cnn_net.getBatchSize();
    return ie_net;
}
// INetwork end

// Connection
InferenceEnginePython::Connection::Connection(PortInfo input, PortInfo output) : Connection() {
    this->actual = InferenceEngine::Connection(InferenceEngine::PortInfo(input.layer_id, input.port_id),
                                               InferenceEngine::PortInfo(output.layer_id, output.port_id));
    this->_from = PortInfo(actual.from().layerId(), actual.from().portId());
    this->to = PortInfo(actual.to().layerId(), actual.to().portId());
}
// Connection end

// LayerBuilder
InferenceEnginePython::LayerBuilder::LayerBuilder(const std::string &type, const std::string &name) : LayerBuilder() {
    InferenceEngine::Builder::Layer layer(type, name);
    this->actual = layer;
    this->id = layer.getId();
}

const std::string &InferenceEnginePython::LayerBuilder::getName() {
    return actual.getName();
}

const std::string &InferenceEnginePython::LayerBuilder::getType() {
    return actual.getType();
}

std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getInputPorts() {
    std::vector<Port> ports;
    for (const auto &it : actual.getInputPorts()) {
        ports.push_back(Port(it.shape()));
    }
    return ports;
}

std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getOutputPorts() {
    std::vector<Port> ports;
    for (const auto &it : actual.getOutputPorts()) {
        ports.push_back(Port(it.shape()));
    }
    return ports;
}

std::map<std::string, std::string> InferenceEnginePython::LayerBuilder::getParameters() {
    std::map<std::string, std::string> params_map;
    for (const auto &it : actual.getParameters()) {
        params_map.emplace(it.first, it.second);
    }
    return params_map;
}

void InferenceEnginePython::LayerBuilder::setParameters(std::map<std::string, std::string> params_map) {
    std::map<std::string, InferenceEngine::Parameter> ie_params_map;
    for (const auto &it : params_map) {
        InferenceEngine::Parameter ie_param((it.second));
        ie_params_map.emplace(it.first, ie_param);
    }
    actual = actual.setParameters(ie_params_map);
}

void InferenceEnginePython::LayerBuilder::setName(const std::string &name) {
    actual = actual.setName(name);
}

void InferenceEnginePython::LayerBuilder::setType(const std::string &type) {
    actual = actual.setType(type);
}

void InferenceEnginePython::LayerBuilder::setInputPorts(const std::vector<Port> ports) {
    std::vector<InferenceEngine::Port> ie_ports;
    for (const auto &it : ports) {
        ie_ports.push_back(it.actual);
    }
    actual = actual.setInputPorts(ie_ports);
}

void InferenceEnginePython::LayerBuilder::setOutputPorts(const std::vector<Port> ports) {
    std::vector<InferenceEngine::Port> ie_ports;
    for (const auto &it : ports) {
        ie_ports.push_back(it.actual);
    }
    actual = actual.setOutputPorts(ie_ports);
}

InferenceEnginePython::ILayer InferenceEnginePython::LayerBuilder::build() {
    return buildILayer(actual.build());
}

std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::LayerBuilder::getConstantData() {
    std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
    for (const auto &it : actual.getConstantData()) {
        data_map.emplace(it.first, std::const_pointer_cast<InferenceEngine::Blob>(it.second));
    }
    return data_map;
}

InferenceEngine::Blob::Ptr InferenceEnginePython::LayerBuilder::allocateBlob(std::vector<size_t> dims,
                                                                             const std::string &precision) {
    InferenceEngine::Layout ie_layout;
    ie_layout = InferenceEngine::TensorDesc::getLayoutByDims(dims);
    InferenceEngine::Precision ie_precision = precision_map.at(precision);
    const InferenceEngine::TensorDesc &tdesc = InferenceEngine::TensorDesc(ie_precision, dims, ie_layout);
    InferenceEngine::Blob::Ptr blob;
    switch (ie_precision) {
        case InferenceEngine::Precision::FP32:
            blob = InferenceEngine::make_shared_blob<float>(tdesc);
            break;
        case InferenceEngine::Precision::FP16:
            blob = InferenceEngine::make_shared_blob<int>(tdesc);
            break;
        case InferenceEngine::Precision::I16:
            blob = InferenceEngine::make_shared_blob<int>(tdesc);
            break;
        case InferenceEngine::Precision::U16:
            blob = InferenceEngine::make_shared_blob<int>(tdesc);
            break;
        case InferenceEngine::Precision::U8:
            blob = InferenceEngine::make_shared_blob<unsigned char>(tdesc);
            break;
        case InferenceEngine::Precision::I8:
            blob = InferenceEngine::make_shared_blob<signed char>(tdesc);
            break;
        case InferenceEngine::Precision::I32:
            blob = InferenceEngine::make_shared_blob<signed int>(tdesc);
            break;
        default:
            blob = InferenceEngine::make_shared_blob<float>(tdesc);
            break;
    }

    blob->allocate();
    return blob;
}

void InferenceEnginePython::LayerBuilder::setConstantData(const std::map<std::string,
        InferenceEngine::Blob::Ptr> &const_data) {
    actual.setConstantData(const_data);
}
// TODO( ): Fix LayerBuilder object copying - pass by reference
// void LayerBuilder::addConstantData(const std::string & name, InferenceEngine::Blob::Ptr data){
//     InferenceEngine::Blob::CPtr c_data = const_pointer_cast<const InferenceEngine::Blob>(data);
//     actual.addConstantData(name, c_data);
// }

// LayerBuilder end
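On the Python side, allocateBlob() was reached through the constant_data setter shown earlier; a hedged sketch (shape and names are illustrative):

# Hypothetical: attach weights to a layer builder; the dtype selects the IE precision.
import numpy as np
conv = LayerBuilder("Convolution", "conv1")
conv.constant_data = {"weights": np.random.rand(16, 3, 3, 3).astype(np.float32)}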
@ -1,161 +0,0 @@
// Copyright (c) 2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <ie_blob.h>

#include <iterator>

#include <string>
#include <iostream>
#include <algorithm>
#include <vector>
#include <map>

#include <sstream>
#include <ie_builders.hpp>
#include <inference_engine.hpp>

#include <ie_api_impl.hpp>


// namespace IE Python
namespace InferenceEnginePython {
struct LayerBuilder;

struct Port {
    Port() = default;

    explicit Port(const std::vector<size_t> &shapes);

    InferenceEngine::Port actual;
    std::vector<size_t> shape;
};

struct ILayer {
    InferenceEngine::ILayer::CPtr layer_ptr;
    std::string name;
    size_t id;
    std::string type;
    std::map<std::string, std::string> parameters;
    std::map<std::string, InferenceEngine::Blob::Ptr> constant_data;
    std::vector<Port> in_ports;
    std::vector<Port> out_ports;
};

struct PortInfo {
    PortInfo(size_t layer_id, size_t port_id);

    PortInfo() : actual(0, 0) {}

    InferenceEngine::PortInfo actual;
    size_t layer_id;
    size_t port_id;
};

struct Connection {
    Connection() : actual(InferenceEngine::PortInfo(0), InferenceEngine::PortInfo(0)) {}

    Connection(PortInfo input, PortInfo output);

    InferenceEngine::Connection actual;
    PortInfo _from;
    PortInfo to;
};

struct INetwork {
    InferenceEngine::INetwork::Ptr actual;
    std::string name;
    size_t size;
    std::vector<ILayer> layers;
    std::vector<ILayer> inputs;
    std::vector<ILayer> outputs;

    std::vector<Connection> getLayerConnections(size_t layer_id);

    IENetwork to_ie_network();
};

struct NetworkBuilder {
    InferenceEngine::Builder::Network::Ptr network_ptr;

    explicit NetworkBuilder(const std::string &name);

    NetworkBuilder() = default;

    NetworkBuilder from_ie_network(const InferenceEnginePython::IENetwork &icnn_net);

    INetwork build();

    std::vector<LayerBuilder> getLayers();

    LayerBuilder getLayer(size_t layer_id);

    void removeLayer(const LayerBuilder &layer);

    size_t addLayer(const LayerBuilder &layer);

    size_t addAndConnectLayer(const std::vector<PortInfo> &input, const LayerBuilder &layer);

    const std::vector<Connection> getLayerConnections(const LayerBuilder &layer);

    void disconnect(const Connection &connection);

    void connect(const PortInfo &input, const PortInfo &output);
};

struct LayerBuilder {
    InferenceEngine::Builder::Layer actual;
    size_t id;

    LayerBuilder(const std::string &type, const std::string &name);

    LayerBuilder() : actual("", "") {}

    LayerBuilder from_ilayer(const ILayer &ilayer);

    const std::string &getName();

    void setName(const std::string &name);

    const std::string &getType();

    void setType(const std::string &type);

    std::vector<Port> getInputPorts();

    void setInputPorts(const std::vector<Port> ports);

    std::vector<Port> getOutputPorts();

    void setOutputPorts(const std::vector<Port> ports);

    std::map<std::string, std::string> getParameters();

    void setParameters(std::map<std::string, std::string> params_map);

    ILayer build();

    std::map<std::string, InferenceEngine::Blob::Ptr> getConstantData();

    InferenceEngine::Blob::Ptr allocateBlob(std::vector<size_t> dims, const std::string &precision);

    void setConstantData(const std::map<std::string, InferenceEngine::Blob::Ptr> &const_data);

    // TODO( ): Fix LayerBuilder object copying - pass by reference
    // void addConstantData(const std::string & name, InferenceEngine::Blob::Ptr data);
};
} // namespace InferenceEnginePython
@ -1,97 +0,0 @@
from libcpp.string cimport string
from libcpp.vector cimport vector
from libc.stddef cimport size_t
from libcpp.memory cimport shared_ptr
from libcpp.map cimport map
from ..ie_api_impl_defs cimport IENetwork

cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
    ctypedef vector[size_t] SizeVector

    cdef cppclass TensorDesc:
        SizeVector& getDims()
        const Precision& getPrecision() const

    cdef cppclass Blob:
        ctypedef shared_ptr[Blob] Ptr
        const TensorDesc& getTensorDesc() const
        size_t element_size() const

    cdef cppclass Precision:
        const char* name() const

cdef extern from "dnn_builder_impl.hpp" namespace "InferenceEnginePython":
    cdef cppclass ILayer:
        const string name
        size_t id
        string type
        map[string, string] parameters
        vector[Port] in_ports
        vector[Port] out_ports
        map[string, Blob.Ptr] constant_data;

    cdef cppclass INetwork:
        string name
        size_t size
        vector[ILayer] layers
        vector[ILayer] inputs
        vector[ILayer] outputs
        vector[Port] in_ports;
        vector[Port] out_ports;
        vector[Connection] getLayerConnections(size_t layer_id);
        IENetwork to_ie_network();

    cdef cppclass NetworkBuilder:
        NetworkBuilder() except +
        NetworkBuilder(string name) except +
        NetworkBuilder from_ie_network(IENetwork &icnn_net) except +
        INetwork build() except +
        vector[LayerBuilder] getLayers() except +
        LayerBuilder getLayer(size_t layer_id) except +
        void removeLayer(const LayerBuilder& layer) except +
        const vector[Connection] getLayerConnections(const LayerBuilder& layer) except +
        void disconnect(const Connection& connection) except +
        void connect(const PortInfo& input, const PortInfo& output) except +
        size_t addLayer(const LayerBuilder& layer) except +
        size_t addAndConnectLayer(const vector[PortInfo]& input, const LayerBuilder& layer);

    cdef cppclass Port:
        Port() except +
        Port(const vector[size_t] & shapes) except +
        const vector[size_t] shape


    cdef cppclass PortInfo:
        PortInfo(size_t layer_id, size_t port_id) except +
        PortInfo() except +
        size_t layer_id
        size_t port_id

    cdef cppclass Connection:
        Connection(PortInfo input, PortInfo output) except +
        Connection() except +
        PortInfo _from
        PortInfo to

    cdef cppclass LayerBuilder:
        LayerBuilder()
        LayerBuilder(const string& type, const string& name) except +
        size_t id
        LayerBuilder from_ilayer(const ILayer& ilayer) except +
        string getName() except +
        string getType() except +
        vector[Port] getInputPorts() except +
        vector[Port] getOutputPorts() except +
        map[string, string] getParameters() except +
        void setParameters(map[string, string] params_map) except +
        void setName(const string & name) except +
        void setType(const string & type) except +
        void setInputPorts(const vector[Port] ports) except +
        void setOutputPorts(const vector[Port] ports) except +
        ILayer build() except +
        map[string, Blob.Ptr] getConstantData()
        void setConstantData(map[string, Blob.Ptr] &const_data)
        # TODO: Fix LayerBuilder object copying - pass by reference
        # void addConstantData(const string & name, Blob.Ptr data)
        Blob.Ptr allocateBlob(vector[size_t] dims, const string & precision)
@ -33,6 +33,7 @@ cdef class IENetwork:

cdef class ExecutableNetwork:
    cdef unique_ptr[C.IEExecNetwork] impl
    cdef C.IEPlugin plugin_impl
    cdef public:
        _requests, inputs, outputs

@ -32,7 +32,7 @@ cdef dict_to_c_map(py_dict):
    return c_map

supported_precisions = ["FP32", "FP16", "Q78", "I32", "I16", "I8", "U32", "U16"]
supported_layouts = ["NCHW", "NHWC", "OIHW", "C", "CHW", "HW", "NC", "CN", "BLOCKED"]
supported_layouts = ["NCHW", "NHWC", "OIHW", "C", "CHW", "HW", "NC", "CN", "BLOCKED", "NCDHW"]
known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL']

def get_version():
@ -218,6 +218,10 @@ cdef class InferRequest:
            outputs[output] = self._get_blob_buffer(output.encode()).to_numpy()
        return deepcopy(outputs)

    @property
    def latency(self):
        return self.impl.exec_time

    def set_batch(self, size):
        if size <= 0:
            raise ValueError("Batch size should be a positive integer but {} was specified".format(size))
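The new latency property exposes the measured execution time (in milliseconds, per the C++ changes further below). A hedged usage sketch, assuming a loaded ExecutableNetwork:

# Hypothetical: time a synchronous request through the 2019 R1 Python API.
res = exec_net.infer(inputs={input_blob: image})
print("inference took {:.2f} ms".format(exec_net.requests[0].latency))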
@ -225,6 +229,7 @@ cdef class InferRequest:

    def _fill_inputs(self, inputs):
        for k, v in inputs.items():
            assert k in self._inputs_list, "No input with name {} found in network".format(k)
            self.inputs[k][:] = v

@ -357,6 +362,7 @@ cdef class IENetwork:
        cdef vector[size_t] c_shape
        net_inputs = self.inputs
        for input, shape in input_shapes.items():
            c_shape = []
            if input not in net_inputs:
                raise AttributeError("Specified '{}' layer is not in network inputs {}!".format(input, net_inputs))
            for v in shape:
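This validation loop backs IENetwork.reshape; a minimal sketch of the call it guards (the input name 'data' is an assumption):

# Hypothetical: 'data' must be a key of net.inputs or AttributeError is raised.
net.reshape({'data': [1, 3, 224, 224]})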
@ -396,7 +402,7 @@ cdef class IEPlugin:
        if config:
            for k, v in config.items():
                c_config[to_std_string(k)] = to_std_string(v)

        exec_net.plugin_impl = self.impl
        exec_net.impl = move(self.impl.load(network.impl, num_requests, c_config))
        exec_net.inputs = network.inputs.keys()
        exec_net.outputs = list(network.outputs.keys())

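The load path above is exercised like this; a hedged sketch (the device and config values are illustrative, though PERF_COUNT is a standard Inference Engine config key):

# Hypothetical: load a network with a plugin config and two infer requests.
plugin = IEPlugin(device="CPU")
exec_net = plugin.load(network=net, num_requests=2,
                       config={"PERF_COUNT": "YES"})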
@ -1,4 +1,4 @@
// Copyright (c) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -35,6 +35,7 @@ std::map<std::string, InferenceEngine::Layout> layout_map = {{"ANY", Inferen
                                                              {"HW", InferenceEngine::Layout::HW},
                                                              {"NC", InferenceEngine::Layout::NC},
                                                              {"CN", InferenceEngine::Layout::CN},
                                                              {"NCDHW", InferenceEngine::Layout::NCDHW},
                                                              {"BLOCKED", InferenceEngine::Layout::BLOCKED}};
#define stringify(name) # name
#define IE_CHECK_CALL(expr) { \
@ -301,7 +302,6 @@ InferenceEnginePython::IEPlugin::load(const InferenceEnginePython::IENetwork &ne
    InferenceEngine::ResponseDesc response;
    auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(net.name,
                                                                                                 num_requests);

    IE_CHECK_CALL(actual->LoadNetwork(exec_network->actual, net.actual, config, &response))

    for (size_t i = 0; i < num_requests; ++i) {
@ -322,9 +322,8 @@ InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string &name, siz
}

void InferenceEnginePython::IEExecNetwork::infer() {
    InferenceEngine::ResponseDesc response;
    InferRequestWrap &request = infer_requests[0];
    request.request_ptr->Infer(&response);
    request.infer();
}


@ -340,13 +339,33 @@ void InferenceEnginePython::InferRequestWrap::setBatch(int size) {
    IE_CHECK_CALL(request_ptr->SetBatch(size, &response));
}

void latency_callback(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) {
    if (code != InferenceEngine::StatusCode::OK) {
        THROW_IE_EXCEPTION << "Async Infer Request failed with status code " << code;
    }
    InferenceEnginePython::InferRequestWrap *requestWrap;
    InferenceEngine::ResponseDesc dsc;
    request->GetUserData(reinterpret_cast<void**>(&requestWrap), &dsc);
    auto end_time = Time::now();
    auto execTime = std::chrono::duration_cast<ns>(end_time - requestWrap->start_time);
    requestWrap->exec_time = static_cast<double>(execTime.count()) * 0.000001;
}

void InferenceEnginePython::InferRequestWrap::infer() {
    InferenceEngine::ResponseDesc response;
    start_time = Time::now();
    IE_CHECK_CALL(request_ptr->Infer(&response));
    auto end_time = Time::now();
    auto execTime = std::chrono::duration_cast<ns>(end_time - start_time);
    exec_time = static_cast<double>(execTime.count()) * 0.000001;
}


void InferenceEnginePython::InferRequestWrap::infer_async() {
    InferenceEngine::ResponseDesc response;
    start_time = Time::now();
    IE_CHECK_CALL(request_ptr->SetUserData(this, &response));
    request_ptr->SetCompletionCallback(latency_callback);
    IE_CHECK_CALL(request_ptr->StartAsync(&response));
}

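Note the scale factor: the nanosecond delta is multiplied by 0.000001, so exec_time (and the Python latency property) is in milliseconds. A quick check of the arithmetic:

# Sanity check of the ns -> ms conversion used above.
elapsed_ns = 12345678
exec_time_ms = elapsed_ns * 0.000001
assert abs(exec_time_ms - 12.345678) < 1e-12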
@ -1,4 +1,4 @@
// Copyright (c) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -23,11 +23,16 @@
#include <vector>
#include <set>


#include <iostream>
#include <algorithm>

#include <sstream>
#include <inference_engine.hpp>
#include <chrono>
#include "inference_engine.hpp"

typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;

namespace InferenceEnginePython {
struct IENetLayer {
@ -111,7 +116,8 @@ struct IENetwork {

struct InferRequestWrap {
    InferenceEngine::IInferRequest::Ptr request_ptr;

    Time::time_point start_time;
    double exec_time;
    void infer();

    void infer_async();
@ -45,14 +45,14 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
        vector[size_t] dims
        string precision
        string layout
        void setPrecision(string precision)
        void setLayout(string layout)
        void setPrecision(string precision) except +
        void setLayout(string layout) except +

    cdef cppclass OutputInfo:
        vector[size_t] dims
        string precision
        string layout
        void setPrecision(string precision)
        void setPrecision(string precision) except +

    cdef cppclass ProfileInfo:
        string status
@ -100,7 +100,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
        string version

    cdef cppclass InferRequestWrap:
        void getBlobPtr(const string &blob_name, Blob.Ptr &blob_ptr)
        double exec_time;
        void getBlobPtr(const string &blob_name, Blob.Ptr &blob_ptr) except +
        map[string, ProfileInfo] getPerformanceCounts() except +
        void infer() except +
        void infer_async() except +

@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for ArgMax layer
 */
class INFERENCE_ENGINE_API_CLASS(ArgMaxLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(ArgMaxLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
    explicit ArgMaxLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param genLayer generic builder
     * @param layer pointer to generic builder
     */
    explicit ArgMaxLayer(Layer& genLayer);
    explicit ArgMaxLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ArgMaxLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for BatchNormalization layer
 */
class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
    explicit BatchNormalizationLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param genLayer generic builder
     * @param layer pointer to generic builder
     */
    explicit BatchNormalizationLayer(Layer& genLayer);
    explicit BatchNormalizationLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit BatchNormalizationLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
@ -45,19 +50,6 @@ public:
     */
    BatchNormalizationLayer& setPort(const Port &port);

    /**
     * @brief Sets weights for layer
     * @param weights Constant blob with weights
     * @return reference to layer builder
     */
    BatchNormalizationLayer& setWeights(const Blob::CPtr& weights);
    /**
     * @brief Sets biases for layer
     * @param biases Constant blob with biases
     * @return reference to layer builder
     */
    BatchNormalizationLayer& setBiases(const Blob::CPtr& biases);

    /**
     * @brief Returns epsilon
     * @return Epsilon
@ -69,12 +61,6 @@ public:
     * @return reference to layer builder
     */
    BatchNormalizationLayer& setEpsilon(float eps);

    /**
     * @brief Validates layer before creation
     * @param layer generic layer builder
     */
    static void validate(const Layer& layer);
};

} // namespace Builder
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Clamp layer
 */
class INFERENCE_ENGINE_API_CLASS(ClampLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(ClampLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
    explicit ClampLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param genLayer generic builder
     * @param layer pointer to generic builder
     */
    explicit ClampLayer(Layer& genLayer);
    explicit ClampLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ClampLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Concat layer
 */
class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
@ -24,9 +24,14 @@ public:
    explicit ConcatLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param genLayer generic builder
     * @param layer pointer to generic builder
     */
    explicit ConcatLayer(Layer& genLayer);
    explicit ConcatLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ConcatLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Const layer
 */
class INFERENCE_ENGINE_API_CLASS(ConstLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(ConstLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
    explicit ConstLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param genLayer generic builder
     * @param layer pointer to generic builder
     */
    explicit ConstLayer(Layer& genLayer);
    explicit ConstLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit ConstLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
@ -51,6 +56,12 @@ public:
     * @return reference to layer builder
     */
    ConstLayer& setData(const Blob::CPtr& data);

    /**
     * @brief Returns constant data
     * @return constant blob with data
     */
    const Blob::CPtr& getData() const;
};

} // namespace Builder
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Convolution layer
 */
class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
@ -24,14 +24,14 @@ public:
    explicit ConvolutionLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param genLayer generic builder
     * @param layer pointer to generic builder
     */
    explicit ConvolutionLayer(Layer& genLayer);
    explicit ConvolutionLayer(const Layer::Ptr& layer);
    /**
     * @brief Operator creates generic layer builder
     * @return Generic layer builder
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    operator Layer() const override;
    explicit ConvolutionLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
@ -39,19 +39,6 @@ public:
     */
    ConvolutionLayer& setName(const std::string& name);

    /**
     * @brief Sets weights for layer
     * @param weights Constant blob with weights
     * @return reference to layer builder
     */
    ConvolutionLayer& setWeights(const Blob::CPtr& weights);
    /**
     * @brief Sets biases for layer
     * @param biases Constant blob with biases
     * @return reference to layer builder
     */
    ConvolutionLayer& setBiases(const Blob::CPtr& biases);

    /**
     * @brief Returns input port
     * @return Input port
@ -151,12 +138,6 @@ public:
     * @return reference to layer builder
     */
    ConvolutionLayer& setOutDepth(size_t outDepth);

    /**
     * @brief Validates layer before creation
     * @param layer generic layer builder
     */
    static void validate(const Layer& layer);
};

} // namespace Builder
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Crop layer
 */
class INFERENCE_ENGINE_API_CLASS(CropLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(CropLayer): public LayerDecorator {
public:
    /**
     * @brief The constructor creates a builder with the name
@ -24,9 +24,14 @@ public:
    explicit CropLayer(const std::string& name = "");
    /**
     * @brief The constructor creates a builder from generic builder
     * @param genLayer generic builder
     * @param layer pointer to generic builder
     */
    explicit CropLayer(Layer& genLayer);
    explicit CropLayer(const Layer::Ptr& layer);
    /**
     * @brief The constructor creates a builder from generic builder
     * @param layer constant pointer to generic builder
     */
    explicit CropLayer(const Layer::CPtr& layer);
    /**
     * @brief Sets the name for the layer
     * @param name Layer name
@ -78,12 +83,6 @@ public:
     * @return reference to layer builder
     */
    CropLayer& setOffset(const std::vector<size_t>& offsets);

    /**
     * @brief Validates layer before creation
     * @param layer generic layer builder
     */
    static void validate(const Layer& layer);
};

} // namespace Builder
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for CTCGreedyDecoder layer
 */
class INFERENCE_ENGINE_API_CLASS(CTCGreedyDecoderLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(CTCGreedyDecoderLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -24,9 +24,14 @@ public:
explicit CTCGreedyDecoderLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit CTCGreedyDecoderLayer(Layer& genLayer);
explicit CTCGreedyDecoderLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit CTCGreedyDecoderLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_convolution_layer.hpp>
#include <ie_inetwork.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -23,9 +23,14 @@ public:
explicit DeconvolutionLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit DeconvolutionLayer(Layer& genLayer);
explicit DeconvolutionLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit DeconvolutionLayer(const Layer::CPtr& layer);
};

} // namespace Builder
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for DetectionOutput layer
 */
class INFERENCE_ENGINE_API_CLASS(DetectionOutputLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(DetectionOutputLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -24,9 +24,14 @@ public:
explicit DetectionOutputLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit DetectionOutputLayer(Layer& genLayer);
explicit DetectionOutputLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit DetectionOutputLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Eltwise layer
 */
class INFERENCE_ENGINE_API_CLASS(EltwiseLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(EltwiseLayer): public LayerDecorator {
public:
/**
 * @brief The enum defines all Eltwise types
@ -23,7 +23,11 @@ public:
enum EltwiseType {
SUM = 1,
MAX,
MUL
MUL,
SUB,
DIV,
MIN,
SQUARED_DIFF
};

/**
@ -33,9 +37,14 @@ public:
explicit EltwiseLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit EltwiseLayer(Layer& genLayer);
explicit EltwiseLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit EltwiseLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
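The enum growth above is the substantive change here: four new element-wise operations. A hedged sketch of selecting one of them follows; the header name and the `setEltwiseType` setter are assumed from the rest of this builder's interface, not shown in the hunk.

```cpp
#include <builders/ie_eltwise_layer.hpp>  // assumed header name

void sketchEltwise() {
    using InferenceEngine::Builder::EltwiseLayer;
    EltwiseLayer diff("squared_diff");
    // SQUARED_DIFF is new in 2019 R1, alongside SUB, DIV, and MIN
    diff.setEltwiseType(EltwiseLayer::SQUARED_DIFF);  // assumed setter
}
```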
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for ELU layer
 */
class INFERENCE_ENGINE_API_CLASS(ELULayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(ELULayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit ELULayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit ELULayer(Layer& genLayer);
explicit ELULayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit ELULayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for FullyConnected layer
 */
class INFERENCE_ENGINE_API_CLASS(FullyConnectedLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(FullyConnectedLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit FullyConnectedLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit FullyConnectedLayer(Layer& genLayer);
explicit FullyConnectedLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit FullyConnectedLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -33,19 +38,6 @@ public:
 */
FullyConnectedLayer& setName(const std::string& name);

/**
 * @brief Sets weights for layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
FullyConnectedLayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Sets biases for layer
 * @param biases Constant blob with biases
 * @return reference to layer builder
 */
FullyConnectedLayer& setBiases(const Blob::CPtr& biases);

/**
 * @brief Returns input port
 * @return Input port
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for GRN layer
 */
class INFERENCE_ENGINE_API_CLASS(GRNLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(GRNLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit GRNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit GRNLayer(Layer& genLayer);
explicit GRNLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit GRNLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
inference-engine/include/builders/ie_gru_sequence_layer.hpp (new file, 87 lines)
@ -0,0 +1,87 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>

namespace InferenceEngine {
namespace Builder {

/**
 * @brief The class represents a builder for GRUSequence layer
 */
class INFERENCE_ENGINE_API_CLASS(GRUSequenceLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit GRUSequenceLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit GRUSequenceLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit GRUSequenceLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
GRUSequenceLayer& setName(const std::string& name);

/**
 * @brief Returns input ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports for the layer
 * @param ports vector of input ports
 * @return reference to layer builder
 */
GRUSequenceLayer& setInputPorts(const std::vector<Port>& ports);

/**
 * @brief Returns output ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getOutputPorts() const;
/**
 * @brief Sets output ports for the layer
 * @param ports vector of output ports
 * @return reference to layer builder
 */
GRUSequenceLayer& setOutputPorts(const std::vector<Port>& ports);

int getHiddenSize() const;
GRUSequenceLayer& setHiddenSize(int size);
bool getSequenceDim() const;
GRUSequenceLayer& setSqquenceDim(bool flag);
const std::vector<std::string>& getActivations() const;
GRUSequenceLayer& setActivations(const std::vector<std::string>& activations);
const std::vector<float>& getActivationsAlpha() const;
GRUSequenceLayer& setActivationsAlpha(const std::vector<float>& activations);
const std::vector<float>& getActivationsBeta() const;
GRUSequenceLayer& setActivationsBeta(const std::vector<float>& activations);
float getClip() const;
GRUSequenceLayer& setClip(float clip);
bool getLinearBeforeReset() const;
GRUSequenceLayer& setLinearBeforeReset(bool flag);
const std::string& getDirection() const;
GRUSequenceLayer& setDirection(const std::string& direction);
};

} // namespace Builder
} // namespace InferenceEngine
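A hedged usage sketch for the new GRUSequence builder, using only setters declared above. Note the sequence-dimension setter is spelled `setSqquenceDim` in the header as published; the `"bidirectional"` direction token is an assumption.

```cpp
#include <builders/ie_gru_sequence_layer.hpp>

void sketchGru() {
    InferenceEngine::Builder::GRUSequenceLayer gru("gru1");
    // Each setter returns GRUSequenceLayer&, so calls chain
    gru.setHiddenSize(128)
       .setClip(5.0f)
       .setLinearBeforeReset(true)
       .setDirection("bidirectional");  // assumed direction string
}
```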
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Input layer
 */
class INFERENCE_ENGINE_API_CLASS(InputLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(InputLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit InputLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit InputLayer(Layer& genLayer);
explicit InputLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit InputLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -44,12 +49,6 @@ public:
 * @return reference to layer builder
 */
InputLayer& setPort(const Port &port);

/**
 * @brief Validates layer before creation
 * @param layer generic layer builder
 */
static void validate(const Layer& layer);
};

} // namespace Builder
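For reference, a minimal sketch of the Ptr-era InputLayer with `setPort`. The header name and constructing `Port` directly from a shape initializer list are assumptions.

```cpp
#include <builders/ie_input_layer.hpp>  // assumed header name

void sketchInput() {
    InferenceEngine::Builder::InputLayer input("data");
    // Assumed Port(shape) constructor; shape given in NCHW order
    input.setPort(InferenceEngine::Port({1, 3, 224, 224}));
}
```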
@ -1,4 +1,4 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@ -6,7 +6,7 @@

#include <details/caseless.hpp>
#include <ie_parameter.hpp>
#include <ie_inetwork.hpp>
#include <ie_network.hpp>
#include <ie_blob.h>
#include <string>
#include <vector>

@ -25,25 +25,30 @@ struct ValidatorsHolder {
/**
 * @brief Caseless map connects type with validator
 */
details::caseless_map<std::string, std::function<void(const Layer&)>> validators;
details::caseless_map<std::string, std::function<void(const std::shared_ptr<const Layer>&, bool)>> validators;
};

/**
 * @brief This class implements a builder for IE Layer
 */
class INFERENCE_ENGINE_API_CLASS(Layer) {
class INFERENCE_ENGINE_API_CLASS(Layer): public ILayer,
public std::enable_shared_from_this<Layer> {
public:
/**
 * @brief A shared pointer to the Layer builder
 */
using Ptr = std::shared_ptr<Layer>;
/**
 * @brief A shared pointer to the constant Layer builder
 */
using CPtr = std::shared_ptr<const Layer>;

/**
 * @brief The constructor creates a Layer builder with layer type and layer name
 * @param type Layer type
 * @param name Layer name
 */
explicit Layer(const std::string& type, const std::string& name = "");
/**
 * @brief The constructor creates a Layer builder from shared pointer to ILayer
 * @param layer shared pointer to ILayer
 */
explicit Layer(const ILayer::Ptr& layer);
/**
 * @brief The constructor creates a Layer builder from shared pointer to constant ILayer
 * @param layer shared pointer to constant ILayer
@ -57,38 +62,25 @@ public:
Layer(idx_t id, const Layer& layer);

/**
 * @brief Returns layer builder ID
 * @return ID
 * @brief Compares the given Layer builder with the current one
 * @param rhs Layer builder to compare with
 * @return true if the given Layer builder is equal to the current one, false otherwise
 */
idx_t getId() const;
bool operator==(const Layer& rhs) const {
return params == rhs.params;
}

/**
 * @brief Returns a reference to layer type
 * @return Layer type
 * @brief Returns layer ID
 * @return Layer ID
 */
std::string& getType();
/**
 * @brief Returns a reference to constant layer type
 * @return constant layer type
 */
const std::string& getType() const;
/**
 * @brief Sets layer type
 * @param type Layer type
 * @return Reference to Layer builder
 */
Layer& setType(const std::string& type);
idx_t getId() const noexcept override;

/**
 * @brief Returns a reference to layer name
 * @brief Returns a constant reference to layer name
 * @return Layer name
 */
std::string& getName();
/**
 * @brief Returns a reference to constant layer name
 * @return constant layer name
 */
const std::string& getName() const;
const std::string& getName() const noexcept override;
/**
 * @brief Sets layer name
 * @param name Layer name
@ -97,32 +89,27 @@ public:
Layer& setName(const std::string& name);

/**
 * @brief Returns layer subgraph
 * @return shared pointer to INetwork
 * @brief Returns a constant reference to layer type
 * @return Layer type
 */
INetwork::Ptr& getGraph();
const std::string& getType() const noexcept override;
/**
 * @brief Returns constant layer subgraph
 * @return constant shared pointer to INetwork
 */
const INetwork::Ptr& getGraph() const;
/**
 * @brief Sets layer subgraph
 * @param graph constant shared pointer to INetwork
 * @brief Sets layer type
 * @param type Layer type
 * @return Reference to Layer builder
 */
Layer& setGraph(const INetwork::Ptr& graph);
Layer& setType(const std::string& type);

/**
 * @brief Returns map of parameters
 * @return map of parameters
 */
std::map<std::string, Parameter>& getParameters();
const std::map<std::string, Parameter>& getParameters() const noexcept override;
/**
 * @brief Returns constant map of parameters
 * @return constant map of parameters
 * @brief Returns map of parameters
 * @return map of parameters
 */
const std::map<std::string, Parameter>& getParameters() const;
std::map<std::string, Parameter>& getParameters();
/**
 * @brief Sets parameters for layer
 * @param params constant map of parameters
@ -131,45 +118,15 @@ public:
Layer& setParameters(const std::map<std::string, Parameter>& params);

/**
 * @brief Returns map of internal blobs
 * @return map of internal blobs
 * @brief Returns vector of input ports
 * @return Vector of input ports
 */
std::map<std::string, Blob::CPtr>& getConstantData();
/**
 * @brief Returns constant map of internal blobs
 * @return constant map of internal blobs
 */
const std::map<std::string, Blob::CPtr>& getConstantData() const;
/**
 * @brief Sets constant data for layer
 * @param constData constant map of shared pointers to blobs
 * @return Reference to Layer builder
 */
Layer& setConstantData(const std::map<std::string, Blob::Ptr>& constData);
/**
 * @brief Sets constant data for layer
 * @param constData constant map of shared pointers to constant blobs
 * @return Reference to Layer builder
 */
Layer& setConstantData(const std::map<std::string, Blob::CPtr>& constData);
/**
 * @brief Adds constant data for layer by name
 * @param name Name of constant data
 * @param data shared pointer to constant blob
 * @return Reference to Layer builder
 */
Layer& addConstantData(const std::string& name, const Blob::CPtr& data);

const std::vector<Port>& getInputPorts() const noexcept override;
/**
 * @brief Returns vector of input ports
 * @return Vector of input ports
 */
std::vector<Port>& getInputPorts();
/**
 * @brief Returns constant vector of input ports
 * @return constant vector of input ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports
 * @param ports vector of ports
@ -181,12 +138,12 @@ public:
 * @brief Returns vector of output ports
 * @return Vector of output ports
 */
std::vector<Port>& getOutputPorts();
const std::vector<Port>& getOutputPorts() const noexcept override;
/**
 * @brief Returns constant vector of output ports
 * @return constant vector of output ports
 * @brief Returns vector of output ports
 * @return Vector of output ports
 */
const std::vector<Port>& getOutputPorts() const;
std::vector<Port>& getOutputPorts();
/**
 * @brief Sets output ports
 * @param ports vector of ports
@ -198,30 +155,27 @@ public:
 * @brief Validates the current builder and generates ILayer object
 * @return constant shared pointer to ILayer
 */
const ILayer::Ptr build() const;
const ILayer::CPtr build() const;

/**
 * @brief Validates layer builder
 */
void validate() const;
void validate(bool partial = false) const;

/**
 * @brief Registers a new validator for type
 * @param type Layer type
 * @param validator Layer validator
 */
static void addValidator(const std::string& type, const std::function<void(const Layer&)>& validator);
static void addValidator(const std::string& type, const std::function<void(const Layer::CPtr&, bool)>& validator);

private:
idx_t id;
std::string type;
std::string name;
INetwork::Ptr graph;
std::vector<Port> inPorts;
std::vector<Port> outPorts;
std::map<std::string, Parameter> params;
std::map<std::string, Blob::CPtr> constData;

static std::shared_ptr<ValidatorsHolder> getValidatorsHolder();
};

@ -235,7 +189,7 @@ public:
 * @param type Layer type
 * @param validator Layer validator
 */
explicit ValidatorRegisterBase(const std::string& type, const std::function<void(const Layer&)>& validator) {
explicit ValidatorRegisterBase(const std::string& type, const std::function<void(const Layer::CPtr&, bool)>& validator) {
InferenceEngine::Builder::Layer::addValidator(type, validator);
}
};
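A sketch of the reworked validator registration above: callbacks now receive a `Layer::CPtr` plus a `partial` flag instead of `Layer&`. The layer type `"MyCustomOp"` and the check itself are hypothetical.

```cpp
#include <builders/ie_layer_builder.hpp>
#include <details/ie_exception.hpp>

static InferenceEngine::Builder::ValidatorRegisterBase registerMyOp(
    "MyCustomOp",  // hypothetical layer type
    [](const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
        // The bool flag suggests checks can be relaxed during partial validation
        if (!partial && layer->getInputPorts().empty())
            THROW_IE_EXCEPTION << "MyCustomOp expects at least one input";
    });
```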
@ -1,4 +1,4 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@ -14,36 +14,41 @@ namespace Builder {
/**
 * @brief This class defines the basic functionality for layer builders
 */
class INFERENCE_ENGINE_API_CLASS(LayerFragment) {
class INFERENCE_ENGINE_API_CLASS(LayerDecorator) {
public:
/**
 * @brief The constructor creates layer builders with layer type and layer name
 * @param type Layer type
 * @param name Layer name
 */
LayerFragment(const std::string& type, const std::string& name);
LayerDecorator(const std::string& type, const std::string& name);
/**
 * @brief The constructor creates layer builders from reference to generic layer builder
 * @param genLayer Generic layer builder
 * @param layer pointer to generic layer builder
 */
explicit LayerFragment(Layer& genLayer);
explicit LayerDecorator(const Layer::Ptr& layer);
/**
 * @brief The constructor creates layer builders from reference to generic layer builder
 * @param layer constant pointer to generic layer builder
 */
explicit LayerDecorator(const Layer::CPtr& layer);
/**
 * @brief The copy constructor
 * @param rval Source builder
 */
explicit LayerFragment(const LayerFragment& rval);
LayerDecorator(const LayerDecorator& rval);

/**
 * @brief Copy operator for LayerFragment
 * @brief Copy operator for LayerDecorator
 * @param rval
 * @return Layer builder
 */
LayerFragment& operator=(const LayerFragment& rval);
LayerDecorator& operator=(const LayerDecorator& rval);

/**
 * @brief Virtual destructor
 */
virtual ~LayerFragment() = default;
virtual ~LayerDecorator() = default;

/**
 * @brief The operator creates generic builder
@ -51,6 +56,18 @@ public:
 */
virtual operator Layer() const;

/**
 * @brief The operator creates generic builder
 * @return Pointer to generic builder
 */
virtual operator Layer::Ptr();

/**
 * @brief The operator creates generic builder
 * @return Constant pointer to generic builder
 */
virtual operator Layer::CPtr() const;

/**
 * @brief Returns layer type
 * @return Layer type
@ -63,12 +80,14 @@ public:
const std::string& getName() const;

protected:
const std::vector<size_t> uInts2size_t(const std::vector<unsigned int>& vector) const;
Layer& getLayer() const;
Layer::Ptr& getLayer();
const Layer::CPtr getLayer() const;
void checkType(const std::string& type) const;

Layer::CPtr cLayer;

private:
Layer layer;
Layer& refLayer;
Layer::Ptr layer;
};

} // namespace Builder
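One way to read the storage change at the bottom of this class (a single `Layer::Ptr` instead of a `Layer` value plus `Layer& refLayer`): typed decorators and the generic builder can now alias the same underlying layer. A speculative sketch:

```cpp
#include <builders/ie_convolution_layer.hpp>

void sketchAliasing() {
    InferenceEngine::Builder::ConvolutionLayer conv("conv1");
    InferenceEngine::Builder::Layer::Ptr shared = conv;  // via operator Layer::Ptr()
    shared->setName("conv1_renamed");  // presumably visible through both handles
}
```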
inference-engine/include/builders/ie_lrn_layer.hpp (new file, 99 lines)
@ -0,0 +1,99 @@
// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
namespace Builder {

/**
 * @brief The class represents a builder for LRN layer
 */
class INFERENCE_ENGINE_API_CLASS(LRNLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit LRNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit LRNLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit LRNLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
LRNLayer& setName(const std::string& name);

/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
 */
const Port& getPort() const;
/**
 * @brief Sets port shapes for the layer
 * @param port Port with shapes
 * @return reference to layer builder
 */
LRNLayer& setPort(const Port& port);
/**
 * @brief Returns side length of the region
 * @return Size
 */
size_t getSize() const;
/**
 * @brief Sets side length of the region
 * @param size Size
 * @return reference to layer builder
 */
LRNLayer& setSize(size_t size);
/**
 * @brief Returns scaling parameter for the normalizing sum
 * @return Scaling parameter
 */
float getAlpha() const;
/**
 * @brief Sets scaling parameter for the normalizing sum
 * @param alpha Scaling parameter
 * @return reference to layer builder
 */
LRNLayer& setAlpha(float alpha);
/**
 * @brief Returns exponent for the normalizing sum
 * @return Exponent
 */
float getBeta() const;
/**
 * @brief Sets exponent for the normalizing sum
 * @param beta Exponent
 * @return reference to layer builder
 */
LRNLayer& setBeta(float beta);
/**
 * @brief Returns bias for the normalizing sum
 * @return Bias
 */
float getBias() const;
/**
 * @brief Sets bias for the normalizing sum
 * @param bias Bias
 * @return reference to layer builder
 */
LRNLayer& setBias(float bias);
};

} // namespace Builder
} // namespace InferenceEngine
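A minimal sketch chaining the LRN setters declared above (each returns `LRNLayer&`), with conventional AlexNet-style values:

```cpp
#include <builders/ie_lrn_layer.hpp>

void sketchLrn() {
    InferenceEngine::Builder::LRNLayer lrn("norm1");
    lrn.setSize(5)        // side length of the normalization region
       .setAlpha(1e-4f)   // scaling parameter of the normalizing sum
       .setBeta(0.75f)    // exponent of the normalizing sum
       .setBias(1.0f);    // bias of the normalizing sum
}
```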
inference-engine/include/builders/ie_lstm_sequence_layer.hpp (new file, 87 lines)
@ -0,0 +1,87 @@
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>

namespace InferenceEngine {
namespace Builder {

/**
 * @brief The class represents a builder for LSTMSequence layer
 */
class INFERENCE_ENGINE_API_CLASS(LSTMSequenceLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
 * @param name Layer name
 */
explicit LSTMSequenceLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer pointer to generic builder
 */
explicit LSTMSequenceLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit LSTMSequenceLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
 * @return reference to layer builder
 */
LSTMSequenceLayer& setName(const std::string& name);

/**
 * @brief Returns input ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getInputPorts() const;
/**
 * @brief Sets input ports for the layer
 * @param ports vector of input ports
 * @return reference to layer builder
 */
LSTMSequenceLayer& setInputPorts(const std::vector<Port>& ports);

/**
 * @brief Returns output ports with shapes for the layer
 * @return Vector of ports
 */
const std::vector<Port>& getOutputPorts() const;
/**
 * @brief Sets output ports for the layer
 * @param ports vector of output ports
 * @return reference to layer builder
 */
LSTMSequenceLayer& setOutputPorts(const std::vector<Port>& ports);

int getHiddenSize() const;
LSTMSequenceLayer& setHiddenSize(int size);
bool getSequenceDim() const;
LSTMSequenceLayer& setSqquenceDim(bool flag);
const std::vector<std::string>& getActivations() const;
LSTMSequenceLayer& setActivations(const std::vector<std::string>& activations);
const std::vector<float>& getActivationsAlpha() const;
LSTMSequenceLayer& setActivationsAlpha(const std::vector<float>& activations);
const std::vector<float>& getActivationsBeta() const;
LSTMSequenceLayer& setActivationsBeta(const std::vector<float>& activations);
float getClip() const;
LSTMSequenceLayer& setClip(float clip);
bool getInputForget() const;
LSTMSequenceLayer& setInputForget(bool flag);
const std::string& getDirection() const;
LSTMSequenceLayer& setDirection(const std::string& direction);
};

} // namespace Builder
} // namespace InferenceEngine
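A companion sketch to the GRU example earlier: the LSTM variant shares the port and activation plumbing and adds an input-forget flag. The activation name tokens are assumptions.

```cpp
#include <builders/ie_lstm_sequence_layer.hpp>

void sketchLstm() {
    InferenceEngine::Builder::LSTMSequenceLayer lstm("lstm1");
    lstm.setHiddenSize(256)
        .setInputForget(false)                         // LSTM-specific option
        .setActivations({"sigmoid", "tanh", "tanh"});  // assumed activation tokens
}
```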
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Memory layer
 */
class INFERENCE_ENGINE_API_CLASS(MemoryLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(MemoryLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit MemoryLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit MemoryLayer(Layer& genLayer);
explicit MemoryLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit MemoryLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for MVN layer
 */
class INFERENCE_ENGINE_API_CLASS(MVNLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(MVNLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit MVNLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit MVNLayer(Layer& genLayer);
explicit MVNLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit MVNLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,4 +1,4 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

@ -7,7 +7,7 @@
#include <builders/ie_layer_builder.hpp>
#include <ie_icnn_network.hpp>
#include <cpp/ie_cnn_network.h>
#include <ie_inetwork.hpp>
#include <ie_network.hpp>
#include <ie_context.hpp>
#include <ie_common.h>
#include <ie_blob.h>
@ -23,12 +23,43 @@ namespace Builder {
/**
 * @brief This class implements a builder for IE Network
 */
class INFERENCE_ENGINE_API_CLASS(Network) {
class INFERENCE_ENGINE_API_CLASS(Network): public INetwork {
public:
/**
 * @brief A shared pointer to the Network builder
 */
using Ptr = std::shared_ptr<Network>;
/**
 * @brief An iterator for Network builder definition
 */
using iterator = details::INetworkIterator<Network, Layer>;
/**
 * @brief Begin network iterator
 * @return Network iterator
 */
iterator begin();
/**
 * @brief Begin network iterator
 * @return const INetwork iterator
 */
const_iterator begin() const noexcept override;

/**
 * @brief End network iterator
 * @return Network iterator
 */
iterator end();
/**
 * @brief End network iterator
 * @return const INetwork iterator
 */
const_iterator end() const noexcept override;

/**
 * @brief Returns a number of layers in the network.
 * @return Layers count
 */
size_t size() const noexcept override;

/**
 * @brief The constructor creates a builder based on ICNNNetwork
@ -68,11 +99,6 @@ public:
 */
Network(const Context& ieContext, const INetwork& network);

/**
 * @brief Virtual destructor
 */
virtual ~Network() = default;

/**
 * @brief Adds new layer and connects it with previous layers
 *
@ -111,65 +137,103 @@ public:
 */
void disconnect(const Connection& connection);

/**
 * @brief Returns layer builder by ID
 *
 * @param layerId Layer ID
 *
 * @return Layer builder
 */
Layer& getLayer(idx_t layerId);
/**
 * @brief Returns constant layer builder by ID
 *
 * @param layerId Layer ID
 *
 * @return constant layer builder
 */
const Layer& getLayer(idx_t layerId) const;

/**
 * @brief Returns vector of layer builders
 *
 * @return Vector of layer builders
 */
std::vector<Layer>& getLayers();
std::vector<Layer::Ptr>& getLayers();
/**
 * @brief Returns constant vector of layer builders
 *
 * @return constant vector of layer builders
 */
const std::vector<Layer>& getLayers() const;
const std::vector<Layer::Ptr>& getLayers() const;

/**
 * @brief Returns all connections for layer
 *
 * @param layerId Layer ID
 *
 * @return Vector of connections for the current layer
 * @brief Returns a constant smart pointer to a Layer interface.
 * If the layer is missing, returns nullptr.
 * @param id Id of the Layer
 * @return Layer interface smart pointer
 */
const std::vector<Connection> getLayerConnections(idx_t layerId) const noexcept;
const ILayer::CPtr getLayer(idx_t id) const noexcept override;
Layer::Ptr getLayer(idx_t layerId);

/**
 * @brief Builds and validate networks
 * @brief Returns a constant vector of input layers.
 * @return Vector of input layers
 */
const std::vector<ILayer::CPtr> getInputs() const noexcept override;
/**
 * @brief Returns a vector of input layers.
 * @return Vector of input layers
 */
std::vector<Layer::Ptr> getInputs();

/**
 * @brief Returns a constant vector of output layers.
 * @return Vector of output layers
 */
const std::vector<ILayer::CPtr> getOutputs() const noexcept override;
/**
 * @brief Returns a vector of output layers.
 * @return Vector of output layers
 */
std::vector<Layer::Ptr> getOutputs();

/**
 * @brief Returns a constant vector of connections for specific layer.
 * If the layer is missing, returns empty vector.
 * @param layerId layer index
 * @return Vector of connections
 */
const std::vector<Connection> getLayerConnections(idx_t layerId) const noexcept override;

/**
 * @brief Returns a constant vector of all connections.
 * @return Vector of connections
 */
const std::vector<Connection>& getConnections() const;

/**
 * @brief Returns a network name.
 * @return Network name
 */
const std::string& getName() const noexcept override;

/**
 * @brief Returns a network context
 * @return const reference to Context
 */
const Context& getContext() const noexcept override;
/**
 * @brief Returns a network context
 * @return reference to Context
 */
Context& getContext() noexcept;

/**
 * @brief Builds and validates the network
 *
 * @return const shared pointer to INetwork
 */
const INetwork::Ptr build() const;
const INetwork::CPtr build();

/**
 * @brief Validates network
 *
 */
void validate();

/**
 * @brief The operator builds network
 *
 * @return const shared pointer to INetwork
 */
explicit operator const INetwork::Ptr() const;
explicit operator const INetwork::CPtr();

private:
const Context ctx;
const size_t version;
std::string name;
std::vector<Layer> layers;
std::vector<Connection> connections;
std::map<std::string, Parameter> parameters;
};

/**
@ -178,7 +242,7 @@ private:
 * @param network constant shared pointer to INetwork object
 * @return constant shared pointer to ICNNNetwork
 */
INFERENCE_ENGINE_API_CPP(const std::shared_ptr<ICNNNetwork>) convertToICNNNetwork(const INetwork::Ptr& network);
INFERENCE_ENGINE_API_CPP(const std::shared_ptr<ICNNNetwork>) convertToICNNNetwork(const INetwork::CPtr& network);

} // namespace Builder
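A hedged end-to-end sketch of the new builder flow: `Network` now implements `INetwork` directly and `build()` returns `INetwork::CPtr`. The string-name constructor and the `addLayer` signatures are assumptions based on the "Adds new layer and connects it with previous layers" doc above; only `build()` and `convertToICNNNetwork` are taken from this hunk.

```cpp
#include <builders/ie_network_builder.hpp>  // assumed header name
#include <builders/ie_input_layer.hpp>      // assumed header name
#include <builders/ie_convolution_layer.hpp>

void sketchNetwork() {
    using namespace InferenceEngine;
    Builder::Network net("toy");  // assumed string-name constructor
    idx_t in   = net.addLayer(Builder::InputLayer("in"));                    // assumed
    idx_t conv = net.addLayer({{in}}, Builder::ConvolutionLayer("c1"));      // assumed
    (void)conv;

    const INetwork::CPtr graph = net.build();         // validates, then freezes
    auto cnn = Builder::convertToICNNNetwork(graph);  // CPtr overload per this diff
    (void)cnn;
}
```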
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Norm layer
 */
class INFERENCE_ENGINE_API_CLASS(NormLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(NormLayer): public LayerDecorator {
public:
/**
 * @brief The enum defines all Norm types
@ -30,9 +30,14 @@ public:
explicit NormLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit NormLayer(Layer& genLayer);
explicit NormLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit NormLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Normalize layer
 */
class INFERENCE_ENGINE_API_CLASS(NormalizeLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(NormalizeLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit NormalizeLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit NormalizeLayer(Layer& genLayer);
explicit NormalizeLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit NormalizeLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Output layer
 */
class INFERENCE_ENGINE_API_CLASS(OutputLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(OutputLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit OutputLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit OutputLayer(Layer& genLayer);
explicit OutputLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit OutputLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,10 +1,10 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <vector>
#include <string>

@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Permute layer
 */
class INFERENCE_ENGINE_API_CLASS(PermuteLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(PermuteLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit PermuteLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit PermuteLayer(Layer& genLayer);
explicit PermuteLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit PermuteLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -33,19 +38,6 @@ public:
 */
PermuteLayer& setName(const std::string& name);

/**
 * @brief Sets weights for layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
PermuteLayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Sets biases for layer
 * @param biases Constant blob with biases
 * @return reference to layer builder
 */
PermuteLayer& setBiases(const Blob::CPtr& biases);

/**
 * @brief Returns input port
 * @return Input port
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <vector>
#include <string>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Pooling layer
 */
class INFERENCE_ENGINE_API_CLASS(PoolingLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(PoolingLayer): public LayerDecorator {
public:
/**
 * @brief The enum defines available pooling types
@ -40,9 +40,14 @@ public:
explicit PoolingLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit PoolingLayer(Layer& genLayer);
explicit PoolingLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit PoolingLayer(const Layer::CPtr& layer);
/**
 * @brief Operator creates generic layer builder
 * @return Generic layer builder
@ -155,12 +160,6 @@ public:
 */
PoolingLayer& setExcludePad(bool exclude);

/**
 * @brief Validates layer before creation
 * @param layer generic layer builder
 */
static void validate(const Layer& layer);

private:
PoolingType type;
RoundingType roundingType;
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for Power layer
 */
class INFERENCE_ENGINE_API_CLASS(PowerLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(PowerLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit PowerLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit PowerLayer(Layer& genLayer);
explicit PowerLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit PowerLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>

namespace InferenceEngine {
@ -14,7 +14,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for PReLU layer
 */
class INFERENCE_ENGINE_API_CLASS(PReLULayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(PReLULayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -23,9 +23,14 @@ public:
explicit PReLULayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit PReLULayer(Layer& genLayer);
explicit PReLULayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit PReLULayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
@ -33,12 +38,6 @@ public:
 */
PReLULayer& setName(const std::string& name);

/**
 * @brief Sets weights for layer
 * @param weights Constant blob with weights
 * @return reference to layer builder
 */
PReLULayer& setWeights(const Blob::CPtr& weights);
/**
 * @brief Returns port with shapes for the layer
 * @return Port with shapes
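With the typed `setWeights()`/`setBiases()` helpers removed from most decorators in this change, constant data is attached through the generic `Layer` builder instead, via `addConstantData` declared in ie_layer_builder.hpp above. A hedged sketch; the `"weights"` key, header name, and blob construction details are assumptions.

```cpp
#include <builders/ie_prelu_layer.hpp>  // assumed header name
#include <ie_blob.h>

void attachSlopes() {
    using namespace InferenceEngine;
    Builder::PReLULayer prelu("prelu1");
    Builder::Layer::Ptr generic = prelu;  // decorator -> generic builder

    // Build a constant blob for the per-channel slopes (API details assumed)
    Blob::Ptr slopes = make_shared_blob<float>(
        TensorDesc(Precision::FP32, {64}, Layout::C));
    slopes->allocate();
    generic->addConstantData("weights", slopes);  // "weights" key is assumed
}
```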
@ -1,11 +1,11 @@
// Copyright (C) 2018 Intel Corporation
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <builders/ie_layer_fragment.hpp>
#include <ie_inetwork.hpp>
#include <builders/ie_layer_decorator.hpp>
#include <ie_network.hpp>
#include <string>
#include <vector>

@ -15,7 +15,7 @@ namespace Builder {
/**
 * @brief The class represents a builder for PriorBoxClustered layer
 */
class INFERENCE_ENGINE_API_CLASS(PriorBoxClusteredLayer): public LayerFragment {
class INFERENCE_ENGINE_API_CLASS(PriorBoxClusteredLayer): public LayerDecorator {
public:
/**
 * @brief The constructor creates a builder with the name
@ -24,9 +24,14 @@ public:
explicit PriorBoxClusteredLayer(const std::string& name = "");
/**
 * @brief The constructor creates a builder from generic builder
 * @param genLayer generic builder
 * @param layer pointer to generic builder
 */
explicit PriorBoxClusteredLayer(Layer& genLayer);
explicit PriorBoxClusteredLayer(const Layer::Ptr& layer);
/**
 * @brief The constructor creates a builder from generic builder
 * @param layer constant pointer to generic builder
 */
explicit PriorBoxClusteredLayer(const Layer::CPtr& layer);
/**
 * @brief Sets the name for the layer
 * @param name Layer name
Some files were not shown because too many files have changed in this diff