Merge remote-tracking branch 'upstream/master'

Steve Yoo 2021-05-26 10:51:49 +09:00
commit 0a6c3cc9bf
605 changed files with 19995 additions and 5969 deletions

@@ -133,9 +133,10 @@ jobs:
     displayName: 'IE FuncTests'
     continueOnError: false
-  - script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
+  - script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke*:-smoke_LPT/ReduceMinTransformation.CompareWithRefImpl/f32_Shape* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
     displayName: 'CPU FuncTests'
     continueOnError: false
+    enabled: false
   - script: |
       export DATA_PATH=$(MODELS_PATH)

@@ -103,6 +103,7 @@ function(build_ngraph)
     endif()
     ie_cpack_add_component(ngraph REQUIRED)
+    ie_cpack_add_component(ngraph_dev REQUIRED DEPENDS ngraph)
     set(SDL_cmake_included ON)
     add_subdirectory(ngraph)

@@ -14,7 +14,13 @@ set(CMAKE_MODULE_PATH "${IEDevScripts_DIR}")
 function(set_ci_build_number)
     set(repo_root "${CMAKE_SOURCE_DIR}")
     include(version)
-    set(CI_BUILD_NUMBER "${CI_BUILD_NUMBER}" PARENT_SCOPE)
+    foreach(var CI_BUILD_NUMBER IE_VERSION
+                IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH)
+        if(NOT DEFINED ${var})
+            message(FATAL_ERROR "${var} version component is not defined")
+        endif()
+        set(${var} "${${var}}" PARENT_SCOPE)
+    endforeach()
 endfunction()
 set_ci_build_number()

@@ -31,6 +31,7 @@ addIeTarget(
 function(addIeTarget)
     set(options
         ADD_CPPLINT # Enables code style checks for the target
+        ADD_CLANG_FORMAT # Enables code style checks for the target
         )
     set(oneValueRequiredArgs
         TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable
@@ -119,6 +120,10 @@ function(addIeTarget)
         # code style
         add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME})
     endif()
+    if (ARG_ADD_CLANG_FORMAT)
+        # code style
+        add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME})
+    endif()
     if (ARG_DEVELOPER_PACKAGE)
         # developer package
         openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE}
@@ -128,7 +133,6 @@ function(addIeTarget)
     # Provide default compile pdb name equal to target name
     set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME})
     endif()
 endfunction()
 #[[

@@ -27,7 +27,10 @@ endif()
 # )
 #
 function(ie_add_plugin)
-    set(options SKIP_INSTALL)
+    set(options
+        SKIP_INSTALL
+        ADD_CLANG_FORMAT
+        )
     set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR)
     set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS)
     cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
@@ -73,7 +76,11 @@ function(ie_add_plugin)
         string(CONCAT custom_filter "${custom_filter}" "," "${filter}")
     endforeach()
-    add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
+    if (IE_PLUGIN_ADD_CLANG_FORMAT)
+        add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME})
+    else()
+        add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
+    endif()
     # check that plugin with such name is not registered

@@ -26,6 +26,60 @@ function (commitHash VAR)
     set (${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE)
 endfunction()
+macro(ie_parse_ci_build_number)
+    if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-.*")
+        set(IE_VERSION_MAJOR ${CMAKE_MATCH_1})
+        set(IE_VERSION_MINOR ${CMAKE_MATCH_2})
+        set(IE_VERSION_PATCH ${CMAKE_MATCH_3})
+        set(has_ci_version ON)
+    else()
+        set(IE_VERSION_MAJOR 0)
+        set(IE_VERSION_MINOR 0)
+        set(IE_VERSION_PATCH 0)
+    endif()
+    if(NOT DEFINED repo_root)
+        message(FATAL_ERROR "repo_root is not defined")
+    endif()
+    if(DEFINED IEDevScripts_DIR AND DEFINED IE_MAIN_SOURCE_DIR AND NOT DEFINED custom_build)
+        set(ie_version_hpp "${IE_MAIN_SOURCE_DIR}/include/ie_version.hpp")
+        if(NOT EXISTS ${ie_version_hpp})
+            message(FATAL_ERROR "File ie_version.hpp with IE_VERSION definitions is not found")
+        endif()
+        file(STRINGS "${ie_version_hpp}" IE_VERSION_PARTS REGEX "#define IE_VERSION_[A-Z]+[ ]+" )
+        string(REGEX REPLACE ".+IE_VERSION_MAJOR[ ]+([0-9]+).*" "\\1"
+               IE_VERSION_MAJOR_HPP "${IE_VERSION_PARTS}")
+        string(REGEX REPLACE ".+IE_VERSION_MINOR[ ]+([0-9]+).*" "\\1"
+               IE_VERSION_MINOR_HPP "${IE_VERSION_PARTS}")
+        string(REGEX REPLACE ".+IE_VERSION_PATCH[ ]+([0-9]+).*" "\\1"
+               IE_VERSION_PATCH_HPP "${IE_VERSION_PARTS}")
+        foreach(var IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH)
+            if(DEFINED ${var} AND NOT ${var} EQUAL ${var}_HPP)
+                message(FATAL_ERROR "${var} parsed from CI_BUILD_NUMBER (${${var}}) \
+                    and from ie_version.hpp (${${var}_HPP}) are different")
+            else()
+                # CI_BUILD_NUMBER is not defined well, take info from ie_verison.hpp as a baseline
+                set(${var} ${${var}_HPP})
+            endif()
+        endforeach()
+    elseif(has_ci_version)
+        message(WARNING "IE_MAIN_SOURCE_DIR is not defined. No way to compare versions")
+    else()
+        message(WARNING "No way to detect OpenVINO version. Supposing 0.0.0.0")
+    endif()
+    set(IE_VERSION "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}")
+endmacro()
+# WA for DL Benchmark
+if(DEFINED ENV{CI_BUILD_NUMBER} AND "$ENV{CI_BUILD_NUMBER}" STREQUAL "1")
+    unset(ENV{CI_BUILD_NUMBER})
+endif()
 if (DEFINED ENV{CI_BUILD_NUMBER})
     set(CI_BUILD_NUMBER $ENV{CI_BUILD_NUMBER})
 else()
@@ -36,6 +90,11 @@ else()
     set(CI_BUILD_NUMBER "${custom_build}")
 endif()
+# provides Inference Engine version
+# 1. If CI_BUILD_NUMBER is defined, parses this information
+# 2. Otherwise, parses ie_version.hpp
+ie_parse_ci_build_number()
 function (addVersionDefines FILE)
     foreach (VAR ${ARGN})
         if (DEFINED ${VAR} AND NOT "${${VAR}}" STREQUAL "")
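(For reference, the regex in `ie_parse_ci_build_number` above accepts build numbers of the form `<major>.<minor>.<patch>-<suffix>`; a hypothetical value such as `2021.4.0-1234-abcdef` would match, while anything else leaves the components at `0.0.0` and the macro then takes them from `ie_version.hpp` when that file is available.)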

@@ -2,24 +2,9 @@
 # SPDX-License-Identifier: Apache-2.0
 #
-macro(ie_parse_ci_build_number)
-    if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-.*")
-        set(IE_VERSION_MAJOR ${CMAKE_MATCH_1})
-        set(IE_VERSION_MINOR ${CMAKE_MATCH_2})
-        set(IE_VERSION_PATCH ${CMAKE_MATCH_3})
-        set(IE_VS_VER_HAS_VERSION 1)
-    else()
-        set(IE_VS_VER_HAS_VERSION 0)
-    endif()
-endmacro()
-ie_parse_ci_build_number()
-if(IE_VS_VER_HAS_VERSION)
-    set(IE_VS_VER_FILEVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
-    set(IE_VS_VER_PRODUCTVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
-    set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0")
-endif()
+set(IE_VS_VER_FILEVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
+set(IE_VS_VER_PRODUCTVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
+set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0")
 set(IE_VS_VER_COMPANY_NAME_STR "Intel Corporation")
 set(IE_VS_VER_PRODUCTVERSION_STR "${CI_BUILD_NUMBER}")

@@ -1,10 +1,8 @@
 #include <winver.h>
 VS_VERSION_INFO VERSIONINFO
-#if @IE_VS_VER_HAS_VERSION@
 FILEVERSION @IE_VS_VER_FILEVERSION_QUAD@
 PRODUCTVERSION @IE_VS_VER_PRODUCTVERSION_QUAD@
-#endif
 FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
 #ifdef _DEBUG
 FILEFLAGS 1
@@ -21,9 +19,7 @@ BEGIN
 BEGIN
     VALUE "CompanyName", "@IE_VS_VER_COMPANY_NAME_STR@\0"
     VALUE "FileDescription", "@IE_VS_VER_FILEDESCRIPTION_STR@\0"
-#if @IE_VS_VER_HAS_VERSION@
     VALUE "FileVersion", "@IE_VS_VER_FILEVERSION_STR@\0"
-#endif
     VALUE "InternalName", "@IE_VS_VER_INTERNALNAME_STR@\0"
     VALUE "LegalCopyright", "@IE_VS_VER_COPYRIGHT_STR@\0"
     VALUE "OriginalFilename", "@IE_VS_VER_ORIGINALFILENAME_STR@\0"

docs/.clang-format (new file, 25 lines)

@@ -0,0 +1,25 @@
+BasedOnStyle: Google
+IndentWidth: 4
+UseTab: Never
+Language: Cpp
+Standard: Cpp11
+AccessModifierOffset: -4
+AlignConsecutiveMacros: true
+AllowAllArgumentsOnNextLine: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLambdasOnASingleLine: Empty
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakBeforeMultilineStrings: false
+ColumnLimit: 160
+# Specialize this comment pragma in order to avoid changes in SEA copyrights
+CommentPragmas: '^#'
+DerivePointerAlignment: false
+FixNamespaceComments: true
+IndentCaseLabels: false
+IndentPPDirectives: BeforeHash
+SpaceBeforeCpp11BracedList: true
+SpaceBeforeCtorInitializerColon: false

@@ -16,7 +16,7 @@ To add your custom nGraph operation, create a new class that extends `ngraph::Op
 5. Override the `visit_attributes` method, which enables serialization and deserialization of operation attributes. An `AttributeVisitor` is passed to the method, and the implementation is expected to walk over all the attributes in the op using the type-aware `on_attribute` helper. Helpers are already implemented for standard C++ types like `int64_t`, `float`, `bool`, `vector`, and for existing nGraph defined types.
-6. Override `evaluate`, which is an optional method that enables the application of constant folding if there is a custom operation on the constant branch.
+6. Override `evaluate`, which is an optional method that enables the application of constant folding if there is a custom operation on the constant branch. If your operation contains the `evaluate` method, you also need to override the `has_evaluate` method, which reports whether `evaluate` is available for the operation.
 Based on that, declaration of an operation class can look as follows:
@@ -55,7 +55,7 @@ nGraph operation contains two constructors:
 @snippet template_extension/op.cpp op:visit_attributes
-### `evaluate()`
+### `evaluate()` and `has_evaluate()`
 `ngraph::Node::evaluate` method enables you to apply constant folding to an operation.
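The new `has_evaluate()` requirement can be illustrated with a minimal sketch of an operation that overrides both methods. This is not the Template extension code: the class name `MyAdd`, the add-one arithmetic, and the FP32-only check are illustrative assumptions, and the matching `NGRAPH_RTTI_DEFINITION(MyAdd, "MyAdd", 0)` would live in the corresponding .cpp file.

```cpp
#include <ngraph/ngraph.hpp>

// Illustrative operation that overrides both evaluate() and has_evaluate().
class MyAdd : public ngraph::op::Op {
public:
    NGRAPH_RTTI_DECLARATION;  // NGRAPH_RTTI_DEFINITION(MyAdd, "MyAdd", 0) goes in the .cpp file

    MyAdd() = default;
    explicit MyAdd(const ngraph::Output<ngraph::Node>& arg) : Op({arg}) {
        constructor_validate_and_infer_types();
    }

    void validate_and_infer_types() override {
        // Output has the same element type and shape as the input.
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
    }

    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override {
        return std::make_shared<MyAdd>(new_args.at(0));
    }

    // Constant folding entry point: compute the output tensor from the input tensor on the host.
    bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override {
        const float* src = inputs[0]->get_data_ptr<float>();
        float* dst = outputs[0]->get_data_ptr<float>();
        for (size_t i = 0; i < ngraph::shape_size(inputs[0]->get_shape()); ++i) {
            dst[i] = src[i] + 1.0f;  // illustrative element-wise computation
        }
        return true;
    }

    // Reports whether the evaluate() above can actually be used for the current input types.
    bool has_evaluate() const override {
        return get_input_element_type(0) == ngraph::element::f32;
    }
};
```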

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c9ddc759bc419268f4c23089b91a9e3373114a1d36b01d6fe62a5e87b5c0ad4
-size 59827
+oid sha256:4b14b03ebb6a00b5f52a8404282f83d4ad214c8d04aea74738027a775c4ef545
+size 100581

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59890c0c4a6d1c721dfaca22f0c1d0b305401f75dcd30418f858382830be2d31
-size 49598
+oid sha256:cbfadd457b4d943ffb46906a7daf03516e971fe49d2806cd32c84c5015178f03
+size 92819

@@ -2,36 +2,36 @@
 ## Introduction
-Inference Engine CPU plugin can infer models in the 8-bit integer (INT8) precision.
-For details, refer to [INT8 inference on the CPU](../../../IE_DG/Int8Inference.md).
-Intermediate Representation (IR) should be specifically formed to be suitable for INT8 inference.
-Such an IR is called an INT8 IR and you can generate it in two ways:
-- [Quantize model with the Post-Training Optimization tool](@ref pot_README)
-- Use the Model Optimizer for TensorFlow\* pre-TFLite models (`.pb` model file with `FakeQuantize*` operations)
+Inference Engine CPU and GPU plugin can infer models in the low precision.
+For details, refer to [Low Precision Inference on the CPU](../../../IE_DG/Int8Inference.md).
+Intermediate Representation (IR) should be specifically formed to be suitable for low precision inference.
+Such an IR is called a Low Precision IR and you can generate it in two ways:
+- [Quantize regular IR with the Post-Training Optimization tool](@ref pot_README)
+- Use the Model Optimizer for a model pretrained for Low Precision inference: TensorFlow\* pre-TFLite models (`.pb` model file with `FakeQuantize*` operations) and ONNX\* quantized models.
+Both Tensorflow and ONNX quantized models could be prepared by [Neural Network Compression Framework](https://github.com/openvinotoolkit/nncf/blob/develop/README.md)
-For an operation to be executed in INT8, it must have `FakeQuantize` operations as inputs with the `levels` attribute set to `255` or `256`.
+For an operation to be executed in INT8, it must have `FakeQuantize` operations as inputs.
 See the [specification of `FakeQuantize` operation](../../../ops/quantization/FakeQuantize_1.md) for details.
-To see the list of supported INT8 layers, refer to [INT8 inference on the CPU](../../../IE_DG/Int8Inference.md).
 To execute the `Convolution` operation in INT8 on CPU, both data and weight inputs should have `FakeQuantize` as an input operation:
 ![](../../img/expanded_int8_Convolution_weights.png)
-INT8 IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between an INT8 IR and FP16 or FP32 IR is the existence of `FakeQuantize` in the INT8 IR.
-Plugins with INT8 inference support recognize these sub-graphs and quantize them during the inference time.
-Plugins without INT8 support execute all operations, including `FakeQuantize`, as is in the FP32 or FP16 precision.
+Low Precision IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between a Low Precision IR and FP16 or FP32 IR is the existence of `FakeQuantize` in the Low Precision IR.
+Plugins with Low Precision Inference support recognize these sub-graphs and quantize them during the inference time.
+Plugins without Low Precision support execute all operations, including `FakeQuantize`, as is in the FP32 or FP16 precision.
 Accordingly, the presence of FakeQuantize operations in the IR is a recommendation for a plugin on how to quantize particular operations in the model.
-If capable, a plugin accepts the recommendation and performs INT8 inference, otherwise the plugin ignores the recommendation and executes a model in the floating-point precision.
+If capable, a plugin accepts the recommendation and performs Low Precision Inference, otherwise, the plugin ignores the recommendation and executes a model in the floating-point precision.
-## Compressed INT8 Weights
+## Compressed Low Precision Weights
 Weighted operations, like `Convolution`, `MatMul`, and others, store weights as floating-point `Constant` in the graph followed by the `FakeQuantize` operation.
 `Constant` followed by the `FakeQuantize` operation could be optimized memory-wise due to the `FakeQuantize` operation semantics.
-The resulting weights sub-graph stores weights in INT8 `Constant`, which gets unpacked back to floating point with the `Convert` operation.
-Weights compression leaves `FakeQuantize` output arithmetically the same and weights storing takes four times less memory.
+The resulting weights sub-graph stores weights in Low Precision `Constant`, which gets unpacked back to floating point with the `Convert` operation.
+Weights compression replaces `FakeQuantize` with optional `Subtract` and `Multiply` operation leaving output arithmetically the same and weights storing takes four times less memory.
 See the visualization of `Convolution` with the compressed weights:
 ![](../../img/compressed_int8_Convolution_weights.png)
-Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default. To generate an expanded INT8 IR, use `--disable_weights_compression`.
+Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default.
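As a rough sketch of the compressed-weights arithmetic described above (the `scale` and `zero_point` names are illustrative, and the `Subtract` is omitted when the zero point is zero), the low precision `Constant` is expanded back to floating point as

\f[
W_{fp} \approx (W_{int8} - zero\_point) \cdot scale,
\f]

which is what the `Convert`, optional `Subtract`, and `Multiply` chain computes, while the `Constant` itself is stored in 8 bits.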

@@ -75,11 +75,11 @@ The guide assumes you downloaded the OpenVINO toolkit for Raspbian* OS. If you d
    By default, the package file is saved as `l_openvino_toolkit_runtime_raspbian_p_<version>.tgz`.
 3. Create an installation folder.
    ```sh
-   sudo mkdir -p /opt/intel/openvino
+   sudo mkdir -p /opt/intel/openvino_2021
    ```
 4. Unpack the archive:
    ```sh
-   sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_<version>.tgz --strip 1 -C /opt/intel/openvino
+   sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_<version>.tgz --strip 1 -C /opt/intel/openvino_2021
    ```
 Now the OpenVINO toolkit components are installed. Additional configuration steps are still required. Continue to the next sections to install External Software Dependencies, configure the environment and set up USB rules.
@@ -154,7 +154,7 @@ Follow the next steps to use the pre-trained face detection model using Inferenc
    ```
 4. Run the sample specifying the model, a path to the input image, and the VPU required to run with the Raspbian* OS:
    ```sh
-   ./armv7l/Release/object_detection_sample_ssd -m face-detection-adas-0001.xml -d MYRIAD -i <path_to_image>
+   ./armv7l/Release/object_detection_sample_ssd -m <path_to_model>/face-detection-adas-0001.xml -d MYRIAD -i <path_to_image>
    ```
 The application outputs an image (`out_0.bmp`) with detected faced enclosed in rectangles.

@@ -1,7 +1,7 @@
 # Intel® Distribution of OpenVINO™ Toolkit Developer Package
+Copyright © 2018-2021 Intel Corporation
 > **LEGAL NOTICE**: Your use of this software and any required dependent software (the
-“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/en-us/license/eula-for-intel-software-development-products) for the Software Package, which may also include notices, disclaimers, or
+“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/content/dam/develop/external/us/en/documents/intel-openvino-license-agreements.pdf) for the Software Package, which may also include notices, disclaimers, or
 license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details.
 ## Introduction
@@ -40,11 +40,7 @@ The table below lists the supported operating systems and Python* versions requi
 ## Install the Developer Package
-### Step 1. Install External Software Dependencies
-On Windows* OS you are required to install [Microsoft* Visual C++ Redistributable Package (x64)](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2019) to be able to run OpenVINO™ applications.
-### Step 2. Set Up Python Virtual Environment
+### Step 1. Set Up Python Virtual Environment
 To avoid dependency conflicts, use a virtual environment. Skip this
 step only if you do want to install all dependencies globally.
@@ -62,7 +58,7 @@ On Windows:
 python -m venv openvino_env
 ```
-### Step 3. Activate Virtual Environment
+### Step 2. Activate Virtual Environment
 On Linux and macOS:
 ```sh
@@ -73,14 +69,14 @@ On Windows:
 openvino_env\Scripts\activate
 ```
-### Step 4. Set Up and Update pip to the Highest Version
+### Step 3. Set Up and Update PIP to the Highest Version
 Run the command below:
 ```sh
 python -m pip install --upgrade pip
 ```
-### Step 5. Install the Package
+### Step 4. Install the Package
 Run the command below: <br>
@@ -88,7 +84,7 @@ Run the command below: <br>
 pip install openvino-dev
 ```
-### Step 6. Verify that the Package is Installed
+### Step 5. Verify that the Package is Installed
 Run the command below (this may take a few seconds):
 ```sh
@@ -97,6 +93,19 @@ pot -h
 You will see the help message for Post-Training Optimization Tool if installation finished successfully.
+## Troubleshooting
+#### Error: Microsoft Visual C++ 14.0 is required. Get it with "Build Tools for Visual Studio"
+On Windows* some dependencies may require compilation from source when installing. To resolve this issue, you need to install [Build Tools for Visual Studio* 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019) and repeat package installation.
+#### ImportError: libpython3.7m.so.1.0: cannot open shared object file: No such file or directory
+To resolve missing external dependency on Ubuntu*, execute the following command:
+```sh
+sudo apt-get install libpython3.7
+```
 ## Additional Resources
 - Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)

@@ -1,7 +1,7 @@
 # Intel® Distribution of OpenVINO™ Toolkit Runtime Package
+Copyright © 2018-2021 Intel Corporation
 > **LEGAL NOTICE**: Your use of this software and any required dependent software (the
-“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/en-us/license/eula-for-intel-software-development-products) for the Software Package, which may also include notices, disclaimers, or
+“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/content/dam/develop/external/us/en/documents/intel-openvino-license-agreements.pdf) for the Software Package, which may also include notices, disclaimers, or
 license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details.
 ## Introduction
@@ -37,11 +37,7 @@ The table below lists supported operating systems and Python* versions required
 ## Install the Runtime Package
-### Step 1. Install External Software Dependencies
-On Windows* OS you are required to install [Microsoft* Visual C++ Redistributable Package (x64)](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2019) to be able to run OpenVINO™ applications.
-### Step 2. Set Up Python Virtual Environment
+### Step 1. Set Up Python Virtual Environment
 To avoid dependency conflicts, use a virtual environment. Skip this
 step only if you do want to install all dependencies globally.
@@ -55,7 +51,7 @@ python -m venv openvino_env
 > **NOTE**: On Linux and macOS, you may need to type `python3` instead of
 `python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/).
-### Step 3. Activate Virtual Environment
+### Step 2. Activate Virtual Environment
 On Linux and macOS:
 ```sh
@@ -66,14 +62,14 @@ On Windows:
 openvino_env\Scripts\activate
 ```
-### Step 4. Set Up and Update pip to the Highest Version
+### Step 3. Set Up and Update PIP to the Highest Version
 Run the command below:
 ```sh
 python -m pip install --upgrade pip
 ```
-### Step 5. Install the Package
+### Step 4. Install the Package
 Run the command below: <br>
@@ -81,7 +77,7 @@ Run the command below: <br>
 pip install openvino
 ```
-### Step 6. Verify that the Package is Installed
+### Step 5. Verify that the Package is Installed
 Run the command below:
 ```sh
@@ -90,6 +86,19 @@ python -c "from openvino.inference_engine import IECore"
 You will not see any error messages if installation finished successfully.
+## Troubleshooting
+#### Error: Microsoft Visual C++ 14.0 is required. Get it with "Build Tools for Visual Studio"
+On Windows* some dependencies may require compilation from source when installing. To resolve this issue, you need to install [Build Tools for Visual Studio* 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019) and repeat package installation.
+#### ImportError: libpython3.7m.so.1.0: cannot open shared object file: No such file or directory
+To resolve missing external dependency on Ubuntu*, execute the following command:
+```sh
+sudo apt-get install libpython3.7
+```
 ## Additional Resources
 - [Intel® Distribution of OpenVINO™ toolkit](https://software.intel.com/en-us/openvino-toolkit).

@@ -9,7 +9,10 @@ set(TARGET_NAME "onnx_custom_op")
 find_package(ngraph REQUIRED COMPONENTS onnx_importer)
-add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp)
+add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp)
 target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES})
 # [cmake:onnx_custom_op]
+# Enable code style check
+add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})

@@ -28,7 +28,7 @@ The `threshold` can be calculated with the following formula where `alpha` is th
 -log(e^{10^{-\alpha}} - 1.0) < threshold < log(\beta)
 \f]
-For example, if *T* is `fp32`, `threshold` should be `20` or if *T* is `fp16`, `threshold` should be `12`.
+For example, if *T* is `fp32`, `threshold` should be `20` or if *T* is `fp16`, `threshold` should be `11`.
 **Attributes**: *SoftPlus* operation has no attributes.
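A quick sanity check of the new `fp16` value, assuming \f$\beta\f$ is the largest finite value representable in *T* (about 65504 for `fp16`): \f$\log(65504) \approx 11.09\f$, so `threshold = 11` satisfies `threshold < log(\beta)` while the previous value `12` does not.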

@@ -2,31 +2,31 @@
 **Versioned name**: *Floor-1*
 **Category**: Arithmetic unary operation
 **Short description**: *Floor* performs element-wise floor operation with given tensor.
-**Attributes**:
-No attributes available.
+**Detailed description**: For each element from the input tensor calculates corresponding
+element in the output tensor with the following formula:
+\f[
+a_{i} = floor(a_{i})
+\f]
+**Attributes**: *Floor* operation has no attributes.
 **Inputs**
-* **1**: An tensor of type T. **Required.**
+* **1**: A tensor of type *T* and arbitrary shape. **Required.**
 **Outputs**
-* **1**: The result of element-wise floor operation. A tensor of type T.
+* **1**: The result of element-wise floor operation. A tensor of type *T*.
 **Types**
 * *T*: any numeric type.
-*Floor* does the following with the input tensor *a*:
-\f[
-a_{i} = floor(a_{i})
-\f]
 **Examples**

@@ -8,7 +8,7 @@
 **Detailed Description**
-*VariadicSplit* operation splits a given input tensor `data` into chunks along a scalar `axis`. It produces multiple output tensors based on additional input tensor `split_lengths`.
+*VariadicSplit* operation splits a given input tensor `data` into chunks along a scalar or tensor with shape `[1]` `axis`. It produces multiple output tensors based on additional input tensor `split_lengths`.
 The i-th output tensor shape is equal to the input tensor `data` shape, except for dimension along `axis` which is `split_lengths[i]`.
 \f[
@@ -23,7 +23,7 @@ Where D is the rank of input tensor `data`. The sum of elements in `split_length
 * **1**: `data`. A tensor of type `T1` and arbitrary shape. **Required.**
-* **2**: `axis`. Axis along `data` to split. A scalar of type `T2` with value from range `-rank(data) .. rank(data)-1`. Negative values address dimensions from the end.
+* **2**: `axis`. Axis along `data` to split. A scalar or tensor with shape `[1]` of type `T2` with value from range `-rank(data) .. rank(data)-1`. Negative values address dimensions from the end.
 **Required.**
 * **3**: `split_lengths`. A list containing the dimension values of each output tensor shape along the split `axis`. A 1D tensor of type `T2`. The number of elements in `split_lengths` determines the number of outputs. The sum of elements in `split_lengths` must match `data.shape[axis]`. In addition `split_lengths` can contain a single `-1` element, which means, all remaining items along specified `axis` that are not consumed by other parts. **Required.**
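As an illustrative example of the `-1` semantics (the shapes here are hypothetical, not taken from the specification): for `data` of shape `[6, 12]`, `axis = 1`, and `split_lengths = [2, -1, 4]`, the operation produces outputs of shapes `[6, 2]`, `[6, 6]`, and `[6, 4]`, because the `-1` entry absorbs the remaining `12 - 2 - 4 = 6` items along the split axis.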

@@ -58,7 +58,7 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values
 * *epsilon*
   * **Description**: *epsilon* is a constant added to the variance for numerical stability.
-  * **Range of values**: a positive floating-point number
+  * **Range of values**: a floating-point number greater than or equal to zero
   * **Type**: `float`
   * **Default value**: none
   * **Required**: *yes*

@@ -58,7 +58,7 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values
 * *epsilon*
   * **Description**: *epsilon* is a constant added to the variance for numerical stability.
-  * **Range of values**: a positive floating-point number
+  * **Range of values**: a floating-point number greater than or equal to zero
   * **Type**: `float`
   * **Default value**: none
   * **Required**: *yes*

@@ -154,7 +154,7 @@ auto consumers = output.get_target_inputs();
 {
     // ! [ngraph:shape]
     auto partial_shape = node->input(0).get_partial_shape(); // get zero input partial shape
-    if (partial_shape.is_dynamic() /* or !partial_shape.is_staic() */) {
+    if (partial_shape.is_dynamic() /* or !partial_shape.is_static() */) {
         return false;
     }
     auto static_shape = partial_shape.get_shape();
@@ -311,4 +311,4 @@ void pass_manager_example3(std::shared_ptr<ngraph::Function> f) {
     manager.run_passes(f);
 }
 // ! [ngraph:disabled_by_default]
 }

@@ -33,3 +33,7 @@ if (ngraph_onnx_importer_FOUND)
     target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED)
 endif()
 # [cmake:extension]
+# Enable code style check
+file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp")
+add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${template_extension_src})

@@ -3,13 +3,15 @@
 //
 #include "cpu_kernel.hpp"
-#include "op.hpp"
 #include <ie_layouts.h>
+#include "op.hpp"
 using namespace TemplateExtension;
 //! [cpu_implementation:ctor]
-OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
+OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node>& node) {
     try {
         auto castedNode = std::dynamic_pointer_cast<Operation>(node);
         if (!castedNode)
@@ -32,8 +34,8 @@ OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
 //! [cpu_implementation:ctor]
 //! [cpu_implementation:getSupportedConfigurations]
-InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
-                                                                         InferenceEngine::ResponseDesc *resp) noexcept {
+InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
+                                                                         InferenceEngine::ResponseDesc* resp) noexcept {
     auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
         InferenceEngine::LayerConfig config;
         config.dynBatchSupport = false;
@@ -72,7 +74,7 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
     if (!error.empty()) {
         if (resp) {
             strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
-            resp->msg[sizeof(resp->msg)-1] = 0;
+            resp->msg[sizeof(resp->msg) - 1] = 0;
         }
         return InferenceEngine::GENERAL_ERROR;
     }
@@ -85,25 +87,24 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
 //! [cpu_implementation:getSupportedConfigurations]
 //! [cpu_implementation:init]
-InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
+InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
     try {
         if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
             IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
         }
         if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
-            IE_THROW()
-                << "Operation can be initialized only with 4d input/output tensors!";
+            IE_THROW() << "Operation can be initialized only with 4d input/output tensors!";
         }
         if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
             config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
             IE_THROW() << "Operation supports only FP32 precisions!";
         }
     } catch (InferenceEngine::Exception& ex) {
         if (resp) {
             strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
-            resp->msg[sizeof(resp->msg)-1] = 0;
+            resp->msg[sizeof(resp->msg) - 1] = 0;
         }
         return InferenceEngine::GENERAL_ERROR;
     }
@@ -113,11 +114,10 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig
 //! [cpu_implementation:init]
 //! [cpu_implementation:execute]
-InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                                      std::vector<InferenceEngine::Blob::Ptr> &outputs,
-                                                      InferenceEngine::ResponseDesc *resp) noexcept {
-    const float* src_data = inputs[0]->cbuffer().as<const float *>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-    float *dst_data = outputs[0]->buffer().as<float *>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
+InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
+                                                      InferenceEngine::ResponseDesc* resp) noexcept {
+    const float* src_data = inputs[0]->cbuffer().as<const float*>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
+    float* dst_data = outputs[0]->buffer().as<float*>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
     for (size_t i = 0; i < inputs[0]->size(); i++) {
         dst_data[i] = src_data[i] + add;

@@ -5,6 +5,7 @@
 #pragma once
 #include <ie_iextension.h>
 #include <ngraph/ngraph.hpp>
 namespace TemplateExtension {
@@ -13,13 +14,12 @@ namespace TemplateExtension {
 class OpImplementation : public InferenceEngine::ILayerExecImpl {
 public:
     explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
-    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
-                                                            InferenceEngine::ResponseDesc *resp) noexcept override;
-    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
-                                     InferenceEngine::ResponseDesc *resp) noexcept override;
-    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                        std::vector<InferenceEngine::Blob::Ptr> &outputs,
-                                        InferenceEngine::ResponseDesc *resp) noexcept override;
+    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
+                                                           InferenceEngine::ResponseDesc* resp) noexcept override;
+    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
+    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
+                                        InferenceEngine::ResponseDesc* resp) noexcept override;
 private:
     int64_t add;
     ngraph::Shape inShape;

@@ -3,15 +3,16 @@
 //
 #include "extension.hpp"
 #include "cpu_kernel.hpp"
 #include "op.hpp"
 #ifdef OPENCV_IMPORT_ENABLED
-#include "fft_op.hpp"
-#include "fft_kernel.hpp"
+#include "fft_kernel.hpp"
+#include "fft_op.hpp"
 #endif
 #include <ngraph/ngraph.hpp>
 #ifdef NGRAPH_ONNX_IMPORT_ENABLED
 #include <onnx_import/onnx_utils.hpp>
 #endif
 #include <map>
@@ -21,22 +22,19 @@
 using namespace TemplateExtension;
 //! [extension:ctor]
 Extension::Extension() {
 #ifdef NGRAPH_ONNX_IMPORT_ENABLED
-    ngraph::onnx_import::register_operator(
-        Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
-            ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
-            int64_t add = node.get_attribute_value<int64_t>("add");
-            return {std::make_shared<Operation>(ng_inputs.at(0), add)};
-        });
+    ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
+        ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
+        int64_t add = node.get_attribute_value<int64_t>("add");
+        return {std::make_shared<Operation>(ng_inputs.at(0), add)};
+    });
 #ifdef OPENCV_IMPORT_ENABLED
-    ngraph::onnx_import::register_operator(
-        FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
-            ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
-            bool inverse = node.get_attribute_value<int64_t>("inverse");
-            return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
-        });
+    ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
+        ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
+        bool inverse = node.get_attribute_value<int64_t>("inverse");
+        return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
+    });
 #endif
 #endif
@@ -47,19 +45,19 @@ Extension::Extension() {
 Extension::~Extension() {
 #ifdef NGRAPH_ONNX_IMPORT_ENABLED
     ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain");
 #ifdef OPENCV_IMPORT_ENABLED
     ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain");
 #endif // OPENCV_IMPORT_ENABLED
 #endif // NGRAPH_ONNX_IMPORT_ENABLED
 }
 //! [extension:dtor]
 //! [extension:GetVersion]
-void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept {
+void Extension::GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept {
     static InferenceEngine::Version ExtensionDescription = {
         {1, 0}, // extension API version
         "1.0",
         "template_ext" // extension description message
     };
     versionInfo = &ExtensionDescription;
@@ -80,7 +78,7 @@ std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
 //! [extension:getOpSets]
 //! [extension:getImplTypes]
-std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) {
+std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node>& node) {
     if (std::dynamic_pointer_cast<Operation>(node)) {
         return {"CPU"};
     }
@@ -94,7 +92,7 @@ std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::N
 //! [extension:getImplTypes]
 //! [extension:getImplementation]
-InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) {
+InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
     if (implType == "CPU") {
         if (std::dynamic_pointer_cast<Operation>(node)) {
             return std::make_shared<OpImplementation>(node);
@@ -110,16 +108,16 @@ InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_
 //! [extension:getImplementation]
 //! [extension:CreateExtension]
-//Generate exported function
+// Generate exported function
 IE_DEFINE_EXTENSION_CREATE_FUNCTION(Extension)
 //! [extension:CreateExtension]
-INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
-                                                                                       InferenceEngine::ResponseDesc *resp) noexcept {
+INFERENCE_EXTENSION_API(InferenceEngine::StatusCode)
+InferenceEngine::CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept {
     try {
         ext = new Extension();
         return OK;
-    } catch (std::exception &ex) {
+    } catch (std::exception& ex) {
         if (resp) {
             std::string err = ((std::string) "Couldn't create extension: ") + ex.what();
             err.copy(resp->msg, 255);

@@ -4,13 +4,14 @@
 #pragma once
-#include <ie_iextension.h>
 #include <ie_api.h>
-#include <ngraph/ngraph.hpp>
-#include <memory>
-#include <vector>
-#include <string>
+#include <ie_iextension.h>
 #include <map>
+#include <memory>
+#include <ngraph/ngraph.hpp>
+#include <string>
+#include <vector>
 //! [extension:header]
 namespace TemplateExtension {

@@ -4,14 +4,16 @@
 //! [fft_kernel:implementation]
 #include "fft_kernel.hpp"
-#include "fft_op.hpp"
 #include <ie_layouts.h>
 #include <opencv2/opencv.hpp>
+#include "fft_op.hpp"
 using namespace TemplateExtension;
-FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node> &node) {
+FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node>& node) {
     auto castedNode = std::dynamic_pointer_cast<FFTOp>(node);
     if (!castedNode)
         IE_THROW() << "Cannot create implementation for unknown operation!";
@@ -26,8 +28,7 @@ FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node> &node) {
     inverse = castedNode->inverse;
 }
-InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
-                                                                InferenceEngine::ResponseDesc *resp) noexcept {
+InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc* resp) noexcept {
     std::vector<InferenceEngine::DataConfig> inDataConfig;
     std::vector<InferenceEngine::DataConfig> outDataConfig;
     InferenceEngine::SizeVector order(inpShape.size());
@@ -54,28 +55,27 @@ InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<Infe
     return InferenceEngine::StatusCode::OK;
 }
-InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
+InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
     try {
         if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
             IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
         }
         if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
             config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
             IE_THROW() << "Operation supports only FP32 precisions!";
         }
     } catch (InferenceEngine::Exception& ex) {
         if (resp) {
             strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
-            resp->msg[sizeof(resp->msg)-1] = 0;
+            resp->msg[sizeof(resp->msg) - 1] = 0;
         }
         return InferenceEngine::GENERAL_ERROR;
     }
     return InferenceEngine::OK;
 }
-static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
-{
+static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
     // NOTE: Inference Engine sizes are reversed.
     std::vector<size_t> dims = blob->getTensorDesc().getDims();
     std::vector<int> size(dims.begin(), dims.end());
@@ -84,9 +84,8 @@ static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
     return cv::Mat(size, CV_32F, (void*)blob->buffer());
 }
-InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                             std::vector<InferenceEngine::Blob::Ptr> &outputs,
-                                             InferenceEngine::ResponseDesc *resp) noexcept {
+InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
+                                             InferenceEngine::ResponseDesc* resp) noexcept {
     cv::Mat inp = infEngineBlobToMat(inputs[0]);
     cv::Mat out = infEngineBlobToMat(outputs[0]);
@@ -95,10 +94,7 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
     const int w = inp.size[3];
     cv::Mat complex(h, w, CV_32FC2), interleavedOut(h, w, CV_32FC2);
     for (int i = 0; i < n; ++i) {
-        std::vector<cv::Mat> components = {
-            cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)),
-            cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))
-        };
+        std::vector<cv::Mat> components = {cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))};
         cv::merge(components, complex);
         if (!inverse)
@@ -106,13 +102,9 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
         else
             cv::idft(complex, interleavedOut, cv::DFT_SCALE);
-        components = {
-            cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)),
-            cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))
-        };
+        components = {cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))};
         cv::split(interleavedOut, components);
     }
     return InferenceEngine::OK;
 }
 //! [fft_kernel:implementation]

@@ -6,6 +6,7 @@
 #pragma once
 #include <ie_iextension.h>
 #include <ngraph/ngraph.hpp>
 namespace TemplateExtension {
@@ -13,13 +14,12 @@ namespace TemplateExtension {
 class FFTImpl : public InferenceEngine::ILayerExecImpl {
 public:
     explicit FFTImpl(const std::shared_ptr<ngraph::Node>& node);
-    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
-                                                            InferenceEngine::ResponseDesc *resp) noexcept override;
-    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
-                                     InferenceEngine::ResponseDesc *resp) noexcept override;
-    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
-                                        std::vector<InferenceEngine::Blob::Ptr> &outputs,
-                                        InferenceEngine::ResponseDesc *resp) noexcept override;
+    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
+                                                           InferenceEngine::ResponseDesc* resp) noexcept override;
+    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
+    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
+                                        InferenceEngine::ResponseDesc* resp) noexcept override;
 private:
     ngraph::Shape inpShape;
     ngraph::Shape outShape;
@@ -27,5 +27,5 @@ private:
     std::string error;
 };
-}
+}  // namespace TemplateExtension
 //! [fft_kernel:header]

View File

@ -9,7 +9,7 @@ using namespace TemplateExtension;
constexpr ngraph::NodeTypeInfo FFTOp::type_info; constexpr ngraph::NodeTypeInfo FFTOp::type_info;
FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse) : Op({inp}) { FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse): Op({inp}) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
inverse = _inverse; inverse = _inverse;
} }
@ -19,16 +19,15 @@ void FFTOp::validate_and_infer_types() {
set_output_type(0, get_input_element_type(0), outShape); set_output_type(0, get_input_element_type(0), outShape);
} }
std::shared_ptr<ngraph::Node> FFTOp::clone_with_new_inputs(const ngraph::OutputVector &new_args) const { std::shared_ptr<ngraph::Node> FFTOp::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
if (new_args.size() != 1) { if (new_args.size() != 1) {
throw ngraph::ngraph_error("Incorrect number of new arguments"); throw ngraph::ngraph_error("Incorrect number of new arguments");
} }
return std::make_shared<FFTOp>(new_args.at(0), inverse); return std::make_shared<FFTOp>(new_args.at(0), inverse);
} }
bool FFTOp::visit_attributes(ngraph::AttributeVisitor &visitor) { bool FFTOp::visit_attributes(ngraph::AttributeVisitor& visitor) {
visitor.on_attribute("inverse", inverse); visitor.on_attribute("inverse", inverse);
return true; return true;
} }
//! [fft_op:implementation] //! [fft_op:implementation]
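For orientation: a custom op such as FFTOp is typically instantiated when building or transforming an ngraph::Function. A minimal sketch; the 1x2x64x64 shape (real/imaginary planes packed in the channel dimension, matching what the kernel above expects) and the header name are assumptions:

    #include <ngraph/ngraph.hpp>
    #include "fft_op.hpp"  // assumed header declaring TemplateExtension::FFTOp

    std::shared_ptr<ngraph::Function> make_fft_function() {
        auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2, 64, 64});
        auto fft = std::make_shared<TemplateExtension::FFTOp>(input, /*inverse=*/false);
        auto result = std::make_shared<ngraph::op::Result>(fft);
        return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input});
    }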

View File

@ -11,8 +11,10 @@ namespace TemplateExtension {
class FFTOp : public ngraph::op::Op { class FFTOp : public ngraph::op::Op {
public: public:
static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0}; static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0};
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; } const ngraph::NodeTypeInfo& get_type_info() const override {
return type_info;
}
FFTOp() = default; FFTOp() = default;
FFTOp(const ngraph::Output<ngraph::Node>& inp, bool inverse); FFTOp(const ngraph::Output<ngraph::Node>& inp, bool inverse);
@ -23,6 +25,5 @@ public:
bool inverse; bool inverse;
}; };
} } // namespace TemplateExtension
//! [fft_op:header] //! [fft_op:header]

View File

@ -9,7 +9,7 @@ using namespace TemplateExtension;
//! [op:ctor] //! [op:ctor]
NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0); NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0);
Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) { Operation::Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add): Op({arg}), add(add) {
constructor_validate_and_infer_types(); constructor_validate_and_infer_types();
} }
//! [op:ctor] //! [op:ctor]
@ -22,7 +22,7 @@ void Operation::validate_and_infer_types() {
//! [op:validate] //! [op:validate]
//! [op:copy] //! [op:copy]
std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::OutputVector &new_args) const { std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
if (new_args.size() != 1) { if (new_args.size() != 1) {
throw ngraph::ngraph_error("Incorrect number of new arguments"); throw ngraph::ngraph_error("Incorrect number of new arguments");
} }
@ -32,56 +32,77 @@ std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::Out
//! [op:copy] //! [op:copy]
//! [op:visit_attributes] //! [op:visit_attributes]
bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) { bool Operation::visit_attributes(ngraph::AttributeVisitor& visitor) {
visitor.on_attribute("add", add); visitor.on_attribute("add", add);
return true; return true;
} }
//! [op:visit_attributes] //! [op:visit_attributes]
//! [op:evaluate] //! [op:evaluate]
namespace namespace {
{
template <class T> template <class T>
void implementation(const T* input, void implementation(const T* input, T* output, int64_t add, size_t size) {
T* output,
int64_t add,
size_t size) {
for (size_t i = 0; i < size; i++) { for (size_t i = 0; i < size; i++) {
output[i] = input[i] + add; output[i] = input[i] + add;
} }
} }
template <ngraph::element::Type_t ET> template <ngraph::element::Type_t ET>
bool evaluate_op(const ngraph::HostTensorPtr& arg0, bool evaluate_op(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, int64_t add) {
const ngraph::HostTensorPtr& out, int64_t add)
{
size_t size = ngraph::shape_size(arg0->get_shape()); size_t size = ngraph::shape_size(arg0->get_shape());
implementation(arg0->get_data_ptr<ET>(), implementation(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), add, size);
out->get_data_ptr<ET>(),
add,
size);
return true; return true;
} }
} // namespace } // namespace
bool Operation::evaluate(const ngraph::HostTensorVector& outputs, bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const {
const ngraph::HostTensorVector& inputs) const { switch (inputs[0]->get_element_type()) {
switch (inputs[0]->get_element_type()) case ngraph::element::Type_t::i8:
{ return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i8: return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr()); case ngraph::element::Type_t::i16:
case ngraph::element::Type_t::i16: return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr()); return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i32: return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr()); case ngraph::element::Type_t::i32:
case ngraph::element::Type_t::i64: return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr()); return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u8: return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr()); case ngraph::element::Type_t::i64:
case ngraph::element::Type_t::u16: return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr()); return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u32: return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr()); case ngraph::element::Type_t::u8:
case ngraph::element::Type_t::u64: return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr()); return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::bf16: return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr()); case ngraph::element::Type_t::u16:
case ngraph::element::Type_t::f16: return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr()); return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f32: return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr()); case ngraph::element::Type_t::u32:
default: break; return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u64:
        return evaluate_op<ngraph::element::Type_t::u64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::bf16:
return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f16:
return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f32:
return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr());
default:
break;
}
return false;
}
bool Operation::has_evaluate() const {
switch (get_input_element_type(0)) {
case ngraph::element::Type_t::i8:
case ngraph::element::Type_t::i16:
case ngraph::element::Type_t::i32:
case ngraph::element::Type_t::i64:
case ngraph::element::Type_t::u8:
case ngraph::element::Type_t::u16:
case ngraph::element::Type_t::u32:
case ngraph::element::Type_t::u64:
case ngraph::element::Type_t::bf16:
case ngraph::element::Type_t::f16:
case ngraph::element::Type_t::f32:
return true;
default:
break;
} }
return false; return false;
} }
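A side note, illustrative only: evaluate()/has_evaluate() are the hooks that graph-level constant folding and reference evaluation can call to run the op on host memory. A minimal sketch of exercising them directly; the tensor size, the add value, and the header name are assumptions:

    #include <ngraph/ngraph.hpp>
    #include <ngraph/runtime/host_tensor.hpp>
    #include <vector>
    #include "op.hpp"  // assumed header declaring TemplateExtension::Operation

    bool run_add_on_host() {
        std::vector<float> in{1.f, 2.f, 3.f, 4.f};
        std::vector<float> out(in.size());
        auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{4});
        auto op = std::make_shared<TemplateExtension::Operation>(param, /*add=*/5);
        auto arg = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::f32, ngraph::Shape{4}, in.data());
        auto res = std::make_shared<ngraph::runtime::HostTensor>(ngraph::element::f32, ngraph::Shape{4}, out.data());
        // With add == 5 the output becomes {6, 7, 8, 9}.
        return op->has_evaluate() && op->evaluate({res}, {arg});
    }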

View File

@ -18,9 +18,11 @@ public:
void validate_and_infer_types() override; void validate_and_infer_types() override;
std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override; std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;
bool visit_attributes(ngraph::AttributeVisitor& visitor) override; bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
int64_t getAddAttr() const { return add; } int64_t getAddAttr() const {
bool evaluate(const ngraph::HostTensorVector& outputs, return add;
const ngraph::HostTensorVector& inputs) const override; }
bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override;
bool has_evaluate() const override;
private: private:
int64_t add; int64_t add;

View File

@ -13,7 +13,8 @@ ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "TEMPLATE" DEVICE_NAME "TEMPLATE"
SOURCES ${SOURCES} ${HEADERS} SOURCES ${SOURCES} ${HEADERS}
SKIP_INSTALL # ATTENTION: uncomment to install component SKIP_INSTALL # ATTENTION: uncomment to install component
VERSION_DEFINES_FOR template_plugin.cpp) VERSION_DEFINES_FOR template_plugin.cpp
ADD_CLANG_FORMAT)
target_include_directories(${TARGET_NAME} PRIVATE target_include_directories(${TARGET_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}"

View File

@ -3,18 +3,16 @@
// //
#include "template_async_infer_request.hpp" #include "template_async_infer_request.hpp"
#include "template_itt.hpp" #include "template_itt.hpp"
using namespace TemplatePlugin; using namespace TemplatePlugin;
// ! [async_infer_request:ctor] // ! [async_infer_request:ctor]
TemplateAsyncInferRequest::TemplateAsyncInferRequest( TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor)
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, : AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest), _waitExecutor(waitExecutor) {
    // In the current implementation we have CPU-only tasks and there is no need for two executors // In the current implementation we have CPU-only tasks and there is no need for two executors
    // So, by default a single-stage pipeline is created. // So, by default a single-stage pipeline is created.
// This stage executes InferRequest::Infer() using cpuTaskExecutor. // This stage executes InferRequest::Infer() using cpuTaskExecutor.
@ -23,24 +21,21 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest(
constexpr const auto remoteDevice = false; constexpr const auto remoteDevice = false;
if (remoteDevice) { if (remoteDevice) {
_pipeline = { _pipeline = {{cpuTaskExecutor,
{cpuTaskExecutor, [this] { [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
"TemplateAsyncInferRequest::PreprocessingAndStartPipeline"); _inferRequest->inferPreprocess();
_inferRequest->inferPreprocess(); _inferRequest->startPipeline();
_inferRequest->startPipeline(); }},
}}, {_waitExecutor,
{_waitExecutor, [this] { [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::WaitPipeline");
"TemplateAsyncInferRequest::WaitPipeline"); _inferRequest->waitPipeline();
_inferRequest->waitPipeline(); }},
}}, {cpuTaskExecutor, [this] {
{cpuTaskExecutor, [this] { OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::Postprocessing");
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, _inferRequest->inferPostprocess();
"TemplateAsyncInferRequest::Postprocessing"); }}};
_inferRequest->inferPostprocess();
}}
};
} }
} }
// ! [async_infer_request:ctor] // ! [async_infer_request:ctor]
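Illustrative usage note: the staged pipeline wired up above is driven from the application side through the standard asynchronous request API; a minimal sketch, assuming request is an InferenceEngine::InferRequest created from the loaded network:

    #include <inference_engine.hpp>

    void run_async(InferenceEngine::InferRequest& request) {
        request.StartAsync();  // schedules the pipeline stages on their executors
        request.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);  // blocks until the last stage finishes
    }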

View File

@ -13,15 +13,13 @@ namespace TemplatePlugin {
// ! [async_infer_request:header] // ! [async_infer_request:header]
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault { class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public: public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~TemplateAsyncInferRequest(); ~TemplateAsyncInferRequest();
private: private:
TemplateInferRequest::Ptr _inferRequest; TemplateInferRequest::Ptr _inferRequest;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor; InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
}; };
// ! [async_infer_request:header] // ! [async_infer_request:header]

View File

@ -2,17 +2,18 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <ie_plugin_config.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include "template_config.hpp" #include "template_config.hpp"
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <ie_plugin_config.hpp>
#include "template/template_config.hpp" #include "template/template_config.hpp"
using namespace TemplatePlugin; using namespace TemplatePlugin;
Configuration::Configuration() { } Configuration::Configuration() {}
Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) { Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
*this = defaultCfg; *this = defaultCfg;
// If plugin needs to use InferenceEngine::StreamsExecutor it should be able to process its configuration // If plugin needs to use InferenceEngine::StreamsExecutor it should be able to process its configuration
auto streamExecutorConfigKeys = _streamsExecutorConfig.SupportedKeys(); auto streamExecutorConfigKeys = _streamsExecutorConfig.SupportedKeys();
@ -22,8 +23,7 @@ Configuration::Configuration(const ConfigMap& config, const Configuration & defa
if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) { if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) {
_streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value); _streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value);
} else if (streamExecutorConfigKeys.end() != } else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
_streamsExecutorConfig.SetConfig(key, value); _streamsExecutorConfig.SetConfig(key, value);
} else if (CONFIG_KEY(DEVICE_ID) == key) { } else if (CONFIG_KEY(DEVICE_ID) == key) {
deviceId = std::stoi(value); deviceId = std::stoi(value);

View File

@ -4,11 +4,9 @@
#pragma once #pragma once
#include <string>
#include <map>
#include <ie_parameter.hpp> #include <ie_parameter.hpp>
#include <map>
#include <string>
#include <threading/ie_istreams_executor.hpp> #include <threading/ie_istreams_executor.hpp>
namespace TemplatePlugin { namespace TemplatePlugin {
@ -18,19 +16,19 @@ using ConfigMap = std::map<std::string, std::string>;
struct Configuration { struct Configuration {
Configuration(); Configuration();
Configuration(const Configuration&) = default; Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default; Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default; Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default; Configuration& operator=(Configuration&&) = default;
explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true); explicit Configuration(const ConfigMap& config, const Configuration& defaultCfg = {}, const bool throwOnUnsupported = true);
InferenceEngine::Parameter Get(const std::string& name) const; InferenceEngine::Parameter Get(const std::string& name) const;
// Plugin configuration parameters // Plugin configuration parameters
int deviceId = 0; int deviceId = 0;
bool perfCount = true; bool perfCount = true;
InferenceEngine::IStreamsExecutor::Config _streamsExecutorConfig; InferenceEngine::IStreamsExecutor::Config _streamsExecutorConfig;
}; };
// ! [configuration:header] // ! [configuration:header]
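As a usage illustration: these configuration entries are normally supplied through the Core API. A minimal sketch; the device name "TEMPLATE" matches the plugin registration earlier in this change, while the stream count value is arbitrary:

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>
    #include <template/template_config.hpp>

    void configure_template_plugin(InferenceEngine::Core& core) {
        core.SetConfig({{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), "4"},
                        {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}},
                       "TEMPLATE");
    }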

View File

@ -2,36 +2,35 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "template_executable_network.hpp"
#include <ie_metric_helpers.hpp> #include <ie_metric_helpers.hpp>
#include <ie_plugin_config.hpp> #include <ie_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp> #include <threading/ie_executor_manager.hpp>
#include "transformations/serialize.hpp"
#include "template/template_config.hpp" #include "template/template_config.hpp"
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_itt.hpp" #include "template_itt.hpp"
#include "template_plugin.hpp"
#include "transformations/serialize.hpp"
using namespace TemplatePlugin; using namespace TemplatePlugin;
// ! [executable_network:ctor_cnnnetwork] // ! [executable_network:ctor_cnnnetwork]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function, TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const Plugin::Ptr& plugin)
const Configuration& cfg, : InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
const Plugin::Ptr& plugin) : _cfg(cfg),
InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation _plugin(plugin) {
_cfg(cfg),
_plugin(plugin) {
    // TODO: if your plugin supports device ID (more than a single instance of the device can be on the host machine) // TODO: if your plugin supports device ID (more than a single instance of the device can be on the host machine)
// you should select proper device based on KEY_DEVICE_ID or automatic behavior // you should select proper device based on KEY_DEVICE_ID or automatic behavior
// In this case, _waitExecutor should also be created per device. // In this case, _waitExecutor should also be created per device.
try { try {
CompileNetwork(function, inputInfoMap, outputsInfoMap); CompileNetwork(function, inputInfoMap, outputsInfoMap);
        InitExecutor(); // creates a thread-based executor used for async requests InitExecutor(); // creates a thread-based executor used for async requests
} catch (const InferenceEngine::Exception&) { } catch (const InferenceEngine::Exception&) {
throw; throw;
} catch (const std::exception & e) { } catch (const std::exception& e) {
IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what(); IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what();
} catch (...) { } catch (...) {
IE_THROW(Unexpected) << "Generic exception is thrown"; IE_THROW(Unexpected) << "Generic exception is thrown";
@ -40,11 +39,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const
// ! [executable_network:ctor_cnnnetwork] // ! [executable_network:ctor_cnnnetwork]
// ! [executable_network:ctor_import_stream] // ! [executable_network:ctor_import_stream]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model, TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const Configuration& cfg, const Plugin::Ptr& plugin): _cfg(cfg), _plugin(plugin) {
const Configuration& cfg,
const Plugin::Ptr& plugin) :
_cfg(cfg),
_plugin(plugin) {
// read XML content // read XML content
std::string xmlString; std::string xmlString;
std::uint64_t dataSize = 0; std::uint64_t dataSize = 0;
@ -57,9 +52,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
model.read(reinterpret_cast<char*>(&dataSize), sizeof(dataSize)); model.read(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
if (0 != dataSize) { if (0 != dataSize) {
dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>( dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast<std::size_t>(dataSize)}, InferenceEngine::Layout::C));
{static_cast<std::size_t>(dataSize)},
InferenceEngine::Layout::C));
dataBlob->allocate(); dataBlob->allocate();
model.read(dataBlob->buffer(), dataSize); model.read(dataBlob->buffer(), dataSize);
} }
@ -77,10 +70,10 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
try { try {
CompileNetwork(cnnnetwork.getFunction(), inputInfoMap, outputInfoMap); CompileNetwork(cnnnetwork.getFunction(), inputInfoMap, outputInfoMap);
        InitExecutor(); // creates a thread-based executor used for async requests InitExecutor(); // creates a thread-based executor used for async requests
} catch (const InferenceEngine::Exception&) { } catch (const InferenceEngine::Exception&) {
throw; throw;
} catch (const std::exception & e) { } catch (const std::exception& e) {
IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what(); IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what();
} catch (...) { } catch (...) {
IE_THROW(Unexpected) << "Generic exception is thrown"; IE_THROW(Unexpected) << "Generic exception is thrown";
@ -90,12 +83,11 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
// ! [executable_network:map_graph] // ! [executable_network:map_graph]
// forward declaration // forward declaration
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::InputsDataMap & inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap); const InferenceEngine::OutputsDataMap& outputsInfoMap);
void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<const ngraph::Function>& function, void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap & inputInfoMap, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap) { const InferenceEngine::OutputsDataMap& outputsInfoMap) {
// TODO: perform actual graph compilation / mapping to backend graph representation / kernels // TODO: perform actual graph compilation / mapping to backend graph representation / kernels
@ -120,7 +112,6 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<con
} }
// ! [executable_network:map_graph] // ! [executable_network:map_graph]
// ! [executable_network:init_executor] // ! [executable_network:init_executor]
void TemplatePlugin::ExecutableNetwork::InitExecutor() { void TemplatePlugin::ExecutableNetwork::InitExecutor() {
    // Default multi-threaded configuration is balanced for throughput and latency cases and takes into account // Default multi-threaded configuration is balanced for throughput and latency cases and takes into account
@ -137,10 +128,9 @@ void TemplatePlugin::ExecutableNetwork::InitExecutor() {
} }
// ! [executable_network:init_executor] // ! [executable_network:init_executor]
// ! [executable_network:create_infer_request_impl] // ! [executable_network:create_infer_request_impl]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) { InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this())); return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
} }
// ! [executable_network:create_infer_request_impl] // ! [executable_network:create_infer_request_impl]
@ -148,32 +138,26 @@ InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::C
// ! [executable_network:create_infer_request] // ! [executable_network:create_infer_request]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() { InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs); auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest), return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest), _taskExecutor, _plugin->_waitExecutor,
_taskExecutor, _plugin->_waitExecutor, _callbackExecutor); _callbackExecutor);
} }
// ! [executable_network:create_infer_request] // ! [executable_network:create_infer_request]
// ! [executable_network:get_config] // ! [executable_network:get_config]
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const { InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string& name) const {
return _cfg.Get(name); return _cfg.Get(name);
} }
// ! [executable_network:get_config] // ! [executable_network:get_config]
// ! [executable_network:get_metric] // ! [executable_network:get_metric]
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name) const { InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const {
// TODO: return more supported values for metrics // TODO: return more supported values for metrics
if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) {
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string>{ IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string> {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = { std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
CONFIG_KEY(DEVICE_ID), auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) };
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) { for (auto&& configKey : streamExecutorConfigKeys) {
configKeys.emplace_back(configKey); configKeys.emplace_back(configKey);
} }
@ -197,8 +181,7 @@ void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& modelStream) {
// Note: custom ngraph extensions are not supported // Note: custom ngraph extensions are not supported
std::map<std::string, ngraph::OpSet> custom_opsets; std::map<std::string, ngraph::OpSet> custom_opsets;
std::stringstream xmlFile, binFile; std::stringstream xmlFile, binFile;
ngraph::pass::Serialize serializer(xmlFile, binFile, ngraph::pass::Serialize serializer(xmlFile, binFile, ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
serializer.run_on_function(_function); serializer.run_on_function(_function);
auto m_constants = binFile.str(); auto m_constants = binFile.str();

View File

@ -4,13 +4,12 @@
#pragma once #pragma once
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <ngraph/function.hpp> #include <ngraph/function.hpp>
#include "template_async_infer_request.hpp"
#include "template_config.hpp" #include "template_config.hpp"
#include "template_infer_request.hpp" #include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
namespace TemplatePlugin { namespace TemplatePlugin {
@ -24,15 +23,10 @@ class Plugin;
// ! [executable_network:header] // ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault { class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public: public:
ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function, ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg,
const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(std::istream& model, ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);
const Configuration& cfg,
const std::shared_ptr<Plugin>& plugin);
~ExecutableNetwork() override = default; ~ExecutableNetwork() override = default;
@ -42,23 +36,22 @@ public:
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override; InferenceEngine::OutputsDataMap networkOutputs) override;
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override; InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override;
InferenceEngine::Parameter GetMetric(const std::string &name) const override; InferenceEngine::Parameter GetMetric(const std::string& name) const override;
InferenceEngine::Parameter GetConfig(const std::string &name) const override; InferenceEngine::Parameter GetConfig(const std::string& name) const override;
private: private:
friend class TemplateInferRequest; friend class TemplateInferRequest;
void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function, void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap);
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void InitExecutor(); void InitExecutor();
std::atomic<std::size_t> _requestId = {0}; std::atomic<std::size_t> _requestId = {0};
Configuration _cfg; Configuration _cfg;
std::shared_ptr<Plugin> _plugin; std::shared_ptr<Plugin> _plugin;
std::shared_ptr<ngraph::Function> _function; std::shared_ptr<ngraph::Function> _function;
std::map<std::string, std::size_t> _inputIndex; std::map<std::string, std::size_t> _inputIndex;
std::map<std::string, std::size_t> _outputIndex; std::map<std::string, std::size_t> _outputIndex;
}; };
// ! [executable_network:header] // ! [executable_network:header]
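For orientation: an ExecutableNetwork of this kind is what the application receives from Core::LoadNetwork for the device. A minimal end-to-end sketch with a placeholder model path:

    #include <inference_engine.hpp>

    void run_on_template_device(const std::string& model_xml) {
        InferenceEngine::Core core;
        auto network = core.ReadNetwork(model_xml);
        auto executable = core.LoadNetwork(network, "TEMPLATE");  // ends up in Plugin::LoadExeNetworkImpl
        auto request = executable.CreateInferRequest();           // goes through CreateInferRequestImpl above
        request.Infer();                                          // synchronous path; StartAsync()/Wait() for async
    }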

View File

@ -2,16 +2,20 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>
#include "template_infer_request.hpp" #include "template_infer_request.hpp"
#include <algorithm>
#include <map>
#include <memory>
#include <ngraph/runtime/reference/convert.hpp>
#include <string>
#include <utility>
#include "blob_factory.hpp"
#include "ie_ngraph_utils.hpp"
#include "template_executable_network.hpp" #include "template_executable_network.hpp"
#include "template_plugin.hpp"
#include "template_itt.hpp" #include "template_itt.hpp"
#include "template_plugin.hpp"
using namespace TemplatePlugin; using namespace TemplatePlugin;
using namespace InferenceEngine; using namespace InferenceEngine;
@ -19,11 +23,9 @@ using namespace InferenceEngine;
using Time = std::chrono::high_resolution_clock; using Time = std::chrono::high_resolution_clock;
// ! [infer_request:ctor] // ! [infer_request:ctor]
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
const InferenceEngine::OutputsDataMap& networkOutputs, const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork)
const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) : : IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) {
IInferRequestInternal(networkInputs, networkOutputs),
_executableNetwork(executableNetwork) {
// TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks
auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1)); auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1));
@ -57,66 +59,47 @@ void TemplateInferRequest::allocateDeviceBuffers() {
_outputTensors.resize(_networkOutputs.size()); _outputTensors.resize(_networkOutputs.size());
} }
template<typename BlobDataMap, typename GetNetworkPrecisionF> template <typename BlobDataMap, typename GetNetworkPrecisionF>
static void AllocateImpl(const BlobDataMap& userDataMap, static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision,
BlobMap& userBlobMap,
BlobMap& deviceBlobMap,
GetNetworkPrecisionF&& GetNetworkPrecision,
bool isInputBlob = true) { bool isInputBlob = true) {
for (auto&& userData : userDataMap) { for (auto&& userData : userDataMap) {
auto& dims = userData.second->getTensorDesc().getDims(); const auto& dims = userData.second->getTensorDesc().getDims();
const auto devicePrecision = Precision::FP32;
const auto deviceLayout = TensorDesc::getLayoutByDims(dims); const auto deviceLayout = TensorDesc::getLayoutByDims(dims);
auto userPrecision = userData.second->getTensorDesc().getPrecision(); const auto userPrecision = userData.second->getTensorDesc().getPrecision();
auto userLayout = userData.second->getTensorDesc().getLayout(); const auto userLayout = userData.second->getTensorDesc().getLayout();
Blob::Ptr userBlob; const auto networkPrecision = InferenceEngine::details::convertPrecision(GetNetworkPrecision(userData.first));
switch (userPrecision) { Blob::Ptr userBlob = make_blob_with_precision({userPrecision, dims, userLayout});
case Precision::U8: {
userBlob = InferenceEngine::make_shared_blob<std::uint8_t>({userPrecision, dims, userLayout});
} break;
case Precision::FP32 : {
userBlob = InferenceEngine::make_shared_blob<float>({userPrecision, dims, userLayout});
} break;
default: IE_THROW(NotImplemented) << "Template Plugin: Unsupported Input/Output Precision";
}
userBlob->allocate(); userBlob->allocate();
userBlobMap[userData.first] = userBlob; userBlobMap[userData.first] = userBlob;
auto networkPrecision = GetNetworkPrecision(userData.first);
Blob::Ptr deviceBlob; Blob::Ptr deviceBlob;
switch (networkPrecision) { if (userPrecision == networkPrecision && userLayout == deviceLayout) {
case ngraph::element::Type_t::f32 : { deviceBlob = userBlob;
if (userPrecision == devicePrecision && userLayout == deviceLayout) { } else {
deviceBlob = userBlob; if (userLayout != deviceLayout && !isInputBlob) {
} else { IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs";
deviceBlob = InferenceEngine::make_shared_blob<float>({devicePrecision, dims, deviceLayout});
}
} break;
default: IE_THROW(NotImplemented) << "Template Plugin: Unsupported network Input/Output Presision";
}
if (userBlob != deviceBlob) {
if (isInputBlob) {
// preprocessing converts user input blob to desired device input blob automatically
deviceBlob->allocate();
} else {
// NOTE: this is not supported for output user blobs yet
IE_THROW(NotImplemented) << "Template Plugin: does not support setPrecision, setLayout for outputs";
} }
deviceBlob = make_blob_with_precision({networkPrecision, dims, deviceLayout});
deviceBlob->allocate();
} }
deviceBlobMap[userData.first] = deviceBlob; deviceBlobMap[userData.first] = deviceBlob;
} }
} }
void TemplateInferRequest::allocateBlobs() { void TemplateInferRequest::allocateBlobs() {
auto&& parameters = _executableNetwork->_function->get_parameters(); auto&& parameters = _executableNetwork->_function->get_parameters();
AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&] (const std::string& blobName) { AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&](const std::string& blobName) {
return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type(); return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type();
}); });
auto&& results = _executableNetwork->_function->get_results(); auto&& results = _executableNetwork->_function->get_results();
AllocateImpl(_networkOutputs, _outputs, _networkOutputBlobs, [&] (const std::string& blobName) { AllocateImpl(
return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type(); _networkOutputs, _outputs, _networkOutputBlobs,
}, false); [&](const std::string& blobName) {
return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
},
false);
} }
// ! [infer_request:infer_impl] // ! [infer_request:infer_impl]
@ -129,42 +112,108 @@ void TemplateInferRequest::InferImpl() {
} }
// ! [infer_request:infer_impl] // ! [infer_request:infer_impl]
template<typename SrcT, typename DstT> template <typename SrcT, typename DstT>
static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
std::copy_n(InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(), ngraph::runtime::reference::convert<SrcT, DstT>(InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
src->size(), InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(), src->size());
InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>());
} }
static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
switch (src->getTensorDesc().getPrecision()) { switch (src->getTensorDesc().getPrecision()) {
case Precision::U8 : { case Precision::U8: {
switch (dst->getTensorDesc().getPrecision()) { switch (dst->getTensorDesc().getPrecision()) {
case Precision::U8 : break; case Precision::U8:
case Precision::FP32 : { break;
blobCopy<std::uint8_t, float>(src, dst); case Precision::FP32: {
} break; blobCopy<std::uint8_t, float>(src, dst);
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break; } break;
case Precision::FP32 : { default: {
switch (dst->getTensorDesc().getPrecision()) { IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
case Precision::FP32 : break; << dst->getTensorDesc().getPrecision();
case Precision::U8 : {
blobCopy<float, std::uint8_t>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision();
}
}
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision();
} }
}
} break;
case Precision::FP32: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::FP32:
break;
case Precision::U8: {
blobCopy<float, std::uint8_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I64: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I64:
break;
case Precision::I32: {
blobCopy<int64_t, int32_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I16: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I16:
break;
case Precision::FP32: {
blobCopy<int16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I8: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I8:
break;
case Precision::FP32: {
blobCopy<int8_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::BOOL: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::BOOL:
break;
case Precision::FP32: {
blobCopy<bool, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::U16: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U16:
break;
case Precision::FP32: {
blobCopy<uint16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision();
}
} }
} }
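The per-precision blobCopy dispatch above forwards each supported type pair to ngraph's reference convert kernel; a standalone sketch of that primitive with made-up values:

    #include <ngraph/runtime/reference/convert.hpp>
    #include <cstdint>
    #include <vector>

    void convert_u8_to_f32() {
        std::vector<std::uint8_t> src{0, 128, 255};
        std::vector<float> dst(src.size());
        // Element-wise conversion, the same primitive blobCopy<std::uint8_t, float> relies on.
        ngraph::runtime::reference::convert<std::uint8_t, float>(src.data(), dst.data(), src.size());
    }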
@ -180,8 +229,8 @@ void TemplateInferRequest::inferPreprocess() {
const auto& parameter = _parameters[index]; const auto& parameter = _parameters[index];
const auto& parameterShape = parameter->get_shape(); const auto& parameterShape = parameter->get_shape();
const auto& parameterType = parameter->get_element_type(); const auto& parameterType = parameter->get_element_type();
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape, _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>()); parameterType, parameterShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
} }
for (auto&& output : _outputs) { for (auto&& output : _outputs) {
auto outputBlob = output.second; auto outputBlob = output.second;
@ -193,8 +242,8 @@ void TemplateInferRequest::inferPreprocess() {
const auto& result = _results[index]; const auto& result = _results[index];
const auto& resultShape = result->get_shape(); const auto& resultShape = result->get_shape();
const auto& resultType = result->get_element_type(); const auto& resultType = result->get_element_type();
_outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(resultType, resultShape, _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>()); resultType, resultShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
} }
_durations[Preprocess] = Time::now() - start; _durations[Preprocess] = Time::now() - start;
} }

View File

@ -4,20 +4,17 @@
#pragma once #pragma once
#include <array>
#include <chrono>
#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>
#include <executable.hpp>
#include <ie_input_info.hpp>
#include <map> #include <map>
#include <memory>
#include <ngraph/runtime/tensor.hpp>
#include <openvino/itt.hpp>
#include <string> #include <string>
#include <vector> #include <vector>
#include <array>
#include <memory>
#include <chrono>
#include <openvino/itt.hpp>
#include <ie_input_info.hpp>
#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>
#include <ngraph/runtime/tensor.hpp>
#include <executable.hpp>
namespace TemplatePlugin { namespace TemplatePlugin {
@ -29,8 +26,7 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal {
public: public:
typedef std::shared_ptr<TemplateInferRequest> Ptr; typedef std::shared_ptr<TemplateInferRequest> Ptr;
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<ExecutableNetwork>& executableNetwork); const std::shared_ptr<ExecutableNetwork>& executableNetwork);
~TemplateInferRequest(); ~TemplateInferRequest();
@ -47,26 +43,20 @@ private:
void allocateDeviceBuffers(); void allocateDeviceBuffers();
void allocateBlobs(); void allocateBlobs();
enum { enum { Preprocess, Postprocess, StartPipeline, WaitPipeline, numOfStages };
Preprocess,
Postprocess,
StartPipeline,
WaitPipeline,
numOfStages
};
std::shared_ptr<ExecutableNetwork> _executableNetwork; std::shared_ptr<ExecutableNetwork> _executableNetwork;
std::array<openvino::itt::handle_t, numOfStages> _profilingTask; std::array<openvino::itt::handle_t, numOfStages> _profilingTask;
// for performance counters // for performance counters
std::array<std::chrono::duration<float, std::micro>, numOfStages> _durations; std::array<std::chrono::duration<float, std::micro>, numOfStages> _durations;
InferenceEngine::BlobMap _networkOutputBlobs; InferenceEngine::BlobMap _networkOutputBlobs;
ngraph::ParameterVector _parameters; ngraph::ParameterVector _parameters;
ngraph::ResultVector _results; ngraph::ResultVector _results;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _inputTensors; std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _inputTensors;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _outputTensors; std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _outputTensors;
std::shared_ptr<ngraph::runtime::Executable> _executable; std::shared_ptr<ngraph::runtime::Executable> _executable;
}; };
// ! [infer_request:header] // ! [infer_request:header]

View File

@ -14,7 +14,7 @@
namespace TemplatePlugin { namespace TemplatePlugin {
namespace itt { namespace itt {
namespace domains { namespace domains {
OV_ITT_DOMAIN(TemplatePlugin); OV_ITT_DOMAIN(TemplatePlugin);
}
}
} }
} // namespace itt
} // namespace TemplatePlugin

View File

@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
// clang-format off
#include <ie_metric_helpers.hpp> #include <ie_metric_helpers.hpp>
#include <ie_plugin_config.hpp> #include <ie_plugin_config.hpp>
#include <ie_algorithm.hpp> #include <ie_algorithm.hpp>
@ -24,6 +25,7 @@
#include "template_infer_request.hpp" #include "template_infer_request.hpp"
#include "transformations/template_pattern_transformation.hpp" #include "transformations/template_pattern_transformation.hpp"
#include "transformations/preprocessing/preprocessing.hpp" #include "transformations/preprocessing/preprocessing.hpp"
// clang-format on
using namespace TemplatePlugin; using namespace TemplatePlugin;
@ -53,8 +55,7 @@ Plugin::~Plugin() {
// ! [plugin:transform_network] // ! [plugin:transform_network]
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::InputsDataMap & inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap) { const InferenceEngine::OutputsDataMap& outputsInfoMap) {
// 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function // 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function
auto transformedNetwork = ngraph::clone_function(*function); auto transformedNetwork = ngraph::clone_function(*function);
@ -67,7 +68,7 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// Example: register CommonOptimizations transformation from transformations library // Example: register CommonOptimizations transformation from transformations library
passManager.register_pass<ngraph::pass::CommonOptimizations>(); passManager.register_pass<ngraph::pass::CommonOptimizations>();
// Template plugin handles only FP32 networks // Template plugin handles only FP32 networks
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32 }}); passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
// Example: register plugin specific transformation // Example: register plugin specific transformation
passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>(); passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>(); passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
@ -83,36 +84,32 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// ! [plugin:transform_network] // ! [plugin:transform_network]
// ! [plugin:load_exe_network_impl] // ! [plugin:load_exe_network_impl]
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork & network, InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) {
const ConfigMap &config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::LoadExeNetworkImpl"); OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::LoadExeNetworkImpl");
InferenceEngine::InputsDataMap networkInputs = network.getInputsInfo(); InferenceEngine::InputsDataMap networkInputs = network.getInputsInfo();
InferenceEngine::OutputsDataMap networkOutputs = network.getOutputsInfo(); InferenceEngine::OutputsDataMap networkOutputs = network.getOutputsInfo();
auto fullConfig = Configuration{ config, _cfg }; auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(network.getFunction(), return std::make_shared<ExecutableNetwork>(network.getFunction(), networkInputs, networkOutputs, fullConfig,
networkInputs, networkOutputs, fullConfig, std::static_pointer_cast<Plugin>(shared_from_this()));
std::static_pointer_cast<Plugin>(shared_from_this()));
} }
// ! [plugin:load_exe_network_impl] // ! [plugin:load_exe_network_impl]
// ! [plugin:import_network_impl] // ! [plugin:import_network_impl]
InferenceEngine::ExecutableNetworkInternal::Ptr InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetworkImpl"); OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetworkImpl");
auto fullConfig = Configuration{ config, _cfg }; auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(modelStream, fullConfig, return std::make_shared<ExecutableNetwork>(modelStream, fullConfig, std::static_pointer_cast<Plugin>(shared_from_this()));
std::static_pointer_cast<Plugin>(shared_from_this()));
} }
// ! [plugin:import_network_impl] // ! [plugin:import_network_impl]
// ! [plugin:query_network] // ! [plugin:query_network]
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork &network, const ConfigMap& config) const { InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) const {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork"); OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork");
Configuration fullConfig{config, _cfg, false}; Configuration fullConfig {config, _cfg, false};
auto function = network.getFunction(); auto function = network.getFunction();
// 1. First of all we should store initial input operation set // 1. First of all we should store initial input operation set
@ -198,36 +195,28 @@ void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
// ! [plugin:add_extension] // ! [plugin:add_extension]
// ! [plugin:set_config] // ! [plugin:set_config]
void Plugin::SetConfig(const ConfigMap &config) { void Plugin::SetConfig(const ConfigMap& config) {
_cfg = Configuration{config, _cfg}; _cfg = Configuration {config, _cfg};
} }
// ! [plugin:set_config] // ! [plugin:set_config]
// ! [plugin:get_config] // ! [plugin:get_config]
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const { InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& /*options*/) const {
return _cfg.Get(name); return _cfg.Get(name);
} }
// ! [plugin:get_config] // ! [plugin:get_config]
// ! [plugin:get_metric]
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const {
    if (METRIC_KEY(SUPPORTED_METRICS) == name) {
        std::vector<std::string> supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS),
                                                     METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME),
                                                     METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE),
                                                     METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)};
        IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
    } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
        std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
        auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
        for (auto&& configKey : streamExecutorConfigKeys) {
            if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
                configKeys.emplace_back(configKey);
@ -236,7 +225,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
        IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
    } else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
        // TODO: fill list of available devices
        std::vector<std::string> availableDevices = {""};
        IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
    } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
        std::string name = "Template Device Full Name";
@ -249,13 +238,13 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
        IE_SET_METRIC_RETURN(DEVICE_ARCHITECTURE, arch);
    } else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
        // TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
        std::vector<std::string> capabilities = {METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/};
        IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
    } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
        // TODO: fill with actual values
        using uint = unsigned int;
        IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint {1}, uint {1}, uint {1}));
    } else {
        IE_THROW(NotFound) << "Unsupported device metric: " << name;
    }
}
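For reference, the metrics returned above can be read back from application code through the public Core API. A minimal sketch, again assuming the device is exposed as "TEMPLATE":

#include <ie_core.hpp>
#include <ie_plugin_config.hpp>

#include <iostream>
#include <string>
#include <vector>

int main() {
    InferenceEngine::Core ie;
    // Served by Plugin::GetMetric for METRIC_KEY(SUPPORTED_METRICS) above.
    std::vector<std::string> supportedMetrics = ie.GetMetric("TEMPLATE", METRIC_KEY(SUPPORTED_METRICS));
    for (const auto& metricName : supportedMetrics) {
        std::cout << metricName << std::endl;
    }
    return 0;
}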


@ -4,11 +4,11 @@
#pragma once

#include <cpp_interfaces/impl/ie_plugin_internal.hpp>

#include "backend.hpp"
#include "template_config.hpp"
#include "template_executable_network.hpp"

//! [plugin:header]
namespace TemplatePlugin {
@ -20,26 +20,24 @@ public:
    Plugin();
    ~Plugin();

    void SetConfig(const std::map<std::string, std::string>& config) override;
    InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
                                                     const std::map<std::string, std::string>& config) const override;
    InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
                                                                       const std::map<std::string, std::string>& config) override;
    void AddExtension(InferenceEngine::IExtensionPtr extension) override;
    InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
    InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
    InferenceEngine::ExecutableNetworkInternal::Ptr ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;

private:
    friend class ExecutableNetwork;
    friend class TemplateInferRequest;

    std::shared_ptr<ngraph::runtime::Backend> _backend;
    Configuration _cfg;
    InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};

}  // namespace TemplatePlugin
//! [plugin:header]


@ -2,21 +2,21 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "transformations/preprocessing/mean_image_or_value.hpp"
#include <ngraph/opsets/opset3.hpp> #include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp> #include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp> #include <ngraph/pattern/op/wrap_type.hpp>
#include "transformations/preprocessing/mean_image_or_value.hpp"
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMeanSubtract, "AddMeanSubtract", 0); NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMeanSubtract, "AddMeanSubtract", 0);
ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) { ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap& inputInfoMap) {
// RUN_ON_FUNCTION_SCOPE(AddMeanSubtract); // RUN_ON_FUNCTION_SCOPE(AddMeanSubtract);
auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>(); auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>();
ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) { ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root()); auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root());
if (!param) { if (!param) {
return false; return false;
@ -28,8 +28,7 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
} }
auto mean_const = it->second; auto mean_const = it->second;
        NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type");
auto copy_param = param->clone_with_new_inputs({}); auto copy_param = param->clone_with_new_inputs({});
auto sub = std::make_shared<ngraph::opset3::Subtract>(copy_param, mean_const); auto sub = std::make_shared<ngraph::opset3::Subtract>(copy_param, mean_const);


@ -5,10 +5,9 @@
#pragma once #pragma once
#include <map> #include <map>
#include <string>
#include <ngraph/op/constant.hpp> #include <ngraph/op/constant.hpp>
#include <ngraph/pass/graph_rewrite.hpp> #include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include "transformations_visibility.hpp" #include "transformations_visibility.hpp"
@ -29,5 +28,5 @@ public:
using MeanMap = std::map<std::string, std::shared_ptr<ngraph::op::v0::Constant>>; using MeanMap = std::map<std::string, std::shared_ptr<ngraph::op::v0::Constant>>;
NGRAPH_RTTI_DECLARATION; NGRAPH_RTTI_DECLARATION;
explicit AddMeanSubtract(const MeanMap & inputInfoMap); explicit AddMeanSubtract(const MeanMap& inputInfoMap);
}; };


@ -2,26 +2,26 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "transformations/preprocessing/preprocessing.hpp"

#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>

#include "transformations/preprocessing/mean_image_or_value.hpp"
#include "transformations/preprocessing/std_scale.hpp"

NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0);

ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {}
bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Function> f) { bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Function> f) {
ngraph::pass::AddMeanSubtract::MeanMap meanMap; ngraph::pass::AddMeanSubtract::MeanMap meanMap;
ngraph::pass::AddStdScale::ScaleMap scaleMap; ngraph::pass::AddStdScale::ScaleMap scaleMap;
for (const auto & it : m_inputInfoMap) { for (const auto& it : m_inputInfoMap) {
bool has_scales = false, has_mean_values = false, has_mean_image = false; bool has_scales = false, has_mean_values = false, has_mean_image = false;
const InferenceEngine::PreProcessInfo & pInfo = it.second->getPreProcess(); const InferenceEngine::PreProcessInfo& pInfo = it.second->getPreProcess();
const auto & inputDims = it.second->getTensorDesc().getDims(); const auto& inputDims = it.second->getTensorDesc().getDims();
const size_t cn = pInfo.getNumberOfChannels(); const size_t cn = pInfo.getNumberOfChannels();
std::vector<float> meanValues(cn), stdScales(cn); std::vector<float> meanValues(cn), stdScales(cn);
InferenceEngine::Blob::Ptr meanImage = nullptr; InferenceEngine::Blob::Ptr meanImage = nullptr;
@ -40,10 +40,10 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
if (c == 0) { if (c == 0) {
meanImage = pInfo[c]->meanData; meanImage = pInfo[c]->meanData;
                NGRAPH_CHECK(meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32,
                             "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
            } else {
                NGRAPH_CHECK(pInfo[c]->meanData != nullptr, "pInfo[c]->meanData is nullptr");
                NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal");
} }
} }
} }
@ -53,35 +53,33 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
continue; continue;
} }
        NGRAPH_CHECK(!(has_mean_image && has_scales), "Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
if (has_scales) { if (has_scales) {
ngraph::Shape shape(inputDims.size(), 1); ngraph::Shape shape(inputDims.size(), 1);
shape[1] = stdScales.size(); // C shape[1] = stdScales.size(); // C
scaleMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, stdScales); scaleMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, stdScales);
} }
if (has_mean_values) { if (has_mean_values) {
ngraph::Shape shape(inputDims.size(), 1); ngraph::Shape shape(inputDims.size(), 1);
shape[1] = meanValues.size(); // C shape[1] = meanValues.size(); // C
meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanValues); meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanValues);
} else if (has_mean_image) { } else if (has_mean_image) {
ngraph::Shape shape = { cn }; ngraph::Shape shape = {cn};
auto dims = meanImage->getTensorDesc().getDims(); auto dims = meanImage->getTensorDesc().getDims();
std::copy(dims.begin(), dims.end(), std::back_inserter(shape)); std::copy(dims.begin(), dims.end(), std::back_inserter(shape));
std::vector<float> meanImageData(ngraph::shape_size(shape)); std::vector<float> meanImageData(ngraph::shape_size(shape));
for (size_t c = 0, i = 0; c < cn; ++c) { for (size_t c = 0, i = 0; c < cn; ++c) {
auto lm = pInfo[c]->meanData->buffer(); auto lm = pInfo[c]->meanData->buffer();
const float *data = lm.as<const float *>(); const float* data = lm.as<const float*>();
std::memcpy(&meanImageData[i], data, meanImage->byteSize()); std::memcpy(&meanImageData[i], data, meanImage->byteSize());
i += meanImage->size(); i += meanImage->size();
} }
            meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanImageData);
} }
} }
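A minimal sketch of how a plugin could run this pass, assuming the input map comes from CNNNetwork::getInputsInfo() and the function from CNNNetwork::getFunction(); the helper name is illustrative and not part of the template plugin sources:

#include <ie_input_info.hpp>

#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/pass/manager.hpp>

#include "transformations/preprocessing/preprocessing.hpp"

void applyPreprocessing(const std::shared_ptr<ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap) {
    ngraph::pass::Manager manager;
    // Inserts Subtract/Multiply nodes according to the PreProcessInfo of each input, as implemented above.
    manager.register_pass<ngraph::pass::AddPreprocessing>(inputInfoMap);
    // Fold the constant subgraphs created for mean/scale values where possible.
    manager.register_pass<ngraph::pass::ConstantFolding>();
    manager.run_passes(function);
}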


@ -26,10 +26,11 @@ class AddPreprocessing;
* (x - mean) * stdScale * (x - mean) * stdScale
*/ */
class ngraph::pass::AddPreprocessing : public ngraph::pass::FunctionPass { class ngraph::pass::AddPreprocessing : public ngraph::pass::FunctionPass {
const InferenceEngine::InputsDataMap & m_inputInfoMap; const InferenceEngine::InputsDataMap& m_inputInfoMap;
public: public:
NGRAPH_RTTI_DECLARATION; NGRAPH_RTTI_DECLARATION;
explicit AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap); explicit AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap);
bool run_on_function(std::shared_ptr<ngraph::Function> f) override; bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
}; };


@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "transformations/preprocessing/std_scale.hpp"
#include <ngraph/opsets/opset3.hpp> #include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp> #include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp> #include <ngraph/pattern/op/wrap_type.hpp>
#include "transformations/preprocessing/std_scale.hpp"
using namespace ngraph; using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddStdScale, "AddStdScale", 0); NGRAPH_RTTI_DEFINITION(ngraph::pass::AddStdScale, "AddStdScale", 0);
@ -16,7 +16,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
// RUN_ON_FUNCTION_SCOPE(AddStdScale); // RUN_ON_FUNCTION_SCOPE(AddStdScale);
auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>(); auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>();
ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) { ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root()); auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root());
if (!param) { if (!param) {
return false; return false;
@ -28,8 +28,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
} }
auto scale_const = it->second; auto scale_const = it->second;
        NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type");
auto copy_param = param->clone_with_new_inputs({}); auto copy_param = param->clone_with_new_inputs({});
auto mul = std::make_shared<ngraph::opset3::Multiply>(copy_param, it->second); auto mul = std::make_shared<ngraph::opset3::Multiply>(copy_param, it->second);


@ -5,10 +5,9 @@
#pragma once #pragma once
#include <map> #include <map>
#include <string>
#include <ngraph/op/constant.hpp> #include <ngraph/op/constant.hpp>
#include <ngraph/pass/graph_rewrite.hpp> #include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include "transformations_visibility.hpp" #include "transformations_visibility.hpp"


@ -15,7 +15,7 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
NodeVector nodes; NodeVector nodes;
// Traverse nGraph Function in topological order // Traverse nGraph Function in topological order
for (auto & node : f->get_ordered_ops()) { for (auto& node : f->get_ordered_ops()) {
// Check that number of input and output ports are equal to 1 // Check that number of input and output ports are equal to 1
if (node->inputs().size() == 1 && node->outputs().size() == 1) { if (node->inputs().size() == 1 && node->outputs().size() == 1) {
// Check that input and output shape a fully defined (not dynamic) and number of consumers equal to 1 // Check that input and output shape a fully defined (not dynamic) and number of consumers equal to 1
@ -28,9 +28,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
} }
// Print types and names for collected nodes // Print types and names for collected nodes
    for (auto& node : nodes) {
        std::cout << "Type: " << node->get_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl;
} }
// Return false because we didn't change nGraph Function // Return false because we didn't change nGraph Function


@ -16,7 +16,7 @@ class MyFunctionTransformation;
// ! [function_pass:template_transformation_hpp] // ! [function_pass:template_transformation_hpp]
// template_function_transformation.hpp // template_function_transformation.hpp
class ngraph::pass::MyFunctionTransformation: public ngraph::pass::FunctionPass { class ngraph::pass::MyFunctionTransformation : public ngraph::pass::FunctionPass {
public: public:
NGRAPH_RTTI_DECLARATION; NGRAPH_RTTI_DECLARATION;
bool run_on_function(std::shared_ptr<ngraph::Function> f) override; bool run_on_function(std::shared_ptr<ngraph::Function> f) override;


@ -3,13 +3,14 @@
// //
#include "transformations/template_pattern_transformation.hpp" #include "transformations/template_pattern_transformation.hpp"
#include "transformations/template_function_transformation.hpp"
#include <ngraph/opsets/opset3.hpp> #include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp> #include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp> #include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp> #include <ngraph/rt_info.hpp>
#include "transformations/template_function_transformation.hpp"
using namespace ngraph; using namespace ngraph;
// ! [graph_rewrite:template_transformation_cpp] // ! [graph_rewrite:template_transformation_cpp]
@ -23,15 +24,14 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() {
auto div = std::make_shared<ngraph::opset3::Divide>(input0, input1); auto div = std::make_shared<ngraph::opset3::Divide>(input0, input1);
ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) { ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
auto div = std::dynamic_pointer_cast<ngraph::opset3::Divide> (m.get_match_root()); auto div = std::dynamic_pointer_cast<ngraph::opset3::Divide>(m.get_match_root());
// We can not apply this transformation in case with integer input data type // We can not apply this transformation in case with integer input data type
if (!div || div->input(0).get_element_type().is_integral()) { if (!div || div->input(0).get_element_type().is_integral()) {
return false; return false;
} }
// Decompose Divide into Multiply with Power operations // Decompose Divide into Multiply with Power operations
        auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1}));
auto mul = std::make_shared<ngraph::opset3::Multiply>(div->input_value(0), pow); auto mul = std::make_shared<ngraph::opset3::Multiply>(div->input_value(0), pow);
@ -67,8 +67,7 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
auto& node_to_output = m.get_pattern_value_map(); auto& node_to_output = m.get_pattern_value_map();
// Create new Relu operation and add register it for additional execution // Create new Relu operation and add register it for additional execution
        auto new_relu = register_new_node<ngraph::opset3::Relu>(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
// Copy runtime info attributes to newly created operation // Copy runtime info attributes to newly created operation
ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu); ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu);
@ -91,60 +90,60 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
// ! [matcher_pass:relu_fusion] // ! [matcher_pass:relu_fusion]
void run_matcher_on_node(std::shared_ptr<ngraph::Node> node) { void run_matcher_on_node(std::shared_ptr<ngraph::Node> node) {
// ! [matcher_pass:run_on_node] // ! [matcher_pass:run_on_node]
if (ngraph::pass::DecomposeDivideMatcher().apply(node)) { if (ngraph::pass::DecomposeDivideMatcher().apply(node)) {
// successful execution (root node was replaced) // successful execution (root node was replaced)
} }
// ! [matcher_pass:run_on_node] // ! [matcher_pass:run_on_node]
} }
void run_matcher_with_manager(std::shared_ptr<ngraph::Function> f) { void run_matcher_with_manager(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager] // ! [matcher_pass:manager]
// Two matchers will run independently (two independent graph traversals) // Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass // pass::Manager automatically creates GraphRewrite container for each MatcherPass
pass::Manager manager; pass::Manager manager;
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>(); manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>(); manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f); manager.run_passes(f);
// ! [matcher_pass:manager] // ! [matcher_pass:manager]
} }
void run_matcher_with_manager2(std::shared_ptr<ngraph::Function> f) { void run_matcher_with_manager2(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager2] // ! [matcher_pass:manager2]
// Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously // Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously
pass::Manager manager; pass::Manager manager;
auto anchor = manager.register_pass<ngraph::pass::GraphRewrite>(); auto anchor = manager.register_pass<ngraph::pass::GraphRewrite>();
anchor->add_matcher<ngraph::pass::DecomposeDivideMatcher>(); anchor->add_matcher<ngraph::pass::DecomposeDivideMatcher>();
anchor->add_matcher<ngraph::pass::ReluReluFusionMatcher>(); anchor->add_matcher<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f); manager.run_passes(f);
// ! [matcher_pass:manager2] // ! [matcher_pass:manager2]
} }
void run_matcher_with_manager3(std::shared_ptr<ngraph::Function> f) { void run_matcher_with_manager3(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager3] // ! [matcher_pass:manager3]
pass::Manager manager; pass::Manager manager;
manager.register_pass<ngraph::pass::MyFunctionTransformation>(); manager.register_pass<ngraph::pass::MyFunctionTransformation>();
// Two matchers will run independently (two independent graph traversals) // Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass // pass::Manager automatically creates GraphRewrite container for each MatcherPass
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>(); manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>(); manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f); manager.run_passes(f);
// ! [matcher_pass:manager3] // ! [matcher_pass:manager3]
} }
void run_matcher_with_gr(std::shared_ptr<ngraph::Function> f) { void run_matcher_with_gr(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:graph_rewrite] // ! [matcher_pass:graph_rewrite]
// Two matcher passes will run simultaneously in a single graph traversal // Two matcher passes will run simultaneously in a single graph traversal
ngraph::pass::GraphRewrite pass; ngraph::pass::GraphRewrite pass;
pass.add_matcher<ngraph::pass::DecomposeDivideMatcher>(); pass.add_matcher<ngraph::pass::DecomposeDivideMatcher>();
pass.add_matcher<ngraph::pass::ReluReluFusionMatcher>(); pass.add_matcher<ngraph::pass::ReluReluFusionMatcher>();
pass.run_on_function(f); pass.run_on_function(f);
// ! [matcher_pass:graph_rewrite] // ! [matcher_pass:graph_rewrite]
} }
// ! [manual_constant_folding] // ! [manual_constant_folding]
template <class T> template <class T>
Output<Node> eltwise_fold(const Output<Node> & input0, const Output<Node> & input1) { Output<Node> eltwise_fold(const Output<Node>& input0, const Output<Node>& input1) {
auto eltwise = std::make_shared<T>(input0, input1); auto eltwise = std::make_shared<T>(input0, input1);
OutputVector output(eltwise->get_output_size()); OutputVector output(eltwise->get_output_size());
// If constant folding wasn't successful return eltwise output // If constant folding wasn't successful return eltwise output


@ -21,14 +21,14 @@ class ReluReluFusionMatcher;
* @ingroup ie_transformation_common_api * @ingroup ie_transformation_common_api
* @brief Add transformation description. * @brief Add transformation description.
*/ */
class ngraph::pass::DecomposeDivideMatcher: public ngraph::pass::MatcherPass { class ngraph::pass::DecomposeDivideMatcher : public ngraph::pass::MatcherPass {
public: public:
NGRAPH_RTTI_DECLARATION; NGRAPH_RTTI_DECLARATION;
DecomposeDivideMatcher(); DecomposeDivideMatcher();
}; };
// ! [graph_rewrite:template_transformation_hpp] // ! [graph_rewrite:template_transformation_hpp]
class ngraph::pass::ReluReluFusionMatcher: public ngraph::pass::MatcherPass { class ngraph::pass::ReluReluFusionMatcher : public ngraph::pass::MatcherPass {
public: public:
NGRAPH_RTTI_DECLARATION; NGRAPH_RTTI_DECLARATION;
ReluReluFusionMatcher(); ReluReluFusionMatcher();


@ -14,7 +14,7 @@ addIeTargetTest(
IE::funcSharedTests IE::funcSharedTests
INCLUDES INCLUDES
"${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include" "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include"
ADD_CPPLINT ADD_CLANG_FORMAT
LABELS LABELS
TEMPLATE TEMPLATE
) )


@ -4,5 +4,4 @@
#include "functional_test_utils/core_config.hpp" #include "functional_test_utils/core_config.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}


@ -7,19 +7,14 @@
using namespace LayerTestsDefinitions; using namespace LayerTestsDefinitions;
namespace { namespace {
static const std::vector<ngraph::element::Type> precisionsTemplate = {
    ngraph::element::f32,
};

static const std::vector<std::size_t> batchSizesTemplate = {1, 2};

INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
                        ::testing::Combine(::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()), ::testing::ValuesIn(precisionsTemplate),
                                           ::testing::ValuesIn(batchSizesTemplate), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
                        LoadNetworkCacheTestBase::getTestCaseName);
}  // namespace


@ -2,19 +2,17 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "multi-device/multi_device_config.hpp"
#include "behavior/config.hpp" #include "behavior/config.hpp"
#include <template/template_config.hpp> #include <template/template_config.hpp>
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
const std::vector<std::map<std::string, std::string>> configs = { const std::vector<std::map<std::string, std::string>> configs = {
{{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
@ -27,32 +25,23 @@ const std::vector<std::map<std::string, std::string>> inconfigs = {
}; };
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                           ::testing::ValuesIn(inconfigs)),
                        IncorrectConfigTests::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                           ::testing::ValuesIn(inconfigs)),
                        IncorrectConfigAPITests::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                           ::testing::ValuesIn(configs)),
                        CorrectConfigAPITests::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigTests,
                        ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                           ::testing::ValuesIn(configs)),
                        CorrectConfigAPITests::getTestCaseName);
}  // namespace
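The configurations exercised by these cases map to plain Core::SetConfig calls. A short sketch, assuming the TEMPLATE device and the TEMPLATE_CONFIG_KEY macro from <template/template_config.hpp>; the helper name is illustrative only:

#include <ie_core.hpp>
#include <ie_plugin_config.hpp>
#include <template/template_config.hpp>

void configureTemplateDevice() {
    InferenceEngine::Core ie;
    // Mirrors the valid entry from `configs` above; the IncorrectConfig cases pass keys the plugin rejects.
    ie.SetConfig({{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, "TEMPLATE");
}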


@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "behavior/core_integration.hpp"

#include <string>
#include <utility>
#include <vector>
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
@ -16,54 +16,31 @@ namespace {
// IE Class Common tests with <pluginName, deviceName params> // IE Class Common tests with <pluginName, deviceName params>
// //
INSTANTIATE_TEST_CASE_P(smoke_IEClassBasicTestP, IEClassBasicTestP, ::testing::Values(std::make_pair("templatePlugin", CommonTestUtils::DEVICE_TEMPLATE)));

INSTANTIATE_TEST_CASE_P(smoke_IEClassNetworkTestP, IEClassNetworkTestP, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
// //
// IE Class GetMetric // IE Class GetMetric
// //
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
// //
// IE Class SetConfig // IE Class SetConfig
@ -111,9 +88,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
// IE Class GetConfig // IE Class GetConfig
// //
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassGetConfigTest, IEClassGetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest; using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;
@ -125,7 +100,7 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
std::vector<std::string> configValues = p; std::vector<std::string> configValues = p;
for (auto &&confKey : configValues) { for (auto&& confKey : configValues) {
if (CONFIG_KEY(DEVICE_ID) == confKey) { if (CONFIG_KEY(DEVICE_ID) == confKey) {
std::string defaultDeviceID = ie.GetConfig(deviceName, CONFIG_KEY(DEVICE_ID)); std::string defaultDeviceID = ie.GetConfig(deviceName, CONFIG_KEY(DEVICE_ID));
std::cout << CONFIG_KEY(DEVICE_ID) << " : " << defaultDeviceID << std::endl; std::cout << CONFIG_KEY(DEVICE_ID) << " : " << defaultDeviceID << std::endl;
@ -143,48 +118,37 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
// Executable Network GetMetric // Executable Network GetMetric
// //
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
// //
// Executable Network GetConfig / SetConfig // Executable Network GetConfig / SetConfig
// //
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
// IE Class Query network // IE Class Query network
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
// IE Class Load network // IE Class Load network
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
// //
// Hetero Executable Network GetMetric // Hetero Executable Network GetMetric
@ -192,21 +156,17 @@ INSTANTIATE_TEST_CASE_P(
#ifdef ENABLE_MKL_DNN #ifdef ENABLE_MKL_DNN
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
#endif // ENABLE_MKL_DNN #endif // ENABLE_MKL_DNN
} // namespace } // namespace


@ -8,32 +8,20 @@ using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<std::vector<int>> orders = {
    // 0 - plugin
    // 1 - executable_network
    // 2 - infer_request
    {0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest, ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(orders)),
                        HoldersTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestImportNetwork,
                        ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"), ::testing::ValuesIn(orders)),
                        HoldersTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestOnImportedNetwork, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
                        HoldersTestOnImportedNetwork::getTestCaseName);

}  // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <vector>
#include "behavior/exec_graph_info.hpp" #include "behavior/exec_graph_info.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName); ExecGraphTests::getTestCaseName);
} // namespace } // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <vector>
#include "behavior/infer_request.hpp" #include "behavior/infer_request.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName); InferRequestTests::getTestCaseName);
} // namespace } // namespace


@ -2,27 +2,20 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <vector>
#include "behavior/infer_request_callback.hpp" #include "behavior/infer_request_callback.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), CallbackTests::getTestCaseName);
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
} // namespace } // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <vector>
#include "behavior/infer_request_config.hpp" #include "behavior/infer_request_config.hpp"
#include <vector>
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName); InferConfigTests::getTestCaseName);
} // namespace } // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_input.hpp" #include "behavior/infer_request_input.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName); InferRequestInputTests::getTestCaseName);
} // namespace } // namespace


@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "multi-device/multi_device_config.hpp"
#include "behavior/infer_request_output.hpp" #include "behavior/infer_request_output.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName); InferRequestOutputTests::getTestCaseName);
} // namespace } // namespace


@ -8,31 +8,16 @@ using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<std::map<std::string, std::string>> configs = {{}};

const std::vector<InferenceEngine::Layout> Layout = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::CHW, InferenceEngine::Layout::NC,
                                                     InferenceEngine::Layout::C};

const std::vector<std::vector<size_t>> inputShapes = {{1, 3, 16, 16}, {3, 32, 16}, {1, 3}, {3}};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, LayoutTest, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, LayoutTest,
::testing::Combine( ::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(InferenceEngine::Precision::FP32), ::testing::ValuesIn(configs), ::testing::ValuesIn(Layout), ::testing::ValuesIn(inputShapes)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs),
::testing::ValuesIn(Layout),
::testing::ValuesIn(inputShapes)),
LayoutTest::getTestCaseName); LayoutTest::getTestCaseName);
} // namespace } // namespace


@ -2,39 +2,30 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "multi-device/multi_device_config.hpp"
#include "behavior/preprocessing.hpp" #include "behavior/preprocessing.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::U8, InferenceEngine::Precision::FP32};

const std::vector<std::map<std::string, std::string>> configs = {{}};
INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest, INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(inputPrecisions), ::testing::Values(4), // Number of input tensor channels
::testing::Values(4), // Number of input tensor channels ::testing::Values(true), // Use SetInput
::testing::Values(true), // Use SetInput ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName); PreprocessingPrecisionConvertTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest, INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(inputPrecisions), ::testing::Values(4), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
::testing::Values(4), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors) ::testing::Values(false), // use GetBlob
::testing::Values(false), // use GetBlob ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName); PreprocessingPrecisionConvertTest::getTestCaseName);
} // namespace } // namespace

View File

@ -2,79 +2,50 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "multi-device/multi_device_config.hpp"
#include "behavior/set_preprocess.hpp" #include "behavior/set_preprocess.hpp"
#include "multi-device/multi_device_config.hpp"
using namespace BehaviorTestsDefinitions; using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = { const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = { const std::vector<std::map<std::string, std::string>> configs = {{}};
{}
};
const std::vector<std::map<std::string, std::string>> multiConfigs = { const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_TEMPLATE}}};
CommonTestUtils::DEVICE_TEMPLATE }}
};
const std::vector<std::map<std::string, std::string>> heteroConfigs = { const std::vector<std::map<std::string, std::string>> heteroConfigs = {{{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}};
{{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE }}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
PreprocessTest::getTestCaseName); PreprocessTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest, INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(multiConfigs)),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
PreprocessTest::getTestCaseName); PreprocessTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Hetero_BehaviorTests, PreprocessTest, INSTANTIATE_TEST_CASE_P(smoke_Hetero_BehaviorTests, PreprocessTest,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(heteroConfigs)),
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(heteroConfigs)),
PreprocessTest::getTestCaseName); PreprocessTest::getTestCaseName);
const std::vector<InferenceEngine::Precision> ioPrecisions = { const std::vector<InferenceEngine::Precision> ioPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8};
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::U8
};
const std::vector<InferenceEngine::Layout> netLayouts = { const std::vector<InferenceEngine::Layout> netLayouts = {
InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NCHW,
// InferenceEngine::Layout::NHWC // InferenceEngine::Layout::NHWC
}; };
const std::vector<InferenceEngine::Layout> ioLayouts = { const std::vector<InferenceEngine::Layout> ioLayouts = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NHWC};
InferenceEngine::Layout::NCHW,
InferenceEngine::Layout::NHWC
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessConversionTest, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessConversionTest,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(ioPrecisions), ::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(netLayouts), ::testing::ValuesIn(ioLayouts), ::testing::ValuesIn(ioLayouts), ::testing::Bool(),
::testing::ValuesIn(ioPrecisions), ::testing::Bool(), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(netLayouts),
::testing::ValuesIn(ioLayouts),
::testing::ValuesIn(ioLayouts),
::testing::Bool(),
::testing::Bool(),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
PreprocessConversionTest::getTestCaseName); PreprocessConversionTest::getTestCaseName);
} // namespace } // namespace

View File

@ -8,34 +8,23 @@ using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = { const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> configs = { const std::vector<std::map<std::string, std::string>> configs = {{}};
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTests, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTests,
::testing::Combine( ::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(InferenceEngine::Precision::FP32), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTests::getTestCaseName); BehaviorTests::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestInput, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestInput,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTestInput::getTestCaseName); BehaviorTestInput::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestOutput, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestOutput,
::testing::Combine( ::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTestOutput::getTestCaseName); BehaviorTestOutput::getTestCaseName);
} // namespace } // namespace

View File

@ -8,15 +8,11 @@ using namespace BehaviorTestsDefinitions;
namespace { namespace {
const std::vector<std::map<std::string, std::string>> configs = { const std::vector<std::map<std::string, std::string>> configs = {{}};
{}
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, VersionTest, INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, VersionTest,
::testing::Combine( ::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(InferenceEngine::Precision::FP32), ::testing::ValuesIn(configs)),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
VersionTest::getTestCaseName); VersionTest::getTestCaseName);
} // namespace } // namespace

View File

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "hetero/query_network.hpp"
#include <vector> #include <vector>
#include "hetero/query_network.hpp"
#include "ngraph_functions/builders.hpp" #include "ngraph_functions/builders.hpp"
#include "ngraph_functions/subgraph_builders.hpp" #include "ngraph_functions/subgraph_builders.hpp"
@ -14,8 +15,7 @@ using namespace HeteroTests;
auto ConvBias = ngraph::builder::subgraph::makeConvBias(); auto ConvBias = ngraph::builder::subgraph::makeConvBias();
INSTANTIATE_TEST_CASE_P(smoke_FullySupportedTopologies, QueryNetworkTest, INSTANTIATE_TEST_CASE_P(smoke_FullySupportedTopologies, QueryNetworkTest,
::testing::Combine( ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"), ::testing::Values(ConvBias)),
::testing::Values(ConvBias)),
QueryNetworkTest::getTestCaseName); QueryNetworkTest::getTestCaseName);
} // namespace } // namespace

View File

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "hetero/synthetic.hpp"
#include <vector> #include <vector>
#include "hetero/synthetic.hpp"
#include "ngraph_functions/builders.hpp" #include "ngraph_functions/builders.hpp"
#include "ngraph_functions/subgraph_builders.hpp" #include "ngraph_functions/subgraph_builders.hpp"
@ -12,14 +13,12 @@ namespace {
using namespace HeteroTests; using namespace HeteroTests;
INSTANTIATE_TEST_CASE_P(smoke_SingleMajorNode, HeteroSyntheticTest, INSTANTIATE_TEST_CASE_P(smoke_SingleMajorNode, HeteroSyntheticTest,
::testing::Combine( ::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}), ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
HeteroSyntheticTest::getTestCaseName); HeteroSyntheticTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(nightly_RandomMajorNodes, HeteroSyntheticTest, INSTANTIATE_TEST_CASE_P(nightly_RandomMajorNodes, HeteroSyntheticTest,
::testing::Combine( ::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}), ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
HeteroSyntheticTest::getTestCaseName); HeteroSyntheticTest::getTestCaseName);
} // namespace } // namespace

View File

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "single_layer_tests/convolution.hpp"
#include <vector> #include <vector>
#include "single_layer_tests/convolution.hpp"
#include "common_test_utils/test_constants.hpp" #include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions; using namespace LayerTestsDefinitions;
@ -19,122 +20,72 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
/* ============= 2D Convolution ============= */ /* ============= 2D Convolution ============= */
const std::vector<std::vector<size_t >> kernels = {{3, 3}, const std::vector<std::vector<size_t>> kernels = {{3, 3}, {3, 5}};
{3, 5}}; const std::vector<std::vector<size_t>> strides = {{1, 1}, {1, 3}};
const std::vector<std::vector<size_t >> strides = {{1, 1}, const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0}, {0, 3}};
{1, 3}}; const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0}, {0, 3}};
const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0}, const std::vector<std::vector<size_t>> dilations = {{1, 1}, {3, 1}};
{0, 3}};
const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0},
{0, 3}};
const std::vector<std::vector<size_t >> dilations = {{1, 1},
{3, 1}};
const std::vector<size_t> numOutChannels = {1, 5}; const std::vector<size_t> numOutChannels = {1, 5};
const std::vector<ngraph::op::PadType> padTypes = { const std::vector<ngraph::op::PadType> padTypes = {ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID};
ngraph::op::PadType::EXPLICIT,
ngraph::op::PadType::VALID
};
const auto conv2DParams_ExplicitPadding = ::testing::Combine( const auto conv2DParams_ExplicitPadding =
::testing::ValuesIn(kernels), ::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds),
::testing::ValuesIn(strides), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), ::testing::Values(ngraph::op::PadType::EXPLICIT));
::testing::ValuesIn(padBegins),
::testing::ValuesIn(padEnds),
::testing::ValuesIn(dilations),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
// ! [test_convolution:declare_parameters] // ! [test_convolution:declare_parameters]
const auto conv2DParams_AutoPadValid = ::testing::Combine( const auto conv2DParams_AutoPadValid =
::testing::ValuesIn(kernels), ::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::ValuesIn(strides), ::testing::Values(std::vector<ptrdiff_t>({0, 0})), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels),
::testing::Values(std::vector<ptrdiff_t>({0, 0})), ::testing::Values(ngraph::op::PadType::VALID));
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::ValuesIn(dilations),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::VALID)
);
// ! [test_convolution:instantiate] // ! [test_convolution:instantiate]
INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest, INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine( ::testing::Combine(conv2DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
conv2DParams_ExplicitPadding, ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName); ConvolutionLayerTest::getTestCaseName);
// ! [test_convolution:instantiate] // ! [test_convolution:instantiate]
INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest, INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine( ::testing::Combine(conv2DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
conv2DParams_AutoPadValid, ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName); ConvolutionLayerTest::getTestCaseName);
/* ============= 3D Convolution ============= */ /* ============= 3D Convolution ============= */
const std::vector<std::vector<size_t >> kernels3d = {{3, 3, 3}, const std::vector<std::vector<size_t>> kernels3d = {{3, 3, 3}, {3, 5, 3}};
{3, 5, 3}}; const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0}, {0, 2, 0}};
const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0},
{0, 2, 0}};
const std::vector<std::vector<size_t >> strides3d = {{1, 1, 1}, const std::vector<std::vector<size_t>> strides3d = {{1, 1, 1}, {1, 2, 1}};
{1, 2, 1}}; const std::vector<std::vector<size_t>> dilations3d = {{1, 1, 1}, {1, 2, 1}};
const std::vector<std::vector<size_t >> dilations3d = {{1, 1, 1},
{1, 2, 1}};
const auto conv3DParams_ExplicitPadding = ::testing::Combine( const auto conv3DParams_ExplicitPadding =
::testing::ValuesIn(kernels3d), ::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(paddings3d), ::testing::ValuesIn(paddings3d),
::testing::ValuesIn(strides3d), ::testing::ValuesIn(dilations3d), ::testing::Values(5), ::testing::Values(ngraph::op::PadType::EXPLICIT));
::testing::ValuesIn(paddings3d), const auto conv3DParams_AutoPadValid =
::testing::ValuesIn(paddings3d), ::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::ValuesIn(dilations3d), ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})), ::testing::ValuesIn(dilations3d), ::testing::Values(5),
::testing::Values(5), ::testing::Values(ngraph::op::PadType::VALID));
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv3DParams_AutoPadValid = ::testing::Combine(
::testing::ValuesIn(kernels3d),
::testing::ValuesIn(strides3d),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::ValuesIn(dilations3d),
::testing::Values(5),
::testing::Values(ngraph::op::PadType::VALID)
);
INSTANTIATE_TEST_CASE_P(smoke_Convolution3D_ExplicitPadding, ConvolutionLayerTest, INSTANTIATE_TEST_CASE_P(smoke_Convolution3D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine( ::testing::Combine(conv3DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
conv3DParams_ExplicitPadding, ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName); ConvolutionLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(nightly_Convolution3D_AutoPadValid, ConvolutionLayerTest, INSTANTIATE_TEST_CASE_P(nightly_Convolution3D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine( ::testing::Combine(conv3DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
conv3DParams_AutoPadValid, ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName); ConvolutionLayerTest::getTestCaseName);
} // namespace } // namespace

View File

@ -2,43 +2,34 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "single_layer_tests/reshape.hpp"
#include <vector> #include <vector>
#include "single_layer_tests/reshape.hpp"
#include "common_test_utils/test_constants.hpp" #include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions; using namespace LayerTestsDefinitions;
namespace { namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = { const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
}; };
INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheckDynBatch, ReshapeLayerTest, INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheckDynBatch, ReshapeLayerTest,
::testing::Combine( ::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
::testing::Values(true), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::map<std::string, std::string>({}))),
::testing::Values(InferenceEngine::Layout::ANY), ReshapeLayerTest::getTestCaseName);
::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(std::map<std::string, std::string>({}))),
ReshapeLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheck, ReshapeLayerTest, INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheck, ReshapeLayerTest,
::testing::Combine( ::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
::testing::Values(true), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::ValuesIn(netPrecisions), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(std::vector<size_t>({10, 0, 100})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::map<std::string, std::string>({}))),
::testing::Values(InferenceEngine::Layout::ANY), ReshapeLayerTest::getTestCaseName);
::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
::testing::Values(std::vector<size_t>({10, 0, 100})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::Values(std::map<std::string, std::string>({}))),
ReshapeLayerTest::getTestCaseName);
} // namespace } // namespace

View File

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "single_layer_tests/softmax.hpp"
#include <vector> #include <vector>
#include "single_layer_tests/softmax.hpp"
#include "common_test_utils/test_constants.hpp" #include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions; using namespace LayerTestsDefinitions;
@ -25,28 +26,14 @@ const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
InferenceEngine::SizeVector {10, 10}, InferenceEngine::SizeVector {10, 10},
}; };
const std::vector<size_t> axis2D = { const std::vector<size_t> axis2D = {0, 1};
0, 1
};
const auto params2D = testing::Combine( const auto params2D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::ValuesIn(inputLayouts2D),
testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes2D), testing::ValuesIn(axis2D),
testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));
testing::ValuesIn(inputLayouts2D),
testing::Values(InferenceEngine::Layout::ANY),
testing::ValuesIn(inputShapes2D),
testing::ValuesIn(axis2D),
testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
testing::Values(std::map<std::string, std::string>())
);
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_SoftMax2D, SoftMaxLayerTest, params2D, SoftMaxLayerTest::getTestCaseName);
smoke_SoftMax2D,
SoftMaxLayerTest,
params2D,
SoftMaxLayerTest::getTestCaseName
);
const std::vector<InferenceEngine::SizeVector> inputShapes4D = { const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
InferenceEngine::SizeVector {1, 100, 1, 1}, InferenceEngine::SizeVector {1, 100, 1, 1},
@ -56,23 +43,11 @@ const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
const std::vector<size_t> axis4D = {0, 1, 2, 3}; const std::vector<size_t> axis4D = {0, 1, 2, 3};
const auto params4D = testing::Combine( const auto params4D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(InferenceEngine::Layout::NCHW),
testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes4D), testing::ValuesIn(axis4D),
testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));
testing::Values(InferenceEngine::Layout::NCHW),
testing::Values(InferenceEngine::Layout::ANY),
testing::ValuesIn(inputShapes4D),
testing::ValuesIn(axis4D),
testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
testing::Values(std::map<std::string, std::string>())
);
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(smoke_SoftMax4D, SoftMaxLayerTest, params4D, SoftMaxLayerTest::getTestCaseName);
smoke_SoftMax4D,
SoftMaxLayerTest,
params4D,
SoftMaxLayerTest::getTestCaseName
);
} // namespace } // namespace

View File

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "single_layer_tests/split.hpp"
#include <vector> #include <vector>
#include "single_layer_tests/split.hpp"
#include "common_test_utils/test_constants.hpp" #include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions; using namespace LayerTestsDefinitions;
@ -12,17 +13,11 @@ using namespace LayerTestsDefinitions;
namespace { namespace {
INSTANTIATE_TEST_CASE_P(smoke_NumSplitsCheck, SplitLayerTest, INSTANTIATE_TEST_CASE_P(smoke_NumSplitsCheck, SplitLayerTest,
::testing::Combine( ::testing::Combine(::testing::Values(1, 2, 3, 5, 6, 10, 30), ::testing::Values(0, 1, 2, 3),
::testing::Values(1, 2, 3, 5, 6, 10, 30), ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(0, 1, 2, 3), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(std::vector<size_t>({})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
::testing::Values(std::vector<size_t>({})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
SplitLayerTest::getTestCaseName); SplitLayerTest::getTestCaseName);
} // namespace } // namespace

View File

@ -2,20 +2,17 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp" #include "functional_test_utils/skip_tests_config.hpp"
#include <string>
#include <vector>
std::vector<std::string> disabledTestPatterns() { std::vector<std::string> disabledTestPatterns() {
return { return {
".*ExclusiveAsyncRequests.*", ".*ExclusiveAsyncRequests.*",
".*reusableCPUStreamsExecutor.*", ".*reusableCPUStreamsExecutor.*",
R"(.*SplitLayerTest.*numSplits\=30.*)", R"(.*SplitLayerTest.*numSplits\=30.*)",
// CVS-51758 // CVS-51758
".*PreprocessConversionTest.*oPRC=U8.*",
".*PreprocessConversionTest.*oLT=NHWC.*", ".*PreprocessConversionTest.*oLT=NHWC.*",
".*PreprocessingPrecisionConvertTestsViaSetInput.*SetInput.*",
".*PreprocessingPrecisionConvertTestsViaGetBlob.*GetBlob.*",
}; };
} }
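The entries returned above are regular expressions; a minimal sketch, assuming the skip mechanism matches each pattern against the full gtest test name (the test name below is made up for illustration):

#include <iostream>
#include <regex>
#include <string>

int main() {
    // Hypothetical full gtest test name, for illustration only.
    const std::string testName =
        "smoke_BehaviorTests/PreprocessConversionTest.Infer/netPRC=FP32_oLT=NHWC";
    const std::regex disabled(".*PreprocessConversionTest.*oLT=NHWC.*");
    // Tests whose names match a disabled pattern are skipped by the test runner.
    std::cout << std::boolalpha << std::regex_match(testName, disabled) << "\n";  // prints: true
    return 0;
}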

View File

@ -18,11 +18,9 @@
// #include "common_test_utils/ngraph_test_utils.hpp" // #include "common_test_utils/ngraph_test_utils.hpp"
// using namespace testing; // using namespace testing;
// using namespace ngraph; // using namespace ngraph;
// TEST(TransformationTests, Preprocessing_AddStdScale) { // TEST(TransformationTests, Preprocessing_AddStdScale) {
// std::shared_ptr<Function> f(nullptr), f_ref(nullptr); // std::shared_ptr<Function> f(nullptr), f_ref(nullptr);

View File

@ -4,12 +4,11 @@
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <string>
#include <memory> #include <memory>
#include <queue>
#include <ngraph/ngraph.hpp> #include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp> #include <ngraph/opsets/opset3.hpp>
#include <queue>
#include <string>
#include <transformations/init_node_info.hpp> #include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp> #include <transformations/utils/utils.hpp>
@ -24,11 +23,11 @@ TEST(TransformationTests, DISABLED_TemplateTest) {
// f_ref - ngraph::Function that is expected after applying transformation // f_ref - ngraph::Function that is expected after applying transformation
{ {
// Example function // Example function
auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2}); auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape {3, 1, 2});
auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5}); auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {1.5});
auto divide = std::make_shared<ngraph::opset3::Divide>(data, divide_constant); auto divide = std::make_shared<ngraph::opset3::Divide>(data, divide_constant);
f = std::make_shared<ngraph::Function>(ngraph::NodeVector{divide}, ngraph::ParameterVector{data}); f = std::make_shared<ngraph::Function>(ngraph::NodeVector {divide}, ngraph::ParameterVector {data});
// This transformation init runtime info attributes // This transformation init runtime info attributes
ngraph::pass::InitNodeInfo().run_on_function(f); ngraph::pass::InitNodeInfo().run_on_function(f);
@ -42,13 +41,12 @@ TEST(TransformationTests, DISABLED_TemplateTest) {
{ {
// Example reference function // Example reference function
auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2}); auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape {3, 1, 2});
auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5}); auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {1.5});
auto pow = std::make_shared<ngraph::opset3::Power>(divide_constant, auto pow = std::make_shared<ngraph::opset3::Power>(divide_constant, ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {-1}));
ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {-1}));
auto mul = std::make_shared<ngraph::opset3::Multiply>(data, pow); auto mul = std::make_shared<ngraph::opset3::Multiply>(data, pow);
f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{data}); f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector {mul}, ngraph::ParameterVector {data});
} }
// Compare that processed function and expected function are the same // Compare that processed function and expected function are the same

View File

@ -148,7 +148,7 @@ configure_file(
configure_file( configure_file(
"${IE_MAIN_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in" "${IE_MAIN_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in"
"${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake" "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake"
COPYONLY) @ONLY)
# #
# Coverage # Coverage

View File

@ -261,8 +261,8 @@ if (ENABLE_GNA)
set(GNA_HASH "cc954e67525006bf8bd353a6682e38bf208f6d74e973e0fc292850e721f17452") set(GNA_HASH "cc954e67525006bf8bd353a6682e38bf208f6d74e973e0fc292850e721f17452")
endif() endif()
if(GNA_LIBRARY_VERSION STREQUAL "GNA2") if(GNA_LIBRARY_VERSION STREQUAL "GNA2")
set(GNA_VERSION "02.00.00.1191.0") set(GNA_VERSION "02.00.00.1226")
set(GNA_HASH "a61b4a9133549b0a9f0b46d069f72906ced28bcbbe7d5c361e687645f53a1c8b") set(GNA_HASH "d5450af15c993e264c25ac4591a7dab44722e10d15fca4f222a1b84429d4e5b6")
endif() endif()
set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include) set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include)

View File

@ -46,8 +46,8 @@ function(set_ie_threading_interface_for TARGET_NAME)
# they don't have TBB in public headers => PRIVATE # they don't have TBB in public headers => PRIVATE
set(LINK_TYPE "PRIVATE") set(LINK_TYPE "PRIVATE")
elseif(target_type STREQUAL "SHARED_LIBRARY") elseif(target_type STREQUAL "SHARED_LIBRARY")
# TODO: inference_engine only # Affected libraries: inference_engine only
# Why TBB propagates its headers to inference_engine? # TODO: why TBB propagates its headers to inference_engine?
set(LINK_TYPE "PRIVATE") set(LINK_TYPE "PRIVATE")
else() else()
ext_message(WARNING "Unknown target type") ext_message(WARNING "Unknown target type")

View File

@ -2,17 +2,19 @@
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
# TODO: hardcode will be fixed separatelly set(PACKAGE_VERSION_MAJOR @IE_VERSION_MAJOR@)
set(PACKAGE_VERSION_MAJOR 2) set(PACKAGE_VERSION_MINOR @IE_VERSION_MINOR@)
set(PACKAGE_VERSION_MINOR 1) set(PACKAGE_VERSION_PATCH @IE_VERSION_PATCH@)
set(PACKAGE_VERSION_PATCH 0)
set(PACKAGE_VERSION_COUNT 3)
set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}") set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}")
set(PACKAGE_VERSION_EXACT False) set(PACKAGE_VERSION_EXACT False)
set(PACKAGE_VERSION_COMPATIBLE False) set(PACKAGE_VERSION_COMPATIBLE False)
# Compatibility with old versioning for 2.x
if(PACKAGE_FIND_VERSION_MAJOR VERSION_EQUAL 2)
set(PACKAGE_VERSION_COMPATIBLE True)
endif()
if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
set(PACKAGE_VERSION_EXACT True) set(PACKAGE_VERSION_EXACT True)
set(PACKAGE_VERSION_COMPATIBLE True) set(PACKAGE_VERSION_COMPATIBLE True)

View File

@ -16,6 +16,12 @@
# IE::inference_engine - The Inference Engine library # IE::inference_engine - The Inference Engine library
# IE::inference_engine_c_api - The Inference Engine C API library # IE::inference_engine_c_api - The Inference Engine C API library
# #
# Inference Engine version variables:
#
# InferenceEngine_VERSION_MAJOR - major version component
# InferenceEngine_VERSION_MINOR - minor version component
# InferenceEngine_VERSION_PATCH - patch version component
#
@PACKAGE_INIT@ @PACKAGE_INIT@

View File

@ -6,14 +6,14 @@ include_guard(GLOBAL)
set(VPU_SUPPORTED_FIRMWARES usb-ma2x8x pcie-ma2x8x) set(VPU_SUPPORTED_FIRMWARES usb-ma2x8x pcie-ma2x8x)
set(VPU_SUPPORTED_FIRMWARES_HASH set(VPU_SUPPORTED_FIRMWARES_HASH
"11a6db07d3a17c9c0fc4247fce47c942e0dcd59f8d70665a96bae0d7b7121fe9" "dc93ba50e2096759aa3aeae67a85be1d49d2ba0ca84f319ca5ff911b13788f2c"
"43f3dc0f0a8114ca34226167970aafdc869600929d6e3761c1eaa6eec71f2237") "c50db9859c4851fd4a3a5822ff05fc0af3d16a972625f965527a450aa4bb4624")
# #
# Default packages # Default packages
# #
set(FIRMWARE_PACKAGE_VERSION 1658) set(FIRMWARE_PACKAGE_VERSION 1676)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.2") set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.2")
# #

View File

@ -29,13 +29,10 @@ ie_add_vs_version_file(NAME ${TARGET_NAME}
export(TARGETS ${TARGET_NAME} NAMESPACE IE:: export(TARGETS ${TARGET_NAME} NAMESPACE IE::
APPEND FILE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake") APPEND FILE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake")
# WA for CI issue
export(TARGETS ${TARGET_NAME} NAMESPACE IE::
APPEND FILE "${CMAKE_BINARY_DIR}/share/InferenceEngineTargets.cmake")
# install # install
ie_cpack_add_component(core_c DEPENDS core) ie_cpack_add_component(core_c DEPENDS core)
ie_cpack_add_component(core_c_dev DEPENDS core_c)
install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c
@ -44,4 +41,4 @@ install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets
install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/ install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/
DESTINATION ${IE_CPACK_IE_DIR}/include DESTINATION ${IE_CPACK_IE_DIR}/include
COMPONENT core_c) COMPONENT core_c_dev)

View File

@ -134,7 +134,8 @@ std::map<IE::ColorFormat, colorformat_e> colorformat_map = {{IE::ColorFormat::RA
CATCH_IE_EXCEPTION(NOT_ALLOCATED, NotAllocated) \ CATCH_IE_EXCEPTION(NOT_ALLOCATED, NotAllocated) \
CATCH_IE_EXCEPTION(INFER_NOT_STARTED, InferNotStarted) \ CATCH_IE_EXCEPTION(INFER_NOT_STARTED, InferNotStarted) \
CATCH_IE_EXCEPTION(NETWORK_NOT_READ, NetworkNotRead) \ CATCH_IE_EXCEPTION(NETWORK_NOT_READ, NetworkNotRead) \
CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled) CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled) \
catch (...) {return IEStatusCode::UNEXPECTED;}
/** /**
*@brief convert the config type data to map type data. *@brief convert the config type data to map type data.
@ -237,9 +238,7 @@ IEStatusCode ie_core_create(const char *xml_config_file, ie_core_t **core) {
std::unique_ptr<ie_core_t> tmp(new ie_core_t); std::unique_ptr<ie_core_t> tmp(new ie_core_t);
tmp->object = IE::Core(xml_config_file); tmp->object = IE::Core(xml_config_file);
*core = tmp.release(); *core = tmp.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
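The remaining hunks in this file all make the same mechanical change: the trailing catch (...) { return IEStatusCode::UNEXPECTED; } blocks disappear from the call sites because that fallback is now folded into the CATCH_IE_EXCEPTIONS macro chain shown above. A minimal self-contained sketch of the pattern (simplified; the real macros map many more InferenceEngine exception types to status codes):

#include <stdexcept>

enum IEStatusCode { OK = 0, GENERAL_ERROR = -1, NOT_IMPLEMENTED = -2, UNEXPECTED = -3 };

// Each entry in the chain converts one exception type into a status code.
#define CATCH_IE_EXCEPTION(StatusCode, ExceptionType) \
    catch (const ExceptionType&) { return StatusCode; }

// The generic fallback now lives at the end of the chain instead of at every call site.
#define CATCH_IE_EXCEPTIONS                               \
    CATCH_IE_EXCEPTION(GENERAL_ERROR, std::runtime_error) \
    CATCH_IE_EXCEPTION(NOT_IMPLEMENTED, std::logic_error) \
    catch (...) { return UNEXPECTED; }

IEStatusCode do_work(bool fail) {
    try {
        if (fail) throw std::runtime_error("boom");
    } CATCH_IE_EXCEPTIONS  // call sites no longer append their own catch (...)
    return OK;
}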
@ -275,15 +274,15 @@ IEStatusCode ie_core_get_versions(const ie_core_t *core, const char *device_name
char *_deviceName = deviceName.release(); char *_deviceName = deviceName.release();
memcpy(_deviceName, iter->first.c_str(), iter->first.length() + 1); memcpy(_deviceName, iter->first.c_str(), iter->first.length() + 1);
vers_ptrs[i].device_name = _deviceName; vers_ptrs[i].device_name = _deviceName;
IE_SUPPRESS_DEPRECATED_START
vers_ptrs[i].major = iter->second.apiVersion.major; vers_ptrs[i].major = iter->second.apiVersion.major;
vers_ptrs[i].minor = iter->second.apiVersion.minor; vers_ptrs[i].minor = iter->second.apiVersion.minor;
IE_SUPPRESS_DEPRECATED_END
vers_ptrs[i].build_number = iter->second.buildNumber; vers_ptrs[i].build_number = iter->second.buildNumber;
vers_ptrs[i].description = iter->second.description; vers_ptrs[i].description = iter->second.description;
} }
versions->versions = vers_ptrs.release(); versions->versions = vers_ptrs.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -314,9 +313,7 @@ IEStatusCode ie_core_read_network(ie_core_t *core, const char *xml, const char *
} }
network_result->object = core->object.ReadNetwork(xml, bin); network_result->object = core->object.ReadNetwork(xml, bin);
*network = network_result.release(); *network = network_result.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -334,9 +331,7 @@ IEStatusCode ie_core_read_network_from_memory(ie_core_t *core, const uint8_t *xm
network_result->object = core->object.ReadNetwork(std::string(reinterpret_cast<const char *>(xml_content), network_result->object = core->object.ReadNetwork(std::string(reinterpret_cast<const char *>(xml_content),
reinterpret_cast<const char *>(xml_content + xml_content_size)), weight_blob->object); reinterpret_cast<const char *>(xml_content + xml_content_size)), weight_blob->object);
*network = network_result.release(); *network = network_result.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -358,9 +353,7 @@ IEStatusCode ie_core_load_network(ie_core_t *core, const ie_network_t *network,
// create plugin in the registry and then create ExecutableNetwork. // create plugin in the registry and then create ExecutableNetwork.
exe_net->object = core->object.LoadNetwork(network->object, device_name, conf_map); exe_net->object = core->object.LoadNetwork(network->object, device_name, conf_map);
*exe_network = exe_net.release(); *exe_network = exe_net.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -381,9 +374,7 @@ IEStatusCode ie_core_load_network_from_file(ie_core_t *core, const char *xml, co
exe_net->object = core->object.LoadNetwork(xml, device_name, conf_map); exe_net->object = core->object.LoadNetwork(xml, device_name, conf_map);
*exe_network = exe_net.release(); *exe_network = exe_net.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -404,9 +395,7 @@ IEStatusCode ie_core_set_config(ie_core_t *core, const ie_config_t *ie_core_conf
try { try {
core->object.SetConfig(conf_map, deviceName); core->object.SetConfig(conf_map, deviceName);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -421,9 +410,7 @@ IEStatusCode ie_core_register_plugin(ie_core_t *core, const char *plugin_name, c
try { try {
core->object.RegisterPlugin(plugin_name, device_name); core->object.RegisterPlugin(plugin_name, device_name);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -438,9 +425,7 @@ IEStatusCode ie_core_register_plugins(ie_core_t *core, const char *xml_config_fi
try { try {
core->object.RegisterPlugins(xml_config_file); core->object.RegisterPlugins(xml_config_file);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -455,9 +440,7 @@ IEStatusCode ie_core_unregister_plugin(ie_core_t *core, const char *device_name)
try { try {
core->object.UnregisterPlugin(device_name); core->object.UnregisterPlugin(device_name);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -474,9 +457,7 @@ IEStatusCode ie_core_add_extension(ie_core_t *core, const char *extension_path,
auto extension_ptr = std::make_shared<InferenceEngine::Extension>(std::string{extension_path}); auto extension_ptr = std::make_shared<InferenceEngine::Extension>(std::string{extension_path});
auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr); auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
core->object.AddExtension(extension, device_name); core->object.AddExtension(extension, device_name);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -492,9 +473,7 @@ IEStatusCode ie_core_get_metric(const ie_core_t *core, const char *device_name,
try { try {
IE::Parameter param = core->object.GetMetric(device_name, metric_name); IE::Parameter param = core->object.GetMetric(device_name, metric_name);
parameter2IEparam(param, param_result); parameter2IEparam(param, param_result);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -512,9 +491,7 @@ IEStatusCode ie_core_get_config(const ie_core_t *core, const char *device_name,
// convert the parameter to ie_param_t // convert the parameter to ie_param_t
parameter2IEparam(param, param_result); parameter2IEparam(param, param_result);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -536,9 +513,7 @@ IEStatusCode ie_core_get_available_devices(const ie_core_t *core, ie_available_d
memcpy(dev_ptrs[i], _devices[i].c_str(), _devices[i].length() + 1); memcpy(dev_ptrs[i], _devices[i].c_str(), _devices[i].length() + 1);
} }
avai_devices->devices = dev_ptrs.release(); avai_devices->devices = dev_ptrs.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK; return IEStatusCode::OK;
} }
@ -575,9 +550,7 @@ IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t *ie_ex
std::unique_ptr<ie_infer_request_t> req(new ie_infer_request_t); std::unique_ptr<ie_infer_request_t> req(new ie_infer_request_t);
req->object = ie_exec_network->object.CreateInferRequest(); req->object = ie_exec_network->object.CreateInferRequest();
*request = req.release(); *request = req.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -593,9 +566,7 @@ IEStatusCode ie_exec_network_get_metric(const ie_executable_network_t *ie_exec_n
try { try {
InferenceEngine::Parameter parameter = ie_exec_network->object.GetMetric(metric_name); InferenceEngine::Parameter parameter = ie_exec_network->object.GetMetric(metric_name);
parameter2IEparam(parameter, param_result); parameter2IEparam(parameter, param_result);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -611,9 +582,7 @@ IEStatusCode ie_exec_network_set_config(ie_executable_network_t *ie_exec_network
try { try {
const std::map<std::string, IE::Parameter> conf_map = config2ParamMap(param_config); const std::map<std::string, IE::Parameter> conf_map = config2ParamMap(param_config);
ie_exec_network->object.SetConfig(conf_map); ie_exec_network->object.SetConfig(conf_map);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -629,9 +598,7 @@ IEStatusCode ie_exec_network_get_config(const ie_executable_network_t *ie_exec_n
try { try {
InferenceEngine::Parameter parameter = ie_exec_network->object.GetConfig(metric_config); InferenceEngine::Parameter parameter = ie_exec_network->object.GetConfig(metric_config);
parameter2IEparam(parameter, param_result); parameter2IEparam(parameter, param_result);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -653,9 +620,7 @@ IEStatusCode ie_network_get_name(const ie_network_t *network, char **name) {
std::unique_ptr<char[]> netName(new char[_name.length() + 1]); std::unique_ptr<char[]> netName(new char[_name.length() + 1]);
*name = netName.release(); *name = netName.release();
memcpy(*name, _name.c_str(), _name.length() + 1); memcpy(*name, _name.c_str(), _name.length() + 1);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK; return IEStatusCode::OK;
} }
@ -670,9 +635,7 @@ IEStatusCode ie_network_get_inputs_number(const ie_network_t *network, size_t *s
try { try {
IE::InputsDataMap inputs = network->object.getInputsInfo(); IE::InputsDataMap inputs = network->object.getInputsInfo();
*size_result = inputs.size(); *size_result = inputs.size();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -699,9 +662,7 @@ IEStatusCode ie_network_get_input_name(const ie_network_t *network, size_t numbe
*name = inputName.release(); *name = inputName.release();
memcpy(*name, iter->first.c_str(), iter->first.length() + 1); memcpy(*name, iter->first.c_str(), iter->first.length() + 1);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -722,9 +683,7 @@ IEStatusCode ie_network_get_input_precision(const ie_network_t *network, const c
IE::Precision p = inputs[input_name]->getPrecision(); IE::Precision p = inputs[input_name]->getPrecision();
*prec_result = precision_map[p]; *prec_result = precision_map[p];
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -751,9 +710,7 @@ IEStatusCode ie_network_set_input_precision(ie_network_t *network, const char *i
} }
inputs[input_name]->setPrecision(precision); inputs[input_name]->setPrecision(precision);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -774,9 +731,7 @@ IEStatusCode ie_network_get_input_layout(const ie_network_t *network, const char
IE::Layout l = inputs[input_name]->getLayout(); IE::Layout l = inputs[input_name]->getLayout();
*layout_result = layout_map[l]; *layout_result = layout_map[l];
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -803,9 +758,7 @@ IEStatusCode ie_network_set_input_layout(ie_network_t *network, const char *inpu
} }
inputs[input_name]->setLayout(layout); inputs[input_name]->setLayout(layout);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -829,9 +782,7 @@ IEStatusCode ie_network_get_input_dims(const ie_network_t *network, const char *
dims_result->dims[i] = dims[i]; dims_result->dims[i] = dims[i];
} }
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -852,9 +803,7 @@ IEStatusCode ie_network_get_input_resize_algorithm(const ie_network_t *network,
IE::ResizeAlgorithm resize = inputs[input_name]->getPreProcess().getResizeAlgorithm(); IE::ResizeAlgorithm resize = inputs[input_name]->getPreProcess().getResizeAlgorithm();
*resize_alg_result = resize_alg_map[resize]; *resize_alg_result = resize_alg_map[resize];
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -881,9 +830,7 @@ IEStatusCode ie_network_set_input_resize_algorithm(ie_network_t *network, const
} }
inputs[input_name]->getPreProcess().setResizeAlgorithm(resize); inputs[input_name]->getPreProcess().setResizeAlgorithm(resize);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -904,9 +851,7 @@ IEStatusCode ie_network_get_color_format(const ie_network_t *network, const char
IE::ColorFormat color = inputs[input_name]->getPreProcess().getColorFormat(); IE::ColorFormat color = inputs[input_name]->getPreProcess().getColorFormat();
*colformat_result = colorformat_map[color]; *colformat_result = colorformat_map[color];
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -933,9 +878,7 @@ IEStatusCode ie_network_set_color_format(ie_network_t *network, const char *inpu
} }
inputs[input_name]->getPreProcess().setColorFormat(color); inputs[input_name]->getPreProcess().setColorFormat(color);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -971,9 +914,7 @@ IEStatusCode ie_network_get_input_shapes(ie_network *network, input_shapes_t *sh
} }
shapes->shapes = shape_ptrs.release(); shapes->shapes = shape_ptrs.release();
status = IEStatusCode::OK; status = IEStatusCode::OK;
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -998,9 +939,7 @@ IEStatusCode ie_network_reshape(ie_network_t *network, const input_shapes_t shap
} }
network->object.reshape(net_shapes); network->object.reshape(net_shapes);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1016,9 +955,7 @@ IEStatusCode ie_network_get_outputs_number(const ie_network_t *network, size_t *
try { try {
IE::OutputsDataMap outputs = network->object.getOutputsInfo(); IE::OutputsDataMap outputs = network->object.getOutputsInfo();
*size_result = outputs.size(); *size_result = outputs.size();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1045,9 +982,7 @@ IEStatusCode ie_network_get_output_name(const ie_network_t *network, const size_
*name = outputName.release(); *name = outputName.release();
memcpy(*name, iter->first.c_str(), iter->first.length() + 1); memcpy(*name, iter->first.c_str(), iter->first.length() + 1);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1068,9 +1003,7 @@ IEStatusCode ie_network_get_output_precision(const ie_network_t *network, const
IE::Precision p = outputs[output_name]->getPrecision(); IE::Precision p = outputs[output_name]->getPrecision();
*prec_result = precision_map[p]; *prec_result = precision_map[p];
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1097,9 +1030,7 @@ IEStatusCode ie_network_set_output_precision(ie_network_t *network, const char *
} }
outputs[output_name]->setPrecision(precision); outputs[output_name]->setPrecision(precision);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1120,9 +1051,7 @@ IEStatusCode ie_network_get_output_layout(const ie_network_t *network, const cha
IE::Layout l = outputs[output_name]->getLayout(); IE::Layout l = outputs[output_name]->getLayout();
*layout_result = layout_map[l]; *layout_result = layout_map[l];
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1149,9 +1078,7 @@ IEStatusCode ie_network_set_output_layout(ie_network_t *network, const char *out
} }
outputs[output_name]->setLayout(layout); outputs[output_name]->setLayout(layout);
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1175,9 +1102,7 @@ IEStatusCode ie_network_get_output_dims(const ie_network_t *network, const char
dims_result->dims[i] = dims[i]; dims_result->dims[i] = dims[i];
} }
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1220,9 +1145,7 @@ IEStatusCode ie_infer_request_get_blob(ie_infer_request_t *infer_request, const
std::unique_ptr<ie_blob_t> blob_result(new ie_blob_t); std::unique_ptr<ie_blob_t> blob_result(new ie_blob_t);
blob_result->object = blob_ptr; blob_result->object = blob_ptr;
*blob = blob_result.release(); *blob = blob_result.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1237,9 +1160,7 @@ IEStatusCode ie_infer_request_set_blob(ie_infer_request_t *infer_request, const
try { try {
infer_request->object.SetBlob(name, blob->object); infer_request->object.SetBlob(name, blob->object);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1254,9 +1175,7 @@ IEStatusCode ie_infer_request_infer(ie_infer_request_t *infer_request) {
try { try {
infer_request->object.Infer(); infer_request->object.Infer();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1271,9 +1190,7 @@ IEStatusCode ie_infer_request_infer_async(ie_infer_request_t *infer_request) {
try { try {
infer_request->object.StartAsync(); infer_request->object.StartAsync();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1291,9 +1208,7 @@ IEStatusCode ie_infer_set_completion_callback(ie_infer_request_t *infer_request,
callback->completeCallBackFunc(callback->args); callback->completeCallBackFunc(callback->args);
}; };
infer_request->object.SetCompletionCallback(fun); infer_request->object.SetCompletionCallback(fun);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1309,9 +1224,7 @@ IEStatusCode ie_infer_request_wait(ie_infer_request_t *infer_request, const int6
try { try {
IE::StatusCode status_code = infer_request->object.Wait(timeout); IE::StatusCode status_code = infer_request->object.Wait(timeout);
status = status_map[status_code]; status = status_map[status_code];
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1326,9 +1239,7 @@ IEStatusCode ie_infer_request_set_batch(ie_infer_request_t *infer_request, const
try { try {
infer_request->object.SetBatch(size); infer_request->object.SetBatch(size);
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1390,9 +1301,7 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl
_blob->object->allocate(); _blob->object->allocate();
*blob = _blob.release(); *blob = _blob.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1462,9 +1371,7 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe
_blob->object = IE::make_shared_blob(tensor, p, size); _blob->object = IE::make_shared_blob(tensor, p, size);
} }
*blob = _blob.release(); *blob = _blob.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1480,9 +1387,7 @@ IEStatusCode ie_blob_make_memory_with_roi(const ie_blob_t *inputBlob, const roi_
IE::ROI roi_d = {roi->id, roi->posX, roi->posY, roi->sizeX, roi->sizeY}; IE::ROI roi_d = {roi->id, roi->posX, roi->posY, roi->sizeX, roi->sizeY};
_blob->object = IE::make_shared_blob(inputBlob->object, roi_d); _blob->object = IE::make_shared_blob(inputBlob->object, roi_d);
*blob = _blob.release(); *blob = _blob.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1496,9 +1401,7 @@ IEStatusCode ie_blob_make_memory_nv12(const ie_blob_t *y, const ie_blob_t *uv, i
std::unique_ptr<ie_blob_t> _blob(new ie_blob_t); std::unique_ptr<ie_blob_t> _blob(new ie_blob_t);
_blob->object = IE::make_shared_blob<IE::NV12Blob>(y->object, uv->object); _blob->object = IE::make_shared_blob<IE::NV12Blob>(y->object, uv->object);
*nv12Blob = _blob.release(); *nv12Blob = _blob.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK; return IEStatusCode::OK;
} }
@ -1512,9 +1415,7 @@ IEStatusCode ie_blob_make_memory_i420(const ie_blob_t *y, const ie_blob_t *u, co
std::unique_ptr<ie_blob_t> _blob(new ie_blob_t); std::unique_ptr<ie_blob_t> _blob(new ie_blob_t);
_blob->object = IE::make_shared_blob<IE::I420Blob>(y->object, u->object, v->object); _blob->object = IE::make_shared_blob<IE::I420Blob>(y->object, u->object, v->object);
*i420Blob = _blob.release(); *i420Blob = _blob.release();
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return IEStatusCode::OK; return IEStatusCode::OK;
} }
@ -1587,9 +1488,7 @@ IEStatusCode ie_blob_get_dims(const ie_blob_t *blob, dimensions_t *dims_result)
for (size_t i = 0; i< dims_result->ranks; ++i) { for (size_t i = 0; i< dims_result->ranks; ++i) {
dims_result->dims[i] = size_vector[i]; dims_result->dims[i] = size_vector[i];
} }
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1605,9 +1504,7 @@ IEStatusCode ie_blob_get_layout(const ie_blob_t *blob, layout_e *layout_result)
try { try {
IE::Layout l = blob->object->getTensorDesc().getLayout(); IE::Layout l = blob->object->getTensorDesc().getLayout();
*layout_result = layout_map[l]; *layout_result = layout_map[l];
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
@ -1623,9 +1520,7 @@ IEStatusCode ie_blob_get_precision(const ie_blob_t *blob, precision_e *prec_resu
try { try {
IE::Precision p = blob->object->getTensorDesc().getPrecision(); IE::Precision p = blob->object->getTensorDesc().getPrecision();
*prec_result = precision_map[p]; *prec_result = precision_map[p];
} CATCH_IE_EXCEPTIONS catch (...) { } CATCH_IE_EXCEPTIONS
return IEStatusCode::UNEXPECTED;
}
return status; return status;
} }
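The C API hunks above collapse each hand-written "catch (...) { return IEStatusCode::UNEXPECTED; }" tail into a single CATCH_IE_EXCEPTIONS macro appended after every try block, so each wrapper keeps only its final "return status;". A minimal self-contained sketch of that pattern follows; the exception types and status values here are toy stand-ins, not the real InferenceEngine definitions.

    #include <stdexcept>

    // Toy stand-ins for the real IEStatusCode enum and InferenceEngine exceptions.
    enum class IEStatusCode { OK = 0, GENERAL_ERROR = -1, NOT_FOUND = -5, UNEXPECTED = -7 };
    struct GeneralError : std::runtime_error { using std::runtime_error::runtime_error; };
    struct NotFound     : std::runtime_error { using std::runtime_error::runtime_error; };

    // Map each known exception type to a status code; end with a catch-all.
    #define CATCH_IE_EXCEPTION(STATUS, ExceptionType) \
        catch (const ExceptionType&) { return IEStatusCode::STATUS; }
    #define CATCH_IE_EXCEPTIONS                         \
        CATCH_IE_EXCEPTION(GENERAL_ERROR, GeneralError) \
        CATCH_IE_EXCEPTION(NOT_FOUND, NotFound)         \
        catch (...) { return IEStatusCode::UNEXPECTED; }

    // Usage pattern matching the call sites above: the macro carries the fallback,
    // so the function body only needs the try block and the final return.
    IEStatusCode do_infer(bool fail) {
        IEStatusCode status = IEStatusCode::OK;
        try {
            if (fail) throw NotFound("blob name not found");
        } CATCH_IE_EXCEPTIONS
        return status;
    }

Because the macro ends with a catch-all, the individual wrappers no longer need their own fallback handler.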

View File

@ -6,6 +6,7 @@
#include "hetero/hetero_plugin_config.hpp" #include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp" #include "ie_iinfer_request.hpp"
#include "ie_plugin_config.hpp"
const std::string EXPORTED_NETWORK_NAME = "undefined"; const std::string EXPORTED_NETWORK_NAME = "undefined";
std::map<std::string, InferenceEngine::Precision> precision_map = { std::map<std::string, InferenceEngine::Precision> precision_map = {
@ -70,6 +71,11 @@ PyObject* parse_parameter(const InferenceEngine::Parameter& param) {
auto val = param.as<unsigned int>(); auto val = param.as<unsigned int>();
return PyLong_FromLong((unsigned long)val); return PyLong_FromLong((unsigned long)val);
} }
// Check for uint64_t
else if (param.is<uint64_t>()) {
auto val = param.as<uint64_t>();
return PyLong_FromLong((unsigned long)val);
}
// Check for float // Check for float
else if (param.is<float>()) { else if (param.is<float>()) {
auto val = param.as<float>(); auto val = param.as<float>();
@ -151,6 +157,21 @@ PyObject* parse_parameter(const InferenceEngine::Parameter& param) {
PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second)); PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second));
} }
return dict; return dict;
} else if (param.is<std::map<InferenceEngine::Precision, float>>()) {
auto val = param.as<std::map<InferenceEngine::Precision, float>>();
PyObject* dict = PyDict_New();
for (const auto& it : val) {
std::stringstream s;
s << it.first;
PyDict_SetItemString(dict, s.str().c_str(), PyFloat_FromDouble((double)it.second));
}
return dict;
} else if (param.is<InferenceEngine::Metrics::DeviceType>()) {
auto val = param.as<InferenceEngine::Metrics::DeviceType>();
using namespace InferenceEngine;
std::stringstream s;
s << val;
return PyUnicode_FromString(s.str().c_str());
} else { } else {
PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!"); PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!");
return (PyObject*)NULL; return (PyObject*)NULL;
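The binding change above teaches parse_parameter to convert uint64_t values, std::map<InferenceEngine::Precision, float> maps and Metrics::DeviceType enums into Python objects. A sketch of the map-to-dict idea using the raw CPython API; map_to_pydict is a hypothetical helper written for illustration, not code from this file.

    #include <Python.h>
    #include <map>
    #include <sstream>

    // Hypothetical helper mirroring the pattern above: stream each key to text and
    // store the value as a Python float in a freshly created dict.
    template <typename K>
    PyObject* map_to_pydict(const std::map<K, float>& values) {
        PyObject* dict = PyDict_New();
        for (const auto& it : values) {
            std::stringstream key;
            key << it.first;  // relies on operator<< being defined for the key type
            PyObject* val = PyFloat_FromDouble(static_cast<double>(it.second));
            PyDict_SetItemString(dict, key.str().c_str(), val);
            Py_DECREF(val);   // the dict keeps its own reference to the value
        }
        return dict;          // the caller owns the returned reference
    }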

View File

@ -0,0 +1,38 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
 * @brief A header that defines advanced properties for the Auto plugin.
* These properties should be used in SetConfig() and LoadNetwork() methods
*
* @file auto_config.hpp
*/
#pragma once
#include "ie_plugin_config.hpp"
namespace InferenceEngine {
/**
* @brief Auto plugin configuration
*/
namespace AutoConfigParams {
/**
* @def AUTO_CONFIG_KEY(name)
 * @brief A macro which provides an AUTO-mangled name for a configuration key with name `name`
*/
#define AUTO_CONFIG_KEY(name) InferenceEngine::AutoConfigParams::_CONFIG_KEY(AUTO_##name)
#define DECLARE_AUTO_CONFIG_KEY(name) DECLARE_CONFIG_KEY(AUTO_##name)
#define DECLARE_AUTO_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(AUTO_##name)
/**
* @brief Limit device list config option, with comma-separated devices listed
*/
DECLARE_AUTO_CONFIG_KEY(DEVICE_LIST);
} // namespace AutoConfigParams
} // namespace InferenceEngine
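A sketch of how the new key might be passed from application code, assuming the AUTO device is registered and the header is reachable as auto/auto_config.hpp; the model path and candidate device list are placeholders.

    #include <ie_core.hpp>
    #include <auto/auto_config.hpp>  // assumed include path for the header added above

    int main() {
        InferenceEngine::Core core;
        auto network = core.ReadNetwork("model.xml");  // placeholder model
        // Restrict the AUTO plugin to a comma-separated list of candidate devices.
        auto executable = core.LoadNetwork(network, "AUTO",
                                           {{AUTO_CONFIG_KEY(DEVICE_LIST), "CPU,GPU"}});
        (void)executable;
        return 0;
    }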

View File

@ -14,6 +14,44 @@
namespace InferenceEngine { namespace InferenceEngine {
namespace Metrics {
/**
* @def GPU_METRIC_KEY(name)
* @brief shortcut for defining GPU plugin metrics
*/
#define GPU_METRIC_KEY(name) METRIC_KEY(GPU_##name)
#define DECLARE_GPU_METRIC_KEY(name, ...) DECLARE_METRIC_KEY(GPU_##name, __VA_ARGS__)
/**
* @def DECLARE_GPU_METRIC_VALUE(name)
* @brief shortcut for defining gpu metric values
*/
#define DECLARE_GPU_METRIC_VALUE(name) DECLARE_METRIC_VALUE(GPU_##name)
/**
 * @brief Metric which defines the size of memory in bytes available for the device. For iGPU it returns the host memory size, for dGPU the dedicated GPU memory size
*/
DECLARE_GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE, uint64_t);
/**
* @brief Metric to get microarchitecture identifier in major.minor.revision format
*/
DECLARE_GPU_METRIC_KEY(UARCH_VERSION, std::string);
/**
* @brief Metric to get count of execution units for current GPU
*/
DECLARE_GPU_METRIC_KEY(EXECUTION_UNITS_COUNT, int);
/**
* @brief Possible return value for OPTIMIZATION_CAPABILITIES metric
* - "HW_MATMUL" - Defines if device has hardware block for matrix multiplication
*/
DECLARE_GPU_METRIC_VALUE(HW_MATMUL);
} // namespace Metrics
/** /**
* @brief GPU plugin configuration * @brief GPU plugin configuration
*/ */
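A sketch of querying the new metrics through Core::GetMetric, assuming a device named GPU is available and the header is reachable as gpu/gpu_config.hpp; the value types follow the declarations above.

    #include <cstdint>
    #include <iostream>
    #include <string>

    #include <ie_core.hpp>
    #include <gpu/gpu_config.hpp>  // assumed include path for the metrics above

    int main() {
        InferenceEngine::Core core;
        const std::string device = "GPU";  // assumes a GPU device is registered
        auto mem  = core.GetMetric(device, GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE)).as<uint64_t>();
        auto arch = core.GetMetric(device, GPU_METRIC_KEY(UARCH_VERSION)).as<std::string>();
        auto eus  = core.GetMetric(device, GPU_METRIC_KEY(EXECUTION_UNITS_COUNT)).as<int>();
        std::cout << "total memory: " << mem << " bytes, uarch: " << arch
                  << ", execution units: " << eus << std::endl;
        return 0;
    }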

View File

@ -41,10 +41,12 @@ public:
IE_SUPPRESS_DEPRECATED_START IE_SUPPRESS_DEPRECATED_START
/** /**
* @deprecated Don't use this constructor. It will be removed soon
* @brief Allows helper class to manage lifetime of network object * @brief Allows helper class to manage lifetime of network object
* *
* @param network Pointer to the network object * @param network Pointer to the network object
*/ */
INFERENCE_ENGINE_DEPRECATED("Don't use this constructor. It will be removed soon")
explicit CNNNetwork(std::shared_ptr<ICNNNetwork> network); explicit CNNNetwork(std::shared_ptr<ICNNNetwork> network);
IE_SUPPRESS_DEPRECATED_END IE_SUPPRESS_DEPRECATED_END
@ -59,55 +61,69 @@ public:
const std::vector<IExtensionPtr>& exts = {}); const std::vector<IExtensionPtr>& exts = {});
/** /**
* @copybrief ICNNNetwork::getOutputsInfo * @brief Gets the network output Data node information. The received info is stored in the given Data node.
* *
* Wraps ICNNNetwork::getOutputsInfo * For single and multiple outputs networks.
* *
 * @return outputs Reference to the OutputsDataMap object * This method needs to be called to find out OpenVINO output names for using them later
* when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
*
* If you want to use framework names, you can use InferenceEngine::CNNNetwork::getOVNameForTensor
* method to map framework names to OpenVINO names
*
* @return the InferenceEngine::OutputsDataMap object
*/ */
OutputsDataMap getOutputsInfo() const; OutputsDataMap getOutputsInfo() const;
/** /**
* @copybrief ICNNNetwork::getInputsInfo * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap
* object.
* *
* Wraps ICNNNetwork::getInputsInfo * For single and multiple inputs networks.
 * This method needs to be called to find out OpenVINO input names for using them later
* when calling InferenceEngine::InferRequest::SetBlob
* *
* @return inputs Reference to InputsDataMap object * If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor
* method to map framework names to OpenVINO names
*
* @return The InferenceEngine::InputsDataMap object.
*/ */
InputsDataMap getInputsInfo() const; InputsDataMap getInputsInfo() const;
/** /**
* @copybrief ICNNNetwork::layerCount * @brief Returns the number of layers in the network as an integer value
*
* Wraps ICNNNetwork::layerCount
*
* @return The number of layers as an integer value * @return The number of layers as an integer value
*/ */
size_t layerCount() const; size_t layerCount() const;
/** /**
* @copybrief ICNNNetwork::getName * @brief Returns the network name.
*
* Wraps ICNNNetwork::getName
*
* @return Network name * @return Network name
*/ */
const std::string& getName() const; const std::string& getName() const;
/** /**
* @copybrief ICNNNetwork::setBatchSize * @brief Changes the inference batch size.
* *
* Wraps ICNNNetwork::setBatchSize * @note There are several limitations and it's not recommended to use it. Set batch to the input shape and call
* InferenceEngine::CNNNetwork::reshape.
* *
* @param size Size of batch to set * @param size Size of batch to set
*
* @note Current implementation of the function sets batch size to the first dimension of all layers in the
* networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the
* method works incorrectly. This limitation is resolved via shape inference feature by using
* InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation
*/ */
void setBatchSize(const size_t size); void setBatchSize(const size_t size);
/** /**
* @copybrief ICNNNetwork::getBatchSize * @brief Gets the inference batch size
*
* Wraps ICNNNetwork::getBatchSize
*
* @return The size of batch as a size_t value * @return The size of batch as a size_t value
*/ */
size_t getBatchSize() const; size_t getBatchSize() const;
@ -119,7 +135,7 @@ public:
* *
* @return A shared pointer of the current network * @return A shared pointer of the current network
*/ */
// INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated") INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
operator ICNNNetwork::Ptr(); operator ICNNNetwork::Ptr();
/** /**
@ -128,7 +144,7 @@ public:
* *
* @return An instance of the current network * @return An instance of the current network
*/ */
// INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated") INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
operator ICNNNetwork&(); operator ICNNNetwork&();
/** /**
@ -137,47 +153,42 @@ public:
* *
* @return A const reference of the current network * @return A const reference of the current network
*/ */
// INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated") INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
operator const ICNNNetwork&() const; operator const ICNNNetwork&() const;
IE_SUPPRESS_DEPRECATED_END IE_SUPPRESS_DEPRECATED_END
/** /**
* @brief Returns constant nGraph function * @brief Returns constant nGraph function
*
* @return constant nGraph function * @return constant nGraph function
*/ */
std::shared_ptr<ngraph::Function> getFunction(); std::shared_ptr<ngraph::Function> getFunction();
/** /**
* @brief Returns constant nGraph function * @brief Returns constant nGraph function
*
* @return constant nGraph function * @return constant nGraph function
*/ */
std::shared_ptr<const ngraph::Function> getFunction() const; std::shared_ptr<const ngraph::Function> getFunction() const;
/** /**
* @copybrief ICNNNetwork::addOutput * @brief Adds output to the layer
*
* Wraps ICNNNetwork::addOutput
*
* @param layerName Name of the layer * @param layerName Name of the layer
* @param outputIndex Index of the output * @param outputIndex Index of the output
*/ */
void addOutput(const std::string& layerName, size_t outputIndex = 0); void addOutput(const std::string& layerName, size_t outputIndex = 0);
IE_SUPPRESS_DEPRECATED_START
/** /**
 * @brief Helper method to collect all input shapes with names of corresponding Data objects * @brief Helper method to collect all input shapes with names of corresponding Data objects
*
* @return Map of pairs: input name and its dimension. * @return Map of pairs: input name and its dimension.
*/ */
ICNNNetwork::InputShapes getInputShapes() const; ICNNNetwork::InputShapes getInputShapes() const;
/** /**
* @brief Run shape inference with new input shapes for the network * @brief Run shape inference with new input shapes for the network
* * @param inputShapes A map of pairs: name of corresponding data and its dimension.
* @param inputShapes - map of pairs: name of corresponding data and its dimension.
*/ */
void reshape(const ICNNNetwork::InputShapes& inputShapes); void reshape(const ICNNNetwork::InputShapes& inputShapes);
IE_SUPPRESS_DEPRECATED_END
/** /**
* @brief Serialize network to IR and weights files. * @brief Serialize network to IR and weights files.
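The rewritten comments above state that the names reported by getInputsInfo() and getOutputsInfo() are the ones accepted by InferRequest::GetBlob and SetBlob. A minimal sketch of that flow; the model path and device name are placeholders.

    #include <ie_core.hpp>

    int main() {
        InferenceEngine::Core core;
        auto network = core.ReadNetwork("model.xml");  // placeholder model path
        auto executable = core.LoadNetwork(network, "CPU");
        auto request = executable.CreateInferRequest();

        // The OpenVINO tensor names reported by the network double as blob names.
        for (const auto& input : network.getInputsInfo()) {
            auto blob = request.GetBlob(input.first);  // plugin-allocated input blob
            (void)blob;                                // ... fill with input data ...
        }
        request.Infer();
        for (const auto& output : network.getOutputsInfo()) {
            auto result = request.GetBlob(output.first);  // read results by output name
            (void)result;
        }
        return 0;
    }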

View File

@ -16,40 +16,52 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "ie_parameter.hpp"
#include "ie_remote_context.hpp"
#include "cpp/ie_cnn_network.h" #include "cpp/ie_cnn_network.h"
#include "cpp/ie_infer_request.hpp" #include "cpp/ie_infer_request.hpp"
#include "details/ie_so_loader.h"
#include "ie_iexecutable_network.hpp"
namespace InferenceEngine { namespace InferenceEngine {
namespace details {
class SharedObjectLoader;
}
class IExecutableNetworkInternal; class IExecutableNetworkInternal;
class IExecutableNetwork;
/** /**
* @brief This is an interface of an executable network * @brief This is an interface of an executable network
*/ */
class INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) { class INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) {
std::shared_ptr<IExecutableNetworkInternal> _impl; details::SharedObjectLoader _so;
std::shared_ptr<details::SharedObjectLoader> _so; std::shared_ptr<IExecutableNetworkInternal> _impl;
IE_SUPPRESS_DEPRECATED_START
std::shared_ptr<IExecutableNetwork> actual;
IE_SUPPRESS_DEPRECATED_END
ExecutableNetwork(const std::shared_ptr<IExecutableNetworkInternal>& impl, /**
const std::shared_ptr<details::SharedObjectLoader>& so); * @brief Constructs ExecutableNetwork from the initialized std::shared_ptr
* @param so Plugin to use. This is required to ensure that ExecutableNetwork can work properly even if plugin object is destroyed.
friend class InferencePlugin; * @param impl Initialized shared pointer
*/
ExecutableNetwork(const details::SharedObjectLoader& so,
const std::shared_ptr<IExecutableNetworkInternal>& impl);
friend class Core;
public: public:
/** /**
* @brief Default constructor * @brief A default constructor.
*/ */
ExecutableNetwork() = default; ExecutableNetwork() = default;
IE_SUPPRESS_DEPRECATED_START
/** /**
* @brief Default destructor * @deprecated This ctor will be removed in 2022.1
* @brief Constructs ExecutableNetwork from the initialized std::shared_ptr
* @param exec Initialized shared pointer
* @param splg Plugin to use. This is required to ensure that ExecutableNetwork can work properly even if plugin object is destroyed.
*/ */
~ExecutableNetwork(); INFERENCE_ENGINE_DEPRECATED("This ctor will be removed in 2022.1")
explicit ExecutableNetwork(std::shared_ptr<IExecutableNetwork> exec,
std::shared_ptr<details::SharedObjectLoader> splg = {});
IE_SUPPRESS_DEPRECATED_END
/** /**
* @brief Gets the Executable network output Data node information. * @brief Gets the Executable network output Data node information.

View File

@ -13,19 +13,19 @@
#include <memory> #include <memory>
#include <string> #include <string>
#include "ie_blob.h"
#include "cpp/ie_memory_state.hpp" #include "cpp/ie_memory_state.hpp"
#include "ie_remote_context.hpp"
#include "ie_iinfer_request.hpp" #include "ie_iinfer_request.hpp"
#include "details/ie_so_loader.h" #include "details/ie_so_loader.h"
#include "ie_blob.h"
namespace InferenceEngine { namespace InferenceEngine {
namespace details {
class SharedObjectLoader;
}
class IInferRequestInternal; class IInferRequestInternal;
namespace details {
class ICompletionCallbackWrapper;
} // namespace details
/** /**
* @copybrief IInferRequest * @copybrief IInferRequest
* *
@ -33,12 +33,20 @@ class IInferRequestInternal;
* It can throw exceptions safely for the application, where it is properly handled. * It can throw exceptions safely for the application, where it is properly handled.
*/ */
class INFERENCE_ENGINE_API_CLASS(InferRequest) { class INFERENCE_ENGINE_API_CLASS(InferRequest) {
std::shared_ptr<IInferRequestInternal> _impl; details::SharedObjectLoader _so;
std::shared_ptr<details::SharedObjectLoader> _so; std::shared_ptr<IInferRequestInternal> _impl;
IE_SUPPRESS_DEPRECATED_START
InferRequest(const std::shared_ptr<IInferRequestInternal>& impl, IInferRequest::Ptr actual;
const std::shared_ptr<details::SharedObjectLoader>& so); std::shared_ptr<details::ICompletionCallbackWrapper> callback;
IE_SUPPRESS_DEPRECATED_END
/**
* @brief Constructs InferRequest from the initialized std::shared_ptr
* @param so Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed.
* @param impl Initialized shared pointer
*/
InferRequest(const details::SharedObjectLoader& so,
const std::shared_ptr<IInferRequestInternal>& impl);
friend class ExecutableNetwork; friend class ExecutableNetwork;
public: public:
@ -63,10 +71,17 @@ public:
*/ */
InferRequest() = default; InferRequest() = default;
IE_SUPPRESS_DEPRECATED_START
/** /**
* @brief Destructor * @deprecated This ctor will be removed in 2022.1
* @brief Constructs InferRequest from the initialized std::shared_ptr
* @param request Initialized shared pointer
* @param splg Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed.
*/ */
~InferRequest(); INFERENCE_ENGINE_DEPRECATED("This ctor will be removed in 2022.1")
explicit InferRequest(IInferRequest::Ptr request,
std::shared_ptr<details::SharedObjectLoader> splg = {});
IE_SUPPRESS_DEPRECATED_END
/** /**
* @brief Sets input/output data to infer * @brief Sets input/output data to infer
@ -222,6 +237,18 @@ public:
* @return true if current InferRequest object is initialized, false - otherwise * @return true if current InferRequest object is initialized, false - otherwise
*/ */
explicit operator bool() const noexcept; explicit operator bool() const noexcept;
/**
* @brief Compares whether this request wraps the same impl underneath
* @return true if current InferRequest object doesn't wrap the same impl as the operator's arg
*/
bool operator!=(const InferRequest&) const noexcept;
/**
* @brief Compares whether this request wraps the same impl underneath
* @return true if current InferRequest object wraps the same impl as the operator's arg
*/
bool operator==(const InferRequest&) const noexcept;
}; };
template<> template<>
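A sketch of the new comparison operators together with the existing bool conversion, assuming they compare the wrapped implementation pointer as the comments above describe; the model path and device are placeholders.

    #include <cassert>
    #include <ie_core.hpp>

    int main() {
        InferenceEngine::Core core;
        auto executable = core.LoadNetwork(core.ReadNetwork("model.xml"), "CPU");  // placeholders

        InferenceEngine::InferRequest empty;  // default-constructed, wraps nothing
        assert(!empty);                       // operator bool reports an uninitialized request

        auto a = executable.CreateInferRequest();
        auto b = a;                           // copies share the same underlying impl
        assert(a == b);
        assert(!(a != b));

        auto c = executable.CreateInferRequest();
        assert(a != c);                       // a distinct request object underneath
        return 0;
    }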

View File

@ -15,34 +15,51 @@
#include "ie_api.h" #include "ie_api.h"
#include "ie_blob.h" #include "ie_blob.h"
#include "details/ie_so_loader.h"
#include "ie_imemory_state.hpp"
namespace InferenceEngine { namespace InferenceEngine {
namespace details {
class SharedObjectLoader;
}
class IVariableStateInternal; class IVariableStateInternal;
/** /**
* @brief C++ exception based error reporting wrapper of API class IVariableState * @brief C++ exception based error reporting wrapper of API class IVariableState
*/ */
class INFERENCE_ENGINE_API_CLASS(VariableState) { class INFERENCE_ENGINE_API_CLASS(VariableState) {
std::shared_ptr<IVariableStateInternal> _impl = nullptr; details::SharedObjectLoader _so;
std::shared_ptr<details::SharedObjectLoader> _so = nullptr; std::shared_ptr<IVariableStateInternal> _impl;
IE_SUPPRESS_DEPRECATED_START
std::shared_ptr<IVariableState> actual;
IE_SUPPRESS_DEPRECATED_END
/** /**
* @brief Constructs VariableState from the initialized std::shared_ptr * @brief Constructs VariableState from the initialized std::shared_ptr
* @param impl Initialized shared pointer * @param impl Initialized shared pointer
* @param so Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin object is destroyed. * @param so Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin object is destroyed.
*/ */
VariableState(const std::shared_ptr<IVariableStateInternal>& impl, VariableState(const details::SharedObjectLoader& so,
const std::shared_ptr<details::SharedObjectLoader>& so); const std::shared_ptr<IVariableStateInternal>& impl);
friend class InferRequest; friend class InferRequest;
friend class ExecutableNetwork; friend class ExecutableNetwork;
public: public:
/**
* @brief Default constructor
*/
VariableState() = default;
IE_SUPPRESS_DEPRECATED_START
/**
* @deprecated This ctor will be removed in 2022.1
* @brief constructs VariableState from the initialized std::shared_ptr
* @param pState Initialized shared pointer
* @param plg Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin object is destroyed.
*/
INFERENCE_ENGINE_DEPRECATED("This ctor will be removed in 2022.1")
explicit VariableState(std::shared_ptr<IVariableState> pState,
std::shared_ptr<details::SharedObjectLoader> plg = {});
IE_SUPPRESS_DEPRECATED_END
/** /**
* @copybrief IVariableState::Reset * @copybrief IVariableState::Reset
* *
@ -62,7 +79,7 @@ public:
* @copybrief IVariableState::GetState * @copybrief IVariableState::GetState
* *
* Wraps IVariableState::GetState * Wraps IVariableState::GetState
* @return A blob representing a state * @return A blob representing a state
*/ */
Blob::CPtr GetState() const; Blob::CPtr GetState() const;
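A sketch of reading and resetting variable states from an application, assuming the request exposes them through QueryState() as in other 2021.x API examples; the model path and device are placeholders.

    #include <ie_core.hpp>

    int main() {
        InferenceEngine::Core core;
        auto executable = core.LoadNetwork(core.ReadNetwork("model.xml"), "CPU");  // placeholders
        auto request = executable.CreateInferRequest();

        // Stateful networks expose their variables as VariableState objects.
        for (auto&& state : request.QueryState()) {
            auto name  = state.GetName();   // variable identifier
            auto value = state.GetState();  // read-only blob with the current value
            (void)name;
            (void)value;
            state.Reset();                  // return the variable to its initial value
        }
        return 0;
    }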

View File

@ -4,7 +4,7 @@
/** /**
* @brief A header file for definition of abstraction over platform specific shared objects * @brief A header file for definition of abstraction over platform specific shared objects
* *
* @file ie_so_loader.h * @file ie_so_loader.h
*/ */
#pragma once #pragma once
@ -25,9 +25,9 @@ class INFERENCE_ENGINE_API_CLASS(SharedObjectLoader) {
public: public:
/** /**
* @brief A shared pointer to SharedObjectLoader * @brief Default constructor
*/ */
using Ptr = std::shared_ptr<SharedObjectLoader>; SharedObjectLoader() = default;
#ifdef ENABLE_UNICODE_PATH_SUPPORT #ifdef ENABLE_UNICODE_PATH_SUPPORT
/** /**

View File

@ -36,19 +36,19 @@ using enableIfSupportedChar = typename std::enable_if<(std::is_same<C, char>::va
/** /**
 * @brief This class instantiates an object using a shared library * @brief This class instantiates an object using a shared library
 * @tparam T A type of object SOPointer can hold * @tparam T A type of object SOPointer can hold
* @tparam Loader A loader used to load a library
*/ */
template <class T, class Loader = SharedObjectLoader> template <class T>
class SOPointer { class SOPointer {
template <class U, class W> template <class U>
friend class SOPointer; friend class SOPointer;
IE_SUPPRESS_DEPRECATED_START
IE_SUPPRESS_DEPRECATED_START
struct HasRelease { struct HasRelease {
template <typename C> static char test(decltype(&C::Release)); template <typename C> static char test(decltype(&C::Release));
template <typename C> static long test(...); template <typename C> static long test(...);
constexpr static const bool value = sizeof(test<T>(nullptr)) == sizeof(char); constexpr static const bool value = sizeof(test<T>(nullptr)) == sizeof(char);
}; };
IE_SUPPRESS_DEPRECATED_END IE_SUPPRESS_DEPRECATED_END
public: public:
/** /**
@ -62,48 +62,36 @@ public:
*/ */
template <typename C, template <typename C,
typename = enableIfSupportedChar<C>> typename = enableIfSupportedChar<C>>
explicit SOPointer(const std::basic_string<C> & name) SOPointer(const std::basic_string<C> & name)
: _so_loader(new Loader(name.c_str())) { : _so(name.c_str()) {
Load(std::integral_constant<bool, HasRelease::value>{}); Load(std::integral_constant<bool, HasRelease::value>{});
}
/**
* @brief The main constructor
* @param name Name of a shared library file
*/
explicit SOPointer(const char * name)
: _so_loader(new Loader(name)) {
Load(std::integral_constant<bool, HasRelease::value>{});
}
/**
* @brief Constructs an object with existing reference
* @param pointedObj Existing reference to wrap
*/
explicit SOPointer(T* pointedObj): _so_loader(), _pointedObj(pointedObj) {
if (_pointedObj == nullptr) {
IE_THROW() << "Cannot create SOPointer<T, Loader> from nullptr";
}
} }
/** /**
* @brief Constructs an object with existing reference
* @brief Constructs an object with existing loader * @brief Constructs an object with existing loader
* @param so_loader Existing pointer to a library loader * @param soLoader Existing pointer to a library loader
*/ */
explicit SOPointer(const std::shared_ptr<Loader>& so_loader) SOPointer(const SharedObjectLoader& so, const std::shared_ptr<T>& ptr) : _so{so}, _ptr{ptr} {}
: _so_loader(so_loader) {
Load(std::integral_constant<bool, HasRelease::value>{}); /**
} * @brief Constructs an object with existing loader
* @param so Existing pointer to a library loader
*/
explicit SOPointer(const SharedObjectLoader& so)
: _so(so) {
Load(std::integral_constant<bool, HasRelease::value>{});
}
/** /**
* @brief The copy-like constructor, can create So Pointer that dereferenced into child type if T is derived of U * @brief The copy-like constructor, can create So Pointer that dereferenced into child type if T is derived of U
* @param that copied SOPointer object * @param that copied SOPointer object
*/ */
template <class U, class W> template <typename U>
SOPointer(const SOPointer<U, W>& that) SOPointer(const SOPointer<U>& that)
: _so_loader(std::dynamic_pointer_cast<Loader>(that._so_loader)), : _so(that._so),
_pointedObj(std::dynamic_pointer_cast<T>(that._pointedObj)) { _ptr(std::dynamic_pointer_cast<T>(that._ptr)) {
IE_ASSERT(_pointedObj != nullptr); IE_ASSERT(_ptr != nullptr);
} }
/** /**
@ -111,19 +99,11 @@ public:
* @return underlined interface with disabled Release method * @return underlined interface with disabled Release method
*/ */
T* operator->() const noexcept { T* operator->() const noexcept {
return _pointedObj.get(); return _ptr.get();
}
/**
* @brief Standard dereference operator
* @return underlined interface with disabled Release method
*/
const T* operator*() const noexcept {
return this->operator->();
} }
explicit operator bool() const noexcept { explicit operator bool() const noexcept {
return (nullptr != _pointedObj); return _ptr != nullptr;
} }
friend bool operator==(std::nullptr_t, const SOPointer& ptr) noexcept { friend bool operator==(std::nullptr_t, const SOPointer& ptr) noexcept {
@ -139,33 +119,15 @@ public:
return static_cast<bool>(ptr); return static_cast<bool>(ptr);
} }
SOPointer& operator=(const SOPointer& pointer) noexcept { operator const SharedObjectLoader&() const noexcept {
_pointedObj = pointer._pointedObj; return _so;
_so_loader = pointer._so_loader;
return *this;
} }
operator const std::shared_ptr<Loader>&() const noexcept { operator std::shared_ptr<T>& () noexcept {
return _so_loader; return _ptr;
} }
protected: protected:
#define CATCH_IE_EXCEPTION(ExceptionType) catch (const InferenceEngine::ExceptionType& e) {throw e;}
#define CATCH_IE_EXCEPTIONS \
CATCH_IE_EXCEPTION(GeneralError) \
CATCH_IE_EXCEPTION(NotImplemented) \
CATCH_IE_EXCEPTION(NetworkNotLoaded) \
CATCH_IE_EXCEPTION(ParameterMismatch) \
CATCH_IE_EXCEPTION(NotFound) \
CATCH_IE_EXCEPTION(OutOfBounds) \
CATCH_IE_EXCEPTION(Unexpected) \
CATCH_IE_EXCEPTION(RequestBusy) \
CATCH_IE_EXCEPTION(ResultNotReady) \
CATCH_IE_EXCEPTION(NotAllocated) \
CATCH_IE_EXCEPTION(InferNotStarted) \
CATCH_IE_EXCEPTION(NetworkNotRead) \
CATCH_IE_EXCEPTION(InferCancelled)
/** /**
* @brief Implements load of object from library if Release method is presented * @brief Implements load of object from library if Release method is presented
*/ */
@ -173,10 +135,10 @@ protected:
try { try {
void* create = nullptr; void* create = nullptr;
try { try {
create = _so_loader->get_symbol((SOCreatorTrait<T>::name + std::string("Shared")).c_str()); create = _so.get_symbol((SOCreatorTrait<T>::name + std::string("Shared")).c_str());
} catch (const NotFound&) {} } catch (const NotFound&) {}
if (create == nullptr) { if (create == nullptr) {
create = _so_loader->get_symbol(SOCreatorTrait<T>::name); create = _so.get_symbol(SOCreatorTrait<T>::name);
using CreateF = StatusCode(T*&, ResponseDesc*); using CreateF = StatusCode(T*&, ResponseDesc*);
T* object = nullptr; T* object = nullptr;
ResponseDesc desc; ResponseDesc desc;
@ -186,17 +148,13 @@ protected:
InferenceEngine::details::ThrowNow<ExceptionType>{} <<= std::stringstream{} << IE_LOCATION << desc.msg) InferenceEngine::details::ThrowNow<ExceptionType>{} <<= std::stringstream{} << IE_LOCATION << desc.msg)
} }
IE_SUPPRESS_DEPRECATED_START IE_SUPPRESS_DEPRECATED_START
_pointedObj = std::shared_ptr<T>(object, [] (T* ptr){ptr->Release();}); _ptr = std::shared_ptr<T>(object, [] (T* ptr){ptr->Release();});
IE_SUPPRESS_DEPRECATED_END IE_SUPPRESS_DEPRECATED_END
} else { } else {
using CreateF = void(std::shared_ptr<T>&); using CreateF = void(std::shared_ptr<T>&);
reinterpret_cast<CreateF*>(create)(_pointedObj); reinterpret_cast<CreateF*>(create)(_ptr);
} }
} CATCH_IE_EXCEPTIONS catch (const std::exception& ex) { } catch(...) {details::Rethrow();}
IE_THROW() << ex.what();
} catch(...) {
IE_THROW(Unexpected);
}
} }
/** /**
@ -205,25 +163,19 @@ protected:
void Load(std::false_type) { void Load(std::false_type) {
try { try {
using CreateF = void(std::shared_ptr<T>&); using CreateF = void(std::shared_ptr<T>&);
reinterpret_cast<CreateF*>(_so_loader->get_symbol(SOCreatorTrait<T>::name))(_pointedObj); reinterpret_cast<CreateF*>(_so.get_symbol(SOCreatorTrait<T>::name))(_ptr);
} CATCH_IE_EXCEPTIONS catch (const std::exception& ex) { } catch(...) {details::Rethrow();}
IE_THROW() << ex.what();
} catch(...) {
IE_THROW(Unexpected);
}
} }
#undef CATCH_IE_EXCEPTION
#undef CATCH_IE_EXCEPTIONS
/** /**
* @brief Gets a smart pointer to the DLL * @brief The DLL
*/ */
std::shared_ptr<Loader> _so_loader; SharedObjectLoader _so;
/** /**
* @brief Gets a smart pointer to the custom object * @brief Gets a smart pointer to the custom object
*/ */
std::shared_ptr<T> _pointedObj; std::shared_ptr<T> _ptr;
}; };
} // namespace details } // namespace details
} // namespace InferenceEngine } // namespace InferenceEngine
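The rework above stores the SharedObjectLoader by value next to the wrapped shared_ptr and drops the Loader template parameter. A toy analogue, not the real class, showing the idea: keeping the loader handle inside every wrapper, declared before the object pointer, guarantees the shared library outlives the object created from it.

    #include <memory>
    #include <utility>

    // Toy stand-in for InferenceEngine::details::SharedObjectLoader.
    struct LoaderHandle {
        std::shared_ptr<void> library;  // the real class wraps dlopen/LoadLibrary handles
    };

    template <class T>
    class SoWrapper {
    public:
        SoWrapper() = default;
        SoWrapper(LoaderHandle so, std::shared_ptr<T> ptr)
            : _so(std::move(so)), _ptr(std::move(ptr)) {}

        T* operator->() const noexcept { return _ptr.get(); }
        explicit operator bool() const noexcept { return _ptr != nullptr; }
        operator const LoaderHandle&() const noexcept { return _so; }
        operator std::shared_ptr<T>&() noexcept { return _ptr; }

    private:
        LoaderHandle _so;         // declared first, destroyed last: the library stays loaded
        std::shared_ptr<T> _ptr;  // object created by a factory symbol inside the library
    };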

View File

@ -799,6 +799,7 @@ protected:
} }
}; };
#ifdef __clang__
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<float>); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<float>);
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<double>); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<double>);
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int8_t>); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int8_t>);
@ -813,6 +814,7 @@ extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned long long>); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned long long>);
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<bool>); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<bool>);
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<char>); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<char>);
#endif // __clang__
/** /**
* @brief Creates a blob with the given tensor descriptor. * @brief Creates a blob with the given tensor descriptor.
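The guard above limits the extern template declarations for TBlob to clang builds. For reference, a tiny self-contained illustration of the extern template mechanism itself, using a toy type rather than TBlob.

    // Declaration side (normally in a header): suppresses implicit instantiation of
    // Box<float> in every translation unit that sees the extern declaration.
    template <typename T>
    struct Box {
        T value;
        T get() const { return value; }
    };

    #ifdef __clang__
    extern template struct Box<float>;
    #endif

    // Definition side (exactly one .cpp file provides the explicit instantiation).
    template struct Box<float>;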

Some files were not shown because too many files have changed in this diff