Merge remote-tracking branch 'upstream/master'
commit 0a6c3cc9bf

@@ -133,9 +133,10 @@ jobs:
displayName: 'IE FuncTests'
continueOnError: false

- script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
- script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke*:-smoke_LPT/ReduceMinTransformation.CompareWithRefImpl/f32_Shape* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
displayName: 'CPU FuncTests'
continueOnError: false
enabled: false

- script: |
export DATA_PATH=$(MODELS_PATH)

@@ -103,6 +103,7 @@ function(build_ngraph)
endif()

ie_cpack_add_component(ngraph REQUIRED)
ie_cpack_add_component(ngraph_dev REQUIRED DEPENDS ngraph)

set(SDL_cmake_included ON)
add_subdirectory(ngraph)

@@ -14,7 +14,13 @@ set(CMAKE_MODULE_PATH "${IEDevScripts_DIR}")
function(set_ci_build_number)
set(repo_root "${CMAKE_SOURCE_DIR}")
include(version)
set(CI_BUILD_NUMBER "${CI_BUILD_NUMBER}" PARENT_SCOPE)
foreach(var CI_BUILD_NUMBER IE_VERSION
IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH)
if(NOT DEFINED ${var})
message(FATAL_ERROR "${var} version component is not defined")
endif()
set(${var} "${${var}}" PARENT_SCOPE)
endforeach()
endfunction()

set_ci_build_number()

@@ -31,6 +31,7 @@ addIeTarget(
function(addIeTarget)
set(options
ADD_CPPLINT # Enables code style checks for the target
ADD_CLANG_FORMAT # Enables code style checks for the target
)
set(oneValueRequiredArgs
TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable
@@ -119,6 +120,10 @@ function(addIeTarget)
# code style
add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME})
endif()
if (ARG_ADD_CLANG_FORMAT)
# code style
add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME})
endif()
if (ARG_DEVELOPER_PACKAGE)
# developer package
openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE}
@@ -128,7 +133,6 @@ function(addIeTarget)
# Provide default compile pdb name equal to target name
set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME})
endif()

endfunction()

#[[

@@ -27,7 +27,10 @@ endif()
# )
#
function(ie_add_plugin)
set(options SKIP_INSTALL)
set(options
SKIP_INSTALL
ADD_CLANG_FORMAT
)
set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR)
set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS)
cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
@@ -73,7 +76,11 @@ function(ie_add_plugin)
string(CONCAT custom_filter "${custom_filter}" "," "${filter}")
endforeach()

add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
if (IE_PLUGIN_ADD_CLANG_FORMAT)
add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME})
else()
add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
endif()

# check that plugin with such name is not registered

@@ -26,6 +26,60 @@ function (commitHash VAR)
set (${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE)
endfunction()

macro(ie_parse_ci_build_number)
if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-.*")
set(IE_VERSION_MAJOR ${CMAKE_MATCH_1})
set(IE_VERSION_MINOR ${CMAKE_MATCH_2})
set(IE_VERSION_PATCH ${CMAKE_MATCH_3})
set(has_ci_version ON)
else()
set(IE_VERSION_MAJOR 0)
set(IE_VERSION_MINOR 0)
set(IE_VERSION_PATCH 0)
endif()

if(NOT DEFINED repo_root)
message(FATAL_ERROR "repo_root is not defined")
endif()

if(DEFINED IEDevScripts_DIR AND DEFINED IE_MAIN_SOURCE_DIR AND NOT DEFINED custom_build)
set(ie_version_hpp "${IE_MAIN_SOURCE_DIR}/include/ie_version.hpp")
if(NOT EXISTS ${ie_version_hpp})
message(FATAL_ERROR "File ie_version.hpp with IE_VERSION definitions is not found")
endif()

file(STRINGS "${ie_version_hpp}" IE_VERSION_PARTS REGEX "#define IE_VERSION_[A-Z]+[ ]+" )

string(REGEX REPLACE ".+IE_VERSION_MAJOR[ ]+([0-9]+).*" "\\1"
IE_VERSION_MAJOR_HPP "${IE_VERSION_PARTS}")
string(REGEX REPLACE ".+IE_VERSION_MINOR[ ]+([0-9]+).*" "\\1"
IE_VERSION_MINOR_HPP "${IE_VERSION_PARTS}")
string(REGEX REPLACE ".+IE_VERSION_PATCH[ ]+([0-9]+).*" "\\1"
IE_VERSION_PATCH_HPP "${IE_VERSION_PARTS}")

foreach(var IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH)
if(DEFINED ${var} AND NOT ${var} EQUAL ${var}_HPP)
message(FATAL_ERROR "${var} parsed from CI_BUILD_NUMBER (${${var}}) \
and from ie_version.hpp (${${var}_HPP}) are different")
else()
# CI_BUILD_NUMBER is not defined well, take info from ie_version.hpp as a baseline
set(${var} ${${var}_HPP})
endif()
endforeach()
elseif(has_ci_version)
message(WARNING "IE_MAIN_SOURCE_DIR is not defined. No way to compare versions")
else()
message(WARNING "No way to detect OpenVINO version. Supposing 0.0.0.0")
endif()

set(IE_VERSION "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}")
endmacro()

# WA for DL Benchmark
if(DEFINED ENV{CI_BUILD_NUMBER} AND "$ENV{CI_BUILD_NUMBER}" STREQUAL "1")
unset(ENV{CI_BUILD_NUMBER})
endif()

if (DEFINED ENV{CI_BUILD_NUMBER})
set(CI_BUILD_NUMBER $ENV{CI_BUILD_NUMBER})
else()
@@ -36,6 +90,11 @@ else()
set(CI_BUILD_NUMBER "${custom_build}")
endif()

# provides Inference Engine version
# 1. If CI_BUILD_NUMBER is defined, parses this information
# 2. Otherwise, parses ie_version.hpp
ie_parse_ci_build_number()
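
As an illustration of the regex above (a hypothetical value, not taken from this diff): a `CI_BUILD_NUMBER` such as `2021.3.0-1234-abcdef1` matches `^([0-9]+)\.([0-9]+)\.([0-9]+)\-.*` and would parse to `IE_VERSION_MAJOR=2021`, `IE_VERSION_MINOR=3`, `IE_VERSION_PATCH=0`; a value without the `major.minor.patch-` prefix falls through to the `ie_version.hpp` parsing path.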

function (addVersionDefines FILE)
foreach (VAR ${ARGN})
if (DEFINED ${VAR} AND NOT "${${VAR}}" STREQUAL "")

@@ -2,24 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
#

macro(ie_parse_ci_build_number)
if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-.*")
set(IE_VERSION_MAJOR ${CMAKE_MATCH_1})
set(IE_VERSION_MINOR ${CMAKE_MATCH_2})
set(IE_VERSION_PATCH ${CMAKE_MATCH_3})
set(IE_VS_VER_HAS_VERSION 1)
else()
set(IE_VS_VER_HAS_VERSION 0)
endif()
endmacro()

ie_parse_ci_build_number()

if(IE_VS_VER_HAS_VERSION)
set(IE_VS_VER_FILEVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
set(IE_VS_VER_PRODUCTVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0")
endif()
set(IE_VS_VER_FILEVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
set(IE_VS_VER_PRODUCTVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0")

set(IE_VS_VER_COMPANY_NAME_STR "Intel Corporation")
set(IE_VS_VER_PRODUCTVERSION_STR "${CI_BUILD_NUMBER}")

@@ -1,10 +1,8 @@
#include <winver.h>

VS_VERSION_INFO VERSIONINFO
#if @IE_VS_VER_HAS_VERSION@
FILEVERSION @IE_VS_VER_FILEVERSION_QUAD@
PRODUCTVERSION @IE_VS_VER_PRODUCTVERSION_QUAD@
#endif
FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
#ifdef _DEBUG
FILEFLAGS 1
@@ -21,9 +19,7 @@ BEGIN
BEGIN
VALUE "CompanyName", "@IE_VS_VER_COMPANY_NAME_STR@\0"
VALUE "FileDescription", "@IE_VS_VER_FILEDESCRIPTION_STR@\0"
#if @IE_VS_VER_HAS_VERSION@
VALUE "FileVersion", "@IE_VS_VER_FILEVERSION_STR@\0"
#endif
VALUE "InternalName", "@IE_VS_VER_INTERNALNAME_STR@\0"
VALUE "LegalCopyright", "@IE_VS_VER_COPYRIGHT_STR@\0"
VALUE "OriginalFilename", "@IE_VS_VER_ORIGINALFILENAME_STR@\0"

docs/.clang-format (new file, 25 lines)
@@ -0,0 +1,25 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never

Language: Cpp
Standard: Cpp11

AccessModifierOffset: -4
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: false
ColumnLimit: 160
# Specialize this comment pragma in order to avoid changes in SEA copyrights
CommentPragmas: '^#'
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
IndentPPDirectives: BeforeHash
SpaceBeforeCpp11BracedList: true
SpaceBeforeCtorInitializerColon: false

@@ -16,7 +16,7 @@ To add your custom nGraph operation, create a new class that extends `ngraph::Op

5. Override the `visit_attributes` method, which enables serialization and deserialization of operation attributes. An `AttributeVisitor` is passed to the method, and the implementation is expected to walk over all the attributes in the op using the type-aware `on_attribute` helper. Helpers are already implemented for standard C++ types like `int64_t`, `float`, `bool`, `vector`, and for existing nGraph defined types.

6. Override `evaluate`, which is an optional method that enables the application of constant folding if there is a custom operation on the constant branch.
6. Override `evaluate`, which is an optional method that enables the application of constant folding if there is a custom operation on the constant branch. If your operation implements the `evaluate` method, you also need to override the `has_evaluate` method, which reports whether `evaluate` is available for the operation (see the sketch in the `evaluate()` section below).

Based on that, declaration of an operation class can look as follows:

@@ -55,7 +55,7 @@ nGraph operation contains two constructors:

@snippet template_extension/op.cpp op:visit_attributes

### `evaluate()`
### `evaluate()` and `has_evaluate()`

`ngraph::Node::evaluate` method enables you to apply constant folding to an operation.
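
A minimal sketch of the two overrides, condensed from the `template_extension` `Operation` changes later in this diff (the supported-type list is abbreviated to `f32` here for brevity):

```cpp
// Sketch only: evaluate() performs the computation for constant folding;
// has_evaluate() tells nGraph whether evaluate() supports the input type,
// so constant folding is only attempted when an implementation exists.
bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const {
    switch (inputs[0]->get_element_type()) {
    case ngraph::element::Type_t::f32: {
        const float* src = inputs[0]->get_data_ptr<float>();
        float* dst = outputs[0]->get_data_ptr<float>();
        // Same element-wise add as the implementation() helper in this diff.
        for (size_t i = 0; i < ngraph::shape_size(inputs[0]->get_shape()); ++i)
            dst[i] = src[i] + getAddAttr();
        return true;
    }
    default:
        return false;
    }
}

bool Operation::has_evaluate() const {
    switch (get_input_element_type(0)) {
    case ngraph::element::Type_t::f32:
        return true;  // evaluate() implements this type
    default:
        return false; // constant folding is skipped otherwise
    }
}
```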

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c9ddc759bc419268f4c23089b91a9e3373114a1d36b01d6fe62a5e87b5c0ad4
size 59827
oid sha256:4b14b03ebb6a00b5f52a8404282f83d4ad214c8d04aea74738027a775c4ef545
size 100581

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:59890c0c4a6d1c721dfaca22f0c1d0b305401f75dcd30418f858382830be2d31
size 49598
oid sha256:cbfadd457b4d943ffb46906a7daf03516e971fe49d2806cd32c84c5015178f03
size 92819

@@ -2,36 +2,36 @@

## Introduction

Inference Engine CPU plugin can infer models in the 8-bit integer (INT8) precision.
For details, refer to [INT8 inference on the CPU](../../../IE_DG/Int8Inference.md).
Inference Engine CPU and GPU plugins can infer models in low precision.
For details, refer to [Low Precision Inference on the CPU](../../../IE_DG/Int8Inference.md).

Intermediate Representation (IR) should be specifically formed to be suitable for INT8 inference.
Such an IR is called an INT8 IR and you can generate it in two ways:
- [Quantize model with the Post-Training Optimization tool](@ref pot_README)
- Use the Model Optimizer for TensorFlow\* pre-TFLite models (`.pb` model file with `FakeQuantize*` operations)
Intermediate Representation (IR) should be specifically formed to be suitable for low precision inference.
Such an IR is called a Low Precision IR and you can generate it in two ways:
- [Quantize regular IR with the Post-Training Optimization tool](@ref pot_README)
- Use the Model Optimizer for a model pretrained for Low Precision inference: TensorFlow\* pre-TFLite models (`.pb` model file with `FakeQuantize*` operations) and ONNX\* quantized models.
Both TensorFlow and ONNX quantized models can be prepared by the [Neural Network Compression Framework](https://github.com/openvinotoolkit/nncf/blob/develop/README.md).

For an operation to be executed in INT8, it must have `FakeQuantize` operations as inputs with the `levels` attribute set to `255` or `256`.
For an operation to be executed in INT8, it must have `FakeQuantize` operations as inputs.
See the [specification of `FakeQuantize` operation](../../../ops/quantization/FakeQuantize_1.md) for details.
To see the list of supported INT8 layers, refer to [INT8 inference on the CPU](../../../IE_DG/Int8Inference.md).

To execute the `Convolution` operation in INT8 on CPU, both data and weight inputs should have `FakeQuantize` as an input operation:


INT8 IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between an INT8 IR and FP16 or FP32 IR is the existence of `FakeQuantize` in the INT8 IR.
Plugins with INT8 inference support recognize these sub-graphs and quantize them during the inference time.
Plugins without INT8 support execute all operations, including `FakeQuantize`, as is in the FP32 or FP16 precision.
A Low Precision IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between a Low Precision IR and an FP16 or FP32 IR is the existence of `FakeQuantize` in the Low Precision IR.
Plugins with Low Precision Inference support recognize these sub-graphs and quantize them during the inference time.
Plugins without Low Precision support execute all operations, including `FakeQuantize`, as is in the FP32 or FP16 precision.

Accordingly, the presence of FakeQuantize operations in the IR is a recommendation for a plugin on how to quantize particular operations in the model.
If capable, a plugin accepts the recommendation and performs INT8 inference, otherwise the plugin ignores the recommendation and executes a model in the floating-point precision.
If capable, a plugin accepts the recommendation and performs Low Precision Inference; otherwise, the plugin ignores the recommendation and executes the model in floating-point precision.

## Compressed INT8 Weights
## Compressed Low Precision Weights

Weighted operations, like `Convolution`, `MatMul`, and others, store weights as floating-point `Constant` in the graph followed by the `FakeQuantize` operation.
`Constant` followed by the `FakeQuantize` operation could be optimized memory-wise due to the `FakeQuantize` operation semantics.
The resulting weights sub-graph stores weights in INT8 `Constant`, which gets unpacked back to floating point with the `Convert` operation.
Weights compression leaves `FakeQuantize` output arithmetically the same and weights storing takes four times less memory.
The resulting weights sub-graph stores weights in Low Precision `Constant`, which gets unpacked back to floating point with the `Convert` operation.
Weights compression replaces `FakeQuantize` with optional `Subtract` and `Multiply` operations, leaving the output arithmetically the same, while weight storage takes four times less memory.
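
As a sketch of the arithmetic (standard dequantization; the symbols \f$s\f$ and \f$z\f$ are illustrative, not from this diff): with a per-channel scale \f$s\f$ and zero point \f$z\f$ stored alongside the low precision constant \f$w_{lp}\f$, the unpacked weight is

\f[
w = (\mathrm{Convert}(w_{lp}) - z) \cdot s
\f]

which corresponds exactly to a `Convert` followed by the optional `Subtract` and `Multiply` operations mentioned above.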

See the visualization of `Convolution` with the compressed weights:


Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default. To generate an expanded INT8 IR, use `--disable_weights_compression`.
Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default.

@@ -75,11 +75,11 @@ The guide assumes you downloaded the OpenVINO toolkit for Raspbian* OS. If you d
By default, the package file is saved as `l_openvino_toolkit_runtime_raspbian_p_<version>.tgz`.
3. Create an installation folder.
```sh
sudo mkdir -p /opt/intel/openvino
sudo mkdir -p /opt/intel/openvino_2021
```
4. Unpack the archive:
```sh
sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_<version>.tgz --strip 1 -C /opt/intel/openvino
sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_<version>.tgz --strip 1 -C /opt/intel/openvino_2021
```

Now the OpenVINO toolkit components are installed. Additional configuration steps are still required. Continue to the next sections to install External Software Dependencies, configure the environment and set up USB rules.
@@ -154,7 +154,7 @@ Follow the next steps to use the pre-trained face detection model using Inferenc
```
4. Run the sample specifying the model, a path to the input image, and the VPU required to run with the Raspbian* OS:
```sh
./armv7l/Release/object_detection_sample_ssd -m face-detection-adas-0001.xml -d MYRIAD -i <path_to_image>
./armv7l/Release/object_detection_sample_ssd -m <path_to_model>/face-detection-adas-0001.xml -d MYRIAD -i <path_to_image>
```
The application outputs an image (`out_0.bmp`) with detected faces enclosed in rectangles.

@@ -1,7 +1,7 @@
# Intel® Distribution of OpenVINO™ Toolkit Developer Package

Copyright © 2018-2021 Intel Corporation
> **LEGAL NOTICE**: Your use of this software and any required dependent software (the
“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/en-us/license/eula-for-intel-software-development-products) for the Software Package, which may also include notices, disclaimers, or
“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/content/dam/develop/external/us/en/documents/intel-openvino-license-agreements.pdf) for the Software Package, which may also include notices, disclaimers, or
license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details.

## Introduction
@@ -40,11 +40,7 @@ The table below lists the supported operating systems and Python* versions requi

## Install the Developer Package

### Step 1. Install External Software Dependencies

On Windows* OS you are required to install [Microsoft* Visual C++ Redistributable Package (x64)](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2019) to be able to run OpenVINO™ applications.

### Step 2. Set Up Python Virtual Environment
### Step 1. Set Up Python Virtual Environment

To avoid dependency conflicts, use a virtual environment. Skip this
step only if you want to install all dependencies globally.
@@ -62,7 +58,7 @@ On Windows:
python -m venv openvino_env
```

### Step 3. Activate Virtual Environment
### Step 2. Activate Virtual Environment

On Linux and macOS:
```sh
@@ -73,14 +69,14 @@ On Windows:
openvino_env\Scripts\activate
```

### Step 4. Set Up and Update pip to the Highest Version
### Step 3. Set Up and Update PIP to the Highest Version

Run the command below:
```sh
python -m pip install --upgrade pip
```

### Step 5. Install the Package
### Step 4. Install the Package

Run the command below: <br>

@@ -88,7 +84,7 @@ Run the command below: <br>
pip install openvino-dev
```

### Step 6. Verify that the Package is Installed
### Step 5. Verify that the Package is Installed

Run the command below (this may take a few seconds):
```sh
@@ -97,6 +93,19 @@ pot -h

You will see the help message for Post-Training Optimization Tool if installation finished successfully.

## Troubleshooting

#### Error: Microsoft Visual C++ 14.0 is required. Get it with "Build Tools for Visual Studio"

On Windows*, some dependencies may require compilation from source when installing. To resolve this issue, you need to install [Build Tools for Visual Studio* 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019) and repeat the package installation.

#### ImportError: libpython3.7m.so.1.0: cannot open shared object file: No such file or directory

To resolve the missing external dependency on Ubuntu*, execute the following command:
```sh
sudo apt-get install libpython3.7
```

## Additional Resources

- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)

@@ -1,7 +1,7 @@
# Intel® Distribution of OpenVINO™ Toolkit Runtime Package

Copyright © 2018-2021 Intel Corporation
> **LEGAL NOTICE**: Your use of this software and any required dependent software (the
“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/en-us/license/eula-for-intel-software-development-products) for the Software Package, which may also include notices, disclaimers, or
“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/content/dam/develop/external/us/en/documents/intel-openvino-license-agreements.pdf) for the Software Package, which may also include notices, disclaimers, or
license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details.

## Introduction
@@ -37,11 +37,7 @@ The table below lists supported operating systems and Python* versions required

## Install the Runtime Package

### Step 1. Install External Software Dependencies

On Windows* OS you are required to install [Microsoft* Visual C++ Redistributable Package (x64)](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2019) to be able to run OpenVINO™ applications.

### Step 2. Set Up Python Virtual Environment
### Step 1. Set Up Python Virtual Environment

To avoid dependency conflicts, use a virtual environment. Skip this
step only if you want to install all dependencies globally.
@@ -55,7 +51,7 @@ python -m venv openvino_env

> **NOTE**: On Linux and macOS, you may need to type `python3` instead of
`python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/).

### Step 3. Activate Virtual Environment
### Step 2. Activate Virtual Environment

On Linux and macOS:
```sh
@@ -66,14 +62,14 @@ On Windows:
openvino_env\Scripts\activate
```

### Step 4. Set Up and Update pip to the Highest Version
### Step 3. Set Up and Update PIP to the Highest Version

Run the command below:
```sh
python -m pip install --upgrade pip
```

### Step 5. Install the Package
### Step 4. Install the Package

Run the command below: <br>

@@ -81,7 +77,7 @@ Run the command below: <br>
pip install openvino
```

### Step 6. Verify that the Package is Installed
### Step 5. Verify that the Package is Installed

Run the command below:
```sh
@@ -90,6 +86,19 @@ python -c "from openvino.inference_engine import IECore"

You will not see any error messages if installation finished successfully.

## Troubleshooting

#### Error: Microsoft Visual C++ 14.0 is required. Get it with "Build Tools for Visual Studio"

On Windows*, some dependencies may require compilation from source when installing. To resolve this issue, you need to install [Build Tools for Visual Studio* 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019) and repeat the package installation.

#### ImportError: libpython3.7m.so.1.0: cannot open shared object file: No such file or directory

To resolve the missing external dependency on Ubuntu*, execute the following command:
```sh
sudo apt-get install libpython3.7
```

## Additional Resources

- [Intel® Distribution of OpenVINO™ toolkit](https://software.intel.com/en-us/openvino-toolkit).

@@ -9,7 +9,10 @@ set(TARGET_NAME "onnx_custom_op")

find_package(ngraph REQUIRED COMPONENTS onnx_importer)

add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp)
add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp)

target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES})
# [cmake:onnx_custom_op]

# Enable code style check
add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})

@@ -28,7 +28,7 @@ The `threshold` can be calculated with the following formula where `alpha` is th
-log(e^{10^{-\alpha}} - 1.0) < threshold < log(\beta)
\f]

For example, if *T* is `fp32`, `threshold` should be `20` or if *T* is `fp16`, `threshold` should be `12`.
For example, if *T* is `fp32`, `threshold` should be `20` or if *T* is `fp16`, `threshold` should be `11`.
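
A worked check of why the `fp16` value changes from `12` to `11` (assuming \f$\beta\f$ is the maximum representable value of *T* and `log` is the natural logarithm): the largest finite `fp16` value is 65504, so the upper bound is

\f[
threshold < \log(65504) \approx 11.09
\f]

which `12` violates and `11` satisfies. For `fp32`, \f$\log(3.4 \cdot 10^{38}) \approx 88.7\f$, so `20` sits comfortably inside the allowed range.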

**Attributes**: *SoftPlus* operation has no attributes.

@@ -2,31 +2,31 @@

**Versioned name**: *Floor-1*

**Category**: Arithmetic unary operation
**Category**: Arithmetic unary operation

**Short description**: *Floor* performs element-wise floor operation with given tensor.

**Attributes**:
**Detailed description**: For each element from the input tensor, *Floor* calculates the corresponding
element in the output tensor with the following formula:

No attributes available.
\f[
a_{i} = floor(a_{i})
\f]

**Attributes**: *Floor* operation has no attributes.

**Inputs**

* **1**: An tensor of type T. **Required.**
* **1**: A tensor of type *T* and arbitrary shape. **Required.**

**Outputs**

* **1**: The result of element-wise floor operation. A tensor of type T.
* **1**: The result of element-wise floor operation. A tensor of type *T*.

**Types**

* *T*: any numeric type.

*Floor* does the following with the input tensor *a*:

\f[
a_{i} = floor(a_{i})
\f]

**Examples**

@@ -8,7 +8,7 @@

**Detailed Description**

*VariadicSplit* operation splits a given input tensor `data` into chunks along a scalar `axis`. It produces multiple output tensors based on additional input tensor `split_lengths`.
*VariadicSplit* operation splits a given input tensor `data` into chunks along `axis`, given as a scalar or a tensor with shape `[1]`. It produces multiple output tensors based on additional input tensor `split_lengths`.
The i-th output tensor shape is equal to the input tensor `data` shape, except for dimension along `axis` which is `split_lengths[i]`.

\f[
@@ -23,7 +23,7 @@ Where D is the rank of input tensor `data`. The sum of elements in `split_length

* **1**: `data`. A tensor of type `T1` and arbitrary shape. **Required.**

* **2**: `axis`. Axis along `data` to split. A scalar of type `T2` with value from range `-rank(data) .. rank(data)-1`. Negative values address dimensions from the end.
* **2**: `axis`. Axis along `data` to split. A scalar or tensor with shape `[1]` of type `T2` with value from range `-rank(data) .. rank(data)-1`. Negative values address dimensions from the end.
**Required.**

* **3**: `split_lengths`. A list containing the dimension values of each output tensor shape along the split `axis`. A 1D tensor of type `T2`. The number of elements in `split_lengths` determines the number of outputs. The sum of elements in `split_lengths` must match `data.shape[axis]`. In addition `split_lengths` can contain a single `-1` element, which means all remaining items along the specified `axis` that are not consumed by other parts. **Required.**
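
A worked example of the `-1` semantics (illustrative values, not from this diff): for `data` with shape `[6, 12, 10]`, `axis = 0`, and `split_lengths = [1, -1, 3]`, the `-1` resolves to `6 - 1 - 3 = 2`, so the operation produces three outputs with shapes `[1, 12, 10]`, `[2, 12, 10]`, and `[3, 12, 10]`.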

@@ -58,7 +58,7 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values

* *epsilon*
* **Description**: *epsilon* is a constant added to the variance for numerical stability.
* **Range of values**: a positive floating-point number
* **Range of values**: a floating-point number greater than or equal to zero
* **Type**: `float`
* **Default value**: none
* **Required**: *yes*

@@ -58,7 +58,7 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values

* *epsilon*
* **Description**: *epsilon* is a constant added to the variance for numerical stability.
* **Range of values**: a positive floating-point number
* **Range of values**: a floating-point number greater than or equal to zero
* **Type**: `float`
* **Default value**: none
* **Required**: *yes*

@@ -154,7 +154,7 @@ auto consumers = output.get_target_inputs();

{
// ! [ngraph:shape]
auto partial_shape = node->input(0).get_partial_shape(); // get zero input partial shape
if (partial_shape.is_dynamic() /* or !partial_shape.is_staic() */) {
if (partial_shape.is_dynamic() /* or !partial_shape.is_static() */) {
return false;
}
auto static_shape = partial_shape.get_shape();
@@ -311,4 +311,4 @@ void pass_manager_example3(std::shared_ptr<ngraph::Function> f) {
manager.run_passes(f);
}
// ! [ngraph:disabled_by_default]
}
}

@@ -33,3 +33,7 @@ if (ngraph_onnx_importer_FOUND)
target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED)
endif()
# [cmake:extension]

# Enable code style check
file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp")
add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${template_extension_src})

@@ -3,13 +3,15 @@
//

#include "cpu_kernel.hpp"
#include "op.hpp"

#include <ie_layouts.h>

#include "op.hpp"

using namespace TemplateExtension;

//! [cpu_implementation:ctor]
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node>& node) {
try {
auto castedNode = std::dynamic_pointer_cast<Operation>(node);
if (!castedNode)
@@ -32,8 +34,8 @@ OpImplementation::OpImplementation(const std::shared_ptr<ngraph::Node> &node) {
//! [cpu_implementation:ctor]

//! [cpu_implementation:getSupportedConfigurations]
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept {
InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept {
auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
@@ -72,7 +74,7 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
if (!error.empty()) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
resp->msg[sizeof(resp->msg) - 1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
@@ -85,25 +87,24 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve
//! [cpu_implementation:getSupportedConfigurations]

//! [cpu_implementation:init]
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
try {
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
}

if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) {
IE_THROW()
<< "Operation can be initialized only with 4d input/output tensors!";
IE_THROW() << "Operation can be initialized only with 4d input/output tensors!";
}

if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
IE_THROW() << "Operation supports only FP32 precisions!";
}
} catch (InferenceEngine::Exception& ex) {
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg)-1] = 0;
resp->msg[sizeof(resp->msg) - 1] = 0;
}
return InferenceEngine::GENERAL_ERROR;
}
@@ -113,11 +114,10 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig
//! [cpu_implementation:init]

//! [cpu_implementation:execute]
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept {
const float* src_data = inputs[0]->cbuffer().as<const float *>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float *dst_data = outputs[0]->buffer().as<float *>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
InferenceEngine::StatusCode OpImplementation::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept {
const float* src_data = inputs[0]->cbuffer().as<const float*>() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float* dst_data = outputs[0]->buffer().as<float*>() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();

for (size_t i = 0; i < inputs[0]->size(); i++) {
dst_data[i] = src_data[i] + add;

@@ -5,6 +5,7 @@
#pragma once

#include <ie_iextension.h>

#include <ngraph/ngraph.hpp>

namespace TemplateExtension {
@@ -13,13 +14,12 @@ namespace TemplateExtension {
class OpImplementation : public InferenceEngine::ILayerExecImpl {
public:
explicit OpImplementation(const std::shared_ptr<ngraph::Node>& node);
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
std::vector<InferenceEngine::Blob::Ptr> &outputs,
InferenceEngine::ResponseDesc *resp) noexcept override;
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc* resp) noexcept override;

private:
int64_t add;
ngraph::Shape inShape;

@@ -3,15 +3,16 @@
//

#include "extension.hpp"

#include "cpu_kernel.hpp"
#include "op.hpp"
#ifdef OPENCV_IMPORT_ENABLED
#include "fft_op.hpp"
#include "fft_kernel.hpp"
#include "fft_kernel.hpp"
#include "fft_op.hpp"
#endif
#include <ngraph/ngraph.hpp>
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
#include <onnx_import/onnx_utils.hpp>
#include <onnx_import/onnx_utils.hpp>
#endif

#include <map>
@@ -21,22 +22,19 @@

using namespace TemplateExtension;


//! [extension:ctor]
Extension::Extension() {
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
ngraph::onnx_import::register_operator(
Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
int64_t add = node.get_attribute_value<int64_t>("add");
return {std::make_shared<Operation>(ng_inputs.at(0), add)};
ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
int64_t add = node.get_attribute_value<int64_t>("add");
return {std::make_shared<Operation>(ng_inputs.at(0), add)};
});
#ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::register_operator(
FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs{node.get_ng_inputs()};
bool inverse = node.get_attribute_value<int64_t>("inverse");
return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector {
ngraph::OutputVector ng_inputs {node.get_ng_inputs()};
bool inverse = node.get_attribute_value<int64_t>("inverse");
return {std::make_shared<FFTOp>(ng_inputs.at(0), inverse)};
});
#endif
#endif
@@ -47,19 +45,19 @@ Extension::Extension() {
Extension::~Extension() {
#ifdef NGRAPH_ONNX_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain");
#ifdef OPENCV_IMPORT_ENABLED
#ifdef OPENCV_IMPORT_ENABLED
ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain");
#endif // OPENCV_IMPORT_ENABLED
#endif // NGRAPH_ONNX_IMPORT_ENABLED
#endif // OPENCV_IMPORT_ENABLED
#endif // NGRAPH_ONNX_IMPORT_ENABLED
}
//! [extension:dtor]

//! [extension:GetVersion]
void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept {
void Extension::GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept {
static InferenceEngine::Version ExtensionDescription = {
{1, 0}, // extension API version
{1, 0}, // extension API version
"1.0",
"template_ext" // extension description message
"template_ext" // extension description message
};

versionInfo = &ExtensionDescription;
@@ -80,7 +78,7 @@ std::map<std::string, ngraph::OpSet> Extension::getOpSets() {
//! [extension:getOpSets]

//! [extension:getImplTypes]
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) {
std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node>& node) {
if (std::dynamic_pointer_cast<Operation>(node)) {
return {"CPU"};
}
@@ -94,7 +92,7 @@ std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::N
//! [extension:getImplTypes]

//! [extension:getImplementation]
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) {
InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
if (implType == "CPU") {
if (std::dynamic_pointer_cast<Operation>(node)) {
return std::make_shared<OpImplementation>(node);
@@ -110,16 +108,16 @@ InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_
//! [extension:getImplementation]

//! [extension:CreateExtension]
//Generate exported function
// Generate exported function
IE_DEFINE_EXTENSION_CREATE_FUNCTION(Extension)
//! [extension:CreateExtension]

INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext,
InferenceEngine::ResponseDesc *resp) noexcept {
INFERENCE_EXTENSION_API(InferenceEngine::StatusCode)
InferenceEngine::CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept {
try {
ext = new Extension();
return OK;
} catch (std::exception &ex) {
} catch (std::exception& ex) {
if (resp) {
std::string err = ((std::string) "Couldn't create extension: ") + ex.what();
err.copy(resp->msg, 255);
@ -4,13 +4,14 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <ie_iextension.h>
|
||||
#include <ie_api.h>
|
||||
#include <ngraph/ngraph.hpp>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <ie_iextension.h>
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <ngraph/ngraph.hpp>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
//! [extension:header]
|
||||
namespace TemplateExtension {
|
||||
|
@ -4,14 +4,16 @@
|
||||
|
||||
//! [fft_kernel:implementation]
|
||||
#include "fft_kernel.hpp"
|
||||
#include "fft_op.hpp"
|
||||
|
||||
#include <ie_layouts.h>
|
||||
|
||||
#include <opencv2/opencv.hpp>
|
||||
|
||||
#include "fft_op.hpp"
|
||||
|
||||
using namespace TemplateExtension;
|
||||
|
||||
FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node> &node) {
|
||||
FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node>& node) {
|
||||
auto castedNode = std::dynamic_pointer_cast<FFTOp>(node);
|
||||
if (!castedNode)
|
||||
IE_THROW() << "Cannot create implementation for unknown operation!";
|
||||
@ -26,8 +28,7 @@ FFTImpl::FFTImpl(const std::shared_ptr<ngraph::Node> &node) {
|
||||
inverse = castedNode->inverse;
|
||||
}
|
||||
|
||||
InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
|
||||
InferenceEngine::ResponseDesc *resp) noexcept {
|
||||
InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc* resp) noexcept {
|
||||
std::vector<InferenceEngine::DataConfig> inDataConfig;
|
||||
std::vector<InferenceEngine::DataConfig> outDataConfig;
|
||||
InferenceEngine::SizeVector order(inpShape.size());
|
||||
@ -54,28 +55,27 @@ InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector<Infe
|
||||
return InferenceEngine::StatusCode::OK;
|
||||
}
|
||||
|
||||
InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept {
|
||||
InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept {
|
||||
try {
|
||||
if (config.inConfs.size() != 1 || config.outConfs.size() != 1) {
|
||||
IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!";
|
||||
}
|
||||
|
||||
if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 ||
|
||||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
|
||||
config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) {
|
||||
IE_THROW() << "Operation supports only FP32 precisions!";
|
||||
}
|
||||
} catch (InferenceEngine::Exception& ex) {
|
||||
if (resp) {
|
||||
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
|
||||
resp->msg[sizeof(resp->msg)-1] = 0;
|
||||
resp->msg[sizeof(resp->msg) - 1] = 0;
|
||||
}
|
||||
return InferenceEngine::GENERAL_ERROR;
|
||||
}
|
||||
return InferenceEngine::OK;
|
||||
}
|
||||
|
||||
static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
|
||||
{
|
||||
static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) {
|
||||
// NOTE: Inference Engine sizes are reversed.
|
||||
std::vector<size_t> dims = blob->getTensorDesc().getDims();
|
||||
std::vector<int> size(dims.begin(), dims.end());
|
||||
@ -84,9 +84,8 @@ static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
|
||||
return cv::Mat(size, CV_32F, (void*)blob->buffer());
|
||||
}
|
||||
|
||||
InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
|
||||
std::vector<InferenceEngine::Blob::Ptr> &outputs,
|
||||
InferenceEngine::ResponseDesc *resp) noexcept {
|
||||
InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
|
||||
InferenceEngine::ResponseDesc* resp) noexcept {
|
||||
cv::Mat inp = infEngineBlobToMat(inputs[0]);
|
||||
cv::Mat out = infEngineBlobToMat(outputs[0]);
|
||||
|
||||
@ -95,10 +94,7 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
|
||||
const int w = inp.size[3];
|
||||
cv::Mat complex(h, w, CV_32FC2), interleavedOut(h, w, CV_32FC2);
|
||||
for (int i = 0; i < n; ++i) {
|
||||
std::vector<cv::Mat> components = {
|
||||
cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)),
|
||||
cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))
|
||||
};
|
||||
std::vector<cv::Mat> components = {cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr<float>(i, 1))};
|
||||
cv::merge(components, complex);
|
||||
|
||||
if (!inverse)
|
||||
@ -106,13 +102,9 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector<InferenceEngine::Blob::
|
||||
else
|
||||
cv::idft(complex, interleavedOut, cv::DFT_SCALE);
|
||||
|
||||
components = {
|
||||
cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)),
|
||||
cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))
|
||||
};
|
||||
components = {cv::Mat(h, w, CV_32F, out.ptr<float>(i, 0)), cv::Mat(h, w, CV_32F, out.ptr<float>(i, 1))};
|
||||
cv::split(interleavedOut, components);
|
||||
}
|
||||
return InferenceEngine::OK;
|
||||
}
|
||||
//! [fft_kernel:implementation]
|
||||
|
||||
|
@ -6,6 +6,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <ie_iextension.h>
|
||||
|
||||
#include <ngraph/ngraph.hpp>
|
||||
|
||||
namespace TemplateExtension {
|
||||
@ -13,13 +14,12 @@ namespace TemplateExtension {
|
||||
class FFTImpl : public InferenceEngine::ILayerExecImpl {
|
||||
public:
|
||||
explicit FFTImpl(const std::shared_ptr<ngraph::Node>& node);
|
||||
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig> &conf,
|
||||
InferenceEngine::ResponseDesc *resp) noexcept override;
|
||||
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config,
|
||||
InferenceEngine::ResponseDesc *resp) noexcept override;
|
||||
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr> &inputs,
|
||||
std::vector<InferenceEngine::Blob::Ptr> &outputs,
|
||||
InferenceEngine::ResponseDesc *resp) noexcept override;
|
||||
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
|
||||
InferenceEngine::ResponseDesc* resp) noexcept override;
|
||||
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override;
|
||||
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
|
||||
InferenceEngine::ResponseDesc* resp) noexcept override;
|
||||
|
||||
private:
|
||||
ngraph::Shape inpShape;
|
||||
ngraph::Shape outShape;
|
||||
@ -27,5 +27,5 @@ private:
|
||||
std::string error;
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace TemplateExtension
|
||||
//! [fft_kernel:header]
|
||||
|
@ -9,7 +9,7 @@ using namespace TemplateExtension;
|
||||
|
||||
constexpr ngraph::NodeTypeInfo FFTOp::type_info;
|
||||
|
||||
FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse) : Op({inp}) {
|
||||
FFTOp::FFTOp(const ngraph::Output<ngraph::Node>& inp, bool _inverse): Op({inp}) {
|
||||
constructor_validate_and_infer_types();
|
||||
inverse = _inverse;
|
||||
}
|
||||
@ -19,16 +19,15 @@ void FFTOp::validate_and_infer_types() {
|
||||
set_output_type(0, get_input_element_type(0), outShape);
|
||||
}
|
||||
|
||||
std::shared_ptr<ngraph::Node> FFTOp::clone_with_new_inputs(const ngraph::OutputVector &new_args) const {
|
||||
std::shared_ptr<ngraph::Node> FFTOp::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
|
||||
if (new_args.size() != 1) {
|
||||
throw ngraph::ngraph_error("Incorrect number of new arguments");
|
||||
}
|
||||
return std::make_shared<FFTOp>(new_args.at(0), inverse);
|
||||
}
|
||||
|
||||
bool FFTOp::visit_attributes(ngraph::AttributeVisitor &visitor) {
|
||||
bool FFTOp::visit_attributes(ngraph::AttributeVisitor& visitor) {
|
||||
visitor.on_attribute("inverse", inverse);
|
||||
return true;
|
||||
}
|
||||
//! [fft_op:implementation]
|
||||
|
||||
|
@ -11,8 +11,10 @@ namespace TemplateExtension {
|
||||
|
||||
class FFTOp : public ngraph::op::Op {
|
||||
public:
|
||||
static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0};
|
||||
const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; }
|
||||
static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0};
|
||||
const ngraph::NodeTypeInfo& get_type_info() const override {
|
||||
return type_info;
|
||||
}
|
||||
|
||||
FFTOp() = default;
|
||||
FFTOp(const ngraph::Output<ngraph::Node>& inp, bool inverse);
|
||||
@ -23,6 +25,5 @@ public:
|
||||
bool inverse;
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace TemplateExtension
|
||||
//! [fft_op:header]
|
||||
|
||||
|
@ -9,7 +9,7 @@ using namespace TemplateExtension;
|
||||
//! [op:ctor]
|
||||
NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0);
|
||||
|
||||
Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) {
|
||||
Operation::Operation(const ngraph::Output<ngraph::Node>& arg, int64_t add): Op({arg}), add(add) {
|
||||
constructor_validate_and_infer_types();
|
||||
}
|
||||
//! [op:ctor]
|
||||
@ -22,7 +22,7 @@ void Operation::validate_and_infer_types() {
|
||||
//! [op:validate]
|
||||
|
||||
//! [op:copy]
|
||||
std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::OutputVector &new_args) const {
|
||||
std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
|
||||
if (new_args.size() != 1) {
|
||||
throw ngraph::ngraph_error("Incorrect number of new arguments");
|
||||
}
|
||||
@ -32,56 +32,77 @@ std::shared_ptr<ngraph::Node> Operation::clone_with_new_inputs(const ngraph::Out
|
||||
//! [op:copy]
|
||||
|
||||
//! [op:visit_attributes]
|
||||
bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) {
|
||||
bool Operation::visit_attributes(ngraph::AttributeVisitor& visitor) {
|
||||
visitor.on_attribute("add", add);
|
||||
return true;
|
||||
}
|
||||
//! [op:visit_attributes]
|
||||
|
||||
//! [op:evaluate]
|
||||
namespace
|
||||
{
|
||||
namespace {
|
||||
|
||||
template <class T>
|
||||
void implementation(const T* input,
|
||||
T* output,
|
||||
int64_t add,
|
||||
size_t size) {
|
||||
void implementation(const T* input, T* output, int64_t add, size_t size) {
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
output[i] = input[i] + add;
|
||||
}
|
||||
}
|
||||
|
||||
template <ngraph::element::Type_t ET>
|
||||
bool evaluate_op(const ngraph::HostTensorPtr& arg0,
|
||||
const ngraph::HostTensorPtr& out, int64_t add)
|
||||
{
|
||||
bool evaluate_op(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, int64_t add) {
|
||||
size_t size = ngraph::shape_size(arg0->get_shape());
|
||||
implementation(arg0->get_data_ptr<ET>(),
|
||||
out->get_data_ptr<ET>(),
|
||||
add,
|
||||
size);
|
||||
implementation(arg0->get_data_ptr<ET>(), out->get_data_ptr<ET>(), add, size);
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
bool Operation::evaluate(const ngraph::HostTensorVector& outputs,
const ngraph::HostTensorVector& inputs) const {
switch (inputs[0]->get_element_type())
{
case ngraph::element::Type_t::i8: return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i16: return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i32: return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i64: return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u8: return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u16: return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u32: return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u64: return evaluate_op<ngraph::element::Type_t::u64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::bf16: return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f16: return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f32: return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr());
default: break;
bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const {
switch (inputs[0]->get_element_type()) {
case ngraph::element::Type_t::i8:
return evaluate_op<ngraph::element::Type_t::i8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i16:
return evaluate_op<ngraph::element::Type_t::i16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i32:
return evaluate_op<ngraph::element::Type_t::i32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::i64:
return evaluate_op<ngraph::element::Type_t::i64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u8:
return evaluate_op<ngraph::element::Type_t::u8>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u16:
return evaluate_op<ngraph::element::Type_t::u16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u32:
return evaluate_op<ngraph::element::Type_t::u32>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::u64:
return evaluate_op<ngraph::element::Type_t::u64>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::bf16:
return evaluate_op<ngraph::element::Type_t::bf16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f16:
return evaluate_op<ngraph::element::Type_t::f16>(inputs[0], outputs[0], getAddAttr());
case ngraph::element::Type_t::f32:
return evaluate_op<ngraph::element::Type_t::f32>(inputs[0], outputs[0], getAddAttr());
default:
break;
}
return false;
}

bool Operation::has_evaluate() const {
switch (get_input_element_type(0)) {
case ngraph::element::Type_t::i8:
case ngraph::element::Type_t::i16:
case ngraph::element::Type_t::i32:
case ngraph::element::Type_t::i64:
case ngraph::element::Type_t::u8:
case ngraph::element::Type_t::u16:
case ngraph::element::Type_t::u32:
case ngraph::element::Type_t::u64:
case ngraph::element::Type_t::bf16:
case ngraph::element::Type_t::f16:
case ngraph::element::Type_t::f32:
return true;
default:
break;
}
return false;
}
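The evaluate() above is the standard element-type dispatch: one templated kernel, instantiated once per ngraph element type, selected by a runtime switch (note the u64 case must dispatch to the u64 instantiation; dispatching to u8 there was a copy-paste bug). A stripped-down sketch of the same pattern with plain C++ types; the names (ElemType, evaluateAdd, dispatch) are illustrative only, not part of the plugin:

    #include <cstddef>
    #include <cstdint>

    // Illustrative sketch: a runtime tag selects which template
    // instantiation of the kernel actually runs.
    enum class ElemType { i32, i64, f32 };

    template <typename T>
    bool evaluateAdd(const void* in, void* out, std::int64_t add, std::size_t size) {
        auto src = static_cast<const T*>(in);
        auto dst = static_cast<T*>(out);
        for (std::size_t i = 0; i < size; ++i)
            dst[i] = src[i] + static_cast<T>(add);
        return true;
    }

    bool dispatch(ElemType t, const void* in, void* out, std::int64_t add, std::size_t n) {
        switch (t) {
        case ElemType::i32: return evaluateAdd<std::int32_t>(in, out, add, n);
        case ElemType::i64: return evaluateAdd<std::int64_t>(in, out, add, n);
        case ElemType::f32: return evaluateAdd<float>(in, out, add, n);
        default: return false;  // unsupported type, mirrors "return false" above
        }
    }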
@ -18,9 +18,11 @@ public:
void validate_and_infer_types() override;
std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override;
bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
int64_t getAddAttr() const { return add; }
bool evaluate(const ngraph::HostTensorVector& outputs,
const ngraph::HostTensorVector& inputs) const override;
int64_t getAddAttr() const {
return add;
}
bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override;
bool has_evaluate() const override;

private:
int64_t add;
@ -13,7 +13,8 @@ ie_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "TEMPLATE"
SOURCES ${SOURCES} ${HEADERS}
SKIP_INSTALL # ATTENTION: uncomment to install component
VERSION_DEFINES_FOR template_plugin.cpp)
VERSION_DEFINES_FOR template_plugin.cpp
ADD_CLANG_FORMAT)

target_include_directories(${TARGET_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}"
@ -3,18 +3,16 @@
//

#include "template_async_infer_request.hpp"

#include "template_itt.hpp"

using namespace TemplatePlugin;

// ! [async_infer_request:ctor]
TemplateAsyncInferRequest::TemplateAsyncInferRequest(
const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
_inferRequest(inferRequest), _waitExecutor(waitExecutor) {
TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor)
: AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
// In the current implementation we have CPU-only tasks and no need for two executors
// So, by default, a single-stage pipeline is created.
// This stage executes InferRequest::Infer() using cpuTaskExecutor.
@ -23,24 +21,21 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest(
constexpr const auto remoteDevice = false;

if (remoteDevice) {
_pipeline = {
{cpuTaskExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
"TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
"TemplateAsyncInferRequest::WaitPipeline");
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin,
"TemplateAsyncInferRequest::Postprocessing");
_inferRequest->inferPostprocess();
}}
};
_pipeline = {{cpuTaskExecutor,
[this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline");
_inferRequest->inferPreprocess();
_inferRequest->startPipeline();
}},
{_waitExecutor,
[this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::WaitPipeline");
_inferRequest->waitPipeline();
}},
{cpuTaskExecutor, [this] {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::Postprocessing");
_inferRequest->inferPostprocess();
}}};
}
}
// ! [async_infer_request:ctor]
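The _pipeline above is a sequence of {executor, task} stages that the base class runs strictly in order, each stage handed to its own executor. A minimal self-contained sketch of that idea, with hypothetical names (Task, Executor, runPipeline) that are not part of the Inference Engine API:

    #include <functional>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Each stage pairs an "executor" with a task; stages run in order.
    using Task = std::function<void()>;
    using Executor = std::function<void(Task)>;

    void runPipeline(const std::vector<std::pair<Executor, Task>>& pipeline) {
        for (auto& stage : pipeline) {
            stage.first(stage.second);  // hand the task to the stage's executor
        }
    }

    int main() {
        Executor inlineExecutor = [](Task t) { t(); };  // runs tasks synchronously
        runPipeline({{inlineExecutor, [] { std::cout << "preprocess + start\n"; }},
                     {inlineExecutor, [] { std::cout << "wait\n"; }},
                     {inlineExecutor, [] { std::cout << "postprocess\n"; }}});
    }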
@ -13,15 +13,13 @@ namespace TemplatePlugin {
// ! [async_infer_request:header]
class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest,
const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor,
const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);

~TemplateAsyncInferRequest();

private:
TemplateInferRequest::Ptr _inferRequest;
TemplateInferRequest::Ptr _inferRequest;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};
// ! [async_infer_request:header]
@ -2,17 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <ie_plugin_config.hpp>
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>

#include "template_config.hpp"

#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include <ie_plugin_config.hpp>

#include "template/template_config.hpp"

using namespace TemplatePlugin;

Configuration::Configuration() { }
Configuration::Configuration() {}

Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) {
Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
*this = defaultCfg;
// If plugin needs to use InferenceEngine::StreamsExecutor it should be able to process its configuration
auto streamExecutorConfigKeys = _streamsExecutorConfig.SupportedKeys();
@ -22,8 +23,7 @@ Configuration::Configuration(const ConfigMap& config, const Configuration & defa

if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) {
_streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value);
} else if (streamExecutorConfigKeys.end() !=
std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
} else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
_streamsExecutorConfig.SetConfig(key, value);
} else if (CONFIG_KEY(DEVICE_ID) == key) {
deviceId = std::stoi(value);
@ -4,11 +4,9 @@

#pragma once

#include <string>
#include <map>

#include <ie_parameter.hpp>

#include <map>
#include <string>
#include <threading/ie_istreams_executor.hpp>

namespace TemplatePlugin {
@ -18,19 +16,19 @@ using ConfigMap = std::map<std::string, std::string>;

struct Configuration {
Configuration();
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;
Configuration(const Configuration&) = default;
Configuration(Configuration&&) = default;
Configuration& operator=(const Configuration&) = default;
Configuration& operator=(Configuration&&) = default;

explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true);
explicit Configuration(const ConfigMap& config, const Configuration& defaultCfg = {}, const bool throwOnUnsupported = true);

InferenceEngine::Parameter Get(const std::string& name) const;

// Plugin configuration parameters

int deviceId = 0;
bool perfCount = true;
int deviceId = 0;
bool perfCount = true;
InferenceEngine::IStreamsExecutor::Config _streamsExecutorConfig;
};
// ! [configuration:header]
@ -2,36 +2,35 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "template_executable_network.hpp"

#include <ie_metric_helpers.hpp>
#include <ie_plugin_config.hpp>
#include <threading/ie_executor_manager.hpp>
#include "transformations/serialize.hpp"

#include "template/template_config.hpp"
#include "template_plugin.hpp"
#include "template_executable_network.hpp"
#include "template_itt.hpp"
#include "template_plugin.hpp"
#include "transformations/serialize.hpp"

using namespace TemplatePlugin;

// ! [executable_network:ctor_cnnnetwork]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg,
const Plugin::Ptr& plugin) :
InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
_cfg(cfg),
_plugin(plugin) {
const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg, const Plugin::Ptr& plugin)
: InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
_cfg(cfg),
_plugin(plugin) {
// TODO: if your plugin supports device ID (more than a single instance of the device can be on the host machine)
// you should select the proper device based on KEY_DEVICE_ID or automatic behavior
// In this case, _waitExecutor should also be created per device.
try {
CompileNetwork(function, inputInfoMap, outputsInfoMap);
InitExecutor(); // creates a thread-based executor used for async requests
InitExecutor(); // creates a thread-based executor used for async requests
} catch (const InferenceEngine::Exception&) {
throw;
} catch (const std::exception & e) {
} catch (const std::exception& e) {
IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what();
} catch (...) {
IE_THROW(Unexpected) << "Generic exception is thrown";
@ -40,11 +39,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const
// ! [executable_network:ctor_cnnnetwork]

// ! [executable_network:ctor_import_stream]
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
const Configuration& cfg,
const Plugin::Ptr& plugin) :
_cfg(cfg),
_plugin(plugin) {
TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model, const Configuration& cfg, const Plugin::Ptr& plugin): _cfg(cfg), _plugin(plugin) {
// read XML content
std::string xmlString;
std::uint64_t dataSize = 0;
@ -57,9 +52,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,
model.read(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
if (0 != dataSize) {
dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8,
{static_cast<std::size_t>(dataSize)},
InferenceEngine::Layout::C));
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast<std::size_t>(dataSize)}, InferenceEngine::Layout::C));
dataBlob->allocate();
model.read(dataBlob->buffer(), dataSize);
}
@ -77,10 +70,10 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,

try {
CompileNetwork(cnnnetwork.getFunction(), inputInfoMap, outputInfoMap);
InitExecutor(); // creates a thread-based executor used for async requests
InitExecutor(); // creates a thread-based executor used for async requests
} catch (const InferenceEngine::Exception&) {
throw;
} catch (const std::exception & e) {
} catch (const std::exception& e) {
IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what();
} catch (...) {
IE_THROW(Unexpected) << "Generic exception is thrown";
@ -90,12 +83,11 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model,

// ! [executable_network:map_graph]
// forward declaration
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap & inputInfoMap,
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);

void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap & inputInfoMap,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap) {
// TODO: perform actual graph compilation / mapping to backend graph representation / kernels

@ -120,7 +112,6 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<con
}
// ! [executable_network:map_graph]


// ! [executable_network:init_executor]
void TemplatePlugin::ExecutableNetwork::InitExecutor() {
// Default multi-threaded configuration is balanced for throughput and latency cases and takes into account
@ -137,10 +128,9 @@ void TemplatePlugin::ExecutableNetwork::InitExecutor() {
}
// ! [executable_network:init_executor]


// ! [executable_network:create_infer_request_impl]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) {
InferenceEngine::OutputsDataMap networkOutputs) {
return std::make_shared<TemplateInferRequest>(networkInputs, networkOutputs, std::static_pointer_cast<ExecutableNetwork>(shared_from_this()));
}
// ! [executable_network:create_infer_request_impl]
@ -148,32 +138,26 @@ InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::C
// ! [executable_network:create_infer_request]
InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor, _plugin->_waitExecutor, _callbackExecutor);
return std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest), _taskExecutor, _plugin->_waitExecutor,
_callbackExecutor);
}
// ! [executable_network:create_infer_request]

// ! [executable_network:get_config]
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const {
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string& name) const {
return _cfg.Get(name);
}
// ! [executable_network:get_config]

// ! [executable_network:get_metric]
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name) const {
InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const {
// TODO: return more supported values for metrics
if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) {
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string>{
METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string> {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
} else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = {
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) };
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
configKeys.emplace_back(configKey);
}
@ -197,8 +181,7 @@ void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& modelStream) {
// Note: custom ngraph extensions are not supported
std::map<std::string, ngraph::OpSet> custom_opsets;
std::stringstream xmlFile, binFile;
ngraph::pass::Serialize serializer(xmlFile, binFile,
ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
ngraph::pass::Serialize serializer(xmlFile, binFile, ngraph::pass::Serialize::Version::IR_V10, custom_opsets);
serializer.run_on_function(_function);

auto m_constants = binFile.str();
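ExportImpl serializes the function to in-memory XML and weight streams, and the import constructor above reads sizes and payloads back from the same stream. The exact on-stream layout is only partly visible in this hunk, so the following is a hedged sketch of one simple length-prefixed layout; the helper name writeLengthPrefixed is hypothetical and this is not necessarily the plugin's actual format:

    #include <cstdint>
    #include <ostream>
    #include <string>

    // Hypothetical layout: [xml size][xml bytes][weights size][weights bytes].
    void writeLengthPrefixed(std::ostream& out, const std::string& xml, const std::string& weights) {
        std::uint64_t xmlSize = xml.size();
        out.write(reinterpret_cast<const char*>(&xmlSize), sizeof(xmlSize));
        out.write(xml.data(), static_cast<std::streamsize>(xmlSize));
        std::uint64_t binSize = weights.size();
        out.write(reinterpret_cast<const char*>(&binSize), sizeof(binSize));
        out.write(weights.data(), static_cast<std::streamsize>(binSize));
    }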
@ -4,13 +4,12 @@

#pragma once

#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include <ngraph/function.hpp>

#include "template_async_infer_request.hpp"
#include "template_config.hpp"
#include "template_infer_request.hpp"
#include "template_async_infer_request.hpp"

#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>

namespace TemplatePlugin {

@ -24,15 +23,10 @@ class Plugin;
// ! [executable_network:header]
class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap,
const Configuration& cfg,
const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);

ExecutableNetwork(std::istream& model,
const Configuration& cfg,
const std::shared_ptr<Plugin>& plugin);
ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr<Plugin>& plugin);

~ExecutableNetwork() override = default;

@ -42,23 +36,22 @@ public:
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override;
InferenceEngine::Parameter GetMetric(const std::string &name) const override;
InferenceEngine::Parameter GetConfig(const std::string &name) const override;
InferenceEngine::Parameter GetMetric(const std::string& name) const override;
InferenceEngine::Parameter GetConfig(const std::string& name) const override;

private:
friend class TemplateInferRequest;

void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap);
void InitExecutor();

std::atomic<std::size_t> _requestId = {0};
Configuration _cfg;
std::shared_ptr<Plugin> _plugin;
std::shared_ptr<ngraph::Function> _function;
std::map<std::string, std::size_t> _inputIndex;
std::map<std::string, std::size_t> _outputIndex;
std::atomic<std::size_t> _requestId = {0};
Configuration _cfg;
std::shared_ptr<Plugin> _plugin;
std::shared_ptr<ngraph::Function> _function;
std::map<std::string, std::size_t> _inputIndex;
std::map<std::string, std::size_t> _outputIndex;
};
// ! [executable_network:header]

@ -2,16 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <utility>
#include <algorithm>
#include <memory>
#include <string>
#include <map>

#include "template_infer_request.hpp"

#include <algorithm>
#include <map>
#include <memory>
#include <ngraph/runtime/reference/convert.hpp>
#include <string>
#include <utility>

#include "blob_factory.hpp"
#include "ie_ngraph_utils.hpp"
#include "template_executable_network.hpp"
#include "template_plugin.hpp"
#include "template_itt.hpp"
#include "template_plugin.hpp"

using namespace TemplatePlugin;
using namespace InferenceEngine;
@ -19,11 +23,9 @@ using namespace InferenceEngine;
using Time = std::chrono::high_resolution_clock;

// ! [infer_request:ctor]
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork) :
IInferRequestInternal(networkInputs, networkOutputs),
_executableNetwork(executableNetwork) {
TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<TemplatePlugin::ExecutableNetwork>& executableNetwork)
: IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) {
// TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks

auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1));
@ -57,66 +59,47 @@ void TemplateInferRequest::allocateDeviceBuffers() {
_outputTensors.resize(_networkOutputs.size());
}

template<typename BlobDataMap, typename GetNetworkPrecisionF>
static void AllocateImpl(const BlobDataMap& userDataMap,
BlobMap& userBlobMap,
BlobMap& deviceBlobMap,
GetNetworkPrecisionF&& GetNetworkPrecision,
template <typename BlobDataMap, typename GetNetworkPrecisionF>
static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision,
bool isInputBlob = true) {
for (auto&& userData : userDataMap) {
auto& dims = userData.second->getTensorDesc().getDims();
const auto devicePrecision = Precision::FP32;
const auto& dims = userData.second->getTensorDesc().getDims();
const auto deviceLayout = TensorDesc::getLayoutByDims(dims);
auto userPrecision = userData.second->getTensorDesc().getPrecision();
auto userLayout = userData.second->getTensorDesc().getLayout();
const auto userPrecision = userData.second->getTensorDesc().getPrecision();
const auto userLayout = userData.second->getTensorDesc().getLayout();

Blob::Ptr userBlob;
switch (userPrecision) {
case Precision::U8: {
userBlob = InferenceEngine::make_shared_blob<std::uint8_t>({userPrecision, dims, userLayout});
} break;
case Precision::FP32 : {
userBlob = InferenceEngine::make_shared_blob<float>({userPrecision, dims, userLayout});
} break;
default: IE_THROW(NotImplemented) << "Template Plugin: Unsupported Input/Output Precision";
}
const auto networkPrecision = InferenceEngine::details::convertPrecision(GetNetworkPrecision(userData.first));
Blob::Ptr userBlob = make_blob_with_precision({userPrecision, dims, userLayout});
userBlob->allocate();
userBlobMap[userData.first] = userBlob;

auto networkPrecision = GetNetworkPrecision(userData.first);
Blob::Ptr deviceBlob;
switch (networkPrecision) {
case ngraph::element::Type_t::f32 : {
if (userPrecision == devicePrecision && userLayout == deviceLayout) {
deviceBlob = userBlob;
} else {
deviceBlob = InferenceEngine::make_shared_blob<float>({devicePrecision, dims, deviceLayout});
}
} break;
default: IE_THROW(NotImplemented) << "Template Plugin: Unsupported network Input/Output Precision";
}
if (userBlob != deviceBlob) {
if (isInputBlob) {
// preprocessing converts user input blob to desired device input blob automatically
deviceBlob->allocate();
} else {
// NOTE: this is not supported for output user blobs yet
IE_THROW(NotImplemented) << "Template Plugin: does not support setPrecision, setLayout for outputs";
if (userPrecision == networkPrecision && userLayout == deviceLayout) {
deviceBlob = userBlob;
} else {
if (userLayout != deviceLayout && !isInputBlob) {
IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs";
}
deviceBlob = make_blob_with_precision({networkPrecision, dims, deviceLayout});
deviceBlob->allocate();
}

deviceBlobMap[userData.first] = deviceBlob;
}
}

void TemplateInferRequest::allocateBlobs() {
auto&& parameters = _executableNetwork->_function->get_parameters();
AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&] (const std::string& blobName) {
AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&](const std::string& blobName) {
return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type();
});
auto&& results = _executableNetwork->_function->get_results();
AllocateImpl(_networkOutputs, _outputs, _networkOutputBlobs, [&] (const std::string& blobName) {
return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
}, false);
AllocateImpl(
_networkOutputs, _outputs, _networkOutputBlobs,
[&](const std::string& blobName) {
return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type();
},
false);
}

// ! [infer_request:infer_impl]
@ -129,42 +112,108 @@ void TemplateInferRequest::InferImpl() {
}
// ! [infer_request:infer_impl]

template<typename SrcT, typename DstT>
template <typename SrcT, typename DstT>
static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
std::copy_n(InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
src->size(),
InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>());
ngraph::runtime::reference::convert<SrcT, DstT>(InferenceEngine::as<InferenceEngine::MemoryBlob>(src)->rmap().as<const SrcT*>(),
InferenceEngine::as<InferenceEngine::MemoryBlob>(dst)->wmap().as<DstT*>(), src->size());
}

static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) {
switch (src->getTensorDesc().getPrecision()) {
case Precision::U8 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U8 : break;
case Precision::FP32 : {
blobCopy<std::uint8_t, float>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() << " to " << dst->getTensorDesc().getPrecision();
}
}
case Precision::U8: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U8:
break;
case Precision::FP32: {
blobCopy<std::uint8_t, float>(src, dst);
} break;
case Precision::FP32 : {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::FP32 : break;
case Precision::U8 : {
blobCopy<float, std::uint8_t>(src, dst);
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from "
<< src->getTensorDesc().getPrecision() << " to " << dst->getTensorDesc().getPrecision();
}
}
} break;
default : {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision();
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::FP32: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::FP32:
break;
case Precision::U8: {
blobCopy<float, std::uint8_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I64: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I64:
break;
case Precision::I32: {
blobCopy<int64_t, int32_t>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I16: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I16:
break;
case Precision::FP32: {
blobCopy<int16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::I8: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::I8:
break;
case Precision::FP32: {
blobCopy<int8_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::BOOL: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::BOOL:
break;
case Precision::FP32: {
blobCopy<bool, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
case Precision::U16: {
switch (dst->getTensorDesc().getPrecision()) {
case Precision::U16:
break;
case Precision::FP32: {
blobCopy<uint16_t, float>(src, dst);
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to "
<< dst->getTensorDesc().getPrecision();
}
}
} break;
default: {
IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision();
}
}
}

@ -180,8 +229,8 @@ void TemplateInferRequest::inferPreprocess() {
const auto& parameter = _parameters[index];
const auto& parameterShape = parameter->get_shape();
const auto& parameterType = parameter->get_element_type();
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
_inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
parameterType, parameterShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkInput.second)->rmap().as<void*>());
}
for (auto&& output : _outputs) {
auto outputBlob = output.second;
@ -193,8 +242,8 @@ void TemplateInferRequest::inferPreprocess() {
const auto& result = _results[index];
const auto& resultShape = result->get_shape();
const auto& resultType = result->get_element_type();
_outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(resultType, resultShape,
InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
_outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(
resultType, resultShape, InferenceEngine::as<InferenceEngine::MemoryBlob>(networkOutput)->wmap().as<void*>());
}
_durations[Preprocess] = Time::now() - start;
}
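The nested switches in blobCopy above dispatch an element-wise conversion for each supported (source, destination) precision pair. A minimal stand-alone sketch of the conversion each leaf case performs, with a hypothetical convertBuffer helper that is not part of the plugin:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Convert element-by-element between two typed buffers, as each
    // blobCopy<SrcT, DstT> instantiation does.
    template <typename SrcT, typename DstT>
    void convertBuffer(const SrcT* src, DstT* dst, std::size_t size) {
        std::transform(src, src + size, dst, [](SrcT v) { return static_cast<DstT>(v); });
    }

    int main() {
        std::vector<std::uint8_t> u8 = {0, 128, 255};
        std::vector<float> f32(u8.size());
        convertBuffer(u8.data(), f32.data(), u8.size());  // U8 -> FP32, as in the U8/FP32 case
    }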
@ -4,20 +4,17 @@

#pragma once

#include <array>
#include <chrono>
#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>
#include <executable.hpp>
#include <ie_input_info.hpp>
#include <map>
#include <memory>
#include <ngraph/runtime/tensor.hpp>
#include <openvino/itt.hpp>
#include <string>
#include <vector>
#include <array>
#include <memory>
#include <chrono>

#include <openvino/itt.hpp>

#include <ie_input_info.hpp>
#include <cpp_interfaces/interface/ie_iinfer_request_internal.hpp>

#include <ngraph/runtime/tensor.hpp>
#include <executable.hpp>

namespace TemplatePlugin {

@ -29,8 +26,7 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal {
public:
typedef std::shared_ptr<TemplateInferRequest> Ptr;

TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs,
const InferenceEngine::OutputsDataMap& networkOutputs,
TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs,
const std::shared_ptr<ExecutableNetwork>& executableNetwork);
~TemplateInferRequest();

@ -47,26 +43,20 @@ private:
void allocateDeviceBuffers();
void allocateBlobs();

enum {
Preprocess,
Postprocess,
StartPipeline,
WaitPipeline,
numOfStages
};
enum { Preprocess, Postprocess, StartPipeline, WaitPipeline, numOfStages };

std::shared_ptr<ExecutableNetwork> _executableNetwork;
std::array<openvino::itt::handle_t, numOfStages> _profilingTask;
std::shared_ptr<ExecutableNetwork> _executableNetwork;
std::array<openvino::itt::handle_t, numOfStages> _profilingTask;
// for performance counters
std::array<std::chrono::duration<float, std::micro>, numOfStages> _durations;
std::array<std::chrono::duration<float, std::micro>, numOfStages> _durations;

InferenceEngine::BlobMap _networkOutputBlobs;
ngraph::ParameterVector _parameters;
ngraph::ResultVector _results;
InferenceEngine::BlobMap _networkOutputBlobs;
ngraph::ParameterVector _parameters;
ngraph::ResultVector _results;

std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _inputTensors;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _outputTensors;
std::shared_ptr<ngraph::runtime::Executable> _executable;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _inputTensors;
std::vector<std::shared_ptr<ngraph::runtime::Tensor>> _outputTensors;
std::shared_ptr<ngraph::runtime::Executable> _executable;
};
// ! [infer_request:header]

@ -14,7 +14,7 @@
namespace TemplatePlugin {
namespace itt {
namespace domains {
OV_ITT_DOMAIN(TemplatePlugin);
}
}
OV_ITT_DOMAIN(TemplatePlugin);
}
} // namespace itt
} // namespace TemplatePlugin
@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

// clang-format off
#include <ie_metric_helpers.hpp>
#include <ie_plugin_config.hpp>
#include <ie_algorithm.hpp>
@ -24,6 +25,7 @@
#include "template_infer_request.hpp"
#include "transformations/template_pattern_transformation.hpp"
#include "transformations/preprocessing/preprocessing.hpp"
// clang-format on

using namespace TemplatePlugin;

@ -53,8 +55,7 @@ Plugin::~Plugin() {

// ! [plugin:transform_network]

std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function,
const InferenceEngine::InputsDataMap & inputInfoMap,
std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function, const InferenceEngine::InputsDataMap& inputInfoMap,
const InferenceEngine::OutputsDataMap& outputsInfoMap) {
// 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function
auto transformedNetwork = ngraph::clone_function(*function);
@ -67,7 +68,7 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// Example: register CommonOptimizations transformation from transformations library
passManager.register_pass<ngraph::pass::CommonOptimizations>();
// Template plugin handles only FP32 networks
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32 }});
passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
// Example: register plugin specific transformation
passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
@ -83,36 +84,32 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
// ! [plugin:transform_network]

// ! [plugin:load_exe_network_impl]
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork & network,
const ConfigMap &config) {
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::LoadExeNetworkImpl");

InferenceEngine::InputsDataMap networkInputs = network.getInputsInfo();
InferenceEngine::OutputsDataMap networkOutputs = network.getOutputsInfo();

auto fullConfig = Configuration{ config, _cfg };
return std::make_shared<ExecutableNetwork>(network.getFunction(),
networkInputs, networkOutputs, fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(network.getFunction(), networkInputs, networkOutputs, fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
}
// ! [plugin:load_exe_network_impl]

// ! [plugin:import_network_impl]
InferenceEngine::ExecutableNetworkInternal::Ptr
Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map<std::string, std::string>& config) {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetworkImpl");

auto fullConfig = Configuration{ config, _cfg };
return std::make_shared<ExecutableNetwork>(modelStream, fullConfig,
std::static_pointer_cast<Plugin>(shared_from_this()));
auto fullConfig = Configuration {config, _cfg};
return std::make_shared<ExecutableNetwork>(modelStream, fullConfig, std::static_pointer_cast<Plugin>(shared_from_this()));
}
// ! [plugin:import_network_impl]

// ! [plugin:query_network]
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork &network, const ConfigMap& config) const {
InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) const {
OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork");

Configuration fullConfig{config, _cfg, false};
Configuration fullConfig {config, _cfg, false};
auto function = network.getFunction();

// 1. First of all we should store initial input operation set
@ -198,36 +195,28 @@ void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
// ! [plugin:add_extension]

// ! [plugin:set_config]
void Plugin::SetConfig(const ConfigMap &config) {
_cfg = Configuration{config, _cfg};
void Plugin::SetConfig(const ConfigMap& config) {
_cfg = Configuration {config, _cfg};
}
// ! [plugin:set_config]

// ! [plugin:get_config]
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & /*options*/) const {
InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& /*options*/) const {
return _cfg.Get(name);
}
// ! [plugin:get_config]

// ! [plugin:get_metric]
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const {
InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const {
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
std::vector<std::string> supportedMetrics = {
METRIC_KEY(AVAILABLE_DEVICES),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
METRIC_KEY(FULL_DEVICE_NAME),
METRIC_KEY(IMPORT_EXPORT_SUPPORT),
METRIC_KEY(DEVICE_ARCHITECTURE),
METRIC_KEY(OPTIMIZATION_CAPABILITIES),
METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) };
std::vector<std::string> supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME),
METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE),
METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)};
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics);
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> configKeys = {
CONFIG_KEY(DEVICE_ID),
CONFIG_KEY(PERF_COUNT),
TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys();
std::vector<std::string> configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)};
auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys();
for (auto&& configKey : streamExecutorConfigKeys) {
if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) {
configKeys.emplace_back(configKey);
@ -236,7 +225,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (METRIC_KEY(AVAILABLE_DEVICES) == name) {
// TODO: fill list of available devices
std::vector<std::string> availableDevices = { "" };
std::vector<std::string> availableDevices = {""};
IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
} else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
std::string name = "Template Device Full Name";
@ -249,13 +238,13 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std:
IE_SET_METRIC_RETURN(DEVICE_ARCHITECTURE, arch);
} else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) {
// TODO: fill actual list of supported capabilities: e.g. Template device supports only FP32
std::vector<std::string> capabilities = { METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/ };
std::vector<std::string> capabilities = {METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/};
IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities);
} else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) {
// TODO: fill with actual values
using uint = unsigned int;
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1}));
} else {
IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint {1}, uint {1}, uint {1}));
} else {
IE_THROW(NotFound) << "Unsupported device metric: " << name;
}
}
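The metrics returned by GetMetric above surface to applications through the Inference Engine Core API. A minimal usage sketch, assuming the plugin is registered under the device name "TEMPLATE":

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        InferenceEngine::Core core;
        // Asks the TEMPLATE device which metrics it supports; the plugin's
        // GetMetric handles the METRIC_KEY(SUPPORTED_METRICS) branch above.
        auto metrics = core.GetMetric("TEMPLATE", METRIC_KEY(SUPPORTED_METRICS)).as<std::vector<std::string>>();
        for (const auto& m : metrics) {
            std::cout << m << std::endl;
        }
    }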
@ -4,11 +4,11 @@

#pragma once

#include "template_config.hpp"
#include "template_executable_network.hpp"
#include <cpp_interfaces/impl/ie_plugin_internal.hpp>

#include "backend.hpp"
#include "template_config.hpp"
#include "template_executable_network.hpp"

//! [plugin:header]
namespace TemplatePlugin {
@ -20,26 +20,24 @@ public:
Plugin();
~Plugin();

void SetConfig(const std::map<std::string, std::string> &config) override;
InferenceEngine::QueryNetworkResult
QueryNetwork(const InferenceEngine::CNNNetwork &network,
const std::map<std::string, std::string>& config) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr
LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network,
const std::map<std::string, std::string> &config) override;
void SetConfig(const std::map<std::string, std::string>& config) override;
InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) override;
void AddExtension(InferenceEngine::IExtensionPtr extension) override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter> & options) const override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr ImportNetworkImpl(std::istream& model, const std::map<std::string, std::string>& config) override;

private:
friend class ExecutableNetwork;
friend class TemplateInferRequest;

std::shared_ptr<ngraph::runtime::Backend> _backend;
Configuration _cfg;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
std::shared_ptr<ngraph::runtime::Backend> _backend;
Configuration _cfg;
InferenceEngine::ITaskExecutor::Ptr _waitExecutor;
};

} // namespace TemplatePlugin
//! [plugin:header]
//! [plugin:header]
@ -2,21 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "transformations/preprocessing/mean_image_or_value.hpp"

#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>

#include "transformations/preprocessing/mean_image_or_value.hpp"

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMeanSubtract, "AddMeanSubtract", 0);

ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap& inputInfoMap) {
// RUN_ON_FUNCTION_SCOPE(AddMeanSubtract);
auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>();

ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) {
ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root());
if (!param) {
return false;
@ -28,8 +28,7 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) {
}

auto mean_const = it->second;
NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32,
"Mean for ", param->get_friendly_name(), " must have f32 type");
NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type");

auto copy_param = param->clone_with_new_inputs({});
auto sub = std::make_shared<ngraph::opset3::Subtract>(copy_param, mean_const);
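AddMeanSubtract is a MatcherPass: it matches every Parameter, looks its name up in the MeanMap, and rewrites the matched node to feed through a Subtract. A hedged sketch of how such a pass is typically applied, assuming `function` and `meanMap` are supplied by the caller (the helper name applyMeanSubtract is illustrative):

    #include <memory>
    #include <ngraph/pass/manager.hpp>
    #include "transformations/preprocessing/mean_image_or_value.hpp"

    void applyMeanSubtract(std::shared_ptr<ngraph::Function> function,
                           const ngraph::pass::AddMeanSubtract::MeanMap& meanMap) {
        ngraph::pass::Manager manager;
        manager.register_pass<ngraph::pass::AddMeanSubtract>(meanMap);
        manager.run_passes(function);  // rewrites each matched Parameter to (x - mean)
    }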
@ -5,10 +5,9 @@
#pragma once

#include <map>
#include <string>

#include <ngraph/op/constant.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>

#include "transformations_visibility.hpp"

@ -29,5 +28,5 @@ public:
using MeanMap = std::map<std::string, std::shared_ptr<ngraph::op::v0::Constant>>;

NGRAPH_RTTI_DECLARATION;
explicit AddMeanSubtract(const MeanMap & inputInfoMap);
explicit AddMeanSubtract(const MeanMap& inputInfoMap);
};
@ -2,26 +2,26 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <ngraph/pass/manager.hpp>
#include "transformations/preprocessing/preprocessing.hpp"

#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>

#include "transformations/preprocessing/mean_image_or_value.hpp"
#include "transformations/preprocessing/std_scale.hpp"
#include "transformations/preprocessing/preprocessing.hpp"

NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0);

ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap)
: m_inputInfoMap(inputInfoMap) { }
ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {}

bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Function> f) {
ngraph::pass::AddMeanSubtract::MeanMap meanMap;
ngraph::pass::AddStdScale::ScaleMap scaleMap;

for (const auto & it : m_inputInfoMap) {
for (const auto& it : m_inputInfoMap) {
bool has_scales = false, has_mean_values = false, has_mean_image = false;
const InferenceEngine::PreProcessInfo & pInfo = it.second->getPreProcess();
const auto & inputDims = it.second->getTensorDesc().getDims();
const InferenceEngine::PreProcessInfo& pInfo = it.second->getPreProcess();
const auto& inputDims = it.second->getTensorDesc().getDims();
const size_t cn = pInfo.getNumberOfChannels();
std::vector<float> meanValues(cn), stdScales(cn);
InferenceEngine::Blob::Ptr meanImage = nullptr;
@ -40,10 +40,10 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
if (c == 0) {
meanImage = pInfo[c]->meanData;
NGRAPH_CHECK(meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32,
"Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
"Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData");
} else {
NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(),
"TensorDesc for PreProcessChannel::meanData must be equal");
NGRAPH_CHECK(pInfo[c]->meanData != nullptr, "pInfo[c]->meanData is nullptr");
NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal");
}
}
}
@ -53,35 +53,33 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr<ngraph::Fun
continue;
}

NGRAPH_CHECK(!(has_mean_image && has_scales),
"Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");
NGRAPH_CHECK(!(has_mean_image && has_scales), "Only PreProcessChannel::meanData or PreProcessChannel::meanValue can be set.");

if (has_scales) {
ngraph::Shape shape(inputDims.size(), 1);
shape[1] = stdScales.size(); // C
shape[1] = stdScales.size(); // C
scaleMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, stdScales);
}

if (has_mean_values) {
ngraph::Shape shape(inputDims.size(), 1);
shape[1] = meanValues.size(); // C
shape[1] = meanValues.size(); // C
meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanValues);
} else if (has_mean_image) {
ngraph::Shape shape = { cn };
ngraph::Shape shape = {cn};
auto dims = meanImage->getTensorDesc().getDims();
std::copy(dims.begin(), dims.end(), std::back_inserter(shape));

std::vector<float> meanImageData(ngraph::shape_size(shape));
for (size_t c = 0, i = 0; c < cn; ++c) {
auto lm = pInfo[c]->meanData->buffer();
const float *data = lm.as<const float *>();
const float* data = lm.as<const float*>();

std::memcpy(&meanImageData[i], data, meanImage->byteSize());
i += meanImage->size();
}

meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32,
shape, meanImageData);
meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanImageData);
}
}

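For reference, a minimal sketch of how the AddPreprocessing pass above would be applied to a function. The registration call is the standard ngraph pass::Manager pattern; obtaining `inputInfoMap` from CNNNetwork::getInputsInfo() is an assumption, not shown in this commit:

    // Sketch only: applying AddPreprocessing through ngraph::pass::Manager.
    // Assumes inputInfoMap was obtained from CNNNetwork::getInputsInfo().
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::AddPreprocessing>(inputInfoMap);
    manager.run_passes(f);  // f: std::shared_ptr<ngraph::Function>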
@ -26,10 +26,11 @@ class AddPreprocessing;
* (x - mean) * stdScale
*/
class ngraph::pass::AddPreprocessing : public ngraph::pass::FunctionPass {
const InferenceEngine::InputsDataMap & m_inputInfoMap;
const InferenceEngine::InputsDataMap& m_inputInfoMap;

public:
NGRAPH_RTTI_DECLARATION;
explicit AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap);
explicit AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap);

bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
};

@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "transformations/preprocessing/std_scale.hpp"

#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>

#include "transformations/preprocessing/std_scale.hpp"

using namespace ngraph;

NGRAPH_RTTI_DEFINITION(ngraph::pass::AddStdScale, "AddStdScale", 0);
@ -16,7 +16,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
// RUN_ON_FUNCTION_SCOPE(AddStdScale);
auto label = ngraph::pattern::wrap_type<ngraph::opset3::Parameter>();

ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) {
ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
auto param = std::dynamic_pointer_cast<ngraph::opset3::Parameter>(m.get_match_root());
if (!param) {
return false;
@ -28,8 +28,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) {
}

auto scale_const = it->second;
NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32,
"Scale for ", param->get_friendly_name(), " must have f32 type");
NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type");

auto copy_param = param->clone_with_new_inputs({});
auto mul = std::make_shared<ngraph::opset3::Multiply>(copy_param, it->second);

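A usage sketch for the AddStdScale matcher above: each matched Parameter is routed through a Multiply with its f32 scale constant, as the callback's checks require. The map key and the scale shape below are illustrative assumptions (an NCHW per-channel layout is assumed):

    // Illustrative only: Parameter -> Multiply(Parameter, scale_const) -> consumers.
    auto scale = ngraph::opset3::Constant::create(ngraph::element::f32,
                                                  ngraph::Shape{1, 3, 1, 1},  // assumed per-channel NCHW scale
                                                  std::vector<float>{0.5f, 0.5f, 0.5f});
    ngraph::pass::AddStdScale::ScaleMap scaleMap;
    scaleMap["input"] = scale;  // keyed by the input (Parameter) name, assumed
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::AddStdScale>(scaleMap);
    manager.run_passes(f);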
@ -5,10 +5,9 @@
#pragma once

#include <map>
#include <string>

#include <ngraph/op/constant.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>

#include "transformations_visibility.hpp"


@ -15,7 +15,7 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
NodeVector nodes;

// Traverse nGraph Function in topological order
for (auto & node : f->get_ordered_ops()) {
for (auto& node : f->get_ordered_ops()) {
// Check that number of input and output ports are equal to 1
if (node->inputs().size() == 1 && node->outputs().size() == 1) {
// Check that input and output shape a fully defined (not dynamic) and number of consumers equal to 1
@ -28,9 +28,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr<ngraph::Fun
}

// Print types and names for collected nodes
for (auto & node : nodes) {
std::cout << "Type: " << node->get_type_info().name << std::endl
<< "Name: " << node->get_friendly_name() << std::endl;
for (auto& node : nodes) {
std::cout << "Type: " << node->get_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl;
}

// Return false because we didn't change nGraph Function

@ -16,7 +16,7 @@ class MyFunctionTransformation;

// ! [function_pass:template_transformation_hpp]
// template_function_transformation.hpp
class ngraph::pass::MyFunctionTransformation: public ngraph::pass::FunctionPass {
class ngraph::pass::MyFunctionTransformation : public ngraph::pass::FunctionPass {
public:
NGRAPH_RTTI_DECLARATION;
bool run_on_function(std::shared_ptr<ngraph::Function> f) override;

@ -3,13 +3,14 @@
//

#include "transformations/template_pattern_transformation.hpp"
#include "transformations/template_function_transformation.hpp"

#include <ngraph/opsets/opset3.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>

#include "transformations/template_function_transformation.hpp"

using namespace ngraph;

// ! [graph_rewrite:template_transformation_cpp]
@ -23,15 +24,14 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() {
auto div = std::make_shared<ngraph::opset3::Divide>(input0, input1);

ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
auto div = std::dynamic_pointer_cast<ngraph::opset3::Divide> (m.get_match_root());
auto div = std::dynamic_pointer_cast<ngraph::opset3::Divide>(m.get_match_root());
// We can not apply this transformation in case with integer input data type
if (!div || div->input(0).get_element_type().is_integral()) {
return false;
}

// Decompose Divide into Multiply with Power operations
auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1),
opset3::Constant::create(div->get_input_element_type(1), Shape{1}, {-1}));
auto pow = std::make_shared<ngraph::opset3::Power>(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1}));

auto mul = std::make_shared<ngraph::opset3::Multiply>(div->input_value(0), pow);

@ -67,8 +67,7 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
auto& node_to_output = m.get_pattern_value_map();

// Create new Relu operation and add register it for additional execution
auto new_relu = register_new_node<ngraph::opset3::Relu>(
node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));
auto new_relu = register_new_node<ngraph::opset3::Relu>(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0));

// Copy runtime info attributes to newly created operation
ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu);
@ -91,60 +90,60 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() {
// ! [matcher_pass:relu_fusion]

void run_matcher_on_node(std::shared_ptr<ngraph::Node> node) {
// ! [matcher_pass:run_on_node]
if (ngraph::pass::DecomposeDivideMatcher().apply(node)) {
// successful execution (root node was replaced)
}
// ! [matcher_pass:run_on_node]
// ! [matcher_pass:run_on_node]
if (ngraph::pass::DecomposeDivideMatcher().apply(node)) {
// successful execution (root node was replaced)
}
// ! [matcher_pass:run_on_node]
}

void run_matcher_with_manager(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager]
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
pass::Manager manager;
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager]
// ! [matcher_pass:manager]
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
pass::Manager manager;
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager]
}

void run_matcher_with_manager2(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager2]
// Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously
pass::Manager manager;
auto anchor = manager.register_pass<ngraph::pass::GraphRewrite>();
anchor->add_matcher<ngraph::pass::DecomposeDivideMatcher>();
anchor->add_matcher<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager2]
// ! [matcher_pass:manager2]
// Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously
pass::Manager manager;
auto anchor = manager.register_pass<ngraph::pass::GraphRewrite>();
anchor->add_matcher<ngraph::pass::DecomposeDivideMatcher>();
anchor->add_matcher<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager2]
}

void run_matcher_with_manager3(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:manager3]
pass::Manager manager;
manager.register_pass<ngraph::pass::MyFunctionTransformation>();
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager3]
// ! [matcher_pass:manager3]
pass::Manager manager;
manager.register_pass<ngraph::pass::MyFunctionTransformation>();
// Two matchers will run independently (two independent graph traversals)
// pass::Manager automatically creates GraphRewrite container for each MatcherPass
manager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
manager.register_pass<ngraph::pass::ReluReluFusionMatcher>();
manager.run_passes(f);
// ! [matcher_pass:manager3]
}

void run_matcher_with_gr(std::shared_ptr<ngraph::Function> f) {
// ! [matcher_pass:graph_rewrite]
// Two matcher passes will run simultaneously in a single graph traversal
ngraph::pass::GraphRewrite pass;
pass.add_matcher<ngraph::pass::DecomposeDivideMatcher>();
pass.add_matcher<ngraph::pass::ReluReluFusionMatcher>();
pass.run_on_function(f);
// ! [matcher_pass:graph_rewrite]
// ! [matcher_pass:graph_rewrite]
// Two matcher passes will run simultaneously in a single graph traversal
ngraph::pass::GraphRewrite pass;
pass.add_matcher<ngraph::pass::DecomposeDivideMatcher>();
pass.add_matcher<ngraph::pass::ReluReluFusionMatcher>();
pass.run_on_function(f);
// ! [matcher_pass:graph_rewrite]
}

// ! [manual_constant_folding]
template <class T>
Output<Node> eltwise_fold(const Output<Node> & input0, const Output<Node> & input1) {
Output<Node> eltwise_fold(const Output<Node>& input0, const Output<Node>& input1) {
auto eltwise = std::make_shared<T>(input0, input1);
OutputVector output(eltwise->get_output_size());
// If constant folding wasn't successful return eltwise output

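A hypothetical call site for the eltwise_fold helper above, folding two constants at graph-construction time (the variable names are illustrative):

    auto lhs = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {2.0f});
    auto rhs = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {3.0f});
    // Yields a folded Constant output ({6.0f}) when folding succeeds,
    // otherwise the Multiply node's own output, per the helper's comment.
    ngraph::Output<ngraph::Node> folded = eltwise_fold<ngraph::opset3::Multiply>(lhs, rhs);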
@ -21,14 +21,14 @@ class ReluReluFusionMatcher;
* @ingroup ie_transformation_common_api
* @brief Add transformation description.
*/
class ngraph::pass::DecomposeDivideMatcher: public ngraph::pass::MatcherPass {
class ngraph::pass::DecomposeDivideMatcher : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
DecomposeDivideMatcher();
};
// ! [graph_rewrite:template_transformation_hpp]

class ngraph::pass::ReluReluFusionMatcher: public ngraph::pass::MatcherPass {
class ngraph::pass::ReluReluFusionMatcher : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ReluReluFusionMatcher();

@ -14,7 +14,7 @@ addIeTargetTest(
IE::funcSharedTests
INCLUDES
"${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include"
ADD_CPPLINT
ADD_CLANG_FORMAT
LABELS
TEMPLATE
)

@ -4,5 +4,4 @@

#include "functional_test_utils/core_config.hpp"

void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
}
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}

@ -7,19 +7,14 @@
using namespace LayerTestsDefinitions;

namespace {
static const std::vector<ngraph::element::Type> precisionsTemplate = {
ngraph::element::f32,
};
static const std::vector<ngraph::element::Type> precisionsTemplate = {
ngraph::element::f32,
};

static const std::vector<std::size_t> batchSizesTemplate = {
1, 2
};
static const std::vector<std::size_t> batchSizesTemplate = {1, 2};

INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
::testing::Combine(
::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()),
::testing::ValuesIn(precisionsTemplate),
::testing::ValuesIn(batchSizesTemplate),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
LoadNetworkCacheTestBase::getTestCaseName);
} // namespace
INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase,
::testing::Combine(::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()), ::testing::ValuesIn(precisionsTemplate),
::testing::ValuesIn(batchSizesTemplate), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
LoadNetworkCacheTestBase::getTestCaseName);
} // namespace

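The INSTANTIATE_TEST_CASE_P calls throughout these test files feed tuples built by ::testing::Combine into value-parameterized gtest fixtures. A minimal sketch of the mechanism, with a made-up fixture that is not from this repository:

    #include <gtest/gtest.h>

    // One test body, many generated cases: each value becomes a separate test.
    class BatchSizeTest : public ::testing::TestWithParam<std::size_t> {};

    TEST_P(BatchSizeTest, AcceptsBatch) {
        const std::size_t batch = GetParam();  // the value for this generated case
        EXPECT_GE(batch, 1u);
    }

    // Generates BatchSizeTest.AcceptsBatch/0 (batch=1) and /1 (batch=2).
    INSTANTIATE_TEST_CASE_P(smoke_Batches, BatchSizeTest, ::testing::Values(1, 2));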
@ -2,19 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "multi-device/multi_device_config.hpp"

#include "behavior/config.hpp"

#include <template/template_config.hpp>

#include "multi-device/multi_device_config.hpp"

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
@ -27,32 +25,23 @@ const std::vector<std::map<std::string, std::string>> inconfigs = {
};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
IncorrectConfigTests::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inconfigs)),
IncorrectConfigAPITests::getTestCaseName);


INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CorrectConfigAPITests::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CorrectConfigAPITests::getTestCaseName);

} // namespace
} // namespace
@ -2,12 +2,12 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <utility>
#include <string>
#include <vector>

#include "behavior/core_integration.hpp"

#include <string>
#include <utility>
#include <vector>

using namespace BehaviorTestsDefinitions;

namespace {
@ -16,54 +16,31 @@ namespace {
// IE Class Common tests with <pluginName, deviceName params>
//

INSTANTIATE_TEST_CASE_P(
smoke_IEClassBasicTestP, IEClassBasicTestP,
::testing::Values(std::make_pair("templatePlugin", CommonTestUtils::DEVICE_TEMPLATE)));
INSTANTIATE_TEST_CASE_P(smoke_IEClassBasicTestP, IEClassBasicTestP, ::testing::Values(std::make_pair("templatePlugin", CommonTestUtils::DEVICE_TEMPLATE)));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassNetworkTestP, IEClassNetworkTestP,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassNetworkTestP, IEClassNetworkTestP, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

//
// IE Class GetMetric
//

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

//
// IE Class SetConfig
@ -111,9 +88,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
// IE Class GetConfig
//

INSTANTIATE_TEST_CASE_P(
smoke_IEClassGetConfigTest, IEClassGetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassGetConfigTest, IEClassGetConfigTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;

@ -125,7 +100,7 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
std::vector<std::string> configValues = p;

for (auto &&confKey : configValues) {
for (auto&& confKey : configValues) {
if (CONFIG_KEY(DEVICE_ID) == confKey) {
std::string defaultDeviceID = ie.GetConfig(deviceName, CONFIG_KEY(DEVICE_ID));
std::cout << CONFIG_KEY(DEVICE_ID) << " : " << defaultDeviceID << std::endl;
@ -143,48 +118,37 @@ TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
// Executable Network GetMetric
//

INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, IEClassExecutableNetworkGetMetricTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "MULTI:TEMPLATE", "HETERO:TEMPLATE"));
//
// Executable Network GetConfig / SetConfig
//

INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

// IE Class Query network

INSTANTIATE_TEST_CASE_P(
smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

// IE Class Load network

INSTANTIATE_TEST_CASE_P(
smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

//
// Hetero Executable Network GetMetric
@ -192,21 +156,17 @@ INSTANTIATE_TEST_CASE_P(

#ifdef ENABLE_MKL_DNN

INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

INSTANTIATE_TEST_CASE_P(
smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));
INSTANTIATE_TEST_CASE_P(smoke_IEClassHeteroExecutableNetworlGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE));

#endif // ENABLE_MKL_DNN
} // namespace
} // namespace
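For context, the query pattern these IE Class tests exercise, sketched under the assumption that the plugin registers under the device name "TEMPLATE" (as DEVICE_TEMPLATE suggests); InferenceEngine::Parameter converts to the metric's underlying type:

    InferenceEngine::Core ie;
    InferenceEngine::Parameter p = ie.GetMetric("TEMPLATE", METRIC_KEY(SUPPORTED_CONFIG_KEYS));
    std::vector<std::string> configKeys = p;  // implicit conversion from Parameter
    for (auto&& key : configKeys) {
        // Assumes the config values are string-convertible, as in the test above.
        std::cout << key << " : " << ie.GetConfig("TEMPLATE", key).as<std::string>() << std::endl;
    }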
@ -8,32 +8,20 @@ using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<std::vector<int >> orders = {
const std::vector<std::vector<int>> orders = {
// 0 - plugin
// 1 - executable_network
// 2 - infer_request
{0, 1, 2},
{0, 2, 1},
{1, 0, 2},
{1, 2, 0},
{2, 0, 1},
{2, 1, 0}
};
{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest, ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestImportNetwork,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);
::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"), ::testing::ValuesIn(orders)),
HoldersTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestOnImportedNetwork,
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
HoldersTestOnImportedNetwork::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTestOnImportedNetwork, ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE"),
HoldersTestOnImportedNetwork::getTestCaseName);

} // namespace

@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/exec_graph_info.hpp"

#include <vector>

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
ExecGraphTests::getTestCaseName);

} // namespace
@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/infer_request.hpp"

#include <vector>

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestTests::getTestCaseName);

} // namespace

@ -2,27 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/infer_request_callback.hpp"

#include <vector>

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
CallbackTests::getTestCaseName);
} // namespace

@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "behavior/infer_request_config.hpp"

#include <vector>

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferConfigTests::getTestCaseName);

} // namespace

@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "multi-device/multi_device_config.hpp"

#include "behavior/infer_request_input.hpp"

#include "multi-device/multi_device_config.hpp"

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestInputTests::getTestCaseName);

} // namespace

@ -2,28 +2,21 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "multi-device/multi_device_config.hpp"

#include "behavior/infer_request_output.hpp"

#include "multi-device/multi_device_config.hpp"

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
InferRequestOutputTests::getTestCaseName);

} // namespace

@ -8,31 +8,16 @@ using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

const std::vector<InferenceEngine::Layout> Layout = {
InferenceEngine::Layout::NCHW,
InferenceEngine::Layout::CHW,
InferenceEngine::Layout::NC,
InferenceEngine::Layout::C
};
const std::vector<InferenceEngine::Layout> Layout = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::CHW, InferenceEngine::Layout::NC,
InferenceEngine::Layout::C};

const std::vector<std::vector<size_t>> inputShapes = {
{ 1, 3, 16, 16 },
{ 3, 32, 16 },
{ 1, 3 },
{ 3 }
};
const std::vector<std::vector<size_t>> inputShapes = {{1, 3, 16, 16}, {3, 32, 16}, {1, 3}, {3}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, LayoutTest,
::testing::Combine(
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs),
::testing::ValuesIn(Layout),
::testing::ValuesIn(inputShapes)),
::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs), ::testing::ValuesIn(Layout), ::testing::ValuesIn(inputShapes)),
LayoutTest::getTestCaseName);

} // namespace
@ -2,39 +2,30 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "multi-device/multi_device_config.hpp"

#include "behavior/preprocessing.hpp"

#include "multi-device/multi_device_config.hpp"

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> inputPrecisions = {
InferenceEngine::Precision::U8,
InferenceEngine::Precision::FP32
};
const std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::U8, InferenceEngine::Precision::FP32};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels
::testing::Values(true), // Use SetInput
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels
::testing::Values(true), // Use SetInput
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
::testing::Values(false), // use GetBlob
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(inputPrecisions),
::testing::Values(4), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
::testing::Values(false), // use GetBlob
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName);

} // namespace

@ -2,79 +2,50 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "multi-device/multi_device_config.hpp"

#include "behavior/set_preprocess.hpp"

#include "multi-device/multi_device_config.hpp"

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

const std::vector<std::map<std::string, std::string>> multiConfigs = {
{{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES,
CommonTestUtils::DEVICE_TEMPLATE }}
};
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_TEMPLATE}}};

const std::vector<std::map<std::string, std::string>> heteroConfigs = {
{{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE }}
};
const std::vector<std::map<std::string, std::string>> heteroConfigs = {{{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
PreprocessTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiConfigs)),
PreprocessTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_Hetero_BehaviorTests, PreprocessTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(heteroConfigs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(heteroConfigs)),
PreprocessTest::getTestCaseName);

const std::vector<InferenceEngine::Precision> ioPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::U8
};
const std::vector<InferenceEngine::Precision> ioPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::U8};
const std::vector<InferenceEngine::Layout> netLayouts = {
InferenceEngine::Layout::NCHW,
// InferenceEngine::Layout::NHWC
};

const std::vector<InferenceEngine::Layout> ioLayouts = {
InferenceEngine::Layout::NCHW,
InferenceEngine::Layout::NHWC
};
const std::vector<InferenceEngine::Layout> ioLayouts = {InferenceEngine::Layout::NCHW, InferenceEngine::Layout::NHWC};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessConversionTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(netLayouts),
::testing::ValuesIn(ioLayouts),
::testing::ValuesIn(ioLayouts),
::testing::Bool(),
::testing::Bool(),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(ioPrecisions), ::testing::ValuesIn(ioPrecisions),
::testing::ValuesIn(netLayouts), ::testing::ValuesIn(ioLayouts), ::testing::ValuesIn(ioLayouts), ::testing::Bool(),
::testing::Bool(), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE), ::testing::ValuesIn(configs)),
PreprocessConversionTest::getTestCaseName);

} // namespace
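A hedged sketch of the input preprocessing these tests configure, using the InferenceEngine::PreProcessInfo API as I understand it; the channel count and the mean/scale values are illustrative, and `network` is a hypothetical CNNNetwork:

    // Per-channel mean/scale preprocessing on the first network input (sketch).
    InferenceEngine::InputInfo::Ptr inputInfo = network.getInputsInfo().begin()->second;
    auto& preProcess = inputInfo->getPreProcess();
    preProcess.init(3);  // three channels assumed
    for (size_t c = 0; c < 3; ++c) {
        preProcess[c]->meanValue = 127.5f;
        preProcess[c]->stdScale = 127.5f;
    }
    preProcess.setVariant(InferenceEngine::MEAN_VALUE);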
@ -8,34 +8,23 @@ using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTests,
::testing::Combine(
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTests::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestInput,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTestInput::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestOutput,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
BehaviorTestOutput::getTestCaseName);

} // namespace

@ -8,15 +8,11 @@ using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<std::map<std::string, std::string>> configs = {
{}
};
const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, VersionTest,
::testing::Combine(
::testing::Values(InferenceEngine::Precision::FP32),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
::testing::Combine(::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(configs)),
VersionTest::getTestCaseName);

} // namespace

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "hetero/query_network.hpp"

#include <vector>

#include "hetero/query_network.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/subgraph_builders.hpp"

@ -14,8 +15,7 @@ using namespace HeteroTests;
auto ConvBias = ngraph::builder::subgraph::makeConvBias();

INSTANTIATE_TEST_CASE_P(smoke_FullySupportedTopologies, QueryNetworkTest,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
::testing::Values(ConvBias)),
::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_TEMPLATE, "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
::testing::Values(ConvBias)),
QueryNetworkTest::getTestCaseName);
} // namespace

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "hetero/synthetic.hpp"

#include <vector>

#include "hetero/synthetic.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/subgraph_builders.hpp"

@ -12,14 +13,12 @@ namespace {
using namespace HeteroTests;

INSTANTIATE_TEST_CASE_P(smoke_SingleMajorNode, HeteroSyntheticTest,
::testing::Combine(
::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
HeteroSyntheticTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(nightly_RandomMajorNodes, HeteroSyntheticTest,
::testing::Combine(
::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
::testing::Combine(::testing::Values(std::vector<PluginParameter> {{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
HeteroSyntheticTest::getTestCaseName);
} // namespace

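The device strings used above ("TEMPLATE", "HETERO:TEMPLATE", "MULTI:TEMPLATE") compose plugins at load time. A sketch of the corresponding Core::LoadNetwork usage, where `network` is a hypothetical CNNNetwork:

    InferenceEngine::Core ie;
    auto execTemplate = ie.LoadNetwork(network, "TEMPLATE");         // plugin directly
    auto execHetero   = ie.LoadNetwork(network, "HETERO:TEMPLATE");  // hetero fallback list
    auto execMulti    = ie.LoadNetwork(network, "MULTI:TEMPLATE");   // multi-device scheduling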
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/convolution.hpp"

#include <vector>

#include "single_layer_tests/convolution.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
@ -19,122 +20,72 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {

/* ============= 2D Convolution ============= */

const std::vector<std::vector<size_t >> kernels = {{3, 3},
{3, 5}};
const std::vector<std::vector<size_t >> strides = {{1, 1},
{1, 3}};
const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0},
{0, 3}};
const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0},
{0, 3}};
const std::vector<std::vector<size_t >> dilations = {{1, 1},
{3, 1}};
const std::vector<std::vector<size_t>> kernels = {{3, 3}, {3, 5}};
const std::vector<std::vector<size_t>> strides = {{1, 1}, {1, 3}};
const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0}, {0, 3}};
const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0}, {0, 3}};
const std::vector<std::vector<size_t>> dilations = {{1, 1}, {3, 1}};
const std::vector<size_t> numOutChannels = {1, 5};
const std::vector<ngraph::op::PadType> padTypes = {
ngraph::op::PadType::EXPLICIT,
ngraph::op::PadType::VALID
};
const std::vector<ngraph::op::PadType> padTypes = {ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID};

const auto conv2DParams_ExplicitPadding = ::testing::Combine(
::testing::ValuesIn(kernels),
::testing::ValuesIn(strides),
::testing::ValuesIn(padBegins),
::testing::ValuesIn(padEnds),
::testing::ValuesIn(dilations),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv2DParams_ExplicitPadding =
::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds),
::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), ::testing::Values(ngraph::op::PadType::EXPLICIT));
// ! [test_convolution:declare_parameters]

const auto conv2DParams_AutoPadValid = ::testing::Combine(
::testing::ValuesIn(kernels),
::testing::ValuesIn(strides),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::ValuesIn(dilations),
::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::VALID)
);
const auto conv2DParams_AutoPadValid =
::testing::Combine(::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0})), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels),
::testing::Values(ngraph::op::PadType::VALID));

// ! [test_convolution:instantiate]
INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(conv2DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);
// ! [test_convolution:instantiate]

INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
::testing::Combine(
conv2DParams_AutoPadValid,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(conv2DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
ConvolutionLayerTest::getTestCaseName);

/* ============= 3D Convolution ============= */

const std::vector<std::vector<size_t >> kernels3d = {{3, 3, 3},
{3, 5, 3}};
const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0},
{0, 2, 0}};
const std::vector<std::vector<size_t>> kernels3d = {{3, 3, 3}, {3, 5, 3}};
const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0}, {0, 2, 0}};

const std::vector<std::vector<size_t >> strides3d = {{1, 1, 1},
{1, 2, 1}};
const std::vector<std::vector<size_t >> dilations3d = {{1, 1, 1},
{1, 2, 1}};
const std::vector<std::vector<size_t>> strides3d = {{1, 1, 1}, {1, 2, 1}};
const std::vector<std::vector<size_t>> dilations3d = {{1, 1, 1}, {1, 2, 1}};

const auto conv3DParams_ExplicitPadding = ::testing::Combine(
::testing::ValuesIn(kernels3d),
::testing::ValuesIn(strides3d),
::testing::ValuesIn(paddings3d),
::testing::ValuesIn(paddings3d),
::testing::ValuesIn(dilations3d),
::testing::Values(5),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
const auto conv3DParams_AutoPadValid = ::testing::Combine(
::testing::ValuesIn(kernels3d),
::testing::ValuesIn(strides3d),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::ValuesIn(dilations3d),
::testing::Values(5),
::testing::Values(ngraph::op::PadType::VALID)
);
const auto conv3DParams_ExplicitPadding =
::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(paddings3d), ::testing::ValuesIn(paddings3d),
::testing::ValuesIn(dilations3d), ::testing::Values(5), ::testing::Values(ngraph::op::PadType::EXPLICIT));
const auto conv3DParams_AutoPadValid =
::testing::Combine(::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})), ::testing::ValuesIn(dilations3d), ::testing::Values(5),
::testing::Values(ngraph::op::PadType::VALID));

INSTANTIATE_TEST_CASE_P(smoke_Convolution3D_ExplicitPadding, ConvolutionLayerTest,
::testing::Combine(
conv3DParams_ExplicitPadding,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
::testing::Combine(conv3DParams_ExplicitPadding, ::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
||||
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
|
||||
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
|
||||
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
|
||||
ConvolutionLayerTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(nightly_Convolution3D_AutoPadValid, ConvolutionLayerTest,
|
||||
::testing::Combine(
|
||||
conv3DParams_AutoPadValid,
|
||||
::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
||||
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
||||
::testing::Values(InferenceEngine::Layout::ANY),
|
||||
::testing::Values(InferenceEngine::Layout::ANY),
|
||||
::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
|
||||
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
|
||||
::testing::Combine(conv3DParams_AutoPadValid, ::testing::ValuesIn(netPrecisions),
|
||||
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
||||
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
|
||||
::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({1, 3, 10, 10, 10})),
|
||||
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
|
||||
ConvolutionLayerTest::getTestCaseName);
|
||||
|
||||
} // namespace
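
Editor's note: for readers new to googletest's value-parameterized machinery used throughout these files, ::testing::Combine instantiates the suite once per element of the cartesian product of its generators, which is why the conv2DParams_* bundles above multiply kernels, strides, pads, dilations, channel counts and pad type into many cases. A minimal, self-contained sketch (hypothetical fixture and check, not part of this repository):

#include <gtest/gtest.h>
#include <tuple>

// Hypothetical fixture: each instance receives one (int, int) combination.
class CartesianTest : public ::testing::TestWithParam<std::tuple<int, int>> {};

TEST_P(CartesianTest, SumIsNonNegative) {
    const int a = std::get<0>(GetParam());
    const int b = std::get<1>(GetParam());
    EXPECT_GE(a + b, 0);  // runs once per combination
}

// Values(0, 1) x Values(0, 1, 2) yields 2 * 3 = 6 test instances.
INSTANTIATE_TEST_CASE_P(Demo, CartesianTest,
                        ::testing::Combine(::testing::Values(0, 1),
                                           ::testing::Values(0, 1, 2)));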

@ -2,43 +2,34 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/reshape.hpp"

#include <vector>

#include "single_layer_tests/reshape.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::FP32,
};

INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheckDynBatch, ReshapeLayerTest,
        ::testing::Combine(
                ::testing::Values(true),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                ::testing::Values(std::map<std::string, std::string>({}))),
        ReshapeLayerTest::getTestCaseName);
                         ::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
                                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
                                            ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                                            ::testing::Values(std::vector<size_t>({30, 30, 30, 30})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                            ::testing::Values(std::map<std::string, std::string>({}))),
                         ReshapeLayerTest::getTestCaseName);

INSTANTIATE_TEST_CASE_P(smoke_ReshapeCheck, ReshapeLayerTest,
        ::testing::Combine(
                ::testing::Values(true),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
                ::testing::Values(std::vector<size_t>({10, 0, 100})),
                ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                ::testing::Values(std::map<std::string, std::string>({}))),
        ReshapeLayerTest::getTestCaseName);
                         ::testing::Combine(::testing::Values(true), ::testing::ValuesIn(netPrecisions),
                                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
                                            ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
                                            ::testing::Values(std::vector<size_t>({10, 0, 100})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
                                            ::testing::Values(std::map<std::string, std::string>({}))),
                         ReshapeLayerTest::getTestCaseName);
} // namespace
@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/softmax.hpp"

#include <vector>

#include "single_layer_tests/softmax.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
@ -25,28 +26,14 @@ const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
        InferenceEngine::SizeVector {10, 10},
};

const std::vector<size_t> axis2D = {
        0, 1
};
const std::vector<size_t> axis2D = {0, 1};

const auto params2D = testing::Combine(
        testing::ValuesIn(netPrecisions),
        testing::Values(InferenceEngine::Precision::UNSPECIFIED),
        testing::Values(InferenceEngine::Precision::UNSPECIFIED),
        testing::ValuesIn(inputLayouts2D),
        testing::Values(InferenceEngine::Layout::ANY),
        testing::ValuesIn(inputShapes2D),
        testing::ValuesIn(axis2D),
        testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
        testing::Values(std::map<std::string, std::string>())
);
const auto params2D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                       testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::ValuesIn(inputLayouts2D),
                                       testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes2D), testing::ValuesIn(axis2D),
                                       testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));

INSTANTIATE_TEST_CASE_P(
        smoke_SoftMax2D,
        SoftMaxLayerTest,
        params2D,
        SoftMaxLayerTest::getTestCaseName
);
INSTANTIATE_TEST_CASE_P(smoke_SoftMax2D, SoftMaxLayerTest, params2D, SoftMaxLayerTest::getTestCaseName);

const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
        InferenceEngine::SizeVector {1, 100, 1, 1},
@ -56,23 +43,11 @@ const std::vector<InferenceEngine::SizeVector> inputShapes4D = {

const std::vector<size_t> axis4D = {0, 1, 2, 3};

const auto params4D = testing::Combine(
        testing::ValuesIn(netPrecisions),
        testing::Values(InferenceEngine::Precision::UNSPECIFIED),
        testing::Values(InferenceEngine::Precision::UNSPECIFIED),
        testing::Values(InferenceEngine::Layout::NCHW),
        testing::Values(InferenceEngine::Layout::ANY),
        testing::ValuesIn(inputShapes4D),
        testing::ValuesIn(axis4D),
        testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
        testing::Values(std::map<std::string, std::string>())
);
const auto params4D = testing::Combine(testing::ValuesIn(netPrecisions), testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                       testing::Values(InferenceEngine::Precision::UNSPECIFIED), testing::Values(InferenceEngine::Layout::NCHW),
                                       testing::Values(InferenceEngine::Layout::ANY), testing::ValuesIn(inputShapes4D), testing::ValuesIn(axis4D),
                                       testing::Values(CommonTestUtils::DEVICE_TEMPLATE), testing::Values(std::map<std::string, std::string>()));

INSTANTIATE_TEST_CASE_P(
        smoke_SoftMax4D,
        SoftMaxLayerTest,
        params4D,
        SoftMaxLayerTest::getTestCaseName
);
INSTANTIATE_TEST_CASE_P(smoke_SoftMax4D, SoftMaxLayerTest, params4D, SoftMaxLayerTest::getTestCaseName);

} // namespace

@ -2,9 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/split.hpp"

#include <vector>

#include "single_layer_tests/split.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
@ -12,17 +13,11 @@ using namespace LayerTestsDefinitions;
namespace {

INSTANTIATE_TEST_CASE_P(smoke_NumSplitsCheck, SplitLayerTest,
        ::testing::Combine(
                ::testing::Values(1, 2, 3, 5, 6, 10, 30),
                ::testing::Values(0, 1, 2, 3),
                ::testing::Values(InferenceEngine::Precision::FP32),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(InferenceEngine::Layout::ANY),
                ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                ::testing::Values(std::vector<size_t>({})),
                ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
                         ::testing::Combine(::testing::Values(1, 2, 3, 5, 6, 10, 30), ::testing::Values(0, 1, 2, 3),
                                            ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY),
                                            ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                                            ::testing::Values(std::vector<size_t>({})), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)),
                         SplitLayerTest::getTestCaseName);

} // namespace

@ -2,20 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <string>

#include "functional_test_utils/skip_tests_config.hpp"

#include <string>
#include <vector>

std::vector<std::string> disabledTestPatterns() {
    return {
        ".*ExclusiveAsyncRequests.*",
        ".*reusableCPUStreamsExecutor.*",
        R"(.*SplitLayerTest.*numSplits\=30.*)",
        // CVS-51758
        ".*PreprocessConversionTest.*oPRC=U8.*",
        ".*PreprocessConversionTest.*oLT=NHWC.*",
        ".*PreprocessingPrecisionConvertTestsViaSetInput.*SetInput.*",
        ".*PreprocessingPrecisionConvertTestsViaGetBlob.*GetBlob.*",
    };
}
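
Editor's note: the strings returned above are ECMAScript regular expressions matched against fully qualified test names. A rough sketch of how a harness could consume them (hypothetical shouldSkip helper; the real matching lives in the shared functional test utilities):

#include <regex>
#include <string>
#include <vector>

// Hypothetical helper: true when a test name matches any disabled pattern.
static bool shouldSkip(const std::string& testName, const std::vector<std::string>& patterns) {
    for (const auto& pattern : patterns) {
        if (std::regex_match(testName, std::regex(pattern)))
            return true;  // e.g. ".*ExclusiveAsyncRequests.*" filters that suite out
    }
    return false;
}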
@ -18,11 +18,9 @@

// #include "common_test_utils/ngraph_test_utils.hpp"

// using namespace testing;
// using namespace ngraph;

// TEST(TransformationTests, Preprocessing_AddStdScale) {
//     std::shared_ptr<Function> f(nullptr), f_ref(nullptr);

@ -4,12 +4,11 @@

#include <gtest/gtest.h>

#include <string>
#include <memory>
#include <queue>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <queue>
#include <string>
#include <transformations/init_node_info.hpp>
#include <transformations/utils/utils.hpp>

@ -24,11 +23,11 @@ TEST(TransformationTests, DISABLED_TemplateTest) {
    // f_ref - ngraph::Function that is expected after applying transformation
    {
        // Example function
        auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
        auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5});
        auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape {3, 1, 2});
        auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {1.5});
        auto divide = std::make_shared<ngraph::opset3::Divide>(data, divide_constant);

        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{divide}, ngraph::ParameterVector{data});
        f = std::make_shared<ngraph::Function>(ngraph::NodeVector {divide}, ngraph::ParameterVector {data});

        // This transformation initializes runtime info attributes
        ngraph::pass::InitNodeInfo().run_on_function(f);
@ -42,13 +41,12 @@ TEST(TransformationTests, DISABLED_TemplateTest) {

    {
        // Example reference function
        auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
        auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5});
        auto pow = std::make_shared<ngraph::opset3::Power>(divide_constant,
                                                           ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {-1}));
        auto data = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, ngraph::Shape {3, 1, 2});
        auto divide_constant = ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {1.5});
        auto pow = std::make_shared<ngraph::opset3::Power>(divide_constant, ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape {1}, {-1}));
        auto mul = std::make_shared<ngraph::opset3::Multiply>(data, pow);

        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{mul}, ngraph::ParameterVector{data});
        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector {mul}, ngraph::ParameterVector {data});
    }

    // Compare that processed function and expected function are the same

@ -148,7 +148,7 @@ configure_file(
configure_file(
    "${IE_MAIN_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in"
    "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake"
    COPYONLY)
    @ONLY)

#
# Coverage

@ -261,8 +261,8 @@ if (ENABLE_GNA)
        set(GNA_HASH "cc954e67525006bf8bd353a6682e38bf208f6d74e973e0fc292850e721f17452")
    endif()
    if(GNA_LIBRARY_VERSION STREQUAL "GNA2")
        set(GNA_VERSION "02.00.00.1191.0")
        set(GNA_HASH "a61b4a9133549b0a9f0b46d069f72906ced28bcbbe7d5c361e687645f53a1c8b")
        set(GNA_VERSION "02.00.00.1226")
        set(GNA_HASH "d5450af15c993e264c25ac4591a7dab44722e10d15fca4f222a1b84429d4e5b6")
    endif()

    set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include)

@ -46,8 +46,8 @@ function(set_ie_threading_interface_for TARGET_NAME)
        # they don't have TBB in public headers => PRIVATE
        set(LINK_TYPE "PRIVATE")
    elseif(target_type STREQUAL "SHARED_LIBRARY")
        # TODO: inference_engine only
        # Why TBB propagates its headers to inference_engine?
        # Affected libraries: inference_engine only
        # TODO: why TBB propagates its headers to inference_engine?
        set(LINK_TYPE "PRIVATE")
    else()
        ext_message(WARNING "Unknown target type")

@ -2,17 +2,19 @@
# SPDX-License-Identifier: Apache-2.0
#

# TODO: hardcode will be fixed separately
set(PACKAGE_VERSION_MAJOR 2)
set(PACKAGE_VERSION_MINOR 1)
set(PACKAGE_VERSION_PATCH 0)
set(PACKAGE_VERSION_COUNT 3)

set(PACKAGE_VERSION_MAJOR @IE_VERSION_MAJOR@)
set(PACKAGE_VERSION_MINOR @IE_VERSION_MINOR@)
set(PACKAGE_VERSION_PATCH @IE_VERSION_PATCH@)
set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}")

set(PACKAGE_VERSION_EXACT False)
set(PACKAGE_VERSION_COMPATIBLE False)

# Compatibility with old versioning for 2.x
if(PACKAGE_FIND_VERSION_MAJOR VERSION_EQUAL 2)
    set(PACKAGE_VERSION_COMPATIBLE True)
endif()

if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
    set(PACKAGE_VERSION_EXACT True)
    set(PACKAGE_VERSION_COMPATIBLE True)

@ -16,6 +16,12 @@
# IE::inference_engine - The Inference Engine library
# IE::inference_engine_c_api - The Inference Engine C API library
#
# Inference Engine version variables:
#
# InferenceEngine_VERSION_MAJOR - major version component
# InferenceEngine_VERSION_MINOR - minor version component
# InferenceEngine_VERSION_PATCH - patch version component
#

@PACKAGE_INIT@

@ -6,14 +6,14 @@ include_guard(GLOBAL)

set(VPU_SUPPORTED_FIRMWARES usb-ma2x8x pcie-ma2x8x)
set(VPU_SUPPORTED_FIRMWARES_HASH
    "11a6db07d3a17c9c0fc4247fce47c942e0dcd59f8d70665a96bae0d7b7121fe9"
    "43f3dc0f0a8114ca34226167970aafdc869600929d6e3761c1eaa6eec71f2237")
    "dc93ba50e2096759aa3aeae67a85be1d49d2ba0ca84f319ca5ff911b13788f2c"
    "c50db9859c4851fd4a3a5822ff05fc0af3d16a972625f965527a450aa4bb4624")

#
# Default packages
#

set(FIRMWARE_PACKAGE_VERSION 1658)
set(FIRMWARE_PACKAGE_VERSION 1676)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.2")

#

@ -29,13 +29,10 @@ ie_add_vs_version_file(NAME ${TARGET_NAME}
export(TARGETS ${TARGET_NAME} NAMESPACE IE::
       APPEND FILE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake")

# WA for CI issue
export(TARGETS ${TARGET_NAME} NAMESPACE IE::
       APPEND FILE "${CMAKE_BINARY_DIR}/share/InferenceEngineTargets.cmake")

# install

ie_cpack_add_component(core_c DEPENDS core)
ie_cpack_add_component(core_c_dev DEPENDS core_c)

install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets
        RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c
@ -44,4 +41,4 @@ install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets

install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/
        DESTINATION ${IE_CPACK_IE_DIR}/include
        COMPONENT core_c)
        COMPONENT core_c_dev)

@ -134,7 +134,8 @@ std::map<IE::ColorFormat, colorformat_e> colorformat_map = {{IE::ColorFormat::RA
    CATCH_IE_EXCEPTION(NOT_ALLOCATED, NotAllocated) \
    CATCH_IE_EXCEPTION(INFER_NOT_STARTED, InferNotStarted) \
    CATCH_IE_EXCEPTION(NETWORK_NOT_READ, NetworkNotRead) \
    CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled)
    CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled) \
    catch (...) {return IEStatusCode::UNEXPECTED;}
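
Editor's note: this hunk folds the trailing catch-all into the CATCH_IE_EXCEPTIONS macro itself, which is why every function below shrinks from `} CATCH_IE_EXCEPTIONS catch (...) { return IEStatusCode::UNEXPECTED; }` to a bare `} CATCH_IE_EXCEPTIONS`. A trimmed sketch of what the reworked macros presumably expand to (reconstructed from the fragments visible in this diff, not the full list in ie_c_api.cpp):

#define CATCH_IE_EXCEPTION(STATUS, TYPE)         \
    catch (const InferenceEngine::TYPE&) {       \
        return IEStatusCode::STATUS;             \
    }

#define CATCH_IE_EXCEPTIONS                                \
    CATCH_IE_EXCEPTION(NOT_ALLOCATED, NotAllocated)        \
    CATCH_IE_EXCEPTION(INFER_NOT_STARTED, InferNotStarted) \
    CATCH_IE_EXCEPTION(NETWORK_NOT_READ, NetworkNotRead)   \
    CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled)    \
    catch (...) { return IEStatusCode::UNEXPECTED; }

// Typical call site after this change:
IEStatusCode ie_example(void) {
    IEStatusCode status = IEStatusCode::OK;
    try {
        // ... work that may throw IE exceptions ...
    } CATCH_IE_EXCEPTIONS
    return status;
}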

/**
 * @brief convert the config type data to map type data.
@ -237,9 +238,7 @@ IEStatusCode ie_core_create(const char *xml_config_file, ie_core_t **core) {
        std::unique_ptr<ie_core_t> tmp(new ie_core_t);
        tmp->object = IE::Core(xml_config_file);
        *core = tmp.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -275,15 +274,15 @@ IEStatusCode ie_core_get_versions(const ie_core_t *core, const char *device_name
            char *_deviceName = deviceName.release();
            memcpy(_deviceName, iter->first.c_str(), iter->first.length() + 1);
            vers_ptrs[i].device_name = _deviceName;
            IE_SUPPRESS_DEPRECATED_START
            vers_ptrs[i].major = iter->second.apiVersion.major;
            vers_ptrs[i].minor = iter->second.apiVersion.minor;
            IE_SUPPRESS_DEPRECATED_END
            vers_ptrs[i].build_number = iter->second.buildNumber;
            vers_ptrs[i].description = iter->second.description;
        }
        versions->versions = vers_ptrs.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -314,9 +313,7 @@ IEStatusCode ie_core_read_network(ie_core_t *core, const char *xml, const char *
        }
        network_result->object = core->object.ReadNetwork(xml, bin);
        *network = network_result.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -334,9 +331,7 @@ IEStatusCode ie_core_read_network_from_memory(ie_core_t *core, const uint8_t *xm
        network_result->object = core->object.ReadNetwork(std::string(reinterpret_cast<const char *>(xml_content),
                    reinterpret_cast<const char *>(xml_content + xml_content_size)), weight_blob->object);
        *network = network_result.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -358,9 +353,7 @@ IEStatusCode ie_core_load_network(ie_core_t *core, const ie_network_t *network,
        // create plugin in the registry and then create ExecutableNetwork.
        exe_net->object = core->object.LoadNetwork(network->object, device_name, conf_map);
        *exe_network = exe_net.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -381,9 +374,7 @@ IEStatusCode ie_core_load_network_from_file(ie_core_t *core, const char *xml, co

        exe_net->object = core->object.LoadNetwork(xml, device_name, conf_map);
        *exe_network = exe_net.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -404,9 +395,7 @@ IEStatusCode ie_core_set_config(ie_core_t *core, const ie_config_t *ie_core_conf

    try {
        core->object.SetConfig(conf_map, deviceName);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -421,9 +410,7 @@ IEStatusCode ie_core_register_plugin(ie_core_t *core, const char *plugin_name, c

    try {
        core->object.RegisterPlugin(plugin_name, device_name);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -438,9 +425,7 @@ IEStatusCode ie_core_register_plugins(ie_core_t *core, const char *xml_config_fi

    try {
        core->object.RegisterPlugins(xml_config_file);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -455,9 +440,7 @@ IEStatusCode ie_core_unregister_plugin(ie_core_t *core, const char *device_name)

    try {
        core->object.UnregisterPlugin(device_name);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -474,9 +457,7 @@ IEStatusCode ie_core_add_extension(ie_core_t *core, const char *extension_path,
        auto extension_ptr = std::make_shared<InferenceEngine::Extension>(std::string{extension_path});
        auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
        core->object.AddExtension(extension, device_name);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -492,9 +473,7 @@ IEStatusCode ie_core_get_metric(const ie_core_t *core, const char *device_name,
    try {
        IE::Parameter param = core->object.GetMetric(device_name, metric_name);
        parameter2IEparam(param, param_result);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -512,9 +491,7 @@ IEStatusCode ie_core_get_config(const ie_core_t *core, const char *device_name,

        // convert the parameter to ie_param_t
        parameter2IEparam(param, param_result);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -536,9 +513,7 @@ IEStatusCode ie_core_get_available_devices(const ie_core_t *core, ie_available_d
            memcpy(dev_ptrs[i], _devices[i].c_str(), _devices[i].length() + 1);
        }
        avai_devices->devices = dev_ptrs.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return IEStatusCode::OK;
}
@ -575,9 +550,7 @@ IEStatusCode ie_exec_network_create_infer_request(ie_executable_network_t *ie_ex
        std::unique_ptr<ie_infer_request_t> req(new ie_infer_request_t);
        req->object = ie_exec_network->object.CreateInferRequest();
        *request = req.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -593,9 +566,7 @@ IEStatusCode ie_exec_network_get_metric(const ie_executable_network_t *ie_exec_n
    try {
        InferenceEngine::Parameter parameter = ie_exec_network->object.GetMetric(metric_name);
        parameter2IEparam(parameter, param_result);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -611,9 +582,7 @@ IEStatusCode ie_exec_network_set_config(ie_executable_network_t *ie_exec_network
    try {
        const std::map<std::string, IE::Parameter> conf_map = config2ParamMap(param_config);
        ie_exec_network->object.SetConfig(conf_map);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -629,9 +598,7 @@ IEStatusCode ie_exec_network_get_config(const ie_executable_network_t *ie_exec_n
    try {
        InferenceEngine::Parameter parameter = ie_exec_network->object.GetConfig(metric_config);
        parameter2IEparam(parameter, param_result);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -653,9 +620,7 @@ IEStatusCode ie_network_get_name(const ie_network_t *network, char **name) {
        std::unique_ptr<char[]> netName(new char[_name.length() + 1]);
        *name = netName.release();
        memcpy(*name, _name.c_str(), _name.length() + 1);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return IEStatusCode::OK;
}
@ -670,9 +635,7 @@ IEStatusCode ie_network_get_inputs_number(const ie_network_t *network, size_t *s
    try {
        IE::InputsDataMap inputs = network->object.getInputsInfo();
        *size_result = inputs.size();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -699,9 +662,7 @@ IEStatusCode ie_network_get_input_name(const ie_network_t *network, size_t numbe
            *name = inputName.release();
            memcpy(*name, iter->first.c_str(), iter->first.length() + 1);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -722,9 +683,7 @@ IEStatusCode ie_network_get_input_precision(const ie_network_t *network, const c
            IE::Precision p = inputs[input_name]->getPrecision();
            *prec_result = precision_map[p];
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -751,9 +710,7 @@ IEStatusCode ie_network_set_input_precision(ie_network_t *network, const char *i
            }
            inputs[input_name]->setPrecision(precision);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -774,9 +731,7 @@ IEStatusCode ie_network_get_input_layout(const ie_network_t *network, const char
            IE::Layout l = inputs[input_name]->getLayout();
            *layout_result = layout_map[l];
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -803,9 +758,7 @@ IEStatusCode ie_network_set_input_layout(ie_network_t *network, const char *inpu
            }
            inputs[input_name]->setLayout(layout);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -829,9 +782,7 @@ IEStatusCode ie_network_get_input_dims(const ie_network_t *network, const char *
                dims_result->dims[i] = dims[i];
            }
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -852,9 +803,7 @@ IEStatusCode ie_network_get_input_resize_algorithm(const ie_network_t *network,
            IE::ResizeAlgorithm resize = inputs[input_name]->getPreProcess().getResizeAlgorithm();
            *resize_alg_result = resize_alg_map[resize];
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -881,9 +830,7 @@ IEStatusCode ie_network_set_input_resize_algorithm(ie_network_t *network, const
            }
            inputs[input_name]->getPreProcess().setResizeAlgorithm(resize);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -904,9 +851,7 @@ IEStatusCode ie_network_get_color_format(const ie_network_t *network, const char
            IE::ColorFormat color = inputs[input_name]->getPreProcess().getColorFormat();
            *colformat_result = colorformat_map[color];
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -933,9 +878,7 @@ IEStatusCode ie_network_set_color_format(ie_network_t *network, const char *inpu
            }
            inputs[input_name]->getPreProcess().setColorFormat(color);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -971,9 +914,7 @@ IEStatusCode ie_network_get_input_shapes(ie_network *network, input_shapes_t *sh
        }
        shapes->shapes = shape_ptrs.release();
        status = IEStatusCode::OK;
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -998,9 +939,7 @@ IEStatusCode ie_network_reshape(ie_network_t *network, const input_shapes_t shap
        }

        network->object.reshape(net_shapes);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1016,9 +955,7 @@ IEStatusCode ie_network_get_outputs_number(const ie_network_t *network, size_t *
    try {
        IE::OutputsDataMap outputs = network->object.getOutputsInfo();
        *size_result = outputs.size();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1045,9 +982,7 @@ IEStatusCode ie_network_get_output_name(const ie_network_t *network, const size_
            *name = outputName.release();
            memcpy(*name, iter->first.c_str(), iter->first.length() + 1);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1068,9 +1003,7 @@ IEStatusCode ie_network_get_output_precision(const ie_network_t *network, const
            IE::Precision p = outputs[output_name]->getPrecision();
            *prec_result = precision_map[p];
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1097,9 +1030,7 @@ IEStatusCode ie_network_set_output_precision(ie_network_t *network, const char *
            }
            outputs[output_name]->setPrecision(precision);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1120,9 +1051,7 @@ IEStatusCode ie_network_get_output_layout(const ie_network_t *network, const cha
            IE::Layout l = outputs[output_name]->getLayout();
            *layout_result = layout_map[l];
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1149,9 +1078,7 @@ IEStatusCode ie_network_set_output_layout(ie_network_t *network, const char *out
            }
            outputs[output_name]->setLayout(layout);
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1175,9 +1102,7 @@ IEStatusCode ie_network_get_output_dims(const ie_network_t *network, const char
                dims_result->dims[i] = dims[i];
            }
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1220,9 +1145,7 @@ IEStatusCode ie_infer_request_get_blob(ie_infer_request_t *infer_request, const
        std::unique_ptr<ie_blob_t> blob_result(new ie_blob_t);
        blob_result->object = blob_ptr;
        *blob = blob_result.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1237,9 +1160,7 @@ IEStatusCode ie_infer_request_set_blob(ie_infer_request_t *infer_request, const

    try {
        infer_request->object.SetBlob(name, blob->object);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1254,9 +1175,7 @@ IEStatusCode ie_infer_request_infer(ie_infer_request_t *infer_request) {

    try {
        infer_request->object.Infer();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1271,9 +1190,7 @@ IEStatusCode ie_infer_request_infer_async(ie_infer_request_t *infer_request) {

    try {
        infer_request->object.StartAsync();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1291,9 +1208,7 @@ IEStatusCode ie_infer_set_completion_callback(ie_infer_request_t *infer_request,
            callback->completeCallBackFunc(callback->args);
        };
        infer_request->object.SetCompletionCallback(fun);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1309,9 +1224,7 @@ IEStatusCode ie_infer_request_wait(ie_infer_request_t *infer_request, const int6
    try {
        IE::StatusCode status_code = infer_request->object.Wait(timeout);
        status = status_map[status_code];
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1326,9 +1239,7 @@ IEStatusCode ie_infer_request_set_batch(ie_infer_request_t *infer_request, const

    try {
        infer_request->object.SetBatch(size);
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1390,9 +1301,7 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl

        _blob->object->allocate();
        *blob = _blob.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1462,9 +1371,7 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe
            _blob->object = IE::make_shared_blob(tensor, p, size);
        }
        *blob = _blob.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1480,9 +1387,7 @@ IEStatusCode ie_blob_make_memory_with_roi(const ie_blob_t *inputBlob, const roi_
        IE::ROI roi_d = {roi->id, roi->posX, roi->posY, roi->sizeX, roi->sizeY};
        _blob->object = IE::make_shared_blob(inputBlob->object, roi_d);
        *blob = _blob.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1496,9 +1401,7 @@ IEStatusCode ie_blob_make_memory_nv12(const ie_blob_t *y, const ie_blob_t *uv, i
        std::unique_ptr<ie_blob_t> _blob(new ie_blob_t);
        _blob->object = IE::make_shared_blob<IE::NV12Blob>(y->object, uv->object);
        *nv12Blob = _blob.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return IEStatusCode::OK;
}
@ -1512,9 +1415,7 @@ IEStatusCode ie_blob_make_memory_i420(const ie_blob_t *y, const ie_blob_t *u, co
        std::unique_ptr<ie_blob_t> _blob(new ie_blob_t);
        _blob->object = IE::make_shared_blob<IE::I420Blob>(y->object, u->object, v->object);
        *i420Blob = _blob.release();
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return IEStatusCode::OK;
}
@ -1587,9 +1488,7 @@ IEStatusCode ie_blob_get_dims(const ie_blob_t *blob, dimensions_t *dims_result)
        for (size_t i = 0; i< dims_result->ranks; ++i) {
            dims_result->dims[i] = size_vector[i];
        }
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1605,9 +1504,7 @@ IEStatusCode ie_blob_get_layout(const ie_blob_t *blob, layout_e *layout_result)
    try {
        IE::Layout l = blob->object->getTensorDesc().getLayout();
        *layout_result = layout_map[l];
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}
@ -1623,9 +1520,7 @@ IEStatusCode ie_blob_get_precision(const ie_blob_t *blob, precision_e *prec_resu
    try {
        IE::Precision p = blob->object->getTensorDesc().getPrecision();
        *prec_result = precision_map[p];
    } CATCH_IE_EXCEPTIONS catch (...) {
        return IEStatusCode::UNEXPECTED;
    }
    } CATCH_IE_EXCEPTIONS

    return status;
}

@ -6,6 +6,7 @@

#include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp"
#include "ie_plugin_config.hpp"

const std::string EXPORTED_NETWORK_NAME = "undefined";
std::map<std::string, InferenceEngine::Precision> precision_map = {
@ -70,6 +71,11 @@ PyObject* parse_parameter(const InferenceEngine::Parameter& param) {
        auto val = param.as<unsigned int>();
        return PyLong_FromLong((unsigned long)val);
    }
    // Check for uint64_t
    else if (param.is<uint64_t>()) {
        auto val = param.as<uint64_t>();
        return PyLong_FromLong((unsigned long)val);
    }
    // Check for float
    else if (param.is<float>()) {
        auto val = param.as<float>();
@ -151,6 +157,21 @@ PyObject* parse_parameter(const InferenceEngine::Parameter& param) {
            PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second));
        }
        return dict;
    } else if (param.is<std::map<InferenceEngine::Precision, float>>()) {
        auto val = param.as<std::map<InferenceEngine::Precision, float>>();
        PyObject* dict = PyDict_New();
        for (const auto& it : val) {
            std::stringstream s;
            s << it.first;
            PyDict_SetItemString(dict, s.str().c_str(), PyFloat_FromDouble((double)it.second));
        }
        return dict;
    } else if (param.is<InferenceEngine::Metrics::DeviceType>()) {
        auto val = param.as<InferenceEngine::Metrics::DeviceType>();
        using namespace InferenceEngine;
        std::stringstream s;
        s << val;
        return PyUnicode_FromString(s.str().c_str());
    } else {
        PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!");
        return (PyObject*)NULL;

inference-engine/include/auto_plugin/auto_config.hpp (new file)
@ -0,0 +1,38 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief A header that defines advanced properties related to the Auto plugin.
 * These properties should be used in SetConfig() and LoadNetwork() methods
 *
 * @file auto_config.hpp
 */

#pragma once

#include "ie_plugin_config.hpp"

namespace InferenceEngine {

/**
 * @brief Auto plugin configuration
 */
namespace AutoConfigParams {

/**
 * @def AUTO_CONFIG_KEY(name)
 * @brief A macro which provides an AUTO-mangled name for configuration key with name `name`
 */
#define AUTO_CONFIG_KEY(name) InferenceEngine::AutoConfigParams::_CONFIG_KEY(AUTO_##name)

#define DECLARE_AUTO_CONFIG_KEY(name) DECLARE_CONFIG_KEY(AUTO_##name)
#define DECLARE_AUTO_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(AUTO_##name)

/**
 * @brief Limit device list config option, with comma-separated devices listed
 */
DECLARE_AUTO_CONFIG_KEY(DEVICE_LIST);

} // namespace AutoConfigParams
} // namespace InferenceEngine
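
Editor's note: a hedged usage sketch for the new DEVICE_LIST key (assumes the plugin is addressed by the device name "AUTO"; the exact registered name is not visible in this diff):

#include <ie_core.hpp>
#include "auto_plugin/auto_config.hpp"

void restrictAutoDevices() {
    InferenceEngine::Core core;
    // Comma-separated candidate devices, as documented for DEVICE_LIST above.
    core.SetConfig({{AUTO_CONFIG_KEY(DEVICE_LIST), "CPU,GPU"}}, "AUTO");
}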
@ -14,6 +14,44 @@

namespace InferenceEngine {

namespace Metrics {

/**
 * @def GPU_METRIC_KEY(name)
 * @brief shortcut for defining GPU plugin metrics
 */
#define GPU_METRIC_KEY(name) METRIC_KEY(GPU_##name)
#define DECLARE_GPU_METRIC_KEY(name, ...) DECLARE_METRIC_KEY(GPU_##name, __VA_ARGS__)

/**
 * @def DECLARE_GPU_METRIC_VALUE(name)
 * @brief shortcut for defining gpu metric values
 */
#define DECLARE_GPU_METRIC_VALUE(name) DECLARE_METRIC_VALUE(GPU_##name)

/**
 * @brief Metric which defines size of memory in bytes available for the device. For iGPU it returns host memory size, for dGPU - dedicated gpu memory size
 */
DECLARE_GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE, uint64_t);

/**
 * @brief Metric to get microarchitecture identifier in major.minor.revision format
 */
DECLARE_GPU_METRIC_KEY(UARCH_VERSION, std::string);

/**
 * @brief Metric to get count of execution units for current GPU
 */
DECLARE_GPU_METRIC_KEY(EXECUTION_UNITS_COUNT, int);

/**
 * @brief Possible return value for OPTIMIZATION_CAPABILITIES metric
 *  - "HW_MATMUL" - Defines if device has hardware block for matrix multiplication
 */
DECLARE_GPU_METRIC_VALUE(HW_MATMUL);

} // namespace Metrics

/**
 * @brief GPU plugin configuration
 */
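
Editor's note: a short sketch of querying the new metrics (the including header path is assumed; Core::GetMetric and Parameter::as are the standard APIs):

#include <ie_core.hpp>
#include <string>

void printGpuCapabilities() {
    InferenceEngine::Core core;
    // Total memory in bytes: host memory for iGPU, dedicated memory for dGPU.
    auto memBytes = core.GetMetric("GPU", GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE)).as<uint64_t>();
    // Microarchitecture identifier in major.minor.revision form.
    auto uarch = core.GetMetric("GPU", GPU_METRIC_KEY(UARCH_VERSION)).as<std::string>();
    (void)memBytes; (void)uarch;
}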

@ -41,10 +41,12 @@ public:

    IE_SUPPRESS_DEPRECATED_START
    /**
     * @deprecated Don't use this constructor. It will be removed soon
     * @brief Allows helper class to manage lifetime of network object
     *
     * @param network Pointer to the network object
     */
    INFERENCE_ENGINE_DEPRECATED("Don't use this constructor. It will be removed soon")
    explicit CNNNetwork(std::shared_ptr<ICNNNetwork> network);
    IE_SUPPRESS_DEPRECATED_END

@ -59,55 +61,69 @@ public:
               const std::vector<IExtensionPtr>& exts = {});

    /**
     * @copybrief ICNNNetwork::getOutputsInfo
     * @brief Gets the network output Data node information. The received info is stored in the given Data node.
     *
     * Wraps ICNNNetwork::getOutputsInfo
     * For single and multiple outputs networks.
     *
     * @return outputs Reference to the OutputsDataMap object
     * This method needs to be called to find out OpenVINO output names for using them later
     * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
     *
     * If you want to use framework names, you can use InferenceEngine::CNNNetwork::getOVNameForTensor
     * method to map framework names to OpenVINO names
     *
     * @return the InferenceEngine::OutputsDataMap object
     */
    OutputsDataMap getOutputsInfo() const;

    /**
     * @copybrief ICNNNetwork::getInputsInfo
     * @brief Gets the network input Data node information. The received info is stored in the given InputsDataMap
     * object.
     *
     * Wraps ICNNNetwork::getInputsInfo
     * For single and multiple inputs networks.
     * This method needs to be called to find out OpenVINO input names for using them later
     * when calling InferenceEngine::InferRequest::SetBlob
     *
     * @return inputs Reference to InputsDataMap object
     * If you want to use framework names, you can use InferenceEngine::ICNNNetwork::getOVNameForTensor
     * method to map framework names to OpenVINO names
     *
     * @return The InferenceEngine::InputsDataMap object.
     */
    InputsDataMap getInputsInfo() const;
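
Editor's note: the two accessors above are the documented way to discover tensor names before calling SetBlob/GetBlob. A minimal end-to-end sketch under that reading (device name and model path are placeholders):

#include <ie_core.hpp>
#include <string>

void runOnce(const std::string& modelXml) {
    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network = core.ReadNetwork(modelXml);
    InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(network, "CPU");
    InferenceEngine::InferRequest request = exec.CreateInferRequest();

    // Names for SetBlob/GetBlob come from these maps, as documented above.
    for (const auto& input : network.getInputsInfo()) {
        InferenceEngine::Blob::Ptr in = request.GetBlob(input.first);
        // ... fill `in` with data here ...
    }
    request.Infer();
    for (const auto& output : network.getOutputsInfo()) {
        InferenceEngine::Blob::Ptr out = request.GetBlob(output.first);
        // ... read results from `out` here ...
    }
}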
|
||||
|
||||
/**
|
||||
* @copybrief ICNNNetwork::layerCount
|
||||
*
|
||||
* Wraps ICNNNetwork::layerCount
|
||||
*
|
||||
* @brief Returns the number of layers in the network as an integer value
|
||||
* @return The number of layers as an integer value
|
||||
*/
|
||||
size_t layerCount() const;
|
||||
|
||||
/**
|
||||
* @copybrief ICNNNetwork::getName
|
||||
*
|
||||
* Wraps ICNNNetwork::getName
|
||||
*
|
||||
* @brief Returns the network name.
|
||||
* @return Network name
|
||||
*/
|
||||
const std::string& getName() const;
|
||||
|
||||
/**
|
||||
* @copybrief ICNNNetwork::setBatchSize
|
||||
* @brief Changes the inference batch size.
|
||||
*
|
||||
* Wraps ICNNNetwork::setBatchSize
|
||||
* @note There are several limitations and it's not recommended to use it. Set batch to the input shape and call
|
||||
* InferenceEngine::CNNNetwork::reshape.
|
||||
*
|
||||
* @param size Size of batch to set
|
||||
*
|
||||
* @note Current implementation of the function sets batch size to the first dimension of all layers in the
|
||||
* networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the
|
||||
* method works incorrectly. This limitation is resolved via shape inference feature by using
|
||||
* InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation
|
||||
*
|
||||
* @note Current implementation of the function sets batch size to the first dimension of all layers in the
|
||||
* networks. Before calling it make sure that all your layers have batch in the first dimension, otherwise the
|
||||
* method works incorrectly. This limitation is resolved via shape inference feature by using
|
||||
* InferenceEngine::ICNNNetwork::reshape method. To read more refer to the Shape Inference section in documentation
|
||||
*/
|
||||
void setBatchSize(const size_t size);
|
||||
|
||||
/**
|
||||
* @copybrief ICNNNetwork::getBatchSize
|
||||
*
|
||||
* Wraps ICNNNetwork::getBatchSize
|
||||
*
|
||||
* @brief Gets the inference batch size
|
||||
* @return The size of batch as a size_t value
|
||||
*/
|
||||
size_t getBatchSize() const;
|
||||
@ -119,7 +135,7 @@ public:
|
||||
*
|
||||
* @return A shared pointer of the current network
|
||||
*/
|
||||
// INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
|
||||
INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
|
||||
operator ICNNNetwork::Ptr();
|
||||
|
||||
/**
|
||||
@ -128,7 +144,7 @@ public:
|
||||
*
|
||||
* @return An instance of the current network
|
||||
*/
|
||||
// INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
|
||||
INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
|
||||
operator ICNNNetwork&();
|
||||
|
||||
/**
|
||||
@ -137,47 +153,42 @@ public:
|
||||
*
|
||||
* @return A const reference of the current network
|
||||
*/
|
||||
// INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
|
||||
INFERENCE_ENGINE_DEPRECATED("InferenceEngine::ICNNNetwork interface is deprecated")
|
||||
operator const ICNNNetwork&() const;
|
||||
IE_SUPPRESS_DEPRECATED_END
|
||||
|
||||
/**
|
||||
* @brief Returns constant nGraph function
|
||||
*
|
||||
* @return constant nGraph function
|
||||
*/
|
||||
std::shared_ptr<ngraph::Function> getFunction();
|
||||
|
||||
/**
|
||||
* @brief Returns constant nGraph function
|
||||
*
|
||||
* @return constant nGraph function
|
||||
*/
|
||||
std::shared_ptr<const ngraph::Function> getFunction() const;
|
||||
|
||||
/**
|
||||
* @copybrief ICNNNetwork::addOutput
|
||||
*
|
||||
* Wraps ICNNNetwork::addOutput
|
||||
*
|
||||
* @brief Adds output to the layer
|
||||
* @param layerName Name of the layer
|
||||
* @param outputIndex Index of the output
|
||||
*/
|
||||
void addOutput(const std::string& layerName, size_t outputIndex = 0);
|
||||
|
||||
IE_SUPPRESS_DEPRECATED_START
|
||||
/**
|
||||
* @brief Helper method to get collect all input shapes with names of corresponding Data objects
|
||||
*
|
||||
* @return Map of pairs: input name and its dimension.
|
||||
*/
|
||||
ICNNNetwork::InputShapes getInputShapes() const;
|
||||
|
||||
/**
|
||||
* @brief Run shape inference with new input shapes for the network
|
||||
*
|
||||
* @param inputShapes - map of pairs: name of corresponding data and its dimension.
|
||||
* @param inputShapes A map of pairs: name of corresponding data and its dimension.
|
||||
*/
|
||||
void reshape(const ICNNNetwork::InputShapes& inputShapes);
|
||||
IE_SUPPRESS_DEPRECATED_END
|
||||
|
||||
/**
|
||||
* @brief Serialize network to IR and weights files.
|
||||
|
@ -16,40 +16,52 @@
#include <string>
#include <vector>

#include "ie_parameter.hpp"
#include "ie_remote_context.hpp"
#include "cpp/ie_cnn_network.h"
#include "cpp/ie_infer_request.hpp"
#include "details/ie_so_loader.h"
#include "ie_iexecutable_network.hpp"

namespace InferenceEngine {

namespace details {
class SharedObjectLoader;
}

class IExecutableNetworkInternal;
class IExecutableNetwork;

/**
 * @brief This is an interface of an executable network
 */
class INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) {
    std::shared_ptr<IExecutableNetworkInternal> _impl;
    std::shared_ptr<details::SharedObjectLoader> _so;
    details::SharedObjectLoader _so;
    std::shared_ptr<IExecutableNetworkInternal> _impl;
    IE_SUPPRESS_DEPRECATED_START
    std::shared_ptr<IExecutableNetwork> actual;
    IE_SUPPRESS_DEPRECATED_END

    ExecutableNetwork(const std::shared_ptr<IExecutableNetworkInternal>& impl,
                      const std::shared_ptr<details::SharedObjectLoader>& so);

    friend class InferencePlugin;
    /**
     * @brief Constructs ExecutableNetwork from the initialized std::shared_ptr
     * @param so Plugin to use. This is required to ensure that ExecutableNetwork can work properly even if plugin object is destroyed.
     * @param impl Initialized shared pointer
     */
    ExecutableNetwork(const details::SharedObjectLoader& so,
                      const std::shared_ptr<IExecutableNetworkInternal>& impl);
    friend class Core;

public:
    /**
     * @brief Default constructor
     * @brief A default constructor.
     */
    ExecutableNetwork() = default;

    IE_SUPPRESS_DEPRECATED_START
    /**
     * @brief Default destructor
     * @deprecated This ctor will be removed in 2022.1
     * @brief Constructs ExecutableNetwork from the initialized std::shared_ptr
     * @param exec Initialized shared pointer
     * @param splg Plugin to use. This is required to ensure that ExecutableNetwork can work properly even if plugin object is destroyed.
     */
    ~ExecutableNetwork();
    INFERENCE_ENGINE_DEPRECATED("This ctor will be removed in 2022.1")
    explicit ExecutableNetwork(std::shared_ptr<IExecutableNetwork> exec,
                               std::shared_ptr<details::SharedObjectLoader> splg = {});
    IE_SUPPRESS_DEPRECATED_END

    /**
     * @brief Gets the Executable network output Data node information.
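The reordered members above place the SharedObjectLoader before the impl pointer; since C++ destroys members in reverse declaration order, the impl is presumably released while the plugin library is still loaded, which matches the rationale stated in the constructor docs. A short usage sketch (the Core object, model path, and device name are placeholders):

    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");  // placeholder path
    InferenceEngine::ExecutableNetwork exec = core.LoadNetwork(network, "CPU");
    InferenceEngine::InferRequest request = exec.CreateInferRequest();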
@ -13,19 +13,19 @@
#include <memory>
#include <string>

#include "ie_blob.h"
#include "cpp/ie_memory_state.hpp"
#include "ie_remote_context.hpp"
#include "ie_iinfer_request.hpp"
#include "details/ie_so_loader.h"
#include "ie_blob.h"

namespace InferenceEngine {

namespace details {
class SharedObjectLoader;
}
class IInferRequestInternal;

namespace details {
class ICompletionCallbackWrapper;
}  // namespace details

/**
 * @copybrief IInferRequest
 *
@ -33,12 +33,20 @@ class IInferRequestInternal;
 * It can throw exceptions safely for the application, where they are properly handled.
 */
class INFERENCE_ENGINE_API_CLASS(InferRequest) {
    std::shared_ptr<IInferRequestInternal> _impl;
    std::shared_ptr<details::SharedObjectLoader> _so;

    InferRequest(const std::shared_ptr<IInferRequestInternal>& impl,
                 const std::shared_ptr<details::SharedObjectLoader>& so);
    details::SharedObjectLoader _so;
    std::shared_ptr<IInferRequestInternal> _impl;
    IE_SUPPRESS_DEPRECATED_START
    IInferRequest::Ptr actual;
    std::shared_ptr<details::ICompletionCallbackWrapper> callback;
    IE_SUPPRESS_DEPRECATED_END

    /**
     * @brief Constructs InferRequest from the initialized std::shared_ptr
     * @param so Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed.
     * @param impl Initialized shared pointer
     */
    InferRequest(const details::SharedObjectLoader& so,
                 const std::shared_ptr<IInferRequestInternal>& impl);
    friend class ExecutableNetwork;

public:
@ -63,10 +71,17 @@ public:
     */
    InferRequest() = default;

    IE_SUPPRESS_DEPRECATED_START
    /**
     * @brief Destructor
     * @deprecated This ctor will be removed in 2022.1
     * @brief Constructs InferRequest from the initialized std::shared_ptr
     * @param request Initialized shared pointer
     * @param splg Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed.
     */
    ~InferRequest();
    INFERENCE_ENGINE_DEPRECATED("This ctor will be removed in 2022.1")
    explicit InferRequest(IInferRequest::Ptr request,
                          std::shared_ptr<details::SharedObjectLoader> splg = {});
    IE_SUPPRESS_DEPRECATED_END

    /**
     * @brief Sets input/output data for inference
@ -222,6 +237,18 @@ public:
     * @return true if the current InferRequest object is initialized, false otherwise
     */
    explicit operator bool() const noexcept;

    /**
     * @brief Compares whether this request wraps the same impl underneath
     * @return true if the current InferRequest object does not wrap the same impl as the operator's argument
     */
    bool operator!=(const InferRequest&) const noexcept;

    /**
     * @brief Compares whether this request wraps the same impl underneath
     * @return true if the current InferRequest object wraps the same impl as the operator's argument
     */
    bool operator==(const InferRequest&) const noexcept;
};

template<>
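A small sketch of the comparison semantics above; copies share the same impl, so they compare equal (assumes exec is an ExecutableNetwork as in the earlier sketch):

    InferenceEngine::InferRequest a = exec.CreateInferRequest();
    InferenceEngine::InferRequest b = a;   // copy shares the same impl
    InferenceEngine::InferRequest c;       // default-constructed, no impl yet

    bool aValid = static_cast<bool>(a);    // true: wraps an impl
    bool cValid = static_cast<bool>(c);    // false: uninitialized
    bool same = (a == b);                  // true: same impl underneath
    bool diff = (a != c);                  // true: different impls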
@ -15,34 +15,51 @@

#include "ie_api.h"
#include "ie_blob.h"
#include "details/ie_so_loader.h"
#include "ie_imemory_state.hpp"

namespace InferenceEngine {

namespace details {
class SharedObjectLoader;
}

class IVariableStateInternal;

/**
 * @brief C++ exception based error reporting wrapper of API class IVariableState
 */
class INFERENCE_ENGINE_API_CLASS(VariableState) {
    std::shared_ptr<IVariableStateInternal> _impl = nullptr;
    std::shared_ptr<details::SharedObjectLoader> _so = nullptr;
    details::SharedObjectLoader _so;
    std::shared_ptr<IVariableStateInternal> _impl;
    IE_SUPPRESS_DEPRECATED_START
    std::shared_ptr<IVariableState> actual;
    IE_SUPPRESS_DEPRECATED_END

    /**
     * @brief Constructs VariableState from the initialized std::shared_ptr
     * @param impl Initialized shared pointer
     * @param so Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin object is destroyed.
     */
    VariableState(const std::shared_ptr<IVariableStateInternal>& impl,
                  const std::shared_ptr<details::SharedObjectLoader>& so);

    VariableState(const details::SharedObjectLoader& so,
                  const std::shared_ptr<IVariableStateInternal>& impl);
    friend class InferRequest;
    friend class ExecutableNetwork;

public:
    /**
     * @brief Default constructor
     */
    VariableState() = default;

    IE_SUPPRESS_DEPRECATED_START
    /**
     * @deprecated This ctor will be removed in 2022.1
     * @brief Constructs VariableState from the initialized std::shared_ptr
     * @param pState Initialized shared pointer
     * @param plg Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin object is destroyed.
     */
    INFERENCE_ENGINE_DEPRECATED("This ctor will be removed in 2022.1")
    explicit VariableState(std::shared_ptr<IVariableState> pState,
                           std::shared_ptr<details::SharedObjectLoader> plg = {});
    IE_SUPPRESS_DEPRECATED_END

    /**
     * @copybrief IVariableState::Reset
     *
@ -62,7 +79,7 @@ public:
     * @copybrief IVariableState::GetState
     *
     * Wraps IVariableState::GetState
     * @return A blob representing a state
     * @return A blob representing a state
     */
    Blob::CPtr GetState() const;
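A sketch of driving the state API above from an infer request; it assumes request is an InferRequest created from a stateful network and that its QueryState accessor returns the request's VariableState objects:

    for (InferenceEngine::VariableState& state : request.QueryState()) {
        std::string name = state.GetName();                    // variable identifier
        InferenceEngine::Blob::CPtr value = state.GetState();  // current state blob
        state.Reset();                                         // return to the initial value
    }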
@ -4,7 +4,7 @@

/**
 * @brief A header file for the definition of an abstraction over platform-specific shared objects
 *
 *
 * @file ie_so_loader.h
 */
#pragma once
@ -25,9 +25,9 @@ class INFERENCE_ENGINE_API_CLASS(SharedObjectLoader) {

public:
    /**
     * @brief A shared pointer to SharedObjectLoader
     * @brief Default constructor
     */
    using Ptr = std::shared_ptr<SharedObjectLoader>;
    SharedObjectLoader() = default;

#ifdef ENABLE_UNICODE_PATH_SUPPORT
    /**
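A small sketch of the loader on its own; the library path and symbol name below are placeholders, and get_symbol is the accessor the SOPointer code later in this diff relies on:

    InferenceEngine::details::SharedObjectLoader so("libmy_plugin.so");  // placeholder path
    void* sym = so.get_symbol("CreatePluginEngine");                     // placeholder symbol name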
@ -36,19 +36,19 @@ using enableIfSupportedChar = typename std::enable_if<(std::is_same<C, char>::va
/**
 * @brief This class instantiates an object using a shared library
 * @tparam T A type of object SOPointer can hold
 * @tparam Loader A loader used to load a library
 */
template <class T, class Loader = SharedObjectLoader>
template <class T>
class SOPointer {
    template <class U, class W>
    template <class U>
    friend class SOPointer;
    IE_SUPPRESS_DEPRECATED_START

    IE_SUPPRESS_DEPRECATED_START
    struct HasRelease {
        template <typename C> static char test(decltype(&C::Release));
        template <typename C> static long test(...);
        constexpr static const bool value = sizeof(test<T>(nullptr)) == sizeof(char);
    };
    IE_SUPPRESS_DEPRECATED_END
    IE_SUPPRESS_DEPRECATED_END
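HasRelease above is a classic SFINAE member detector: overload resolution picks the char-returning test only when &C::Release is well-formed, so the sizeof comparison yields a compile-time bool. A self-contained sketch of the same idiom (all names below are illustrative only):

    template <typename C>
    struct HasReleaseSketch {
        template <typename U> static char test(decltype(&U::Release));
        template <typename U> static long test(...);
        constexpr static const bool value = sizeof(test<C>(nullptr)) == sizeof(char);
    };

    struct WithRelease { void Release(); };
    struct WithoutRelease {};

    static_assert(HasReleaseSketch<WithRelease>::value, "Release detected");
    static_assert(!HasReleaseSketch<WithoutRelease>::value, "no Release member");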
public:
    /**
@ -62,48 +62,36 @@ public:
     */
    template <typename C,
              typename = enableIfSupportedChar<C>>
    explicit SOPointer(const std::basic_string<C> & name)
        : _so_loader(new Loader(name.c_str())) {
        Load(std::integral_constant<bool, HasRelease::value>{});
    }

    /**
     * @brief The main constructor
     * @param name Name of a shared library file
     */
    explicit SOPointer(const char * name)
        : _so_loader(new Loader(name)) {
        Load(std::integral_constant<bool, HasRelease::value>{});
    }

    /**
     * @brief Constructs an object with an existing reference
     * @param pointedObj Existing reference to wrap
     */
    explicit SOPointer(T* pointedObj): _so_loader(), _pointedObj(pointedObj) {
        if (_pointedObj == nullptr) {
            IE_THROW() << "Cannot create SOPointer<T, Loader> from nullptr";
        }
    SOPointer(const std::basic_string<C> & name)
        : _so(name.c_str()) {
        Load(std::integral_constant<bool, HasRelease::value>{});
    }

    /**
     * @brief Constructs an object with an existing reference
     * @brief Constructs an object with an existing loader
     * @param so_loader Existing pointer to a library loader
     * @param soLoader Existing pointer to a library loader
     */
    explicit SOPointer(const std::shared_ptr<Loader>& so_loader)
        : _so_loader(so_loader) {
        Load(std::integral_constant<bool, HasRelease::value>{});
    }
    SOPointer(const SharedObjectLoader& so, const std::shared_ptr<T>& ptr) : _so{so}, _ptr{ptr} {}

    /**
     * @brief Constructs an object with an existing loader
     * @param so Existing pointer to a library loader
     */
    explicit SOPointer(const SharedObjectLoader& so)
        : _so(so) {
        Load(std::integral_constant<bool, HasRelease::value>{});
    }

    /**
     * @brief The copy-like constructor; it can create an SOPointer that dereferences to a child type if T is derived from U
     * @param that Copied SOPointer object
     */
    template <class U, class W>
    SOPointer(const SOPointer<U, W>& that)
        : _so_loader(std::dynamic_pointer_cast<Loader>(that._so_loader)),
          _pointedObj(std::dynamic_pointer_cast<T>(that._pointedObj)) {
        IE_ASSERT(_pointedObj != nullptr);
    template <typename U>
    SOPointer(const SOPointer<U>& that)
        : _so(that._so),
          _ptr(std::dynamic_pointer_cast<T>(that._ptr)) {
        IE_ASSERT(_ptr != nullptr);
    }

    /**
@ -111,19 +99,11 @@ public:
     * @return underlying interface with disabled Release method
     */
    T* operator->() const noexcept {
        return _pointedObj.get();
    }

    /**
     * @brief Standard dereference operator
     * @return underlying interface with disabled Release method
     */
    const T* operator*() const noexcept {
        return this->operator->();
        return _ptr.get();
    }

    explicit operator bool() const noexcept {
        return (nullptr != _pointedObj);
        return _ptr != nullptr;
    }

    friend bool operator==(std::nullptr_t, const SOPointer& ptr) noexcept {
@ -139,33 +119,15 @@ public:
        return static_cast<bool>(ptr);
    }

    SOPointer& operator=(const SOPointer& pointer) noexcept {
        _pointedObj = pointer._pointedObj;
        _so_loader = pointer._so_loader;
        return *this;
    operator const SharedObjectLoader&() const noexcept {
        return _so;
    }

    operator const std::shared_ptr<Loader>&() const noexcept {
        return _so_loader;
    operator std::shared_ptr<T>& () noexcept {
        return _ptr;
    }

protected:
#define CATCH_IE_EXCEPTION(ExceptionType) catch (const InferenceEngine::ExceptionType& e) {throw e;}
#define CATCH_IE_EXCEPTIONS \
        CATCH_IE_EXCEPTION(GeneralError) \
        CATCH_IE_EXCEPTION(NotImplemented) \
        CATCH_IE_EXCEPTION(NetworkNotLoaded) \
        CATCH_IE_EXCEPTION(ParameterMismatch) \
        CATCH_IE_EXCEPTION(NotFound) \
        CATCH_IE_EXCEPTION(OutOfBounds) \
        CATCH_IE_EXCEPTION(Unexpected) \
        CATCH_IE_EXCEPTION(RequestBusy) \
        CATCH_IE_EXCEPTION(ResultNotReady) \
        CATCH_IE_EXCEPTION(NotAllocated) \
        CATCH_IE_EXCEPTION(InferNotStarted) \
        CATCH_IE_EXCEPTION(NetworkNotRead) \
        CATCH_IE_EXCEPTION(InferCancelled)
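For reference, each CATCH_IE_EXCEPTION above expands to a catch clause that rethrows the IE exception with its concrete type preserved; a sketch of the expanded shape for a single type, with the fallback clause these headers pair it with:

    try {
        // ... plugin call ...
    } catch (const InferenceEngine::GeneralError& e) {
        throw e;  // expansion of CATCH_IE_EXCEPTION(GeneralError)
    } catch (const std::exception& ex) {
        IE_THROW() << ex.what();  // fallback used alongside CATCH_IE_EXCEPTIONS
    }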
    /**
     * @brief Implements loading of the object from the library if the Release method is present
     */
@ -173,10 +135,10 @@ protected:
        try {
            void* create = nullptr;
            try {
                create = _so_loader->get_symbol((SOCreatorTrait<T>::name + std::string("Shared")).c_str());
                create = _so.get_symbol((SOCreatorTrait<T>::name + std::string("Shared")).c_str());
            } catch (const NotFound&) {}
            if (create == nullptr) {
                create = _so_loader->get_symbol(SOCreatorTrait<T>::name);
                create = _so.get_symbol(SOCreatorTrait<T>::name);
                using CreateF = StatusCode(T*&, ResponseDesc*);
                T* object = nullptr;
                ResponseDesc desc;
@ -186,17 +148,13 @@ protected:
                    InferenceEngine::details::ThrowNow<ExceptionType>{} <<= std::stringstream{} << IE_LOCATION << desc.msg)
                }
                IE_SUPPRESS_DEPRECATED_START
                _pointedObj = std::shared_ptr<T>(object, [] (T* ptr){ptr->Release();});
                _ptr = std::shared_ptr<T>(object, [] (T* ptr){ptr->Release();});
                IE_SUPPRESS_DEPRECATED_END
            } else {
                using CreateF = void(std::shared_ptr<T>&);
                reinterpret_cast<CreateF*>(create)(_pointedObj);
                reinterpret_cast<CreateF*>(create)(_ptr);
            }
        } CATCH_IE_EXCEPTIONS catch (const std::exception& ex) {
            IE_THROW() << ex.what();
        } catch(...) {
            IE_THROW(Unexpected);
        }
        } catch(...) {details::Rethrow();}
    }

    /**
@ -205,25 +163,19 @@ protected:
    void Load(std::false_type) {
        try {
            using CreateF = void(std::shared_ptr<T>&);
            reinterpret_cast<CreateF*>(_so_loader->get_symbol(SOCreatorTrait<T>::name))(_pointedObj);
        } CATCH_IE_EXCEPTIONS catch (const std::exception& ex) {
            IE_THROW() << ex.what();
        } catch(...) {
            IE_THROW(Unexpected);
        }
            reinterpret_cast<CreateF*>(_so.get_symbol(SOCreatorTrait<T>::name))(_ptr);
        } catch(...) {details::Rethrow();}
    }
#undef CATCH_IE_EXCEPTION
#undef CATCH_IE_EXCEPTIONS

    /**
     * @brief Gets a smart pointer to the DLL
     * @brief The DLL
     */
    std::shared_ptr<Loader> _so_loader;
    SharedObjectLoader _so;

    /**
     * @brief Gets a smart pointer to the custom object
     */
    std::shared_ptr<T> _pointedObj;
    std::shared_ptr<T> _ptr;
};
}  // namespace details
}  // namespace InferenceEngine
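A hedged sketch of using the slimmed-down SOPointer; MyInterface, its DoWork method, and the library name are placeholders, and a SOCreatorTrait<MyInterface> specialization naming a creator symbol exported by the library is assumed, as Load() above requires:

    using namespace InferenceEngine::details;

    SOPointer<MyInterface> obj("libmy_plugin.so");  // loads the library, creates the object
    if (obj) {                                      // explicit operator bool: _ptr != nullptr
        obj->DoWork();                              // operator-> forwards to the loaded object
    }
    std::shared_ptr<MyInterface>& shared = obj;     // implicit conversion to the shared_ptr
    const SharedObjectLoader& so = obj;             // implicit conversion to the loader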
|
@ -799,6 +799,7 @@ protected:
|
||||
}
|
||||
};
|
||||
|
||||
#ifdef __clang__
|
||||
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<float>);
|
||||
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<double>);
|
||||
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int8_t>);
|
||||
@ -813,6 +814,7 @@ extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned
|
||||
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned long long>);
|
||||
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<bool>);
|
||||
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<char>);
|
||||
#endif // __clang__
|
||||
|
||||
/**
|
||||
* @brief Creates a blob with the given tensor descriptor.
|
||||
|
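A hedged sketch of blob creation against the TBlob instantiations listed above; the shape, precision, and layout values are illustrative:

    #include <ie_blob.h>

    int main() {
        InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
                                         {1, 3, 224, 224},
                                         InferenceEngine::Layout::NCHW);
        auto blob = InferenceEngine::make_shared_blob<float>(desc);  // backed by TBlob<float>
        blob->allocate();
        float* data = blob->buffer().as<float*>();
        data[0] = 1.0f;
        return 0;
    }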