diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml index 66ee0885a24..fe39944493b 100644 --- a/.ci/azure/mac.yml +++ b/.ci/azure/mac.yml @@ -133,9 +133,10 @@ jobs: displayName: 'IE FuncTests' continueOnError: false - - script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml + - script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke*:-smoke_LPT/ReduceMinTransformation.CompareWithRefImpl/f32_Shape* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml displayName: 'CPU FuncTests' continueOnError: false + enabled: false - script: | export DATA_PATH=$(MODELS_PATH) diff --git a/CMakeLists.txt b/CMakeLists.txt index 053d67ef8c1..7788252f207 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -103,6 +103,7 @@ function(build_ngraph) endif() ie_cpack_add_component(ngraph REQUIRED) + ie_cpack_add_component(ngraph_dev REQUIRED DEPENDS ngraph) set(SDL_cmake_included ON) add_subdirectory(ngraph) diff --git a/cmake/developer_package/IEDevScriptsConfig.cmake b/cmake/developer_package/IEDevScriptsConfig.cmake index 58dfb519e50..4b924633d2f 100644 --- a/cmake/developer_package/IEDevScriptsConfig.cmake +++ b/cmake/developer_package/IEDevScriptsConfig.cmake @@ -14,7 +14,13 @@ set(CMAKE_MODULE_PATH "${IEDevScripts_DIR}") function(set_ci_build_number) set(repo_root "${CMAKE_SOURCE_DIR}") include(version) - set(CI_BUILD_NUMBER "${CI_BUILD_NUMBER}" PARENT_SCOPE) + foreach(var CI_BUILD_NUMBER IE_VERSION + IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH) + if(NOT DEFINED ${var}) + message(FATAL_ERROR "${var} version component is not defined") + endif() + set(${var} "${${var}}" PARENT_SCOPE) + endforeach() endfunction() set_ci_build_number() diff --git a/cmake/developer_package/add_ie_target.cmake b/cmake/developer_package/add_ie_target.cmake index d44149383c7..d49f16a4db0 100644 --- a/cmake/developer_package/add_ie_target.cmake +++ b/cmake/developer_package/add_ie_target.cmake @@ -31,6 +31,7 @@ addIeTarget( function(addIeTarget) set(options ADD_CPPLINT # Enables code style checks for the target + ADD_CLANG_FORMAT # Enables code style checks for the target ) set(oneValueRequiredArgs TYPE # type of target, SHARED|STATIC|EXECUTABLE. 
SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable @@ -119,6 +120,10 @@ function(addIeTarget) # code style add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME}) endif() + if (ARG_ADD_CLANG_FORMAT) + # code style + add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME}) + endif() if (ARG_DEVELOPER_PACKAGE) # developer package openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE} @@ -128,7 +133,6 @@ function(addIeTarget) # Provide default compile pdb name equal to target name set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME}) endif() - endfunction() #[[ diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index faafb8e9916..aae1dbb7fb7 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -27,7 +27,10 @@ endif() # ) # function(ie_add_plugin) - set(options SKIP_INSTALL) + set(options + SKIP_INSTALL + ADD_CLANG_FORMAT + ) set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR) set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS) cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) @@ -73,7 +76,11 @@ function(ie_add_plugin) string(CONCAT custom_filter "${custom_filter}" "," "${filter}") endforeach() - add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter}) + if (IE_PLUGIN_ADD_CLANG_FORMAT) + add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME}) + else() + add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter}) + endif() # check that plugin with such name is not registered diff --git a/cmake/developer_package/version.cmake b/cmake/developer_package/version.cmake index c17c55b7b20..688024b9923 100644 --- a/cmake/developer_package/version.cmake +++ b/cmake/developer_package/version.cmake @@ -26,6 +26,60 @@ function (commitHash VAR) set (${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE) endfunction() +macro(ie_parse_ci_build_number) + if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-.*") + set(IE_VERSION_MAJOR ${CMAKE_MATCH_1}) + set(IE_VERSION_MINOR ${CMAKE_MATCH_2}) + set(IE_VERSION_PATCH ${CMAKE_MATCH_3}) + set(has_ci_version ON) + else() + set(IE_VERSION_MAJOR 0) + set(IE_VERSION_MINOR 0) + set(IE_VERSION_PATCH 0) + endif() + + if(NOT DEFINED repo_root) + message(FATAL_ERROR "repo_root is not defined") + endif() + + if(DEFINED IEDevScripts_DIR AND DEFINED IE_MAIN_SOURCE_DIR AND NOT DEFINED custom_build) + set(ie_version_hpp "${IE_MAIN_SOURCE_DIR}/include/ie_version.hpp") + if(NOT EXISTS ${ie_version_hpp}) + message(FATAL_ERROR "File ie_version.hpp with IE_VERSION definitions is not found") + endif() + + file(STRINGS "${ie_version_hpp}" IE_VERSION_PARTS REGEX "#define IE_VERSION_[A-Z]+[ ]+" ) + + string(REGEX REPLACE ".+IE_VERSION_MAJOR[ ]+([0-9]+).*" "\\1" + IE_VERSION_MAJOR_HPP "${IE_VERSION_PARTS}") + string(REGEX REPLACE ".+IE_VERSION_MINOR[ ]+([0-9]+).*" "\\1" + IE_VERSION_MINOR_HPP "${IE_VERSION_PARTS}") + string(REGEX REPLACE ".+IE_VERSION_PATCH[ ]+([0-9]+).*" "\\1" + IE_VERSION_PATCH_HPP "${IE_VERSION_PARTS}") + + foreach(var IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH) + if(DEFINED ${var} AND NOT ${var} EQUAL ${var}_HPP) + message(FATAL_ERROR "${var} parsed from CI_BUILD_NUMBER (${${var}}) \ + and from ie_version.hpp (${${var}_HPP}) are different") + else() + # CI_BUILD_NUMBER is not defined well, take info from 
ie_verison.hpp as a baseline + set(${var} ${${var}_HPP}) + endif() + endforeach() + elseif(has_ci_version) + message(WARNING "IE_MAIN_SOURCE_DIR is not defined. No way to compare versions") + else() + message(WARNING "No way to detect OpenVINO version. Supposing 0.0.0.0") + endif() + + set(IE_VERSION "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}") +endmacro() + +# WA for DL Benchmark +if(DEFINED ENV{CI_BUILD_NUMBER} AND "$ENV{CI_BUILD_NUMBER}" STREQUAL "1") + unset(ENV{CI_BUILD_NUMBER}) +endif() + if (DEFINED ENV{CI_BUILD_NUMBER}) set(CI_BUILD_NUMBER $ENV{CI_BUILD_NUMBER}) else() @@ -36,6 +90,11 @@ else() set(CI_BUILD_NUMBER "${custom_build}") endif() +# provides Inference Engine version +# 1. If CI_BUILD_NUMBER is defined, parses this information +# 2. Otherwise, parses ie_version.hpp +ie_parse_ci_build_number() + function (addVersionDefines FILE) foreach (VAR ${ARGN}) if (DEFINED ${VAR} AND NOT "${${VAR}}" STREQUAL "") diff --git a/cmake/developer_package/vs_version/vs_version.cmake b/cmake/developer_package/vs_version/vs_version.cmake index 21063c86aca..14d4c0e1e26 100644 --- a/cmake/developer_package/vs_version/vs_version.cmake +++ b/cmake/developer_package/vs_version/vs_version.cmake @@ -2,24 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 # -macro(ie_parse_ci_build_number) - if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-.*") - set(IE_VERSION_MAJOR ${CMAKE_MATCH_1}) - set(IE_VERSION_MINOR ${CMAKE_MATCH_2}) - set(IE_VERSION_PATCH ${CMAKE_MATCH_3}) - set(IE_VS_VER_HAS_VERSION 1) - else() - set(IE_VS_VER_HAS_VERSION 0) - endif() -endmacro() - -ie_parse_ci_build_number() - -if(IE_VS_VER_HAS_VERSION) - set(IE_VS_VER_FILEVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0") - set(IE_VS_VER_PRODUCTVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0") - set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0") -endif() +set(IE_VS_VER_FILEVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0") +set(IE_VS_VER_PRODUCTVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0") +set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0") set(IE_VS_VER_COMPANY_NAME_STR "Intel Corporation") set(IE_VS_VER_PRODUCTVERSION_STR "${CI_BUILD_NUMBER}") diff --git a/cmake/developer_package/vs_version/vs_version.rc.in b/cmake/developer_package/vs_version/vs_version.rc.in index b515b311883..f5375d5f306 100644 --- a/cmake/developer_package/vs_version/vs_version.rc.in +++ b/cmake/developer_package/vs_version/vs_version.rc.in @@ -1,10 +1,8 @@ #include VS_VERSION_INFO VERSIONINFO -#if @IE_VS_VER_HAS_VERSION@ FILEVERSION @IE_VS_VER_FILEVERSION_QUAD@ PRODUCTVERSION @IE_VS_VER_PRODUCTVERSION_QUAD@ -#endif FILEFLAGSMASK VS_FFI_FILEFLAGSMASK #ifdef _DEBUG FILEFLAGS 1 @@ -21,9 +19,7 @@ BEGIN BEGIN VALUE "CompanyName", "@IE_VS_VER_COMPANY_NAME_STR@\0" VALUE "FileDescription", "@IE_VS_VER_FILEDESCRIPTION_STR@\0" -#if @IE_VS_VER_HAS_VERSION@ VALUE "FileVersion", "@IE_VS_VER_FILEVERSION_STR@\0" -#endif VALUE "InternalName", "@IE_VS_VER_INTERNALNAME_STR@\0" VALUE "LegalCopyright", "@IE_VS_VER_COPYRIGHT_STR@\0" VALUE "OriginalFilename", "@IE_VS_VER_ORIGINALFILENAME_STR@\0" diff --git a/docs/.clang-format b/docs/.clang-format new file mode 100644 index 00000000000..c93e6254b5b --- /dev/null +++ b/docs/.clang-format @@ -0,0 +1,25 @@ +BasedOnStyle: Google +IndentWidth: 4 +UseTab: Never + +Language: Cpp +Standard: Cpp11 + +AccessModifierOffset: -4 
+AlignConsecutiveMacros: true +AllowAllArgumentsOnNextLine: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Empty +AllowShortLoopsOnASingleLine: false +AlwaysBreakBeforeMultilineStrings: false +ColumnLimit: 160 +# Specialize this comment pragma in order to avoid changes in SEA copyrights +CommentPragmas: '^#' +DerivePointerAlignment: false +FixNamespaceComments: true +IndentCaseLabels: false +IndentPPDirectives: BeforeHash +SpaceBeforeCpp11BracedList: true +SpaceBeforeCtorInitializerColon: false \ No newline at end of file diff --git a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md index d3b0714ea44..8ca911f7d0c 100644 --- a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md +++ b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md @@ -16,7 +16,7 @@ To add your custom nGraph operation, create a new class that extends `ngraph::Op 5. Override the `visit_attributes` method, which enables serialization and deserialization of operation attributes. An `AttributeVisitor` is passed to the method, and the implementation is expected to walk over all the attributes in the op using the type-aware `on_attribute` helper. Helpers are already implemented for standard C++ types like `int64_t`, `float`, `bool`, `vector`, and for existing nGraph defined types. -6. Override `evaluate`, which is an optional method that enables the application of constant folding if there is a custom operation on the constant branch. +6. Override `evaluate`, which is an optional method that enables the application of constant folding if there is a custom operation on the constant branch. If your operation provides an `evaluate` method, you also need to override `has_evaluate`, which reports whether `evaluate` is available for the operation. Based on that, declaration of an operation class can look as follows: @@ -55,7 +55,7 @@ nGraph operation contains two constructors: @snippet template_extension/op.cpp op:visit_attributes -### `evaluate()` +### `evaluate()` and `has_evaluate()` `ngraph::Node::evaluate` method enables you to apply constant folding to an operation.
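To make the relationship between `evaluate()` and `has_evaluate()` concrete, here is a condensed, illustrative sketch that follows the same pattern as `docs/template_extension/op.cpp` further down in this patch. The `MyAdd` name, the single supported precision, and the scalar `add` attribute are assumptions made for brevity, not part of the patch.

```cpp
#include <ngraph/ngraph.hpp>

// Illustrative custom operation: adds a scalar attribute to the input tensor.
class MyAdd : public ngraph::op::Op {
public:
    static constexpr ngraph::NodeTypeInfo type_info {"MyAdd", 0};
    const ngraph::NodeTypeInfo& get_type_info() const override {
        return type_info;
    }

    MyAdd() = default;
    MyAdd(const ngraph::Output<ngraph::Node>& arg, int64_t add): Op({arg}), add(add) {
        constructor_validate_and_infer_types();
    }
    void validate_and_infer_types() override {
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
    }
    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override {
        return std::make_shared<MyAdd>(new_args.at(0), add);
    }

    // evaluate() performs the computation on host tensors, which lets nGraph
    // constant-fold the operation when it sits on a constant branch.
    bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override {
        if (inputs[0]->get_element_type() != ngraph::element::f32)
            return false;  // unsupported type: constant folding is simply skipped
        const float* src = inputs[0]->get_data_ptr<float>();
        float* dst = outputs[0]->get_data_ptr<float>();
        for (size_t i = 0; i < ngraph::shape_size(inputs[0]->get_shape()); ++i)
            dst[i] = src[i] + add;
        return true;
    }

    // has_evaluate() reports whether evaluate() can be called for the current input types.
    bool has_evaluate() const override {
        return get_input_element_type(0) == ngraph::element::f32;
    }

private:
    int64_t add = 0;
};

constexpr ngraph::NodeTypeInfo MyAdd::type_info;
```

Returning `false` from either method is not an error; it only tells nGraph that constant folding cannot be applied for that case.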
diff --git a/docs/MO_DG/img/compressed_int8_Convolution_weights.png b/docs/MO_DG/img/compressed_int8_Convolution_weights.png index ea3c831b1cc..f4333b5e1a7 100644 --- a/docs/MO_DG/img/compressed_int8_Convolution_weights.png +++ b/docs/MO_DG/img/compressed_int8_Convolution_weights.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c9ddc759bc419268f4c23089b91a9e3373114a1d36b01d6fe62a5e87b5c0ad4 -size 59827 +oid sha256:4b14b03ebb6a00b5f52a8404282f83d4ad214c8d04aea74738027a775c4ef545 +size 100581 diff --git a/docs/MO_DG/img/expanded_int8_Convolution_weights.png b/docs/MO_DG/img/expanded_int8_Convolution_weights.png index 918e2376a48..f250f509191 100644 --- a/docs/MO_DG/img/expanded_int8_Convolution_weights.png +++ b/docs/MO_DG/img/expanded_int8_Convolution_weights.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59890c0c4a6d1c721dfaca22f0c1d0b305401f75dcd30418f858382830be2d31 -size 49598 +oid sha256:cbfadd457b4d943ffb46906a7daf03516e971fe49d2806cd32c84c5015178f03 +size 92819 diff --git a/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md b/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md index eda5d768c47..fa4bdb50554 100644 --- a/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md +++ b/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md @@ -2,36 +2,36 @@ ## Introduction -Inference Engine CPU plugin can infer models in the 8-bit integer (INT8) precision. -For details, refer to [INT8 inference on the CPU](../../../IE_DG/Int8Inference.md). +Inference Engine CPU and GPU plugins can infer models in low precision. +For details, refer to [Low Precision Inference on the CPU](../../../IE_DG/Int8Inference.md). -Intermediate Representation (IR) should be specifically formed to be suitable for INT8 inference. -Such an IR is called an INT8 IR and you can generate it in two ways: -- [Quantize model with the Post-Training Optimization tool](@ref pot_README) -- Use the Model Optimizer for TensorFlow\* pre-TFLite models (`.pb` model file with `FakeQuantize*` operations) +Intermediate Representation (IR) should be specifically formed to be suitable for low precision inference. +Such an IR is called a Low Precision IR and you can generate it in two ways: +- [Quantize regular IR with the Post-Training Optimization tool](@ref pot_README) +- Use the Model Optimizer for a model pretrained for Low Precision inference: TensorFlow\* pre-TFLite models (`.pb` model file with `FakeQuantize*` operations) and ONNX\* quantized models. +Both TensorFlow and ONNX quantized models can be prepared with the [Neural Network Compression Framework](https://github.com/openvinotoolkit/nncf/blob/develop/README.md). -For an operation to be executed in INT8, it must have `FakeQuantize` operations as inputs with the `levels` attribute set to `255` or `256`. +For an operation to be executed in INT8, it must have `FakeQuantize` operations as inputs. See the [specification of `FakeQuantize` operation](../../../ops/quantization/FakeQuantize_1.md) for details. -To see the list of supported INT8 layers, refer to [INT8 inference on the CPU](../../../IE_DG/Int8Inference.md).
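To make "`FakeQuantize` operations as inputs" concrete in graph terms, here is a small sketch built with the nGraph C++ API. The shapes, quantization ranges, `levels` values, and the `fake_quantize` helper are illustrative assumptions, not taken from this patch or from a real model.

```cpp
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

using namespace ngraph;

// Illustrative helper: wrap a value into a per-tensor FakeQuantize with equal
// input and output ranges.
std::shared_ptr<Node> fake_quantize(const Output<Node>& value, float lo, float hi, size_t levels) {
    auto range_lo = opset1::Constant::create(element::f32, Shape{}, {lo});
    auto range_hi = opset1::Constant::create(element::f32, Shape{}, {hi});
    return std::make_shared<opset1::FakeQuantize>(value, range_lo, range_hi, range_lo, range_hi, levels);
}

// Builds a Convolution whose data and weight inputs both come from FakeQuantize,
// which is what allows a plugin to execute it in INT8.
std::shared_ptr<Function> build_int8_friendly_conv() {
    auto data = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 3, 224, 224});
    auto weights = opset1::Constant::create(element::f32, Shape{64, 3, 3, 3}, {0.f});
    auto q_data = fake_quantize(data, 0.f, 255.f, 256);
    auto q_weights = fake_quantize(weights, -1.f, 1.f, 255);
    auto conv = std::make_shared<opset1::Convolution>(q_data, q_weights, Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});
    return std::make_shared<Function>(NodeVector{conv}, ParameterVector{data});
}
```

A plugin with low precision support recognizes such `FakeQuantize` → `Convolution` sub-graphs and quantizes them; a plugin without it simply executes `FakeQuantize` in floating point, as described below.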
To execute the `Convolution` operation in INT8 on CPU, both data and weight inputs should have `FakeQuantize` as an input operation: ![](../../img/expanded_int8_Convolution_weights.png) -INT8 IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between an INT8 IR and FP16 or FP32 IR is the existence of `FakeQuantize` in the INT8 IR. -Plugins with INT8 inference support recognize these sub-graphs and quantize them during the inference time. -Plugins without INT8 support execute all operations, including `FakeQuantize`, as is in the FP32 or FP16 precision. +A Low Precision IR is also suitable for FP32 and FP16 inference if a chosen plugin supports all operations of the IR, because the only difference between a Low Precision IR and FP16 or FP32 IR is the existence of `FakeQuantize` in the Low Precision IR. +Plugins with Low Precision Inference support recognize these sub-graphs and quantize them during the inference time. +Plugins without Low Precision support execute all operations, including `FakeQuantize`, as is in the FP32 or FP16 precision. Accordingly, the presence of FakeQuantize operations in the IR is a recommendation for a plugin on how to quantize particular operations in the model. -If capable, a plugin accepts the recommendation and performs INT8 inference, otherwise the plugin ignores the recommendation and executes a model in the floating-point precision. +If capable, a plugin accepts the recommendation and performs Low Precision Inference; otherwise, the plugin ignores the recommendation and executes the model in the floating-point precision. -## Compressed INT8 Weights +## Compressed Low Precision Weights Weighted operations, like `Convolution`, `MatMul`, and others, store weights as floating-point `Constant` in the graph followed by the `FakeQuantize` operation. `Constant` followed by the `FakeQuantize` operation could be optimized memory-wise due to the `FakeQuantize` operation semantics. -The resulting weights sub-graph stores weights in INT8 `Constant`, which gets unpacked back to floating point with the `Convert` operation. -Weights compression leaves `FakeQuantize` output arithmetically the same and weights storing takes four times less memory. +The resulting weights sub-graph stores weights in a Low Precision `Constant`, which gets unpacked back to floating point with the `Convert` operation. +Weights compression replaces `FakeQuantize` with optional `Subtract` and `Multiply` operations, leaving the output arithmetically the same while the stored weights take four times less memory. See the visualization of `Convolution` with the compressed weights: ![](../../img/compressed_int8_Convolution_weights.png) -Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default. To generate an expanded INT8 IR, use `--disable_weights_compression`. \ No newline at end of file +Both Model Optimizer and Post-Training Optimization tool generate a compressed IR by default. diff --git a/docs/install_guides/installing-openvino-raspbian.md b/docs/install_guides/installing-openvino-raspbian.md index 14b354532e1..61cff12e424 100644 --- a/docs/install_guides/installing-openvino-raspbian.md +++ b/docs/install_guides/installing-openvino-raspbian.md @@ -75,11 +75,11 @@ The guide assumes you downloaded the OpenVINO toolkit for Raspbian* OS. If you d By default, the package file is saved as `l_openvino_toolkit_runtime_raspbian_p_.tgz`. 3. Create an installation folder.
```sh - sudo mkdir -p /opt/intel/openvino + sudo mkdir -p /opt/intel/openvino_2021 ``` 4. Unpack the archive: ```sh - sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_.tgz --strip 1 -C /opt/intel/openvino + sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_.tgz --strip 1 -C /opt/intel/openvino_2021 ``` Now the OpenVINO toolkit components are installed. Additional configuration steps are still required. Continue to the next sections to install External Software Dependencies, configure the environment and set up USB rules. @@ -154,7 +154,7 @@ Follow the next steps to use the pre-trained face detection model using Inferenc ``` 4. Run the sample specifying the model, a path to the input image, and the VPU required to run with the Raspbian* OS: ```sh - ./armv7l/Release/object_detection_sample_ssd -m face-detection-adas-0001.xml -d MYRIAD -i + ./armv7l/Release/object_detection_sample_ssd -m /face-detection-adas-0001.xml -d MYRIAD -i ``` The application outputs an image (`out_0.bmp`) with detected faced enclosed in rectangles. diff --git a/docs/install_guides/pypi-openvino-dev.md b/docs/install_guides/pypi-openvino-dev.md index 7164c2cdf55..f04bdf3a21f 100644 --- a/docs/install_guides/pypi-openvino-dev.md +++ b/docs/install_guides/pypi-openvino-dev.md @@ -1,7 +1,7 @@ # Intel® Distribution of OpenVINO™ Toolkit Developer Package - +Copyright © 2018-2021 Intel Corporation > **LEGAL NOTICE**: Your use of this software and any required dependent software (the -“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/en-us/license/eula-for-intel-software-development-products) for the Software Package, which may also include notices, disclaimers, or +“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/content/dam/develop/external/us/en/documents/intel-openvino-license-agreements.pdf) for the Software Package, which may also include notices, disclaimers, or license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details. ## Introduction @@ -40,11 +40,7 @@ The table below lists the supported operating systems and Python* versions requi ## Install the Developer Package -### Step 1. Install External Software Dependencies - -On Windows* OS you are required to install [Microsoft* Visual C++ Redistributable Package (x64)](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2019) to be able to run OpenVINO™ applications. - -### Step 2. Set Up Python Virtual Environment +### Step 1. Set Up Python Virtual Environment To avoid dependency conflicts, use a virtual environment. Skip this step only if you do want to install all dependencies globally. @@ -62,7 +58,7 @@ On Windows: python -m venv openvino_env ``` -### Step 3. Activate Virtual Environment +### Step 2. Activate Virtual Environment On Linux and macOS: ```sh @@ -73,14 +69,14 @@ On Windows: openvino_env\Scripts\activate ``` -### Step 4. Set Up and Update pip to the Highest Version +### Step 3. Set Up and Update PIP to the Highest Version Run the command below: ```sh python -m pip install --upgrade pip ``` -### Step 5. Install the Package +### Step 4. Install the Package Run the command below:
@@ -88,7 +84,7 @@ Run the command below:
pip install openvino-dev ``` -### Step 6. Verify that the Package is Installed +### Step 5. Verify that the Package is Installed Run the command below (this may take a few seconds): ```sh @@ -97,6 +93,19 @@ pot -h You will see the help message for Post-Training Optimization Tool if installation finished successfully. +## Troubleshooting + +#### Error: Microsoft Visual C++ 14.0 is required. Get it with "Build Tools for Visual Studio" + +On Windows* some dependencies may require compilation from source when installing. To resolve this issue, you need to install [Build Tools for Visual Studio* 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019) and repeat package installation. + +#### ImportError: libpython3.7m.so.1.0: cannot open shared object file: No such file or directory + +To resolve missing external dependency on Ubuntu*, execute the following command: +```sh +sudo apt-get install libpython3.7 +``` + ## Additional Resources - Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit) diff --git a/docs/install_guides/pypi-openvino-rt.md b/docs/install_guides/pypi-openvino-rt.md index cfe95281fdf..6e22d74157c 100644 --- a/docs/install_guides/pypi-openvino-rt.md +++ b/docs/install_guides/pypi-openvino-rt.md @@ -1,7 +1,7 @@ # Intel® Distribution of OpenVINO™ Toolkit Runtime Package - +Copyright © 2018-2021 Intel Corporation > **LEGAL NOTICE**: Your use of this software and any required dependent software (the -“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/en-us/license/eula-for-intel-software-development-products) for the Software Package, which may also include notices, disclaimers, or +“Software Package”) is subject to the terms and conditions of the [software license agreements](https://software.intel.com/content/dam/develop/external/us/en/documents/intel-openvino-license-agreements.pdf) for the Software Package, which may also include notices, disclaimers, or license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details. ## Introduction @@ -37,11 +37,7 @@ The table below lists supported operating systems and Python* versions required ## Install the Runtime Package -### Step 1. Install External Software Dependencies - -On Windows* OS you are required to install [Microsoft* Visual C++ Redistributable Package (x64)](https://visualstudio.microsoft.com/downloads/#microsoft-visual-c-redistributable-for-visual-studio-2019) to be able to run OpenVINO™ applications. - -### Step 2. Set Up Python Virtual Environment +### Step 1. Set Up Python Virtual Environment To avoid dependency conflicts, use a virtual environment. Skip this step only if you do want to install all dependencies globally. @@ -55,7 +51,7 @@ python -m venv openvino_env > **NOTE**: On Linux and macOS, you may need to type `python3` instead of `python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/). -### Step 3. Activate Virtual Environment +### Step 2. Activate Virtual Environment On Linux and macOS: ```sh @@ -66,14 +62,14 @@ On Windows: openvino_env\Scripts\activate ``` -### Step 4. Set Up and Update pip to the Highest Version +### Step 3. 
Set Up and Update PIP to the Highest Version Run the command below: ```sh python -m pip install --upgrade pip ``` -### Step 5. Install the Package +### Step 4. Install the Package Run the command below:
@@ -81,7 +77,7 @@ Run the command below:
pip install openvino ``` -### Step 6. Verify that the Package is Installed +### Step 5. Verify that the Package is Installed Run the command below: ```sh @@ -90,6 +86,19 @@ python -c "from openvino.inference_engine import IECore" You will not see any error messages if installation finished successfully. +## Troubleshooting + +#### Error: Microsoft Visual C++ 14.0 is required. Get it with "Build Tools for Visual Studio" + +On Windows* some dependencies may require compilation from source when installing. To resolve this issue, you need to install [Build Tools for Visual Studio* 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019) and repeat package installation. + +#### ImportError: libpython3.7m.so.1.0: cannot open shared object file: No such file or directory + +To resolve missing external dependency on Ubuntu*, execute the following command: +```sh +sudo apt-get install libpython3.7 +``` + ## Additional Resources - [Intel® Distribution of OpenVINO™ toolkit](https://software.intel.com/en-us/openvino-toolkit). diff --git a/docs/onnx_custom_op/CMakeLists.txt b/docs/onnx_custom_op/CMakeLists.txt index 8446846dcfe..f38ead369d8 100644 --- a/docs/onnx_custom_op/CMakeLists.txt +++ b/docs/onnx_custom_op/CMakeLists.txt @@ -9,7 +9,10 @@ set(TARGET_NAME "onnx_custom_op") find_package(ngraph REQUIRED COMPONENTS onnx_importer) -add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp) +add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp) target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES}) # [cmake:onnx_custom_op] + +# Enable code style check +add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) diff --git a/docs/ops/activation/SoftPlus_4.md b/docs/ops/activation/SoftPlus_4.md index 19714de749b..e0e625ce1b4 100644 --- a/docs/ops/activation/SoftPlus_4.md +++ b/docs/ops/activation/SoftPlus_4.md @@ -28,7 +28,7 @@ The `threshold` can be calculated with the following formula where `alpha` is th -log(e^{10^{-\alpha}} - 1.0) < threshold < log(\beta) \f] -For example, if *T* is `fp32`, `threshold` should be `20` or if *T* is `fp16`, `threshold` should be `12`. +For example, if *T* is `fp32`, `threshold` should be `20` or if *T* is `fp16`, `threshold` should be `11`. **Attributes**: *SoftPlus* operation has no attributes. diff --git a/docs/ops/arithmetic/Floor_1.md b/docs/ops/arithmetic/Floor_1.md index f76c3b24752..910ce43d590 100644 --- a/docs/ops/arithmetic/Floor_1.md +++ b/docs/ops/arithmetic/Floor_1.md @@ -2,31 +2,31 @@ **Versioned name**: *Floor-1* -**Category**: Arithmetic unary operation +**Category**: Arithmetic unary operation **Short description**: *Floor* performs element-wise floor operation with given tensor. -**Attributes**: +**Detailed description**: For each element from the input tensor calculates corresponding +element in the output tensor with the following formula: - No attributes available. +\f[ +a_{i} = floor(a_{i}) +\f] + +**Attributes**: *Floor* operation has no attributes. **Inputs** -* **1**: An tensor of type T. **Required.** +* **1**: A tensor of type *T* and arbitrary shape. **Required.** **Outputs** -* **1**: The result of element-wise floor operation. A tensor of type T. +* **1**: The result of element-wise floor operation. A tensor of type *T*. **Types** * *T*: any numeric type. 
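As a quick numeric illustration of the element-wise formula added in the detailed description above (input values chosen arbitrarily), *Floor* rounds toward negative infinity:

\f[
floor([-4.5,\ 1.2,\ 7.0]) = [-5.0,\ 1.0,\ 7.0]
\f]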
-*Floor* does the following with the input tensor *a*: - -\f[ -a_{i} = floor(a_{i}) -\f] **Examples** diff --git a/docs/ops/movement/VariadicSplit_1.md b/docs/ops/movement/VariadicSplit_1.md index 87c9ac67b65..9b2392d6fc9 100644 --- a/docs/ops/movement/VariadicSplit_1.md +++ b/docs/ops/movement/VariadicSplit_1.md @@ -8,7 +8,7 @@ **Detailed Description** -*VariadicSplit* operation splits a given input tensor `data` into chunks along a scalar `axis`. It produces multiple output tensors based on additional input tensor `split_lengths`. +*VariadicSplit* operation splits a given input tensor `data` into chunks along a scalar or tensor with shape `[1]` `axis`. It produces multiple output tensors based on additional input tensor `split_lengths`. The i-th output tensor shape is equal to the input tensor `data` shape, except for dimension along `axis` which is `split_lengths[i]`. \f[ @@ -23,7 +23,7 @@ Where D is the rank of input tensor `data`. The sum of elements in `split_length * **1**: `data`. A tensor of type `T1` and arbitrary shape. **Required.** -* **2**: `axis`. Axis along `data` to split. A scalar of type `T2` with value from range `-rank(data) .. rank(data)-1`. Negative values address dimensions from the end. +* **2**: `axis`. Axis along `data` to split. A scalar or tensor with shape `[1]` of type `T2` with value from range `-rank(data) .. rank(data)-1`. Negative values address dimensions from the end. **Required.** * **3**: `split_lengths`. A list containing the dimension values of each output tensor shape along the split `axis`. A 1D tensor of type `T2`. The number of elements in `split_lengths` determines the number of outputs. The sum of elements in `split_lengths` must match `data.shape[axis]`. In addition `split_lengths` can contain a single `-1` element, which means, all remaining items along specified `axis` that are not consumed by other parts. **Required.** diff --git a/docs/ops/normalization/BatchNormInference_1.md b/docs/ops/normalization/BatchNormInference_1.md index 218111575bd..694a9989e9f 100644 --- a/docs/ops/normalization/BatchNormInference_1.md +++ b/docs/ops/normalization/BatchNormInference_1.md @@ -58,7 +58,7 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values * *epsilon* * **Description**: *epsilon* is a constant added to the variance for numerical stability. - * **Range of values**: a positive floating-point number + * **Range of values**: a floating-point number greater than or equal to zero * **Type**: `float` * **Default value**: none * **Required**: *yes* diff --git a/docs/ops/normalization/BatchNormInference_5.md b/docs/ops/normalization/BatchNormInference_5.md index cec26e4b2ec..f5019d08b2d 100644 --- a/docs/ops/normalization/BatchNormInference_5.md +++ b/docs/ops/normalization/BatchNormInference_5.md @@ -58,7 +58,7 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values * *epsilon* * **Description**: *epsilon* is a constant added to the variance for numerical stability. - * **Range of values**: a positive floating-point number + * **Range of values**: a floating-point number greater than or equal to zero * **Type**: `float` * **Default value**: none * **Required**: *yes* diff --git a/docs/snippets/example_ngraph_utils.cpp b/docs/snippets/example_ngraph_utils.cpp index 44780aa4615..380f7421f2e 100644 --- a/docs/snippets/example_ngraph_utils.cpp +++ b/docs/snippets/example_ngraph_utils.cpp @@ -154,7 +154,7 @@ auto consumers = output.get_target_inputs(); { // ! 
[ngraph:shape] auto partial_shape = node->input(0).get_partial_shape(); // get zero input partial shape -if (partial_shape.is_dynamic() /* or !partial_shape.is_staic() */) { +if (partial_shape.is_dynamic() /* or !partial_shape.is_static() */) { return false; } auto static_shape = partial_shape.get_shape(); @@ -311,4 +311,4 @@ void pass_manager_example3(std::shared_ptr f) { manager.run_passes(f); } // ! [ngraph:disabled_by_default] -} \ No newline at end of file +} diff --git a/docs/template_extension/CMakeLists.txt b/docs/template_extension/CMakeLists.txt index 9224383ffd6..a6e7527e55f 100644 --- a/docs/template_extension/CMakeLists.txt +++ b/docs/template_extension/CMakeLists.txt @@ -33,3 +33,7 @@ if (ngraph_onnx_importer_FOUND) target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_IMPORT_ENABLED) endif() # [cmake:extension] + +# Enable code style check +file(GLOB_RECURSE template_extension_src "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp" "${CMAKE_CURRENT_SOURCE_DIR}/*.hpp") +add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${template_extension_src}) diff --git a/docs/template_extension/cpu_kernel.cpp b/docs/template_extension/cpu_kernel.cpp index 9469094e4c1..aa2486589cb 100644 --- a/docs/template_extension/cpu_kernel.cpp +++ b/docs/template_extension/cpu_kernel.cpp @@ -3,13 +3,15 @@ // #include "cpu_kernel.hpp" -#include "op.hpp" + #include +#include "op.hpp" + using namespace TemplateExtension; //! [cpu_implementation:ctor] -OpImplementation::OpImplementation(const std::shared_ptr &node) { +OpImplementation::OpImplementation(const std::shared_ptr& node) { try { auto castedNode = std::dynamic_pointer_cast(node); if (!castedNode) @@ -32,8 +34,8 @@ OpImplementation::OpImplementation(const std::shared_ptr &node) { //! [cpu_implementation:ctor] //! [cpu_implementation:getSupportedConfigurations] -InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept { auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) { InferenceEngine::LayerConfig config; config.dynBatchSupport = false; @@ -72,7 +74,7 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve if (!error.empty()) { if (resp) { strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg)-1] = 0; + resp->msg[sizeof(resp->msg) - 1] = 0; } return InferenceEngine::GENERAL_ERROR; } @@ -85,25 +87,24 @@ InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::ve //! [cpu_implementation:getSupportedConfigurations] //! 
[cpu_implementation:init] -InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept { try { if (config.inConfs.size() != 1 || config.outConfs.size() != 1) { IE_THROW() << "Operation cannot be initialized with incorrect number of inputs/outputs!"; } if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) { - IE_THROW() - << "Operation can be initialized only with 4d input/output tensors!"; + IE_THROW() << "Operation can be initialized only with 4d input/output tensors!"; } if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 || - config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) { + config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) { IE_THROW() << "Operation supports only FP32 precisions!"; } } catch (InferenceEngine::Exception& ex) { if (resp) { strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg)-1] = 0; + resp->msg[sizeof(resp->msg) - 1] = 0; } return InferenceEngine::GENERAL_ERROR; } @@ -113,11 +114,10 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig //! [cpu_implementation:init] //! [cpu_implementation:execute] -InferenceEngine::StatusCode OpImplementation::execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept { - const float* src_data = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - float *dst_data = outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); +InferenceEngine::StatusCode OpImplementation::execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* resp) noexcept { + const float* src_data = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); + float* dst_data = outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); for (size_t i = 0; i < inputs[0]->size(); i++) { dst_data[i] = src_data[i] + add; diff --git a/docs/template_extension/cpu_kernel.hpp b/docs/template_extension/cpu_kernel.hpp index 692bbbbec30..901d33093b5 100644 --- a/docs/template_extension/cpu_kernel.hpp +++ b/docs/template_extension/cpu_kernel.hpp @@ -5,6 +5,7 @@ #pragma once #include + #include namespace TemplateExtension { @@ -13,13 +14,12 @@ namespace TemplateExtension { class OpImplementation : public InferenceEngine::ILayerExecImpl { public: explicit OpImplementation(const std::shared_ptr& node); - InferenceEngine::StatusCode getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept override; + InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* resp) 
noexcept override; + private: int64_t add; ngraph::Shape inShape; diff --git a/docs/template_extension/extension.cpp b/docs/template_extension/extension.cpp index d9baa69a059..7a0874f2bea 100644 --- a/docs/template_extension/extension.cpp +++ b/docs/template_extension/extension.cpp @@ -3,15 +3,16 @@ // #include "extension.hpp" + #include "cpu_kernel.hpp" #include "op.hpp" #ifdef OPENCV_IMPORT_ENABLED -#include "fft_op.hpp" -#include "fft_kernel.hpp" + #include "fft_kernel.hpp" + #include "fft_op.hpp" #endif #include #ifdef NGRAPH_ONNX_IMPORT_ENABLED -#include + #include #endif #include @@ -21,22 +22,19 @@ using namespace TemplateExtension; - //! [extension:ctor] Extension::Extension() { #ifdef NGRAPH_ONNX_IMPORT_ENABLED - ngraph::onnx_import::register_operator( - Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; - int64_t add = node.get_attribute_value("add"); - return {std::make_shared(ng_inputs.at(0), add)}; + ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { + ngraph::OutputVector ng_inputs {node.get_ng_inputs()}; + int64_t add = node.get_attribute_value("add"); + return {std::make_shared(ng_inputs.at(0), add)}; }); #ifdef OPENCV_IMPORT_ENABLED - ngraph::onnx_import::register_operator( - FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { - ngraph::OutputVector ng_inputs{node.get_ng_inputs()}; - bool inverse = node.get_attribute_value("inverse"); - return {std::make_shared(ng_inputs.at(0), inverse)}; + ngraph::onnx_import::register_operator(FFTOp::type_info.name, 1, "custom_domain", [](const ngraph::onnx_import::Node& node) -> ngraph::OutputVector { + ngraph::OutputVector ng_inputs {node.get_ng_inputs()}; + bool inverse = node.get_attribute_value("inverse"); + return {std::make_shared(ng_inputs.at(0), inverse)}; }); #endif #endif @@ -47,19 +45,19 @@ Extension::Extension() { Extension::~Extension() { #ifdef NGRAPH_ONNX_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain"); -#ifdef OPENCV_IMPORT_ENABLED + #ifdef OPENCV_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain"); -#endif // OPENCV_IMPORT_ENABLED -#endif // NGRAPH_ONNX_IMPORT_ENABLED + #endif // OPENCV_IMPORT_ENABLED +#endif // NGRAPH_ONNX_IMPORT_ENABLED } //! [extension:dtor] //! [extension:GetVersion] -void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept { +void Extension::GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept { static InferenceEngine::Version ExtensionDescription = { - {1, 0}, // extension API version + {1, 0}, // extension API version "1.0", - "template_ext" // extension description message + "template_ext" // extension description message }; versionInfo = &ExtensionDescription; @@ -80,7 +78,7 @@ std::map Extension::getOpSets() { //! [extension:getOpSets] //! 
[extension:getImplTypes] -std::vector Extension::getImplTypes(const std::shared_ptr &node) { +std::vector Extension::getImplTypes(const std::shared_ptr& node) { if (std::dynamic_pointer_cast(node)) { return {"CPU"}; } @@ -94,7 +92,7 @@ std::vector Extension::getImplTypes(const std::shared_ptr &node, const std::string &implType) { +InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr& node, const std::string& implType) { if (implType == "CPU") { if (std::dynamic_pointer_cast(node)) { return std::make_shared(node); @@ -110,16 +108,16 @@ InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ //! [extension:getImplementation] //! [extension:CreateExtension] -//Generate exported function +// Generate exported function IE_DEFINE_EXTENSION_CREATE_FUNCTION(Extension) //! [extension:CreateExtension] -INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext, - InferenceEngine::ResponseDesc *resp) noexcept { +INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) +InferenceEngine::CreateExtension(InferenceEngine::IExtension*& ext, InferenceEngine::ResponseDesc* resp) noexcept { try { ext = new Extension(); return OK; - } catch (std::exception &ex) { + } catch (std::exception& ex) { if (resp) { std::string err = ((std::string) "Couldn't create extension: ") + ex.what(); err.copy(resp->msg, 255); diff --git a/docs/template_extension/extension.hpp b/docs/template_extension/extension.hpp index 24e731bcf2d..0cc3b5816fe 100644 --- a/docs/template_extension/extension.hpp +++ b/docs/template_extension/extension.hpp @@ -4,13 +4,14 @@ #pragma once -#include #include -#include -#include -#include -#include +#include + #include +#include +#include +#include +#include //! [extension:header] namespace TemplateExtension { diff --git a/docs/template_extension/fft_kernel.cpp b/docs/template_extension/fft_kernel.cpp index 8e37bdfce9f..12554a70c75 100644 --- a/docs/template_extension/fft_kernel.cpp +++ b/docs/template_extension/fft_kernel.cpp @@ -4,14 +4,16 @@ //! [fft_kernel:implementation] #include "fft_kernel.hpp" -#include "fft_op.hpp" + #include #include +#include "fft_op.hpp" + using namespace TemplateExtension; -FFTImpl::FFTImpl(const std::shared_ptr &node) { +FFTImpl::FFTImpl(const std::shared_ptr& node) { auto castedNode = std::dynamic_pointer_cast(node); if (!castedNode) IE_THROW() << "Cannot create implementation for unknown operation!"; @@ -26,8 +28,7 @@ FFTImpl::FFTImpl(const std::shared_ptr &node) { inverse = castedNode->inverse; } -InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vector& conf, InferenceEngine::ResponseDesc* resp) noexcept { std::vector inDataConfig; std::vector outDataConfig; InferenceEngine::SizeVector order(inpShape.size()); @@ -54,28 +55,27 @@ InferenceEngine::StatusCode FFTImpl::getSupportedConfigurations(std::vectormsg, error.c_str(), sizeof(resp->msg) - 1); - resp->msg[sizeof(resp->msg)-1] = 0; + resp->msg[sizeof(resp->msg) - 1] = 0; } return InferenceEngine::GENERAL_ERROR; } return InferenceEngine::OK; } -static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) -{ +static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) { // NOTE: Inference Engine sizes are reversed. 
std::vector dims = blob->getTensorDesc().getDims(); std::vector size(dims.begin(), dims.end()); @@ -84,9 +84,8 @@ static cv::Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) return cv::Mat(size, CV_32F, (void*)blob->buffer()); } -InferenceEngine::StatusCode FFTImpl::execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept { +InferenceEngine::StatusCode FFTImpl::execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* resp) noexcept { cv::Mat inp = infEngineBlobToMat(inputs[0]); cv::Mat out = infEngineBlobToMat(outputs[0]); @@ -95,10 +94,7 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector components = { - cv::Mat(h, w, CV_32F, inp.ptr(i, 0)), - cv::Mat(h, w, CV_32F, inp.ptr(i, 1)) - }; + std::vector components = {cv::Mat(h, w, CV_32F, inp.ptr(i, 0)), cv::Mat(h, w, CV_32F, inp.ptr(i, 1))}; cv::merge(components, complex); if (!inverse) @@ -106,13 +102,9 @@ InferenceEngine::StatusCode FFTImpl::execute(std::vector(i, 0)), - cv::Mat(h, w, CV_32F, out.ptr(i, 1)) - }; + components = {cv::Mat(h, w, CV_32F, out.ptr(i, 0)), cv::Mat(h, w, CV_32F, out.ptr(i, 1))}; cv::split(interleavedOut, components); } return InferenceEngine::OK; } //! [fft_kernel:implementation] - diff --git a/docs/template_extension/fft_kernel.hpp b/docs/template_extension/fft_kernel.hpp index 74fc3a4b138..f3283288861 100644 --- a/docs/template_extension/fft_kernel.hpp +++ b/docs/template_extension/fft_kernel.hpp @@ -6,6 +6,7 @@ #pragma once #include + #include namespace TemplateExtension { @@ -13,13 +14,12 @@ namespace TemplateExtension { class FFTImpl : public InferenceEngine::ILayerExecImpl { public: explicit FFTImpl(const std::shared_ptr& node); - InferenceEngine::StatusCode getSupportedConfigurations(std::vector &conf, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config, - InferenceEngine::ResponseDesc *resp) noexcept override; - InferenceEngine::StatusCode execute(std::vector &inputs, - std::vector &outputs, - InferenceEngine::ResponseDesc *resp) noexcept override; + InferenceEngine::StatusCode getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc* resp) noexcept override; + InferenceEngine::StatusCode execute(std::vector& inputs, std::vector& outputs, + InferenceEngine::ResponseDesc* resp) noexcept override; + private: ngraph::Shape inpShape; ngraph::Shape outShape; @@ -27,5 +27,5 @@ private: std::string error; }; -} +} // namespace TemplateExtension //! 
[fft_kernel:header] diff --git a/docs/template_extension/fft_op.cpp b/docs/template_extension/fft_op.cpp index 8d85d5c08f5..b71a06bc746 100644 --- a/docs/template_extension/fft_op.cpp +++ b/docs/template_extension/fft_op.cpp @@ -9,7 +9,7 @@ using namespace TemplateExtension; constexpr ngraph::NodeTypeInfo FFTOp::type_info; -FFTOp::FFTOp(const ngraph::Output& inp, bool _inverse) : Op({inp}) { +FFTOp::FFTOp(const ngraph::Output& inp, bool _inverse): Op({inp}) { constructor_validate_and_infer_types(); inverse = _inverse; } @@ -19,16 +19,15 @@ void FFTOp::validate_and_infer_types() { set_output_type(0, get_input_element_type(0), outShape); } -std::shared_ptr FFTOp::clone_with_new_inputs(const ngraph::OutputVector &new_args) const { +std::shared_ptr FFTOp::clone_with_new_inputs(const ngraph::OutputVector& new_args) const { if (new_args.size() != 1) { throw ngraph::ngraph_error("Incorrect number of new arguments"); } return std::make_shared(new_args.at(0), inverse); } -bool FFTOp::visit_attributes(ngraph::AttributeVisitor &visitor) { +bool FFTOp::visit_attributes(ngraph::AttributeVisitor& visitor) { visitor.on_attribute("inverse", inverse); return true; } //! [fft_op:implementation] - diff --git a/docs/template_extension/fft_op.hpp b/docs/template_extension/fft_op.hpp index eca07bcb7fc..2e79888cfd3 100644 --- a/docs/template_extension/fft_op.hpp +++ b/docs/template_extension/fft_op.hpp @@ -11,8 +11,10 @@ namespace TemplateExtension { class FFTOp : public ngraph::op::Op { public: - static constexpr ngraph::NodeTypeInfo type_info{"FFT", 0}; - const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; } + static constexpr ngraph::NodeTypeInfo type_info {"FFT", 0}; + const ngraph::NodeTypeInfo& get_type_info() const override { + return type_info; + } FFTOp() = default; FFTOp(const ngraph::Output& inp, bool inverse); @@ -23,6 +25,5 @@ public: bool inverse; }; -} +} // namespace TemplateExtension //! [fft_op:header] - diff --git a/docs/template_extension/op.cpp b/docs/template_extension/op.cpp index 416b39495c1..ec53c2ca26c 100644 --- a/docs/template_extension/op.cpp +++ b/docs/template_extension/op.cpp @@ -9,7 +9,7 @@ using namespace TemplateExtension; //! [op:ctor] NGRAPH_RTTI_DEFINITION(TemplateExtension::Operation, "Template", 0); -Operation::Operation(const ngraph::Output &arg, int64_t add) : Op({arg}), add(add) { +Operation::Operation(const ngraph::Output& arg, int64_t add): Op({arg}), add(add) { constructor_validate_and_infer_types(); } //! [op:ctor] @@ -22,7 +22,7 @@ void Operation::validate_and_infer_types() { //! [op:validate] //! [op:copy] -std::shared_ptr Operation::clone_with_new_inputs(const ngraph::OutputVector &new_args) const { +std::shared_ptr Operation::clone_with_new_inputs(const ngraph::OutputVector& new_args) const { if (new_args.size() != 1) { throw ngraph::ngraph_error("Incorrect number of new arguments"); } @@ -32,56 +32,77 @@ std::shared_ptr Operation::clone_with_new_inputs(const ngraph::Out //! [op:copy] //! [op:visit_attributes] -bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) { +bool Operation::visit_attributes(ngraph::AttributeVisitor& visitor) { visitor.on_attribute("add", add); return true; } //! [op:visit_attributes] //! 
[op:evaluate] -namespace -{ +namespace { template -void implementation(const T* input, - T* output, - int64_t add, - size_t size) { +void implementation(const T* input, T* output, int64_t add, size_t size) { for (size_t i = 0; i < size; i++) { output[i] = input[i] + add; } } template -bool evaluate_op(const ngraph::HostTensorPtr& arg0, - const ngraph::HostTensorPtr& out, int64_t add) -{ +bool evaluate_op(const ngraph::HostTensorPtr& arg0, const ngraph::HostTensorPtr& out, int64_t add) { size_t size = ngraph::shape_size(arg0->get_shape()); - implementation(arg0->get_data_ptr(), - out->get_data_ptr(), - add, - size); + implementation(arg0->get_data_ptr(), out->get_data_ptr(), add, size); return true; } } // namespace -bool Operation::evaluate(const ngraph::HostTensorVector& outputs, - const ngraph::HostTensorVector& inputs) const { - switch (inputs[0]->get_element_type()) - { - case ngraph::element::Type_t::i8: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::i16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::i32: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::i64: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u8: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u32: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::u64: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::bf16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::f16: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - case ngraph::element::Type_t::f32: return evaluate_op(inputs[0], outputs[0], getAddAttr()); - default: break; +bool Operation::evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const { + switch (inputs[0]->get_element_type()) { + case ngraph::element::Type_t::i8: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::i16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::i32: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::i64: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u8: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u32: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::u64: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::bf16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::f16: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + case ngraph::element::Type_t::f32: + return evaluate_op(inputs[0], outputs[0], getAddAttr()); + default: + break; + } + return false; +} + +bool Operation::has_evaluate() const { + switch (get_input_element_type(0)) { + case ngraph::element::Type_t::i8: + case ngraph::element::Type_t::i16: + case ngraph::element::Type_t::i32: + case ngraph::element::Type_t::i64: + case ngraph::element::Type_t::u8: + case ngraph::element::Type_t::u16: + case ngraph::element::Type_t::u32: + case 
ngraph::element::Type_t::u64: + case ngraph::element::Type_t::bf16: + case ngraph::element::Type_t::f16: + case ngraph::element::Type_t::f32: + return true; + default: + break; } return false; } diff --git a/docs/template_extension/op.hpp b/docs/template_extension/op.hpp index dd03e20db16..4d3baf83a53 100644 --- a/docs/template_extension/op.hpp +++ b/docs/template_extension/op.hpp @@ -18,9 +18,11 @@ public: void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const ngraph::OutputVector& new_args) const override; bool visit_attributes(ngraph::AttributeVisitor& visitor) override; - int64_t getAddAttr() const { return add; } - bool evaluate(const ngraph::HostTensorVector& outputs, - const ngraph::HostTensorVector& inputs) const override; + int64_t getAddAttr() const { + return add; + } + bool evaluate(const ngraph::HostTensorVector& outputs, const ngraph::HostTensorVector& inputs) const override; + bool has_evaluate() const override; private: int64_t add; diff --git a/docs/template_plugin/src/CMakeLists.txt b/docs/template_plugin/src/CMakeLists.txt index 62cfe6641a1..9a24bc6c32a 100644 --- a/docs/template_plugin/src/CMakeLists.txt +++ b/docs/template_plugin/src/CMakeLists.txt @@ -13,7 +13,8 @@ ie_add_plugin(NAME ${TARGET_NAME} DEVICE_NAME "TEMPLATE" SOURCES ${SOURCES} ${HEADERS} SKIP_INSTALL # ATTENTION: uncomment to install component - VERSION_DEFINES_FOR template_plugin.cpp) + VERSION_DEFINES_FOR template_plugin.cpp + ADD_CLANG_FORMAT) target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" diff --git a/docs/template_plugin/src/template_async_infer_request.cpp b/docs/template_plugin/src/template_async_infer_request.cpp index 50360753000..bcdd3b6f2a2 100644 --- a/docs/template_plugin/src/template_async_infer_request.cpp +++ b/docs/template_plugin/src/template_async_infer_request.cpp @@ -3,18 +3,16 @@ // #include "template_async_infer_request.hpp" + #include "template_itt.hpp" using namespace TemplatePlugin; // ! [async_infer_request:ctor] -TemplateAsyncInferRequest::TemplateAsyncInferRequest( - const TemplateInferRequest::Ptr& inferRequest, - const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, - const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, - const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) : - AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), - _inferRequest(inferRequest), _waitExecutor(waitExecutor) { +TemplateAsyncInferRequest::TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, + const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, + const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) + : AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), _inferRequest(inferRequest), _waitExecutor(waitExecutor) { // In current implementation we have CPU only tasks and no needs in 2 executors // So, by default single stage pipeline is created. // This stage executes InferRequest::Infer() using cpuTaskExecutor. 
@@ -23,24 +21,21 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest( constexpr const auto remoteDevice = false; if (remoteDevice) { - _pipeline = { - {cpuTaskExecutor, [this] { - OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, - "TemplateAsyncInferRequest::PreprocessingAndStartPipeline"); - _inferRequest->inferPreprocess(); - _inferRequest->startPipeline(); - }}, - {_waitExecutor, [this] { - OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, - "TemplateAsyncInferRequest::WaitPipeline"); - _inferRequest->waitPipeline(); - }}, - {cpuTaskExecutor, [this] { - OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, - "TemplateAsyncInferRequest::Postprocessing"); - _inferRequest->inferPostprocess(); - }} - }; + _pipeline = {{cpuTaskExecutor, + [this] { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::PreprocessingAndStartPipeline"); + _inferRequest->inferPreprocess(); + _inferRequest->startPipeline(); + }}, + {_waitExecutor, + [this] { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::WaitPipeline"); + _inferRequest->waitPipeline(); + }}, + {cpuTaskExecutor, [this] { + OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "TemplateAsyncInferRequest::Postprocessing"); + _inferRequest->inferPostprocess(); + }}}; } } // ! [async_infer_request:ctor] diff --git a/docs/template_plugin/src/template_async_infer_request.hpp b/docs/template_plugin/src/template_async_infer_request.hpp index 51221f908ee..942f71a616f 100644 --- a/docs/template_plugin/src/template_async_infer_request.hpp +++ b/docs/template_plugin/src/template_async_infer_request.hpp @@ -13,15 +13,13 @@ namespace TemplatePlugin { // ! [async_infer_request:header] class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault { public: - TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, - const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, - const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, - const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor); + TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, + const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor); ~TemplateAsyncInferRequest(); private: - TemplateInferRequest::Ptr _inferRequest; + TemplateInferRequest::Ptr _inferRequest; InferenceEngine::ITaskExecutor::Ptr _waitExecutor; }; // ! 
[async_infer_request:header] diff --git a/docs/template_plugin/src/template_config.cpp b/docs/template_plugin/src/template_config.cpp index c29e17512c3..3d9d4e488fe 100644 --- a/docs/template_plugin/src/template_config.cpp +++ b/docs/template_plugin/src/template_config.cpp @@ -2,17 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - #include "template_config.hpp" + +#include +#include + #include "template/template_config.hpp" using namespace TemplatePlugin; -Configuration::Configuration() { } +Configuration::Configuration() {} -Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) { +Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) { *this = defaultCfg; // If plugin needs to use InferenceEngine::StreamsExecutor it should be able to process its configuration auto streamExecutorConfigKeys = _streamsExecutorConfig.SupportedKeys(); @@ -22,8 +23,7 @@ Configuration::Configuration(const ConfigMap& config, const Configuration & defa if (TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) == key) { _streamsExecutorConfig.SetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS), value); - } else if (streamExecutorConfigKeys.end() != - std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { + } else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { _streamsExecutorConfig.SetConfig(key, value); } else if (CONFIG_KEY(DEVICE_ID) == key) { deviceId = std::stoi(value); diff --git a/docs/template_plugin/src/template_config.hpp b/docs/template_plugin/src/template_config.hpp index 2085e290af2..d49bf491327 100644 --- a/docs/template_plugin/src/template_config.hpp +++ b/docs/template_plugin/src/template_config.hpp @@ -4,11 +4,9 @@ #pragma once -#include -#include - #include - +#include +#include #include namespace TemplatePlugin { @@ -18,19 +16,19 @@ using ConfigMap = std::map; struct Configuration { Configuration(); - Configuration(const Configuration&) = default; - Configuration(Configuration&&) = default; - Configuration& operator=(const Configuration&) = default; - Configuration& operator=(Configuration&&) = default; + Configuration(const Configuration&) = default; + Configuration(Configuration&&) = default; + Configuration& operator=(const Configuration&) = default; + Configuration& operator=(Configuration&&) = default; - explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true); + explicit Configuration(const ConfigMap& config, const Configuration& defaultCfg = {}, const bool throwOnUnsupported = true); InferenceEngine::Parameter Get(const std::string& name) const; // Plugin configuration parameters - int deviceId = 0; - bool perfCount = true; + int deviceId = 0; + bool perfCount = true; InferenceEngine::IStreamsExecutor::Config _streamsExecutorConfig; }; // ! 
[configuration:header] diff --git a/docs/template_plugin/src/template_executable_network.cpp b/docs/template_plugin/src/template_executable_network.cpp index e599dceb434..f0f2a8066e5 100644 --- a/docs/template_plugin/src/template_executable_network.cpp +++ b/docs/template_plugin/src/template_executable_network.cpp @@ -2,36 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "template_executable_network.hpp" + #include #include #include -#include "transformations/serialize.hpp" #include "template/template_config.hpp" -#include "template_plugin.hpp" -#include "template_executable_network.hpp" #include "template_itt.hpp" +#include "template_plugin.hpp" +#include "transformations/serialize.hpp" using namespace TemplatePlugin; // ! [executable_network:ctor_cnnnetwork] TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap& inputInfoMap, - const InferenceEngine::OutputsDataMap& outputsInfoMap, - const Configuration& cfg, - const Plugin::Ptr& plugin) : - InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation - _cfg(cfg), - _plugin(plugin) { + const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap, + const Configuration& cfg, const Plugin::Ptr& plugin) + : InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation + _cfg(cfg), + _plugin(plugin) { // TODO: if your plugin supports device ID (more that single instance of device can be on host machine) // you should select proper device based on KEY_DEVICE_ID or automatic behavior // In this case, _waitExecutor should also be created per device. try { CompileNetwork(function, inputInfoMap, outputsInfoMap); - InitExecutor(); // creates thread-based executor using for async requests + InitExecutor(); // creates thread-based executor using for async requests } catch (const InferenceEngine::Exception&) { throw; - } catch (const std::exception & e) { + } catch (const std::exception& e) { IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what(); } catch (...) { IE_THROW(Unexpected) << "Generic exception is thrown"; @@ -40,11 +39,7 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr(&dataSize), sizeof(dataSize)); if (0 != dataSize) { dataBlob = InferenceEngine::make_shared_blob( - InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, - {static_cast(dataSize)}, - InferenceEngine::Layout::C)); + InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {static_cast(dataSize)}, InferenceEngine::Layout::C)); dataBlob->allocate(); model.read(dataBlob->buffer(), dataSize); } @@ -77,10 +70,10 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model, try { CompileNetwork(cnnnetwork.getFunction(), inputInfoMap, outputInfoMap); - InitExecutor(); // creates thread-based executor using for async requests + InitExecutor(); // creates thread-based executor using for async requests } catch (const InferenceEngine::Exception&) { throw; - } catch (const std::exception & e) { + } catch (const std::exception& e) { IE_THROW(Unexpected) << "Standard exception from compilation library: " << e.what(); } catch (...) { IE_THROW(Unexpected) << "Generic exception is thrown"; @@ -90,12 +83,11 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model, // ! 
[executable_network:map_graph] // forward declaration -std::shared_ptr TransformNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap & inputInfoMap, +std::shared_ptr TransformNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap); void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap & inputInfoMap, + const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap) { // TODO: perform actual graph compilation / mapping to backend graph representation / kernels @@ -120,7 +112,6 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr(networkInputs, networkOutputs, std::static_pointer_cast(shared_from_this())); } // ! [executable_network:create_infer_request_impl] @@ -148,32 +138,26 @@ InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::C // ! [executable_network:create_infer_request] InferenceEngine::IInferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() { auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs); - return std::make_shared(std::static_pointer_cast(internalRequest), - _taskExecutor, _plugin->_waitExecutor, _callbackExecutor); + return std::make_shared(std::static_pointer_cast(internalRequest), _taskExecutor, _plugin->_waitExecutor, + _callbackExecutor); } // ! [executable_network:create_infer_request] // ! [executable_network:get_config] -InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const { +InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string& name) const { return _cfg.Get(name); } // ! [executable_network:get_config] // ! 
[executable_network:get_metric] -InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name) const { +InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string& name) const { // TODO: return more supported values for metrics if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { - IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector{ - METRIC_KEY(NETWORK_NAME), - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), - METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}); + IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector {METRIC_KEY(NETWORK_NAME), METRIC_KEY(SUPPORTED_METRICS), + METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}); } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - std::vector configKeys = { - CONFIG_KEY(DEVICE_ID), - CONFIG_KEY(PERF_COUNT), - TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS) }; - auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys(); + std::vector configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; + auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys(); for (auto&& configKey : streamExecutorConfigKeys) { configKeys.emplace_back(configKey); } @@ -197,8 +181,7 @@ void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& modelStream) { // Note: custom ngraph extensions are not supported std::map custom_opsets; std::stringstream xmlFile, binFile; - ngraph::pass::Serialize serializer(xmlFile, binFile, - ngraph::pass::Serialize::Version::IR_V10, custom_opsets); + ngraph::pass::Serialize serializer(xmlFile, binFile, ngraph::pass::Serialize::Version::IR_V10, custom_opsets); serializer.run_on_function(_function); auto m_constants = binFile.str(); diff --git a/docs/template_plugin/src/template_executable_network.hpp b/docs/template_plugin/src/template_executable_network.hpp index 23f781a2efd..cebfddb3947 100644 --- a/docs/template_plugin/src/template_executable_network.hpp +++ b/docs/template_plugin/src/template_executable_network.hpp @@ -4,13 +4,12 @@ #pragma once +#include #include +#include "template_async_infer_request.hpp" #include "template_config.hpp" #include "template_infer_request.hpp" -#include "template_async_infer_request.hpp" - -#include namespace TemplatePlugin { @@ -24,15 +23,10 @@ class Plugin; // ! 
[executable_network:header] class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault { public: - ExecutableNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap& inputInfoMap, - const InferenceEngine::OutputsDataMap& outputsInfoMap, - const Configuration& cfg, - const std::shared_ptr& plugin); + ExecutableNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, + const InferenceEngine::OutputsDataMap& outputsInfoMap, const Configuration& cfg, const std::shared_ptr& plugin); - ExecutableNetwork(std::istream& model, - const Configuration& cfg, - const std::shared_ptr& plugin); + ExecutableNetwork(std::istream& model, const Configuration& cfg, const std::shared_ptr& plugin); ~ExecutableNetwork() override = default; @@ -42,23 +36,22 @@ public: InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, InferenceEngine::OutputsDataMap networkOutputs) override; InferenceEngine::IInferRequestInternal::Ptr CreateInferRequest() override; - InferenceEngine::Parameter GetMetric(const std::string &name) const override; - InferenceEngine::Parameter GetConfig(const std::string &name) const override; + InferenceEngine::Parameter GetMetric(const std::string& name) const override; + InferenceEngine::Parameter GetConfig(const std::string& name) const override; private: friend class TemplateInferRequest; - void CompileNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap& inputInfoMap, - const InferenceEngine::OutputsDataMap& outputsInfoMap); + void CompileNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, + const InferenceEngine::OutputsDataMap& outputsInfoMap); void InitExecutor(); - std::atomic _requestId = {0}; - Configuration _cfg; - std::shared_ptr _plugin; - std::shared_ptr _function; - std::map _inputIndex; - std::map _outputIndex; + std::atomic _requestId = {0}; + Configuration _cfg; + std::shared_ptr _plugin; + std::shared_ptr _function; + std::map _inputIndex; + std::map _outputIndex; }; // ! [executable_network:header] diff --git a/docs/template_plugin/src/template_infer_request.cpp b/docs/template_plugin/src/template_infer_request.cpp index 49678c7960a..20c47bfd19e 100644 --- a/docs/template_plugin/src/template_infer_request.cpp +++ b/docs/template_plugin/src/template_infer_request.cpp @@ -2,16 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include - #include "template_infer_request.hpp" + +#include +#include +#include +#include +#include +#include + +#include "blob_factory.hpp" +#include "ie_ngraph_utils.hpp" #include "template_executable_network.hpp" -#include "template_plugin.hpp" #include "template_itt.hpp" +#include "template_plugin.hpp" using namespace TemplatePlugin; using namespace InferenceEngine; @@ -19,11 +23,9 @@ using namespace InferenceEngine; using Time = std::chrono::high_resolution_clock; // ! 
[infer_request:ctor] -TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, - const InferenceEngine::OutputsDataMap& networkOutputs, - const std::shared_ptr& executableNetwork) : - IInferRequestInternal(networkInputs, networkOutputs), - _executableNetwork(executableNetwork) { +TemplateInferRequest::TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, const InferenceEngine::OutputsDataMap& networkOutputs, + const std::shared_ptr& executableNetwork) + : IInferRequestInternal(networkInputs, networkOutputs), _executableNetwork(executableNetwork) { // TODO: allocate infer request device and host buffers if needed, fill actual list of profiling tasks auto requestID = std::to_string(_executableNetwork->_requestId.fetch_add(1)); @@ -57,66 +59,47 @@ void TemplateInferRequest::allocateDeviceBuffers() { _outputTensors.resize(_networkOutputs.size()); } -template -static void AllocateImpl(const BlobDataMap& userDataMap, - BlobMap& userBlobMap, - BlobMap& deviceBlobMap, - GetNetworkPrecisionF&& GetNetworkPrecision, +template +static void AllocateImpl(const BlobDataMap& userDataMap, BlobMap& userBlobMap, BlobMap& deviceBlobMap, GetNetworkPrecisionF&& GetNetworkPrecision, bool isInputBlob = true) { for (auto&& userData : userDataMap) { - auto& dims = userData.second->getTensorDesc().getDims(); - const auto devicePrecision = Precision::FP32; + const auto& dims = userData.second->getTensorDesc().getDims(); const auto deviceLayout = TensorDesc::getLayoutByDims(dims); - auto userPrecision = userData.second->getTensorDesc().getPrecision(); - auto userLayout = userData.second->getTensorDesc().getLayout(); + const auto userPrecision = userData.second->getTensorDesc().getPrecision(); + const auto userLayout = userData.second->getTensorDesc().getLayout(); - Blob::Ptr userBlob; - switch (userPrecision) { - case Precision::U8: { - userBlob = InferenceEngine::make_shared_blob({userPrecision, dims, userLayout}); - } break; - case Precision::FP32 : { - userBlob = InferenceEngine::make_shared_blob({userPrecision, dims, userLayout}); - } break; - default: IE_THROW(NotImplemented) << "Template Plugin: Unsupported Input/Output Precision"; - } + const auto networkPrecision = InferenceEngine::details::convertPrecision(GetNetworkPrecision(userData.first)); + Blob::Ptr userBlob = make_blob_with_precision({userPrecision, dims, userLayout}); userBlob->allocate(); userBlobMap[userData.first] = userBlob; - auto networkPrecision = GetNetworkPrecision(userData.first); Blob::Ptr deviceBlob; - switch (networkPrecision) { - case ngraph::element::Type_t::f32 : { - if (userPrecision == devicePrecision && userLayout == deviceLayout) { - deviceBlob = userBlob; - } else { - deviceBlob = InferenceEngine::make_shared_blob({devicePrecision, dims, deviceLayout}); - } - } break; - default: IE_THROW(NotImplemented) << "Template Plugin: Unsupported network Input/Output Presision"; - } - if (userBlob != deviceBlob) { - if (isInputBlob) { - // preprocessing converts user input blob to desired device input blob automatically - deviceBlob->allocate(); - } else { - // NOTE: this is not supported for output user blobs yet - IE_THROW(NotImplemented) << "Template Plugin: does not support setPrecision, setLayout for outputs"; + if (userPrecision == networkPrecision && userLayout == deviceLayout) { + deviceBlob = userBlob; + } else { + if (userLayout != deviceLayout && !isInputBlob) { + IE_THROW(NotImplemented) << "Template Plugin: does not support setLayout for outputs"; } + deviceBlob = 
make_blob_with_precision({networkPrecision, dims, deviceLayout}); + deviceBlob->allocate(); } + deviceBlobMap[userData.first] = deviceBlob; } } void TemplateInferRequest::allocateBlobs() { auto&& parameters = _executableNetwork->_function->get_parameters(); - AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&] (const std::string& blobName) { + AllocateImpl(_networkInputs, _inputs, _deviceInputs, [&](const std::string& blobName) { return parameters.at(_executableNetwork->_inputIndex.at(blobName))->get_element_type(); }); auto&& results = _executableNetwork->_function->get_results(); - AllocateImpl(_networkOutputs, _outputs, _networkOutputBlobs, [&] (const std::string& blobName) { - return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type(); - }, false); + AllocateImpl( + _networkOutputs, _outputs, _networkOutputBlobs, + [&](const std::string& blobName) { + return results.at(_executableNetwork->_outputIndex.at(blobName))->get_element_type(); + }, + false); } // ! [infer_request:infer_impl] @@ -129,42 +112,108 @@ void TemplateInferRequest::InferImpl() { } // ! [infer_request:infer_impl] -template +template static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { - std::copy_n(InferenceEngine::as(src)->rmap().as(), - src->size(), - InferenceEngine::as(dst)->wmap().as()); + ngraph::runtime::reference::convert(InferenceEngine::as(src)->rmap().as(), + InferenceEngine::as(dst)->wmap().as(), src->size()); } static void blobCopy(const Blob::Ptr& src, const Blob::Ptr& dst) { switch (src->getTensorDesc().getPrecision()) { - case Precision::U8 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::U8 : break; - case Precision::FP32 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } + case Precision::U8: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::U8: + break; + case Precision::FP32: { + blobCopy(src, dst); } break; - case Precision::FP32 : { - switch (dst->getTensorDesc().getPrecision()) { - case Precision::FP32 : break; - case Precision::U8 : { - blobCopy(src, dst); - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " - << src->getTensorDesc().getPrecision() <<" to " << dst->getTensorDesc().getPrecision(); - } - } - } break; - default : { - IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision(); + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); } + } + } break; + case Precision::FP32: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::FP32: + break; + case Precision::U8: { + blobCopy(src, dst); + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::I64: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::I64: + break; + case Precision::I32: { + blobCopy(src, dst); + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::I16: { + switch (dst->getTensorDesc().getPrecision()) { + 
case Precision::I16: + break; + case Precision::FP32: { + blobCopy(src, dst); + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::I8: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::I8: + break; + case Precision::FP32: { + blobCopy(src, dst); + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::BOOL: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::BOOL: + break; + case Precision::FP32: { + blobCopy(src, dst); + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + case Precision::U16: { + switch (dst->getTensorDesc().getPrecision()) { + case Precision::U16: + break; + case Precision::FP32: { + blobCopy(src, dst); + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision() << " to " + << dst->getTensorDesc().getPrecision(); + } + } + } break; + default: { + IE_THROW(NotImplemented) << "Unsupported precision conversion from " << src->getTensorDesc().getPrecision(); + } } } @@ -180,8 +229,8 @@ void TemplateInferRequest::inferPreprocess() { const auto& parameter = _parameters[index]; const auto& parameterShape = parameter->get_shape(); const auto& parameterType = parameter->get_element_type(); - _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(parameterType, parameterShape, - InferenceEngine::as(networkInput.second)->rmap().as()); + _inputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( + parameterType, parameterShape, InferenceEngine::as(networkInput.second)->rmap().as()); } for (auto&& output : _outputs) { auto outputBlob = output.second; @@ -193,8 +242,8 @@ void TemplateInferRequest::inferPreprocess() { const auto& result = _results[index]; const auto& resultShape = result->get_shape(); const auto& resultType = result->get_element_type(); - _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor(resultType, resultShape, - InferenceEngine::as(networkOutput)->wmap().as()); + _outputTensors[index] = _executableNetwork->_plugin->_backend->create_tensor( + resultType, resultShape, InferenceEngine::as(networkOutput)->wmap().as()); } _durations[Preprocess] = Time::now() - start; } diff --git a/docs/template_plugin/src/template_infer_request.hpp b/docs/template_plugin/src/template_infer_request.hpp index 61187df7985..ca92c76bbbd 100644 --- a/docs/template_plugin/src/template_infer_request.hpp +++ b/docs/template_plugin/src/template_infer_request.hpp @@ -4,20 +4,17 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include +#include #include #include -#include -#include -#include - -#include - -#include -#include - -#include -#include namespace TemplatePlugin { @@ -29,8 +26,7 @@ class TemplateInferRequest : public InferenceEngine::IInferRequestInternal { public: typedef std::shared_ptr Ptr; - TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, - const InferenceEngine::OutputsDataMap& networkOutputs, + TemplateInferRequest(const InferenceEngine::InputsDataMap& networkInputs, 
const InferenceEngine::OutputsDataMap& networkOutputs, const std::shared_ptr& executableNetwork); ~TemplateInferRequest(); @@ -47,26 +43,20 @@ private: void allocateDeviceBuffers(); void allocateBlobs(); - enum { - Preprocess, - Postprocess, - StartPipeline, - WaitPipeline, - numOfStages - }; + enum { Preprocess, Postprocess, StartPipeline, WaitPipeline, numOfStages }; - std::shared_ptr _executableNetwork; - std::array _profilingTask; + std::shared_ptr _executableNetwork; + std::array _profilingTask; // for performance counters - std::array, numOfStages> _durations; + std::array, numOfStages> _durations; - InferenceEngine::BlobMap _networkOutputBlobs; - ngraph::ParameterVector _parameters; - ngraph::ResultVector _results; + InferenceEngine::BlobMap _networkOutputBlobs; + ngraph::ParameterVector _parameters; + ngraph::ResultVector _results; - std::vector> _inputTensors; - std::vector> _outputTensors; - std::shared_ptr _executable; + std::vector> _inputTensors; + std::vector> _outputTensors; + std::shared_ptr _executable; }; // ! [infer_request:header] diff --git a/docs/template_plugin/src/template_itt.hpp b/docs/template_plugin/src/template_itt.hpp index 089d49c1752..1d734bcf942 100644 --- a/docs/template_plugin/src/template_itt.hpp +++ b/docs/template_plugin/src/template_itt.hpp @@ -14,7 +14,7 @@ namespace TemplatePlugin { namespace itt { namespace domains { - OV_ITT_DOMAIN(TemplatePlugin); -} -} +OV_ITT_DOMAIN(TemplatePlugin); } +} // namespace itt +} // namespace TemplatePlugin diff --git a/docs/template_plugin/src/template_plugin.cpp b/docs/template_plugin/src/template_plugin.cpp index 6da16cce891..beaedb97c5e 100644 --- a/docs/template_plugin/src/template_plugin.cpp +++ b/docs/template_plugin/src/template_plugin.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // +// clang-format off #include #include #include @@ -24,6 +25,7 @@ #include "template_infer_request.hpp" #include "transformations/template_pattern_transformation.hpp" #include "transformations/preprocessing/preprocessing.hpp" +// clang-format on using namespace TemplatePlugin; @@ -53,8 +55,7 @@ Plugin::~Plugin() { // ! [plugin:transform_network] -std::shared_ptr TransformNetwork(const std::shared_ptr& function, - const InferenceEngine::InputsDataMap & inputInfoMap, +std::shared_ptr TransformNetwork(const std::shared_ptr& function, const InferenceEngine::InputsDataMap& inputInfoMap, const InferenceEngine::OutputsDataMap& outputsInfoMap) { // 1. Copy ngraph::Function first to apply some transformations which modify original ngraph::Function auto transformedNetwork = ngraph::clone_function(*function); @@ -67,7 +68,7 @@ std::shared_ptr TransformNetwork(const std::shared_ptr(); // Template plugin handles only FP32 networks - passManager.register_pass(precisions_array {{ngraph::element::f16, ngraph::element::f32 }}); + passManager.register_pass(precisions_array {{ngraph::element::f16, ngraph::element::f32}}); // Example: register plugin specific transformation passManager.register_pass(); passManager.register_pass(); @@ -83,36 +84,32 @@ std::shared_ptr TransformNetwork(const std::shared_ptr(network.getFunction(), - networkInputs, networkOutputs, fullConfig, - std::static_pointer_cast(shared_from_this())); + auto fullConfig = Configuration {config, _cfg}; + return std::make_shared(network.getFunction(), networkInputs, networkOutputs, fullConfig, + std::static_pointer_cast(shared_from_this())); } // ! [plugin:load_exe_network_impl] // ! 
[plugin:import_network_impl] -InferenceEngine::ExecutableNetworkInternal::Ptr -Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map& config) { +InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::ImportNetworkImpl(std::istream& modelStream, const std::map& config) { OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetworkImpl"); - auto fullConfig = Configuration{ config, _cfg }; - return std::make_shared(modelStream, fullConfig, - std::static_pointer_cast(shared_from_this())); + auto fullConfig = Configuration {config, _cfg}; + return std::make_shared(modelStream, fullConfig, std::static_pointer_cast(shared_from_this())); } // ! [plugin:import_network_impl] // ! [plugin:query_network] -InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork &network, const ConfigMap& config) const { +InferenceEngine::QueryNetworkResult Plugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const ConfigMap& config) const { OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::QueryNetwork"); - Configuration fullConfig{config, _cfg, false}; + Configuration fullConfig {config, _cfg, false}; auto function = network.getFunction(); // 1. First of all we should store initial input operation set @@ -198,36 +195,28 @@ void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) { // ! [plugin:add_extension] // ! [plugin:set_config] -void Plugin::SetConfig(const ConfigMap &config) { - _cfg = Configuration{config, _cfg}; +void Plugin::SetConfig(const ConfigMap& config) { + _cfg = Configuration {config, _cfg}; } // ! [plugin:set_config] // ! [plugin:get_config] -InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map & /*options*/) const { +InferenceEngine::Parameter Plugin::GetConfig(const std::string& name, const std::map& /*options*/) const { return _cfg.Get(name); } // ! [plugin:get_config] // ! 
[plugin:get_metric] -InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map & options) const { +InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std::map& options) const { if (METRIC_KEY(SUPPORTED_METRICS) == name) { - std::vector supportedMetrics = { - METRIC_KEY(AVAILABLE_DEVICES), - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS), - METRIC_KEY(FULL_DEVICE_NAME), - METRIC_KEY(IMPORT_EXPORT_SUPPORT), - METRIC_KEY(DEVICE_ARCHITECTURE), - METRIC_KEY(OPTIMIZATION_CAPABILITIES), - METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) }; + std::vector supportedMetrics = {METRIC_KEY(AVAILABLE_DEVICES), METRIC_KEY(SUPPORTED_METRICS), + METRIC_KEY(SUPPORTED_CONFIG_KEYS), METRIC_KEY(FULL_DEVICE_NAME), + METRIC_KEY(IMPORT_EXPORT_SUPPORT), METRIC_KEY(DEVICE_ARCHITECTURE), + METRIC_KEY(OPTIMIZATION_CAPABILITIES), METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)}; IE_SET_METRIC_RETURN(SUPPORTED_METRICS, supportedMetrics); } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { - std::vector configKeys = { - CONFIG_KEY(DEVICE_ID), - CONFIG_KEY(PERF_COUNT), - TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; - auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config{}.SupportedKeys(); + std::vector configKeys = {CONFIG_KEY(DEVICE_ID), CONFIG_KEY(PERF_COUNT), TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS)}; + auto streamExecutorConfigKeys = InferenceEngine::IStreamsExecutor::Config {}.SupportedKeys(); for (auto&& configKey : streamExecutorConfigKeys) { if (configKey != InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) { configKeys.emplace_back(configKey); @@ -236,7 +225,7 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std: IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys); } else if (METRIC_KEY(AVAILABLE_DEVICES) == name) { // TODO: fill list of available devices - std::vector availableDevices = { "" }; + std::vector availableDevices = {""}; IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices); } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) { std::string name = "Template Device Full Name"; @@ -249,13 +238,13 @@ InferenceEngine::Parameter Plugin::GetMetric(const std::string& name, const std: IE_SET_METRIC_RETURN(DEVICE_ARCHITECTURE, arch); } else if (METRIC_KEY(OPTIMIZATION_CAPABILITIES) == name) { // TODO: fill actual list of supported capabilities: e.g. 
Template device supports only FP32 - std::vector capabilities = { METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/ }; + std::vector capabilities = {METRIC_VALUE(FP32) /*, TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION)*/}; IE_SET_METRIC_RETURN(OPTIMIZATION_CAPABILITIES, capabilities); } else if (METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS) == name) { // TODO: fill with actual values using uint = unsigned int; - IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint{1}, uint{1}, uint{1})); - } else { + IE_SET_METRIC_RETURN(RANGE_FOR_ASYNC_INFER_REQUESTS, std::make_tuple(uint {1}, uint {1}, uint {1})); + } else { IE_THROW(NotFound) << "Unsupported device metric: " << name; } } diff --git a/docs/template_plugin/src/template_plugin.hpp b/docs/template_plugin/src/template_plugin.hpp index 10b68d7af42..f065fad04e3 100644 --- a/docs/template_plugin/src/template_plugin.hpp +++ b/docs/template_plugin/src/template_plugin.hpp @@ -4,11 +4,11 @@ #pragma once -#include "template_config.hpp" -#include "template_executable_network.hpp" #include #include "backend.hpp" +#include "template_config.hpp" +#include "template_executable_network.hpp" //! [plugin:header] namespace TemplatePlugin { @@ -20,26 +20,24 @@ public: Plugin(); ~Plugin(); - void SetConfig(const std::map &config) override; - InferenceEngine::QueryNetworkResult - QueryNetwork(const InferenceEngine::CNNNetwork &network, - const std::map& config) const override; - InferenceEngine::ExecutableNetworkInternal::Ptr - LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, - const std::map &config) override; + void SetConfig(const std::map& config) override; + InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, + const std::map& config) const override; + InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, + const std::map& config) override; void AddExtension(InferenceEngine::IExtensionPtr extension) override; - InferenceEngine::Parameter GetConfig(const std::string& name, const std::map & options) const override; - InferenceEngine::Parameter GetMetric(const std::string& name, const std::map & options) const override; + InferenceEngine::Parameter GetConfig(const std::string& name, const std::map& options) const override; + InferenceEngine::Parameter GetMetric(const std::string& name, const std::map& options) const override; InferenceEngine::ExecutableNetworkInternal::Ptr ImportNetworkImpl(std::istream& model, const std::map& config) override; private: friend class ExecutableNetwork; friend class TemplateInferRequest; - std::shared_ptr _backend; - Configuration _cfg; - InferenceEngine::ITaskExecutor::Ptr _waitExecutor; + std::shared_ptr _backend; + Configuration _cfg; + InferenceEngine::ITaskExecutor::Ptr _waitExecutor; }; } // namespace TemplatePlugin -//! [plugin:header] \ No newline at end of file + //! 
[plugin:header] diff --git a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp index a74d8501981..39fd7942387 100644 --- a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp +++ b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "transformations/preprocessing/mean_image_or_value.hpp" + #include #include #include -#include "transformations/preprocessing/mean_image_or_value.hpp" - using namespace ngraph; NGRAPH_RTTI_DEFINITION(ngraph::pass::AddMeanSubtract, "AddMeanSubtract", 0); -ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) { +ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap& inputInfoMap) { // RUN_ON_FUNCTION_SCOPE(AddMeanSubtract); auto label = ngraph::pattern::wrap_type(); - ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) { + ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) { auto param = std::dynamic_pointer_cast(m.get_match_root()); if (!param) { return false; @@ -28,8 +28,7 @@ ngraph::pass::AddMeanSubtract::AddMeanSubtract(const MeanMap & inputInfoMap) { } auto mean_const = it->second; - NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, - "Mean for ", param->get_friendly_name(), " must have f32 type"); + NGRAPH_CHECK(mean_const->get_element_type() == ngraph::element::f32, "Mean for ", param->get_friendly_name(), " must have f32 type"); auto copy_param = param->clone_with_new_inputs({}); auto sub = std::make_shared(copy_param, mean_const); diff --git a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp index 906bfdc0aa4..f465ad9f948 100644 --- a/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp +++ b/docs/template_plugin/src/transformations/preprocessing/mean_image_or_value.hpp @@ -5,10 +5,9 @@ #pragma once #include -#include - #include #include +#include #include "transformations_visibility.hpp" @@ -29,5 +28,5 @@ public: using MeanMap = std::map>; NGRAPH_RTTI_DECLARATION; - explicit AddMeanSubtract(const MeanMap & inputInfoMap); + explicit AddMeanSubtract(const MeanMap& inputInfoMap); }; diff --git a/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp b/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp index a7e6d8bc718..b6f211d1135 100644 --- a/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp +++ b/docs/template_plugin/src/transformations/preprocessing/preprocessing.cpp @@ -2,26 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "transformations/preprocessing/preprocessing.hpp" + #include +#include #include "transformations/preprocessing/mean_image_or_value.hpp" #include "transformations/preprocessing/std_scale.hpp" -#include "transformations/preprocessing/preprocessing.hpp" NGRAPH_RTTI_DEFINITION(ngraph::pass::AddPreprocessing, "AddPreprocessing", 0); -ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap) - : m_inputInfoMap(inputInfoMap) { } +ngraph::pass::AddPreprocessing::AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap): m_inputInfoMap(inputInfoMap) {} bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptr f) { 
ngraph::pass::AddMeanSubtract::MeanMap meanMap; ngraph::pass::AddStdScale::ScaleMap scaleMap; - for (const auto & it : m_inputInfoMap) { + for (const auto& it : m_inputInfoMap) { bool has_scales = false, has_mean_values = false, has_mean_image = false; - const InferenceEngine::PreProcessInfo & pInfo = it.second->getPreProcess(); - const auto & inputDims = it.second->getTensorDesc().getDims(); + const InferenceEngine::PreProcessInfo& pInfo = it.second->getPreProcess(); + const auto& inputDims = it.second->getTensorDesc().getDims(); const size_t cn = pInfo.getNumberOfChannels(); std::vector meanValues(cn), stdScales(cn); InferenceEngine::Blob::Ptr meanImage = nullptr; @@ -40,10 +40,10 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptrmeanData; NGRAPH_CHECK(meanImage->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32, - "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData"); + "Only InferenceEngine::Precision::FP32 precision is supported for PreProcessChannel::meanData"); } else { - NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), - "TensorDesc for PreProcessChannel::meanData must be equal"); + NGRAPH_CHECK(pInfo[c]->meanData != nullptr, "pInfo[c]->meanData is nullptr"); + NGRAPH_CHECK(meanImage->getTensorDesc() == pInfo[c]->meanData->getTensorDesc(), "TensorDesc for PreProcessChannel::meanData must be equal"); } } } @@ -53,35 +53,33 @@ bool ngraph::pass::AddPreprocessing::run_on_function(std::shared_ptrgetTensorDesc().getDims(); std::copy(dims.begin(), dims.end(), std::back_inserter(shape)); std::vector meanImageData(ngraph::shape_size(shape)); for (size_t c = 0, i = 0; c < cn; ++c) { auto lm = pInfo[c]->meanData->buffer(); - const float *data = lm.as(); + const float* data = lm.as(); std::memcpy(&meanImageData[i], data, meanImage->byteSize()); i += meanImage->size(); } - meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, - shape, meanImageData); + meanMap[it.first] = ngraph::opset3::Constant::create(ngraph::element::f32, shape, meanImageData); } } diff --git a/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp b/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp index 3ff95fc95ea..c724f06aa0e 100644 --- a/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp +++ b/docs/template_plugin/src/transformations/preprocessing/preprocessing.hpp @@ -26,10 +26,11 @@ class AddPreprocessing; * (x - mean) * stdScale */ class ngraph::pass::AddPreprocessing : public ngraph::pass::FunctionPass { - const InferenceEngine::InputsDataMap & m_inputInfoMap; + const InferenceEngine::InputsDataMap& m_inputInfoMap; + public: NGRAPH_RTTI_DECLARATION; - explicit AddPreprocessing(const InferenceEngine::InputsDataMap & inputInfoMap); + explicit AddPreprocessing(const InferenceEngine::InputsDataMap& inputInfoMap); bool run_on_function(std::shared_ptr f) override; }; diff --git a/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp b/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp index 44ad4d6080c..90c5163bdf2 100644 --- a/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp +++ b/docs/template_plugin/src/transformations/preprocessing/std_scale.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "transformations/preprocessing/std_scale.hpp" + #include #include #include -#include "transformations/preprocessing/std_scale.hpp" - using namespace ngraph; 
NGRAPH_RTTI_DEFINITION(ngraph::pass::AddStdScale, "AddStdScale", 0); @@ -16,7 +16,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) { // RUN_ON_FUNCTION_SCOPE(AddStdScale); auto label = ngraph::pattern::wrap_type(); - ngraph::matcher_pass_callback callback = [=] (pattern::Matcher& m) { + ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) { auto param = std::dynamic_pointer_cast(m.get_match_root()); if (!param) { return false; @@ -28,8 +28,7 @@ ngraph::pass::AddStdScale::AddStdScale(const ScaleMap& inputInfoMap) { } auto scale_const = it->second; - NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, - "Scale for ", param->get_friendly_name(), " must have f32 type"); + NGRAPH_CHECK(scale_const->get_element_type() == ngraph::element::f32, "Scale for ", param->get_friendly_name(), " must have f32 type"); auto copy_param = param->clone_with_new_inputs({}); auto mul = std::make_shared(copy_param, it->second); diff --git a/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp b/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp index edc2838bd46..cd809727f10 100644 --- a/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp +++ b/docs/template_plugin/src/transformations/preprocessing/std_scale.hpp @@ -5,10 +5,9 @@ #pragma once #include -#include - #include #include +#include #include "transformations_visibility.hpp" diff --git a/docs/template_plugin/src/transformations/template_function_transformation.cpp b/docs/template_plugin/src/transformations/template_function_transformation.cpp index 0c58de4c00c..410993d680b 100644 --- a/docs/template_plugin/src/transformations/template_function_transformation.cpp +++ b/docs/template_plugin/src/transformations/template_function_transformation.cpp @@ -15,7 +15,7 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptrget_ordered_ops()) { + for (auto& node : f->get_ordered_ops()) { // Check that number of input and output ports are equal to 1 if (node->inputs().size() == 1 && node->outputs().size() == 1) { // Check that input and output shape a fully defined (not dynamic) and number of consumers equal to 1 @@ -28,9 +28,8 @@ bool pass::MyFunctionTransformation::run_on_function(std::shared_ptrget_type_info().name << std::endl - << "Name: " << node->get_friendly_name() << std::endl; + for (auto& node : nodes) { + std::cout << "Type: " << node->get_type_info().name << std::endl << "Name: " << node->get_friendly_name() << std::endl; } // Return false because we didn't change nGraph Function diff --git a/docs/template_plugin/src/transformations/template_function_transformation.hpp b/docs/template_plugin/src/transformations/template_function_transformation.hpp index 3cd330edce2..ae665be6a62 100644 --- a/docs/template_plugin/src/transformations/template_function_transformation.hpp +++ b/docs/template_plugin/src/transformations/template_function_transformation.hpp @@ -16,7 +16,7 @@ class MyFunctionTransformation; // ! 
[function_pass:template_transformation_hpp] // template_function_transformation.hpp -class ngraph::pass::MyFunctionTransformation: public ngraph::pass::FunctionPass { +class ngraph::pass::MyFunctionTransformation : public ngraph::pass::FunctionPass { public: NGRAPH_RTTI_DECLARATION; bool run_on_function(std::shared_ptr f) override; diff --git a/docs/template_plugin/src/transformations/template_pattern_transformation.cpp b/docs/template_plugin/src/transformations/template_pattern_transformation.cpp index c1a3a92fa15..063f52ad736 100644 --- a/docs/template_plugin/src/transformations/template_pattern_transformation.cpp +++ b/docs/template_plugin/src/transformations/template_pattern_transformation.cpp @@ -3,13 +3,14 @@ // #include "transformations/template_pattern_transformation.hpp" -#include "transformations/template_function_transformation.hpp" #include #include #include #include +#include "transformations/template_function_transformation.hpp" + using namespace ngraph; // ! [graph_rewrite:template_transformation_cpp] @@ -23,15 +24,14 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() { auto div = std::make_shared(input0, input1); ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) { - auto div = std::dynamic_pointer_cast (m.get_match_root()); + auto div = std::dynamic_pointer_cast(m.get_match_root()); // We can not apply this transformation in case with integer input data type if (!div || div->input(0).get_element_type().is_integral()) { return false; } // Decompose Divide into Multiply with Power operations - auto pow = std::make_shared(div->input_value(1), - opset3::Constant::create(div->get_input_element_type(1), Shape{1}, {-1})); + auto pow = std::make_shared(div->input_value(1), opset3::Constant::create(div->get_input_element_type(1), Shape {1}, {-1})); auto mul = std::make_shared(div->input_value(0), pow); @@ -67,8 +67,7 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() { auto& node_to_output = m.get_pattern_value_map(); // Create new Relu operation and add register it for additional execution - auto new_relu = register_new_node( - node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0)); + auto new_relu = register_new_node(node_to_output.at(m_relu1).get_node_shared_ptr()->input_value(0)); // Copy runtime info attributes to newly created operation ngraph::copy_runtime_info(m.get_matched_nodes(), new_relu); @@ -91,60 +90,60 @@ ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() { // ! [matcher_pass:relu_fusion] void run_matcher_on_node(std::shared_ptr node) { -// ! [matcher_pass:run_on_node] -if (ngraph::pass::DecomposeDivideMatcher().apply(node)) { - // successful execution (root node was replaced) -} -// ! [matcher_pass:run_on_node] + // ! [matcher_pass:run_on_node] + if (ngraph::pass::DecomposeDivideMatcher().apply(node)) { + // successful execution (root node was replaced) + } + // ! [matcher_pass:run_on_node] } void run_matcher_with_manager(std::shared_ptr f) { -// ! [matcher_pass:manager] -// Two matchers will run independently (two independent graph traversals) -// pass::Manager automatically creates GraphRewrite container for each MatcherPass -pass::Manager manager; -manager.register_pass(); -manager.register_pass(); -manager.run_passes(f); -// ! [matcher_pass:manager] + // ! 
[matcher_pass:manager] + // Two matchers will run independently (two independent graph traversals) + // pass::Manager automatically creates GraphRewrite container for each MatcherPass + pass::Manager manager; + manager.register_pass(); + manager.register_pass(); + manager.run_passes(f); + // ! [matcher_pass:manager] } void run_matcher_with_manager2(std::shared_ptr f) { -// ! [matcher_pass:manager2] -// Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously -pass::Manager manager; -auto anchor = manager.register_pass(); -anchor->add_matcher(); -anchor->add_matcher(); -manager.run_passes(f); -// ! [matcher_pass:manager2] + // ! [matcher_pass:manager2] + // Register anchor GraphRewrite pass inside manager that will execute two matchers simultaneously + pass::Manager manager; + auto anchor = manager.register_pass(); + anchor->add_matcher(); + anchor->add_matcher(); + manager.run_passes(f); + // ! [matcher_pass:manager2] } void run_matcher_with_manager3(std::shared_ptr f) { -// ! [matcher_pass:manager3] -pass::Manager manager; -manager.register_pass(); -// Two matchers will run independently (two independent graph traversals) -// pass::Manager automatically creates GraphRewrite container for each MatcherPass -manager.register_pass(); -manager.register_pass(); -manager.run_passes(f); -// ! [matcher_pass:manager3] + // ! [matcher_pass:manager3] + pass::Manager manager; + manager.register_pass(); + // Two matchers will run independently (two independent graph traversals) + // pass::Manager automatically creates GraphRewrite container for each MatcherPass + manager.register_pass(); + manager.register_pass(); + manager.run_passes(f); + // ! [matcher_pass:manager3] } void run_matcher_with_gr(std::shared_ptr f) { -// ! [matcher_pass:graph_rewrite] -// Two matcher passes will run simultaneously in a single graph traversal -ngraph::pass::GraphRewrite pass; -pass.add_matcher(); -pass.add_matcher(); -pass.run_on_function(f); -// ! [matcher_pass:graph_rewrite] + // ! [matcher_pass:graph_rewrite] + // Two matcher passes will run simultaneously in a single graph traversal + ngraph::pass::GraphRewrite pass; + pass.add_matcher(); + pass.add_matcher(); + pass.run_on_function(f); + // ! [matcher_pass:graph_rewrite] } // ! [manual_constant_folding] template -Output eltwise_fold(const Output & input0, const Output & input1) { +Output eltwise_fold(const Output& input0, const Output& input1) { auto eltwise = std::make_shared(input0, input1); OutputVector output(eltwise->get_output_size()); // If constant folding wasn't successful return eltwise output diff --git a/docs/template_plugin/src/transformations/template_pattern_transformation.hpp b/docs/template_plugin/src/transformations/template_pattern_transformation.hpp index f2b8d400988..f4628afdc3f 100644 --- a/docs/template_plugin/src/transformations/template_pattern_transformation.hpp +++ b/docs/template_plugin/src/transformations/template_pattern_transformation.hpp @@ -21,14 +21,14 @@ class ReluReluFusionMatcher; * @ingroup ie_transformation_common_api * @brief Add transformation description. */ -class ngraph::pass::DecomposeDivideMatcher: public ngraph::pass::MatcherPass { +class ngraph::pass::DecomposeDivideMatcher : public ngraph::pass::MatcherPass { public: NGRAPH_RTTI_DECLARATION; DecomposeDivideMatcher(); }; // ! 
[graph_rewrite:template_transformation_hpp] -class ngraph::pass::ReluReluFusionMatcher: public ngraph::pass::MatcherPass { +class ngraph::pass::ReluReluFusionMatcher : public ngraph::pass::MatcherPass { public: NGRAPH_RTTI_DECLARATION; ReluReluFusionMatcher(); diff --git a/docs/template_plugin/tests/functional/CMakeLists.txt b/docs/template_plugin/tests/functional/CMakeLists.txt index a2962cea0ae..96ab3fdcbe4 100644 --- a/docs/template_plugin/tests/functional/CMakeLists.txt +++ b/docs/template_plugin/tests/functional/CMakeLists.txt @@ -14,7 +14,7 @@ addIeTargetTest( IE::funcSharedTests INCLUDES "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include" - ADD_CPPLINT + ADD_CLANG_FORMAT LABELS TEMPLATE ) diff --git a/docs/template_plugin/tests/functional/core_config.cpp b/docs/template_plugin/tests/functional/core_config.cpp index e75091f571f..6c70d61d35a 100644 --- a/docs/template_plugin/tests/functional/core_config.cpp +++ b/docs/template_plugin/tests/functional/core_config.cpp @@ -4,5 +4,4 @@ #include "functional_test_utils/core_config.hpp" -void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) { -} +void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {} diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp index f61e4c54d7e..547c073ddbc 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/caching_tests.cpp @@ -7,19 +7,14 @@ using namespace LayerTestsDefinitions; namespace { - static const std::vector precisionsTemplate = { - ngraph::element::f32, - }; +static const std::vector precisionsTemplate = { + ngraph::element::f32, +}; - static const std::vector batchSizesTemplate = { - 1, 2 - }; +static const std::vector batchSizesTemplate = {1, 2}; - INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase, - ::testing::Combine( - ::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()), - ::testing::ValuesIn(precisionsTemplate), - ::testing::ValuesIn(batchSizesTemplate), - ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)), - LoadNetworkCacheTestBase::getTestCaseName); -} // namespace +INSTANTIATE_TEST_CASE_P(smoke_CachingSupportCase_Template, LoadNetworkCacheTestBase, + ::testing::Combine(::testing::ValuesIn(LoadNetworkCacheTestBase::getStandardFunctions()), ::testing::ValuesIn(precisionsTemplate), + ::testing::ValuesIn(batchSizesTemplate), ::testing::Values(CommonTestUtils::DEVICE_TEMPLATE)), + LoadNetworkCacheTestBase::getTestCaseName); +} // namespace diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp index e10f8d64c68..3a832a5dd04 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "multi-device/multi_device_config.hpp" - #include "behavior/config.hpp" + #include