Removed GNA plugin from 2024.0 (#21800)

This commit is contained in:
Ilya Lavrenov 2023-12-21 16:39:06 +04:00 committed by GitHub
parent 1fd3399cdf
commit c79ae17bbf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
780 changed files with 15 additions and 130651 deletions

4
.github/CODEOWNERS vendored
View File

@ -58,9 +58,6 @@
/src/tests/**/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers
/thirdparty/ocl/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
# OpenVINO GNA:
/src/plugins/intel_gna/ @openvinotoolkit/openvino-ie-gna-maintainers
# OpenVINO Auto (MULTI) plugin:
/src/plugins/auto/ @openvinotoolkit/openvino-ie-auto-multi-maintainers
@ -118,7 +115,6 @@
/docs/snippets/ @openvinotoolkit/openvino-docs-maintainers @openvinotoolkit/openvino-ie-maintainers
/docs/OV_Runtime_UG/supported_plugins/ARM_CPU.md @openvinotoolkit/openvino-docs-maintainers @openvinotoolkit/openvino_contrib-arm_plugin-maintainers
/docs/OV_Runtime_UG/supported_plugins/CPU.md @openvinotoolkit/openvino-docs-maintainers @openvinotoolkit/openvino-ie-cpu-maintainers
/docs/OV_Runtime_UG/supported_plugins/GNA.md @openvinotoolkit/openvino-docs-maintainers @openvinotoolkit/openvino-ie-gna-maintainers
/docs/OV_Runtime_UG/supported_plugins/GPU*.md @openvinotoolkit/openvino-docs-maintainers @openvinotoolkit/openvino-ie-gpu-maintainers
# Configuration management

View File

@ -41,9 +41,7 @@ body:
options:
- CPU
- GPU
- GNA
- NCS2 (Intel Movidius)
- HDDL
- NPU
- AUTO
- HETERO
- BATCH

View File

@ -18,7 +18,6 @@ LP_transformations:
preprocessing:
revalidate:
- inference
- GNA
- C_API
- Python_API
@ -48,18 +47,10 @@ GPU:
- IR_FE
- PROXY
GNA:
build:
- HETERO
- AUTO_BATCH
- TEMPLATE
- IR_FE
HETERO:
revalidate:
- CPU
- GPU
- GNA
- HETERO
- AUTO_BATCH
- TEMPLATE
@ -72,7 +63,6 @@ AUTO_BATCH:
revalidate:
- CPU
- GPU
- GNA
- HETERO
- AUTO_BATCH
- TEMPLATE
@ -85,7 +75,6 @@ TEMPLATE:
revalidate:
- CPU
- GPU
- GNA
- HETERO
- AUTO_BATCH
- TEMPLATE
@ -194,7 +183,6 @@ IE_Tests:
revalidate:
- CPU
- GPU
- GNA
- HETERO
- AUTO_BATCH
- TEMPLATE

View File

@ -24,7 +24,6 @@
"openvino-docs-maintainers": "category: docs",
"openvino-ie-maintainers": "category: inference",
"openvino-ie-cpu-maintainers": "category: CPU",
"openvino-ie-gna-maintainers": "category: GNA",
"openvino-ie-gpu-maintainers": "category: GPU",
"openvino-ie-lpt-maintainers": "category: LP transformations",
"openvino-ie-transformations-maintainers": "category: transformations",

3
.github/labeler.yml vendored
View File

@ -67,9 +67,6 @@
- 'src/frontends/common/include/openvino/frontend/extension.hpp'
- 'src/frontends/common/include/openvino/frontend/extension/**/*'
'category: GNA':
- 'src/plugins/intel_gna/**/*'
'category: GPU':
- 'src/plugins/intel_gpu/**/*'
- 'thirdparty/ocl/**/*'

View File

@ -157,22 +157,6 @@ jobs:
${INSTALL_TEST_DIR}/ov_transformations_tests --gtest_print_time=1 \
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-Transformations.xml
- name: Legacy Transformations func tests
if: fromJSON(inputs.affected-components).GNA.test &&
(runner.os != 'macOS' && runner.arch != 'ARM64')
run: |
source ${INSTALL_DIR}/setupvars.sh
${INSTALL_TEST_DIR}/ov_legacy_transformations_tests --gtest_print_time=1 \
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LegacyTransformations.xml
- name: Inference Engine 1.0 unit tests
if: fromJSON(inputs.affected-components).GNA.test &&
(runner.os != 'macOS' && runner.arch != 'ARM64')
run: |
source ${INSTALL_DIR}/setupvars.sh
${INSTALL_TEST_DIR}/InferenceEngineUnitTests --gtest_print_time=1 \
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineUnitTests.xml
- name: Common test utils tests
run: |
source ${INSTALL_DIR}/setupvars.sh

View File

@ -147,7 +147,6 @@ jobs:
-DENABLE_TESTS=ON \
-DENABLE_CPPLINT=OFF \
-DENABLE_NCC_STYLE=OFF \
-DENABLE_INTEL_GNA=OFF \
-DCMAKE_COMPILE_WARNING_AS_ERROR=ON \
-DENABLE_PROFILING_ITT=ON \
-DSELECTIVE_BUILD=COLLECT \
@ -303,7 +302,6 @@ jobs:
-DSELECTIVE_BUILD=ON \
-DENABLE_TEMPLATE=OFF \
-DENABLE_INTEL_GPU=OFF \
-DENABLE_INTEL_GNA=OFF \
-DENABLE_OV_TF_FRONTEND=OFF \
-DENABLE_OV_TF_LITE_FRONTEND=OFF \
-DENABLE_OV_PADDLE_FRONTEND=OFF \

View File

@ -617,18 +617,6 @@ jobs:
run: |
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml
- name: Legacy Transformations func tests
if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test
shell: cmd
run: |
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_legacy_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LegacyTransformations.xml
- name: Inference Engine 1.0 unit tests
if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test
shell: cmd
run: |
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineUnitTests.xml
- name: Common test utils tests
shell: cmd
run: |
@ -656,13 +644,7 @@ jobs:
run: |
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml
- name: GNA plugin unit tests
if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test
shell: cmd
run: |
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_gna_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-GNAUnitTests.xml
- name: AUTO unit tests
if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test
shell: cmd
run: |

View File

@ -140,7 +140,6 @@ jobs:
-DENABLE_TESTS=ON `
-DENABLE_CPPLINT=OFF `
-DENABLE_NCC_STYLE=OFF `
-DENABLE_INTEL_GNA=OFF `
-DCMAKE_COMPILE_WARNING_AS_ERROR=ON `
-DENABLE_PROFILING_ITT=ON `
-DSELECTIVE_BUILD=COLLECT `
@ -306,7 +305,6 @@ jobs:
-DSELECTIVE_BUILD=ON `
-DENABLE_TEMPLATE=OFF `
-DENABLE_INTEL_GPU=OFF `
-DENABLE_INTEL_GNA=OFF `
-DENABLE_OV_TF_FRONTEND=OFF `
-DENABLE_OV_TF_LITE_FRONTEND=OFF `
-DENABLE_OV_PADDLE_FRONTEND=OFF `

View File

@ -33,7 +33,7 @@ OpenVINO™ is an open-source toolkit for optimizing and deploying AI inference.
- Reduce resource demands and efficiently deploy on a range of Intel® platforms from edge to cloud
This open-source version includes several components: namely [OpenVINO Model Converter (OVC)], [OpenVINO™ Runtime], as well as CPU, GPU, GNA, multi device and heterogeneous plugins to accelerate deep learning inference on Intel® CPUs and Intel® Processor Graphics.
This open-source version includes several components: namely [OpenVINO Model Converter (OVC)], [OpenVINO™ Runtime], as well as CPU, GPU, multi device and heterogeneous plugins to accelerate deep learning inference on Intel® CPUs and Intel® Processor Graphics.
It supports pre-trained models from [Open Model Zoo], along with 100+ open
source and public models in popular formats such as TensorFlow, ONNX, PaddlePaddle, MXNet, Caffe, Kaldi.
@ -82,12 +82,6 @@ The OpenVINO™ Runtime can infer models on different hardware devices. This sec
<td><b><i><a href="./src/plugins/intel_gpu">openvino_intel_gpu_plugin</a></i></b></td>
<td>Intel Processor Graphics, including Intel HD Graphics and Intel Iris Graphics</td>
</tr>
<tr>
<td>GNA</td>
<td><a href="https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_GNA.html#doxid-openvino-docs-o-v-u-g-supported-plugins-g-n-a">Intel GNA</a></td>
<td><b><i><a href="./src/plugins/intel_gna">openvino_intel_gna_plugin</a></i></b></td>
<td>Intel Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel Pentium Silver J5005 Processor, Intel Pentium Silver N5000 Processor, Intel Celeron J4005 Processor, Intel Celeron J4105 Processor, Intel Celeron Processor N4100, Intel Celeron Processor N4000, Intel Core i3-8121U Processor, Intel Core i7-1065G7 Processor, Intel Core i7-1060G7 Processor, Intel Core i5-1035G4 Processor, Intel Core i5-1035G7 Processor, Intel Core i5-1035G1 Processor, Intel Core i5-1030G7 Processor, Intel Core i5-1030G4 Processor, Intel Core i3-1005G1 Processor, Intel Core i3-1000G1 Processor, Intel Core i3-1000G4 Processor</td>
</tr>
</tbody>
</table>

View File

@ -89,13 +89,6 @@ if(ENABLE_INTEL_CPU)
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
endif()
if(ENABLE_INTEL_GNA)
ov_coverage_extract(INPUT "openvino" OUTPUT "intel_gna_plugin"
PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/plugins/intel_gna/*")
ov_coverage_genhtml(INFO_FILE "intel_gna_plugin"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
endif()
if (ENABLE_INTEL_GPU)
ov_coverage_extract(INPUT "openvino" OUTPUT "intel_gpu_plugin"
PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/plugins/intel_gpu/*")

View File

@ -221,47 +221,3 @@ Build oneTBB from sources and set TBBROOT environment var before OpenVINO cmake
update_deps_cache(TBBBIND_2_5_ROOT "${TBBBIND_2_5}" "Path to TBBBIND_2_5 root folder")
update_deps_cache(TBBBIND_2_5_DIR "${TBBBIND_2_5}/cmake" "Path to TBBBIND_2_5 cmake folder")
endfunction()
if(ENABLE_INTEL_GNA)
reset_deps_cache(
GNA_EXT_DIR
GNA_PLATFORM_DIR
GNA_KERNEL_LIB_NAME
GNA_LIBS_LIST
GNA_LIB_DIR
libGNA_INCLUDE_DIRS
libGNA_LIBRARIES_BASE_PATH)
set(GNA_VERSION "03.05.00.2116")
set(GNA_HASH "960350567702bda17276ac4c060d7524fb7ce7ced785004bd861c81ff2bfe2c5")
set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include)
if(WIN32)
LIST(APPEND FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/win64)
else()
LIST(APPEND FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/linux)
endif()
RESOLVE_DEPENDENCY(GNA_EXT_DIR
ARCHIVE_UNIFIED "gna/gna_${GNA_VERSION}.zip"
TARGET_PATH "${TEMP}/gna_${GNA_VERSION}"
VERSION_REGEX ".*_([0-9]+.[0-9]+.[0-9]+.[0-9]+).*"
FILES_TO_EXTRACT FILES_TO_EXTRACT_LIST
SHA256 ${GNA_HASH}
USE_NEW_LOCATION TRUE)
update_deps_cache(GNA_EXT_DIR "${GNA_EXT_DIR}" "Path to GNA root folder")
debug_message(STATUS "gna=" ${GNA_EXT_DIR})
if (WIN32)
set(GNA_PLATFORM_DIR win64 CACHE STRING "" FORCE)
elseif (UNIX)
set(GNA_PLATFORM_DIR linux CACHE STRING "" FORCE)
else ()
message(FATAL_ERROR "GNA not supported on this platform, only linux, and windows")
endif ()
set(GNA_LIB_DIR x64 CACHE STRING "" FORCE)
set(GNA_PATH ${GNA_EXT_DIR}/${GNA_PLATFORM_DIR}/${GNA_LIB_DIR} CACHE STRING "" FORCE)
if(NOT BUILD_SHARED_LIBS)
list(APPEND PATH_VARS "GNA_PATH")
endif()
endif()

View File

@ -103,13 +103,6 @@ endif()
ov_dependent_option (ENABLE_TBBBIND_2_5 "Enable TBBBind_2_5 static usage in OpenVINO runtime" ${ENABLE_TBBBIND_2_5_DEFAULT} "THREADING MATCHES TBB; NOT APPLE" OFF)
ov_dependent_option (ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the OpenVINO Runtime binaries" ON "THREADING MATCHES TBB;LINUX" OFF)
ov_dependent_option (ENABLE_INTEL_GNA "GNA support for OpenVINO Runtime" ON
"NOT APPLE;NOT ANDROID;X86_64;CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 5.4" OFF)
ov_dependent_option (ENABLE_INTEL_GNA_DEBUG "GNA debug build" OFF "ENABLE_INTEL_GNA" OFF)
ov_dependent_option (ENABLE_V7_SERIALIZE "enables serialization to IR v7" OFF "ENABLE_INTEL_GNA" OFF)
ov_dependent_option (ENABLE_IR_V7_READER "Enables IR v7 reader" ${BUILD_SHARED_LIBS} "ENABLE_TESTS;ENABLE_INTEL_GNA" OFF)
ov_dependent_option (ENABLE_GAPI_PREPROCESSING "Enables G-API preprocessing" ON "NOT MINGW64" OFF)
ov_option (ENABLE_MULTI "Enables MULTI Device Plugin" ON)

View File

@ -51,8 +51,6 @@ macro(ov_cpack_settings)
NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO}_python.*" AND
# because in case of .deb package, pyopenvino_package_python${Python3_VERSION_MAJOR}${Python3_VERSION_MINOR} is installed
(NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_python.*" OR ENABLE_PYTHON_PACKAGING) AND
# see ticket # 82605
NOT item STREQUAL "gna" AND
# temporary block nvidia
NOT item STREQUAL "nvidia" AND
# don't install Intel OpenMP
@ -183,23 +181,6 @@ macro(ov_cpack_settings)
set(gpu_copyright "generic")
endif()
# intel-gna
if(ENABLE_INTEL_GNA AND "gna" IN_LIST CPACK_COMPONENTS_ALL)
set(CPACK_COMPONENT_GNA_DESCRIPTION "Intel® Gaussian Neural Accelerator inference plugin")
set(CPACK_COMPONENT_GNA_DEPENDS "${OV_CPACK_COMP_CORE}")
set(CPACK_DEBIAN_GNA_PACKAGE_NAME "libopenvino-intel-gna-plugin-${cpack_name_ver}")
# since we have libgna.so we need to call ldconfig and have `def_triggers` here
set(CPACK_DEBIAN_GNA_PACKAGE_CONTROL_EXTRA "${def_postinst};${def_postrm};${def_triggers}")
ov_debian_add_lintian_suppression(gna
# package name matches libopenvino_intel_gna_plugin.so
# but lintian looks at libgna.so.2 since it's a versioned library
"package-name-doesnt-match-sonames")
set(gna_copyright "generic")
_ov_add_plugin(gna OFF)
endif()
# # add pseudo plugins are recommended to core component
# if(pseudo_plugins_recommends)
# # see https://superuser.com/questions/70031/what-is-the-difference-between-recommended-and-suggested-packages-ubuntu.

View File

@ -37,8 +37,6 @@ macro(ov_cpack_settings)
NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO}_python.*" AND
# because in case of .rpm package, pyopenvino_package_python${Python3_VERSION_MAJOR}${Python3_VERSION_MINOR} is installed
(NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_python.*" OR ENABLE_PYTHON_PACKAGING) AND
# see ticket # 82605
NOT item STREQUAL "gna" AND
# temporary block nvidia
NOT item STREQUAL "nvidia" AND
# don't install Intel OpenMP
@ -179,15 +177,6 @@ macro(ov_cpack_settings)
set(gpu_copyright "generic")
endif()
# intel-gna
if(ENABLE_INTEL_GNA AND "gna" IN_LIST CPACK_COMPONENTS_ALL)
set(CPACK_COMPONENT_GNA_DESCRIPTION "Intel® Gaussian Neural Accelerator inference plugin")
set(CPACK_RPM_GNA_PACKAGE_REQUIRES "${core_package}")
set(CPACK_RPM_GNA_PACKAGE_NAME "libopenvino-intel-gna-plugin-${cpack_name_ver}")
_ov_add_package(plugin_packages gna)
set(gna_copyright "generic")
endif()
#
# Frontends
#

View File

@ -417,20 +417,6 @@ macro(_ov_find_intel_gpu_dependencies)
unset(_OV_ENABLE_ONEDNN_FOR_GPU)
endmacro()
macro(_ov_find_intel_gna_dependencies)
set(_OV_ENABLE_INTEL_GNA "@ENABLE_INTEL_GNA@")
if(_OV_ENABLE_INTEL_GNA)
set_and_check(GNA_PATH "@PACKAGE_GNA_PATH@")
_ov_find_dependency(libGNA
COMPONENTS KERNEL
CONFIG
PATHS "${CMAKE_CURRENT_LIST_DIR}"
NO_DEFAULT_PATH)
unset(GNA_PATH)
endif()
unset(_OV_ENABLE_INTEL_GNA)
endmacro()
macro(_ov_find_protobuf_frontend_dependency)
set(_OV_ENABLE_SYSTEM_PROTOBUF "@ENABLE_SYSTEM_PROTOBUF@")
set(_OV_PROTOBUF_PACKAGE_CONFIG "@protobuf_config@")
@ -518,7 +504,6 @@ if(NOT _OV_ENABLE_OPENVINO_BUILD_SHARED)
# plugin dependencies
_ov_find_intel_cpu_dependencies()
_ov_find_intel_gpu_dependencies()
_ov_find_intel_gna_dependencies()
endif()
_ov_find_dependency(Threads)

View File

@ -81,7 +81,7 @@ GPU
were used during OpenVINO internal validation: 22.43 for Ubuntu 22.04, 21.48
for Ubuntu 20.04 and 21.49 for Red Hat Enterprise Linux 8.
NPU and GNA
NPU
#############################
.. tab-set::
@ -91,13 +91,6 @@ NPU and GNA
* Ubuntu 22.04 long-term support (LTS), 64-bit
* Windows 11, 64-bit
.. tab-item:: Operating Systems for GNA
* Ubuntu 22.04 long-term support (LTS), 64-bit
* Ubuntu 20.04 long-term support (LTS), 64-bit
* Windows 10, 64-bit
* Windows 11, 64-bit
.. tab-item:: Additional considerations
* These Accelerators require drivers that are not included in the
@ -130,7 +123,7 @@ Operating systems and developer environment
Higher versions of kernel might be required for 10th Gen Intel® Core™ Processors,
11th Gen Intel® Core™ Processors, 11th Gen Intel® Core™ Processors S-Series Processors,
12th Gen Intel® Core™ Processors, 13th Gen Intel® Core™ Processors, Intel® Core™ Ultra
Processors, or 4th Gen Intel® Xeon® Scalable Processors to support CPU, GPU, GNA or
Processors, or 4th Gen Intel® Xeon® Scalable Processors to support CPU, GPU or
hybrid-cores CPU capabilities.
.. tab-item:: Windows

View File

@ -75,7 +75,7 @@ Glossary of terms used in OpenVINO™
| Number of images to analyze during one call of infer. Maximum batch size is a property of the model set before its compilation. In NHWC, NCHW, and NCDHW image data layout representations, the 'N' refers to the number of images in the batch.
| *Device Affinity*
| A preferred hardware device to run inference (CPU, GPU, GNA, etc.).
| A preferred hardware device to run inference (CPU, GPU, NPU, etc.).
| *Extensibility mechanism, Custom layers*
| The mechanism that provides you with capabilities to extend the OpenVINO™ Runtime and model conversion API so that they can work with models containing operations that are not yet supported.
@ -87,7 +87,7 @@ Glossary of terms used in OpenVINO™
| The Conversion API is used to import and convert models trained in popular frameworks to a format usable by other OpenVINO components. Model conversion API is represented by a Python ``openvino.convert_model()`` method and ``ovc`` command-line tool.
| *OpenVINO™ Core*
| OpenVINO™ Core is a software component that manages inference on certain Intel(R) hardware devices: CPU, GPU, GNA, etc.
| OpenVINO™ Core is a software component that manages inference on certain Intel(R) hardware devices: CPU, GPU, NPU, etc.
| *OpenVINO™ API*
| The basic default API for all supported devices, which allows you to load a model from Intermediate Representation or convert from ONNX, PaddlePaddle, TensorFlow, TensorFlow Lite file formats, set input and output formats and execute the model on various devices.
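As a brief illustration of these terms, a minimal sketch using the public C++ API (the model file name is a placeholder):
```
#include <iostream>
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;  // the Core manages the available inference devices

    // Devices are reported as { CPU, GPU.0, GPU.1, NPU, ... }
    for (const auto& device : core.get_available_devices())
        std::cout << device << '\n';

    // Load a model (IR, ONNX, PaddlePaddle, TensorFlow, TensorFlow Lite)
    // and compile it for a particular device.
    auto model = core.read_model("model.xml");
    ov::CompiledModel compiled = core.compile_model(model, "CPU");
    return 0;
}
```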

View File

@ -245,7 +245,7 @@ Build OpenVINO with conditional compilation enabled:
cd %OPENVINO_HOME%
md build_cc
cd build_cc
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Debug -DENABLE_CPPLINT=OFF -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF -DENABLE_FASTER_BUILD=ON -DENABLE_SANITIZER=OFF -DTHREADING=TBB -DBUILD_SHARED_LIBS=OFF -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT -DENABLE_INTEL_GPU=OFF -DENABLE_INTEL_GNA=OFF -DENABLE_MULTI=OFF -DENABLE_AUTO=OFF -DENABLE_AUTO_BATCH=OFF -DENABLE_HETERO=OFF -DENABLE_TEMPLATE=OFF -DENABLE_OV_ONNX_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF -DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OV_TF_FRONTEND=OFF -DCMAKE_INSTALL_PREFIX=install ..
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Debug -DENABLE_CPPLINT=OFF -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF -DENABLE_FASTER_BUILD=ON -DENABLE_SANITIZER=OFF -DTHREADING=TBB -DBUILD_SHARED_LIBS=OFF -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT -DENABLE_INTEL_GPU=OFF -DENABLE_MULTI=OFF -DENABLE_AUTO=OFF -DENABLE_AUTO_BATCH=OFF -DENABLE_HETERO=OFF -DENABLE_TEMPLATE=OFF -DENABLE_OV_ONNX_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF -DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OV_TF_FRONTEND=OFF -DCMAKE_INSTALL_PREFIX=install ..
cmake --build . --config Debug
@ -278,7 +278,7 @@ Generate final optimal binaries size of OpenVINO package
md build
cd build
cmake -G "Visual Studio 16 2019" -A x64 -DENABLE_CPPLINT=OFF -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF -DCMAKE_BUILD_TYPE=Release -DENABLE_FASTER_BUILD=ON -DENABLE_PROFILING_ITT=OFF -DSELECTIVE_BUILD=ON -DENABLE_INTEL_GPU=OFF -DENABLE_INTEL_GNA=OFF -DENABLE_MULTI=OFF -DENABLE_AUTO=OFF -DENABLE_AUTO_BATCH=OFF -DENABLE_HETERO=OFF -DENABLE_TEMPLATE=OFF -DENABLE_OV_ONNX_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF -DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OV_TF_FRONTEND=OFF -DSELECTIVE_BUILD_STAT=%OPENVINO_HOME%\cc_data\*.csv -DBUILD_SHARED_LIBS=OFF -DENABLE_LTO=ON -DENABLE_ONEDNN_FOR_GPU=OFF -DENABLE_GAPI_PREPROCESSING=OFF -DENABLE_OV_TF_LITE_FRONTEND=OFF -DENABLE_PROFILING_FIRST_INFERENCE=OFF ..
cmake -G "Visual Studio 16 2019" -A x64 -DENABLE_CPPLINT=OFF -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF -DCMAKE_BUILD_TYPE=Release -DENABLE_FASTER_BUILD=ON -DENABLE_PROFILING_ITT=OFF -DSELECTIVE_BUILD=ON -DENABLE_INTEL_GPU=OFF -DENABLE_MULTI=OFF -DENABLE_AUTO=OFF -DENABLE_AUTO_BATCH=OFF -DENABLE_HETERO=OFF -DENABLE_TEMPLATE=OFF -DENABLE_OV_ONNX_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF -DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OV_TF_FRONTEND=OFF -DSELECTIVE_BUILD_STAT=%OPENVINO_HOME%\cc_data\*.csv -DBUILD_SHARED_LIBS=OFF -DENABLE_LTO=ON -DENABLE_ONEDNN_FOR_GPU=OFF -DENABLE_GAPI_PREPROCESSING=OFF -DENABLE_OV_TF_LITE_FRONTEND=OFF -DENABLE_PROFILING_FIRST_INFERENCE=OFF ..
cmake --build . --config Release

View File

@ -19,8 +19,6 @@ This document provides description and default values for CMake options that can
* `ON` is default for x86 platforms; `OFF`, otherwise.
* `ENABLE_INTEL_GPU` enables Intel GPU plugin compilation:
* `ON` is default for x86 platforms; not available, otherwise.
* `ENABLE_INTEL_GNA` enables GNA plugin compilation:
* `ON` is default for x86 platforms; not available, otherwise.
* `ENABLE_HETERO` enables HETERO plugin build:
* `ON` is default.
* `ENABLE_MULTI` enables MULTI plugin build:
@ -58,9 +56,6 @@ This document provides description and default values for CMake options that can
* `ON` if requirements are satisfied (auto-discovered by CMake).
* `ENABLE_TESTS` enables tests compilation:
* `OFF` is default.
* `ENABLE_IR_V7_READER` enables IR v7 reader:
* `ON` is default.
**Note:** must be turned `OFF` when building OpenVINO runtime as static
* `ENABLE_DOCS` enables building the OpenVINO documentation:
* `OFF` is on Debian (Ubuntu) OSes
* `OFF` is in other cases.

View File

@ -31,14 +31,12 @@ The default architecture of OpenVINO Runtime assumes that the following componen
* (Device) Inference backends (CPU, GPU, MULTI, HETERO, etc.)
* (Model) Frontends (IR, ONNX, PDPD, etc.)
* Preprocessing library (to perform preprocessing, e.g. resize and color space conversions)
* IR v7 reader (used in legacy tests only; if you are not going to run OpenVINO tests, set `-DENABLE_TESTS=OFF`, which disables the IR v7 reader)
With the static OpenVINO Runtime, all these modules should be linked into a final user application and **the list of modules/configuration must be known for the CMake configuration stage**. To minimize the total binary size, you can explicitly turn `OFF` unnecessary components. Use [[CMake Options for Custom Compilation|CMakeOptionsForCustomCompilation ]] as a reference for OpenVINO CMake configuration.
For example, to enable only IR v11 reading and CPU inference capabilities, use:
```sh
cmake -DENABLE_INTEL_GPU=OFF \
-DENABLE_INTEL_GNA=OFF \
-DENABLE_TEMPLATE=OFF \
-DENABLE_HETERO=OFF \
-DENABLE_MULTI=OFF \
@ -49,7 +47,6 @@ cmake -DENABLE_INTEL_GPU=OFF \
-DENABLE_OV_TF_FRONTEND=OFF \
-DENABLE_OV_TF_LITE_FRONTEND=OFF \
-DENABLE_OV_PYTORCH_FRONTEND=OFF \
-DENABLE_IR_V7_READER=OFF \
-DENABLE_GAPI_PREPROCESSING=OFF \
-DENABLE_INTEL_CPU=ON \
-DENABLE_OV_IR_FRONTEND=ON
@ -135,7 +132,6 @@ cmake -DCMAKE_TOOLCHAIN_FILE=<openvino source dir>/cmake/toolchains/mt.runtime.w
* The enabled and tested capabilities of OpenVINO Runtime in a static build:
* OpenVINO common runtime - work with `ov::Model`, perform model loading on particular device
* CPU and GNA inference plugins (**GPU is not enabled**)
* MULTI, HETERO, AUTO, and BATCH inference modes
* IR, ONNX, PDPD, and TF frontends to read `ov::Model`
* Static build support for building static libraries only for OpenVINO Runtime libraries. All other third-party prebuilt dependencies remain in the same format:

View File

@ -54,12 +54,6 @@ endif()
if(NOT BUILD_SHARED_LIBS)
target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY)
# TODO: remove together with GNA plugin
# for static linkage the dependencies are in opposite order
if(TARGET inference_engine_ir_v7_reader)
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_ir_v7_reader)
endif()
endif()
if(WIN32)

View File

@ -23,14 +23,6 @@ file (GLOB LIBRARY_SRC
set(OV_STATIC_DEPENDENT_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/file_utils.cpp)
list(REMOVE_ITEM LIBRARY_SRC ${OV_STATIC_DEPENDENT_FILES})
if(BUILD_SHARED_LIBS OR ENABLE_IR_V7_READER)
# TODO: remove together with GNA plugin
# we have unconditional adding of the ENABLE_IR_V7_READER compile definition for shared libs case
# to avoid rebuild, relink during work with build tree
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/src/ie_network_reader.cpp PROPERTIES
COMPILE_DEFINITIONS "ENABLE_IR_V7_READER")
endif()
file (GLOB LIBRARY_HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/src/*.h
${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp

View File

@ -667,7 +667,7 @@ public:
* @brief Returns devices available for inference.
* Core objects go over all registered plugins and ask about available devices.
*
* @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, GNA }.
* @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, NPU }.
* If there is more than one device of a specific type, they are enumerated with the .# suffix.
* Such enumerated device can later be used as a device name in all Core methods like Core::compile_model,
* Core::query_model, Core::set_property and so on.

View File

@ -286,7 +286,7 @@ public:
/**
* @brief Returns devices available for neural networks inference
*
* @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, GNA }
* @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, NPU }
* If there more than one device of specific type, they are enumerated with .# suffix.
*/
std::vector<std::string> GetAvailableDevices() const override;

View File

@ -19,13 +19,7 @@
#include "ie_common.h"
#include "ie_icnn_network.hpp"
#include "ie_input_info.hpp"
#include "openvino/frontend/manager.hpp"
#include "openvino/runtime/shared_buffer.hpp"
#ifdef ENABLE_IR_V7_READER
# include "legacy/ie_ir_version.hpp"
#endif
#include "itt.hpp"
#include "legacy/ie_reader.hpp"
#include "legacy_op_extension.hpp"
#include "ngraph/function.hpp"
#include "ngraph/type/element_type.hpp"
@ -33,6 +27,8 @@
#include "openvino/core/except.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/frontend/manager.hpp"
#include "openvino/runtime/shared_buffer.hpp"
#include "openvino/util/shared_object.hpp"
#include "so_ptr.hpp"
#include "transformations/rt_info/old_api_map_order_attribute.hpp"
@ -40,179 +36,6 @@
namespace InferenceEngine {
#ifdef ENABLE_IR_V7_READER
/**
* @brief This class is a wrapper for reader interfaces
*/
class Reader : public IReader {
# ifdef OPENVINO_STATIC_LIBRARY
using ReaderPtr = std::shared_ptr<IReader>;
# else
using ReaderPtr = ov::SoPtr<IReader>;
# endif
ReaderPtr ptr;
public:
using Ptr = std::shared_ptr<Reader>;
explicit Reader(const std::string& location) {
# ifdef OPENVINO_STATIC_LIBRARY
// call library creator directly, since we are in the same application
InferenceEngine::CreateReader(ptr);
OPENVINO_ASSERT(ptr != nullptr, "Failed to create static version of IR v7 reader");
# else
ov::util::FilePath libraryPath = ov::util::to_file_path(FileUtils::makePluginLibraryName({}, location));
ov::util::FilePath readersLibraryPath = FileUtils::makePath(getInferenceEngineLibraryPath(), libraryPath);
if (FileUtils::fileExist(readersLibraryPath)) {
libraryPath = readersLibraryPath;
}
auto so = ov::util::load_shared_object(libraryPath.c_str());
std::shared_ptr<IReader> plugin_impl;
using createFunc = void(std::shared_ptr<IReader>&);
reinterpret_cast<createFunc*>(ov::util::get_symbol(so, "CreateReader"))(plugin_impl);
ptr = {plugin_impl, so};
# endif // OPENVINO_STATIC_LIBRARY
}
bool supportModel(std::istream& model) const override {
OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "Reader::supportModel");
return ptr->supportModel(model);
}
CNNNetwork read(std::istream& model, const std::vector<IExtensionPtr>& exts) const override {
return ptr->read(model, exts);
}
CNNNetwork read(std::istream& model,
const Blob::CPtr& weights,
const std::vector<IExtensionPtr>& exts) const override {
return ptr->read(model, weights, exts);
}
std::vector<std::string> getDataFileExtensions() const override {
return ptr->getDataFileExtensions();
}
};
namespace {
Reader::Ptr reader_irv7 = nullptr;
void registerReaders() {
OV_ITT_SCOPED_TASK(ov::itt::domains::OV, "registerReaders");
static bool initialized = false;
static std::mutex readerMutex;
std::lock_guard<std::mutex> lock(readerMutex);
if (initialized)
return;
initialized = true;
// try to load IR reader v7 if library exists
try {
reader_irv7 =
std::make_shared<Reader>(std::string("inference_engine_ir_v7_reader") + std::string(OV_BUILD_POSTFIX));
} catch (const std::runtime_error&) {
// runtime error is thrown in case of library cannot be loaded
}
}
void assertIfIRv7LikeModel(std::istream& modelStream) {
auto irVersion = details::get_ir_version(modelStream);
bool isIRv7 = irVersion > 1 && irVersion <= 7;
if (!isIRv7 || reader_irv7)
return;
IE_THROW() << "The support of IR v" << irVersion
<< " has been removed from the product. "
"Please, convert the original model using the Model Optimizer which comes with this "
"version of the OpenVINO to generate supported IR version.";
}
CNNNetwork load_ir_v7_network(const std::string& modelPath,
const std::string& binPath,
const std::vector<IExtensionPtr>& exts) {
// Fix unicode name
# if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
std::wstring model_path = ov::util::string_to_wstring(modelPath.c_str());
# else
std::string model_path = modelPath;
# endif
if (ov::util::directory_exists(modelPath)) {
return {};
}
// Try to open model file
std::ifstream modelStream(model_path.c_str(), std::ios::binary);
if (!modelStream.is_open())
IE_THROW() << "Model file " << modelPath << " cannot be opened!";
assertIfIRv7LikeModel(modelStream);
// Check that reader supports the model
if (reader_irv7 && reader_irv7->supportModel(modelStream)) {
// Find weights
std::string bPath = binPath;
if (bPath.empty()) {
auto pathWoExt = modelPath;
auto pos = modelPath.rfind('.');
if (pos != std::string::npos)
pathWoExt = modelPath.substr(0, pos);
for (const auto& ext : reader_irv7->getDataFileExtensions()) {
bPath = pathWoExt + "." + ext;
if (!FileUtils::fileExist(bPath)) {
bPath.clear();
} else {
break;
}
}
}
if (!bPath.empty()) {
// Open weights file
# if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
std::wstring weights_path = ov::util::string_to_wstring(bPath.c_str());
# else
std::string weights_path = bPath;
# endif
std::ifstream binStream;
binStream.open(weights_path.c_str(), std::ios::binary);
if (!binStream.is_open())
IE_THROW() << "Weights file " << bPath << " cannot be opened!";
binStream.seekg(0, std::ios::end);
size_t fileSize = binStream.tellg();
binStream.seekg(0, std::ios::beg);
Blob::Ptr weights = make_shared_blob<uint8_t>({Precision::U8, {fileSize}, C});
{
OV_ITT_SCOPE(FIRST_INFERENCE, ov::itt::domains::ReadTime, "ReadNetworkWeights");
weights->allocate();
binStream.read(weights->buffer(), fileSize);
binStream.close();
}
// read model with weights
auto network = reader_irv7->read(modelStream, weights, exts);
modelStream.close();
return network;
}
// read model without weights
return reader_irv7->read(modelStream, exts);
}
return {};
}
} // namespace
#endif // ENABLE_IR_V7_READER
namespace {
CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
@ -297,20 +120,6 @@ CNNNetwork details::ReadNetwork(const std::string& modelPath,
bool is_new_api,
bool enable_mmap) {
auto exts = ov::legacy_convert::convert_extension(ov_exts);
#ifdef ENABLE_IR_V7_READER
// IR v7 obsolete code
{
// Register readers if it is needed
registerReaders();
auto cnnnetwork = load_ir_v7_network(modelPath, binPath, exts);
OPENVINO_SUPPRESS_DEPRECATED_START
if (static_cast<ICNNNetwork::Ptr>(cnnnetwork) != nullptr) {
return cnnnetwork;
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
#endif // ENABLE_IR_V7_READER
// Fix unicode name
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
@ -366,21 +175,6 @@ CNNNetwork details::ReadNetwork(const std::string& model,
std::istream& modelStream = modelStringStream;
auto exts = ov::legacy_convert::convert_extension(ov_exts);
#ifdef ENABLE_IR_V7_READER
// IR v7 obsolete code
{
// Register readers if it is needed
registerReaders();
assertIfIRv7LikeModel(modelStream);
if (reader_irv7 && reader_irv7->supportModel(modelStream)) {
if (weights)
return reader_irv7->read(modelStream, weights, exts);
return reader_irv7->read(modelStream, exts);
}
}
#endif // ENABLE_IR_V7_READER
// Try to load with FrontEndManager
ov::frontend::FrontEndManager manager;
ov::frontend::FrontEnd::Ptr FE;

View File

@ -8,7 +8,7 @@ namespace auto_plugin {
// AUTO will enable the blocklist if
// 1.No device priority passed to AUTO/MULTI.(eg. core.compile_model(model, "AUTO", configs);)
// 2.No valid device parsed out from device priority (eg. core.compile_model(model, "AUTO:-CPU,-GPU", configs);).
const std::set<std::string> PluginConfig::device_block_list = {"NPU", "GNA", "notIntelGPU"};
const std::set<std::string> PluginConfig::device_block_list = {"NPU", "notIntelGPU"};
PluginConfig::PluginConfig() {
set_default();

View File

@ -1,141 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if (NOT ENABLE_INTEL_GNA)
return()
endif()
# GNA plugin will be removed before 2024.0 together with API 1.0
ov_disable_deprecated_warnings()
add_subdirectory(legacy)
set(TARGET_NAME "openvino_intel_gna_plugin")
if (ENABLE_INTEL_GNA_DEBUG)
add_compile_definitions(GNA_DEBUG)
endif()
file(GLOB_RECURSE SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
file(GLOB_RECURSE HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/src/*.h
${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp)
# build avx2.cpp with AVX2 support, only for Windows
if(ENABLE_AVX2 AND CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
ov_avx2_optimization_flags(avx2_flags)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/src/pre_post_process/hw_accelerated_converter_avx2.cpp PROPERTIES COMPILE_OPTIONS "${avx2_flags}")
add_compile_definitions(HAVE_AVX2=1)
endif()
find_package(libGNA REQUIRED
CONFIG
PATHS "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
NO_DEFAULT_PATH
NO_CMAKE_FIND_ROOT_PATH)
#
# Shared plugin library
#
ov_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "GNA"
SOURCES ${SOURCES} ${HEADERS}
VERSION_DEFINES_FOR src/gna_plugin_entry_points.cpp
ADD_CLANG_FORMAT)
# Enable support of CC for the plugin
ov_mark_target_as_cc(${TARGET_NAME})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_legacy
Threads::Threads libGNA)
target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
target_compile_definitions(${TARGET_NAME}
PRIVATE
_NO_MKL_
)
# must be called after all target_link_libraries
ov_add_api_validator_post_build_step(TARGET ${TARGET_NAME})
#
# Static version for tests
#
add_library(${TARGET_NAME}_test_static STATIC EXCLUDE_FROM_ALL ${SOURCES} ${HEADERS})
ov_add_version_defines(src/gna_plugin_entry_points.cpp ${TARGET_NAME}_test_static)
target_compile_definitions(${TARGET_NAME}_test_static
PRIVATE
_NO_MKL_
IMPLEMENT_INFERENCE_ENGINE_PLUGIN
PUBLIC
INTEGER_LOW_P
USE_STATIC_IE)
target_link_libraries(${TARGET_NAME}_test_static PUBLIC inference_engine_s inference_engine_transformations libGNA::API)
target_include_directories(${TARGET_NAME}_test_static
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/src
$<TARGET_PROPERTY:inference_engine_legacy,INTERFACE_INCLUDE_DIRECTORIES>
PRIVATE
$<TARGET_PROPERTY:openvino::conditional_compilation,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(${TARGET_NAME} ${TARGET_NAME}_test_static
PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
# install
if(BUILD_SHARED_LIBS)
set(gna_component gna)
else()
# during static build all plugins are part of the core, thus the dependencies as well
set(gna_component ${OV_CPACK_COMP_CORE})
endif()
file(GLOB_RECURSE gna_libraries "${libGNA_LIBRARIES_BASE_PATH}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*")
set(gna_library_installed OFF)
foreach(gna_lib IN LISTS gna_libraries)
# install only versioned files like libgna.so.N or libgna.so.N.M
# and don't install global symlink libgna.so. Exceptions:
# 1. On Windows we don't have libraries versioning and need to install all .dlls
# 2. For case of static libraries we also need to install .so since it's required for linkage with final application
if(gna_lib MATCHES "^.*\.${CMAKE_SHARED_LIBRARY_SUFFIX}(\.[0-9]+)+$" OR WIN32 OR NOT BUILD_SHARED_LIBS)
install(FILES ${gna_lib}
DESTINATION ${OV_CPACK_RUNTIMEDIR}
COMPONENT ${gna_component})
set(gna_library_installed ON)
endif()
endforeach()
if(NOT gna_library_installed)
message(FATAL_ERROR "Failed to install GNA library (available libraries: ${gna_libraries})")
endif()
# additional install rules for case of static libraries
if(NOT BUILD_SHARED_LIBS)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/libGNAConfig.cmake ${CMAKE_BINARY_DIR} COPYONLY)
install(FILES "${CMAKE_BINARY_DIR}/libGNAConfig.cmake"
DESTINATION ${OV_CPACK_IE_CMAKEDIR}
COMPONENT ${gna_component}
${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
# install .lib file on Windows
if(WIN32)
file(GLOB_RECURSE gna_libraries "${libGNA_LIBRARIES_BASE_PATH}/*${CMAKE_STATIC_LIBRARY_SUFFIX}")
install(FILES ${gna_libraries}
DESTINATION ${OV_CPACK_LIBRARYDIR}
COMPONENT ${gna_component}
${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
endif()
endif()
add_subdirectory(tests)

View File

@ -1,88 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#
# The module defines several imported targets:
#
# - (Optional) libGNA::API
# - (Optional) libGNA::KERNEL
#
# And high-level imported interface target:
#
# - libGNA
#
# And the following variables:
#
# - libGNA_API_FOUND
# - libGNA_KERNEL_FOUND
#
# The example usage:
#
# find_package(libGNA NO_MODULE COMPONENTS API KERNEL)
#
set(libGNA_FOUND ON)
set(GNA_KERNEL_LIB_NAME gna CACHE STRING "" FORCE)
if(NOT libGNA_FIND_COMPONENTS)
set(libGNA_FIND_COMPONENTS "API;KERNEL")
endif()
foreach (_gna_component ${libGNA_FIND_COMPONENTS})
set(libGNA_${_gna_component}_FOUND OFF)
set(libGNA_FIND_REQUIRED_${_gna_component} ON)
endforeach()
set(libGNA_LIBRARIES_BASE_PATH ${GNA_PATH} CACHE STRING "" FORCE)
if(libGNA_FIND_REQUIRED_KERNEL AND NOT TARGET libGNA::KERNEL)
find_library(GNA_KERNEL_LIBRARY ${GNA_KERNEL_LIB_NAME}
HINTS ${libGNA_LIBRARIES_BASE_PATH}
NO_CMAKE_FIND_ROOT_PATH)
if(GNA_KERNEL_LIBRARY)
add_library(libGNA::KERNEL SHARED IMPORTED)
set_property(TARGET libGNA::KERNEL APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
if(WIN32)
set(gna_dll "${CMAKE_SHARED_LIBRARY_PREFIX}${GNA_KERNEL_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}")
set_target_properties(libGNA::KERNEL PROPERTIES
IMPORTED_LOCATION_RELEASE "${libGNA_LIBRARIES_BASE_PATH}/${gna_dll}"
IMPORTED_IMPLIB_RELEASE "${GNA_KERNEL_LIBRARY}")
else()
set_target_properties(libGNA::KERNEL PROPERTIES
IMPORTED_LOCATION_RELEASE "${GNA_KERNEL_LIBRARY}"
INTERFACE_LINK_OPTIONS "-Wl,-rpath-link,${libGNA_LIBRARIES_BASE_PATH}")
endif()
else()
message(SEND_ERROR "GNA KERNEL library (${GNA_KERNEL_LIB_NAME}) was not found in ${libGNA_LIBRARIES_BASE_PATH}")
endif()
endif()
if(libGNA_FIND_REQUIRED_API AND NOT TARGET libGNA::API)
find_path(libGNA_INCLUDE_DIRS gna2-api.h
PATHS "${GNA_EXT_DIR}/include"
NO_CMAKE_FIND_ROOT_PATH)
if(libGNA_INCLUDE_DIRS)
add_library(libGNA::API INTERFACE IMPORTED)
set_target_properties(libGNA::API PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${libGNA_INCLUDE_DIRS}")
else()
message(SEND_ERROR "GNA API headers (gna2-api.h) was not found in ${GNA_EXT_DIR}/include")
endif()
endif()
if(TARGET libGNA::KERNEL)
set(libGNA_KERNEL_FOUND ON)
endif()
if(TARGET libGNA::API)
set(libGNA_API_FOUND ON)
endif()
if(NOT TARGET libGNA)
add_library(libGNA INTERFACE IMPORTED)
foreach(_lib_name IN LISTS libGNA_FIND_COMPONENTS)
set_property(TARGET libGNA APPEND PROPERTY INTERFACE_LINK_LIBRARIES libGNA::${_lib_name})
endforeach()
endif()

View File

@ -1,97 +0,0 @@
# GNA transformations documentation
The GNA plugin provides implementations of multiple methods required by the OpenVINO plugin API. An original model usually consists of a variety of operations, e.g. Convolution, Add, Gather, LSTMSequence, and so on. GNA hardware has its own limitations, and not all operations can be executed on it.
One of the main functionalities of the GNA plugin is the conversion of the source network into an equivalent network that can be executed on the GNA hardware. This conversion is done in the LoadNetwork method.
## LoadNetwork
In the future, GNAPlugin::LoadNetwork should execute the following stages:
- Converting input graph to fully GNA-supported graph (all in ngraph)
- Creating and connecting GNA primitives within libGNA from ngraph-based network
These stages include:
- Obtain ngraph-based network from the CNNNetwork argument (if input is not ngraph-based, proceed to CNNNetwork passes stage)
- Pass ngraph-based network through ngraph-based transformations.
- Convert ngraph-based network to CNNNetwork-based
- Pass network through CNNNetwork-based transformations.
- Creating and connecting GNA primitives within libGNA from the CNNNetwork-based graph
Transformations are the way of modifying the input graph. Ngraph-based transformations are usually of the following types:
- inherited from ov::pass::ModelPass. They implement the run_on_model method, which allows them to act as a container of other transformations. For example, ngraph::pass::CommonOptimizations executes multiple transformations inside it, each of which does some basic transformation.
- inherited from ov::pass::MatcherPass. Such transformations usually have a constructor that defines a pattern of several connected layers and a callback function that modifies the matched group of layers. The pattern can also carry additional predicates that perform checks on the traversed nodes. It is preferable to use these predicates explicitly rather than to check and return early from the callback (see the sketch below).
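A minimal sketch of such a MatcherPass (not taken from the GNA plugin sources; the class name, matched operations, and replacement logic are illustrative only):
```
#include "openvino/op/add.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/pattern/op/pattern.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"

// Hypothetical pass: matches Multiply -> Add and rewrites the matched subgraph.
class FuseMultiplyAdd : public ov::pass::MatcherPass {
public:
    FuseMultiplyAdd() {
        using namespace ov::pass::pattern;
        // The predicate is part of the pattern, so unsuitable nodes are
        // rejected during matching instead of inside the callback.
        auto mul = wrap_type<ov::op::v1::Multiply>(consumers_count(1));
        auto add = wrap_type<ov::op::v1::Add>({mul, any_input()});

        ov::matcher_pass_callback callback = [=](Matcher& m) {
            const auto& pattern_map = m.get_pattern_value_map();
            auto add_node = pattern_map.at(add).get_node_shared_ptr();
            // ... create replacement nodes and call ov::replace_node() here ...
            return false;  // return true only if the graph was actually modified
        };
        register_matcher(std::make_shared<Matcher>(add, "FuseMultiplyAdd"), callback);
    }
};
```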
GNA-specific ngraph-based transformations are placed in src/plugins/intel_gna/src/transformations. All transformations should have brief comments in their headers describing which pattern the transformation handles and what modifications it makes.
There is also a directory src/plugins/intel_gna/src/transformations/rt_info with auxiliary runtime attributes. These attributes can be added to a node's rt_info map and read or written in transformations, which is useful in some cases. For example, a transformation can process a node in a special way if the node carries a specific attribute.
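For illustration, a small sketch of writing and reading such an attribute through the generic rt_info map (the attribute name here is hypothetical; the real GNA attributes are dedicated classes under that rt_info directory):
```
#include <memory>
#include "openvino/core/node.hpp"

// Write a marker attribute onto a node in one transformation ...
void mark_node(const std::shared_ptr<ov::Node>& node) {
    node->get_rt_info()["gna_example_hint"] = true;
}

// ... and check it in a later transformation.
bool is_marked(const std::shared_ptr<ov::Node>& node) {
    const auto& rt_info = node->get_rt_info();
    return rt_info.count("gna_example_hint") != 0;
}
```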
All new transformations should have unit tests, which are placed in src/plugins/intel_gna/tests/unit/transformations. All these unit tests are compiled into the ov_gna_unit_tests binary.
CNNNetwork transformations are so-called passes. They are placed in src/plugins/intel_gna/src/optimizer/gna_pass_manager.cpp. Passes process the network as a
```
std::vector<InferenceEngine::CNNLayerPtr> * pLayers
```
It is preferable to write new transformations as ngraph passes and to avoid implementing CNNNetwork passes. All CNNNetwork-related code is considered legacy. Existing CNNNetwork passes are being ported to ngraph.
## GNA ngraph-based layers
OpenVINO allows working with graph nodes as ov::Node class instances. Most of them are stored in the src/core/include/openvino/op directory and can be used by all plugins. The GNA plugin also stores its own (GNA-specific) layer types.
1. src/plugins/intel_gna/legacy/include/legacy/ngraph_ops
These are legacy layer types; their names end with "IE". These types cannot appear in the graph that is passed to the GNA plugin. All of them are created within GNA transformations and used in the GNA graph compiler for creating libGNA primitives. There are plans to rewrite all legacy code, after which these legacy types should be removed.
2. src/plugins/intel_gna/src/ops
GNA-specific operations. For example, the GNAConvolution type describes convolution layers. It differs from the common OpenVINO Convolution type in that it handles NHWC data layout instead of NCHW.
## Ngraph-based transformations
1. Transformations that are common for all OpenVINO plugins (placed outside the GNA plugin directory). These transformations perform various optimizations. For example, ov::pass::ConvertDivide transforms a Divide operation into a sequence of nodes with a Power layer, and LSTMCellDecomposition expands LSTMCell into a subgraph of mathematical operations.
2. Transformations that are specific to the GNA plugin (placed inside the GNA plugin directory).
These also include the src/plugins/intel_gna/legacy/include/legacy/transformations/convert_opset1_to_legacy directory with ngraph-based legacy transformations. These transformations produce "IE" layers. After the GNA legacy code is rewritten, these transformations should be removed.
### "Layout transformations"
There is a group of transformations that work with data layout. GNA hardware supports MaxPool and Convolution operations differently from the common OpenVINO types: GNA supports NHWC layout, while OpenVINO uses NCHW layout.
The ReplaceGnaNHWCLayers group of transformations substitutes common types with NCHW layout by GNA-specific types with NHWC layout. This is done by wrapping the GNA types with Transpose operations that convert the layout on the input and output of the GNA types. Unfortunately, in most situations GNA hardware cannot execute these Transpose operations. To solve this issue, there are transformations that push transposes from the GNA-specific NHWC layers towards the start and end of the graph, exchanging a Transpose/Gather layer with its neighbor layer. Some of them (for example, the TransposeSinking group of transformations) can push Transpose layers through multiple layer types. These transformations are common for all of OpenVINO and are stored outside the GNA plugin code. They are not able to push a Transpose layer through Reshape-type nodes for mathematical reasons.
To push a Transpose operation through Reshape nodes, there are transformations that substitute the Transpose + Reshape pattern with Reshape + Gather. The Gather operation is not supported by the GNA hardware either, so it should also be pushed through the graph to the start and end; there is a group of transformations that does this.
Transpose/Gather sinking consists of multiple transformations. Each of them works with a small pattern consisting of a Transpose/Gather and a node of a specific kind of layer (for example, a binary elementwise operation). A sinking transformation interchanges the two layers, so after each execution the Transpose/Gather layer moves through one layer in the graph. There are multiple nodes between the start/end of the graph and the initial Transpose/Gather position; node types can repeat multiple times while sinking and occur in an arbitrary order, so the same sinking transformation has to be executed multiple times. For this, the transformations use the register_new_node functionality: this method adds the newly created Transpose/Gather node to the end of the matcher pass queue, allowing the same transformation to be executed again without the need to call it explicitly.
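A simplified sketch of this mechanism (the pass below is illustrative, not one of the real sinking transformations; it only shows where register_new_node is called):
```
#include "openvino/core/graph_util.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/transpose.hpp"
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"

// Illustrative sinking pass: pushes a Transpose through a ReLU.
class SinkTransposeThroughRelu : public ov::pass::MatcherPass {
public:
    SinkTransposeThroughRelu() {
        using namespace ov::pass::pattern;
        auto transpose = wrap_type<ov::op::v1::Transpose>();
        auto relu = wrap_type<ov::op::v0::Relu>({transpose});

        ov::matcher_pass_callback callback = [this, transpose, relu](Matcher& m) {
            const auto& pm = m.get_pattern_value_map();
            auto old_transpose = pm.at(transpose).get_node_shared_ptr();
            auto old_relu = pm.at(relu).get_node_shared_ptr();

            // The ReLU now consumes the former Transpose input directly ...
            auto new_relu = old_relu->clone_with_new_inputs({old_transpose->input_value(0)});
            // ... and the Transpose is re-created after the ReLU.
            auto new_transpose = old_transpose->clone_with_new_inputs(
                {new_relu, old_transpose->input_value(1)});

            // Put the moved Transpose back into the matcher queue so this pass
            // can match it again and keep sinking it, without an explicit re-run.
            register_new_node(new_transpose);

            ov::replace_node(old_relu, new_transpose);
            return true;
        };
        register_matcher(std::make_shared<Matcher>(relu, "SinkTransposeThroughRelu"), callback);
    }
};
```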
TransposeSinking changes the Concat and Split axes while pushing Transpose nodes through them, but GNA does not support all possible Concat and Split axes. Some TransposeSinking transformations support callbacks: these callbacks are executed inside the transformations and allow adding plugin-specific checks. In these checks, the GNA plugin prevents sinking transposes that would make some Split/Concat nodes unsupported.
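A hedged sketch of such a check, shown here with the generic PassConfig callback (real code typically targets the individual sinking passes via the templated set_callback<Pass>(); the axis check and its effect are illustrative, since each pass defines how it interprets the callback result):
```
#include "openvino/op/concat.hpp"
#include "openvino/pass/manager.hpp"

void add_plugin_checks(ov::pass::Manager& manager) {
    manager.get_pass_config()->set_callback(
        [](const std::shared_ptr<const ov::Node>& node) -> bool {
            // Illustrative plugin-specific check: prohibit sinking when the
            // resulting Concat axis would not be supported by the hardware.
            if (auto concat = ov::as_type_ptr<const ov::op::v0::Concat>(node)) {
                return concat->get_axis() != 0;  // true -> the pass skips this node
            }
            return false;
        });
}
```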
Once Transpose and Gather layers are moved to the start and end of the graph, they are cut from the graph and moved into the ov::intel_gna::PrePostProcessModels structure as separate models. On each network inference, the plugin searches this structure for a model for the given input/output, executes that model on the CPU, and copies the resulting data as the input/output of the entire model.
The TransposeSinking group of transformations does not currently support the StridedSlice layer, which leads to the following problem.
The GNA plugin has the following Slice layer flow:
- SliceToStridedSlice transformation in CommonOptimizations converts Slice to StridedSlice
- ConvertStridedSliceToCropMatcher transformation converts StridedSlice to CropIE
- convertFunctionToICNNNetwork converts CropIE to CNNNetwork CropLayer
- GNA graph compiler converts CropLayer into affine layer
Since TransposeSinking is called after common optimizations, it cannot push a Transpose through the StridedSlice. If there is a Slice operation in the original model, we should prevent converting Slice to StridedSlice in common optimizations. This is done by the following steps:
- Disable execution of SliceToStridedSlice transformation
- Execute entire set of ngraph-based transformations
- Execute a set of transformations to convert Slice -> StridedSlice -> CropIE nodes
When the StridedSlice layer is supported by TransposeSinking, these steps can be removed from the GNA plugin pipeline.
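Disabling the conversion is done through the pass config; a minimal sketch (assuming the SliceToStridedSlice pass and its header from the common transformations library):
```
#include "openvino/pass/manager.hpp"
#include "transformations/op_conversions/convert_slice_to_strided_slice.hpp"

void configure_common_optimizations(ov::pass::Manager& manager) {
    // Keep Slice untouched during common optimizations so transpose sinking
    // can still be applied; Slice -> StridedSlice -> CropIE is converted later.
    manager.get_pass_config()->disable<ov::pass::SliceToStridedSlice>();
}
```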
## CNNNetwork based passes
After running the ngraph-based transformations, the model is converted with the convertFunctionToICNNNetwork function into a CNNNetwork-based representation. The next step is model transformation with the CNNNetwork-based passes.
All the legacy CNNNetwork-based passes are stored in src/plugins/intel_gna/src/optimizer/gna_pass_manager.cpp. One of the main differences between legacy passes and ngraph transformations is that legacy passes do not have pattern matching functionality. Each pass iterates through the graph nodes (sorted topologically beforehand), searches for the sought sequence of layers, and modifies them.
It should be mentioned that ngraph API stores constant data as input nodes with type Constant, but CNNNetwork API stores data as a BLOB in layer info.
## Debugging
There is an ability to dump the model between transformations/passes.
To dump the model between CNNNetwork passes, add the -DENABLE_INTEL_GNA_DEBUG=ON option to the CMake build configuration. After plugin execution, *.dot files representing the final graph will be saved in the current working directory; the *.dot files can be converted to an image with the graphviz dot executable, for example:
```
dot -Tpng <dot_filename> -o <image.png>
```
To dump the CNNNetwork-based model as XML, add
```
#define ENABLE_V7_SERIALIZE
```
to src/plugins/intel_gna/src/log/debug.hpp
To dump the model between ngraph-based transformations, use the VisualizeTree and Serialize transformations.
### VisualizeTree
The VisualizeTree transformation allows dumping the model as an image.
```
#include "openvino/pass/visualize_tree.hpp"
manager.register_pass<ov::pass::VisualizeTree>("./dump.png");
```
### Serialize
The Serialize transformation allows dumping the model as XML and binary files that can be loaded in the Netron web application
```
#include "openvino/pass/serialize.hpp"
manager.register_pass<ov::pass::Serialize>("./dump.xml", "./dump.bin");
```
Here, manager is an ov::pass::Manager instance.

View File

@ -1,92 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "inference_engine_legacy")
set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include")
file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp)
file(GLOB_RECURSE PUBLIC_HEADERS ${PUBLIC_HEADERS_DIR}/*.hpp
${PUBLIC_HEADERS_DIR}/*.h)
# Create named folders for the sources within the .vcproj
# Empty name lists them directly under the .vcproj
source_group("src" FILES ${LIBRARY_SRC})
source_group("include" FILES ${PUBLIC_HEADERS})
if(ENABLE_V7_SERIALIZE)
set_source_files_properties("${CMAKE_CURRENT_SOURCE_DIR}/src/cnn_network_impl.cpp"
PROPERTIES COMPILE_DEFINITIONS ENABLE_V7_SERIALIZE)
endif()
# Create object library
add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${PUBLIC_HEADERS})
ov_build_target_faster(${TARGET_NAME}_obj
PCH PRIVATE "src/precomp.hpp")
target_include_directories(${TARGET_NAME}_obj PRIVATE
${PUBLIC_HEADERS_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/src
$<TARGET_PROPERTY:inference_engine_obj,SOURCE_DIR>/src # For CNNNetworkNGraphImpl
$<TARGET_PROPERTY:openvino::runtime::dev,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:openvino::pugixml,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:openvino_intel_gna_plugin,SOURCE_DIR>/src/ops)
target_compile_definitions(${TARGET_NAME}_obj PRIVATE $<TARGET_PROPERTY:ngraph,INTERFACE_COMPILE_DEFINITIONS>)
target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::itt)
ov_add_clang_format_target(${TARGET_NAME}_obj_clang FOR_TARGETS ${TARGET_NAME}_obj)
# Create static library
add_library(${TARGET_NAME} STATIC EXCLUDE_FROM_ALL
$<TARGET_OBJECTS:${TARGET_NAME}_obj>)
target_link_libraries(${TARGET_NAME} PUBLIC openvino::runtime
PRIVATE openvino::pugixml openvino::itt openvino::runtime::dev)
target_include_directories(${TARGET_NAME} INTERFACE
$<BUILD_INTERFACE:${PUBLIC_HEADERS_DIR}>)
# Compile with USE_STATIC_IE
add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${PUBLIC_HEADERS})
target_compile_definitions(${TARGET_NAME}_s PUBLIC USE_STATIC_IE)
target_include_directories(${TARGET_NAME}_s
PUBLIC
$<BUILD_INTERFACE:${PUBLIC_HEADERS_DIR}>
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/src
$<TARGET_PROPERTY:inference_engine_obj,SOURCE_DIR>/src # For CNNNetworkNGraphImpl
$<TARGET_PROPERTY:openvino::runtime::dev,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:openvino::pugixml,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:openvino_intel_gna_plugin,SOURCE_DIR>/src/ops)
set_target_properties(${TARGET_NAME}_s PROPERTIES
EXCLUDE_FROM_ALL ON
INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
# LTO
set_target_properties(${TARGET_NAME} ${TARGET_NAME}_obj
PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
# install
ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE} OPTIONAL)
if(ENABLE_TESTS)
add_subdirectory(tests)
endif()

View File

@ -1,143 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp/ie_cnn_network.h>
#include <legacy/ie_layers.h>
#include <ie_icnn_network.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "ie_api.h"
#include "ie_blob.h"
#include "ie_common.h"
#include "ie_data.h"
#include "ie_input_info.hpp"
namespace InferenceEngine {
namespace details {
IE_SUPPRESS_DEPRECATED_START
class CNNNetworkImpl final : public ICNNNetwork {
public:
CNNNetworkImpl();
explicit CNNNetworkImpl(const CNNNetwork& ngraphImpl);
~CNNNetworkImpl();
std::shared_ptr<::ngraph::Function> getFunction() noexcept override {
return nullptr;
}
std::shared_ptr<const ::ngraph::Function> getFunction() const noexcept override {
return nullptr;
}
void getOutputsInfo(std::map<std::string, DataPtr>& out) const noexcept override;
void getInputsInfo(InputsDataMap& inputs) const noexcept override;
InputInfo::Ptr getInput(const std::string& inputName) const noexcept override {
auto it = _inputData.find(inputName);
if (it == _inputData.end()) {
return nullptr;
}
return it->second;
}
void setInputInfo(InputInfo::Ptr data) {
_inputData[data->name()] = data;
}
void removeInputInfo(const std::string& name) {
_inputData.erase(name);
}
const std::string& getName() const noexcept override {
return _name;
}
void setName(const std::string& name) {
_name = name;
}
const std::map<std::string, CNNLayerPtr>& allLayers() const {
return _layers;
}
size_t layerCount() const override {
return _layers.size();
}
DataPtr& getData(const char* name) noexcept {
return _data[name];
}
void addData(const char* name, DataPtr data) noexcept {
_data.emplace(name, data);
}
DataPtr& getData(const std::string& name) {
return getData(name.c_str());
}
void addLayer(const CNNLayerPtr& layer) noexcept;
void removeLayer(const std::string& layerName);
// renames layer, statistics is not supported
void renameLayer(const std::string& currentName, const std::string& newName);
void removeData(const std::string& dataName);
StatusCode getLayerByName(const char* layerName, CNNLayerPtr& out, ResponseDesc* resp) const noexcept;
// public version
StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept override;
// for internal usage (e.g. setBatch via reshape in tests)
StatusCode setBatchSizeReshape(size_t size, ResponseDesc* responseDesc) noexcept;
size_t getBatchSize() const override;
StatusCode addOutput(const std::string& layerName, size_t outputIndex, ResponseDesc* resp) noexcept override;
void resolveOutput();
void addOutput(const std::string& dataName);
void removeOutput(const std::string& dataName);
virtual void validate(int = 2);
StatusCode reshape(const std::map<std::string, std::vector<size_t>>& inputShapes,
ResponseDesc* resp) noexcept override;
StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept override;
StatusCode serialize(std::ostream& xmlBuf, std::ostream& binBuf, ResponseDesc* resp) const noexcept override;
StatusCode serialize(std::ostream& xmlBuf, Blob::Ptr& binBlob, ResponseDesc* resp) const noexcept override;
protected:
std::map<std::string, DataPtr> _data;
std::map<std::string, CNNLayerPtr> _layers;
InferenceEngine::InputsDataMap _inputData;
std::map<std::string, DataPtr> _outputData;
std::string _name;
DataPtr _emptyData;
};
IE_SUPPRESS_DEPRECATED_END
typedef std::shared_ptr<CNNNetworkImpl> CNNNetworkImplPtr;
} // namespace details
} // namespace InferenceEngine

View File

@ -1,95 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_ngraph_utils.hpp>
#include <legacy/cnn_network_impl.hpp>
#include <memory>
#include <ngraph/function.hpp>
#include <ngraph/op/constant.hpp>
#include <string>
#include <vector>
#include "blob_factory.hpp"
namespace InferenceEngine {
namespace details {
std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function>& graph,
const CNNNetwork& network,
bool keep_constant_inputs = false);
void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function>& graph,
const CNNNetwork& ngraphNetwork,
CNNNetworkImpl* cnnNetworkImpl,
bool keep_constant_inputs = false);
// TODO: move ConstAllocatorWrapper class, shareWeights add addBlob into CNNLayerCreator when NodeConverter class is
// removed
class ConstAllocatorWrapper : public IAllocator {
public:
explicit ConstAllocatorWrapper(std::shared_ptr<ngraph::op::Constant> constOp) : _constOp(std::move(constOp)) {}
void* lock(void* handle, LockOp) noexcept override {
return handle;
}
void unlock(void*) noexcept override {} // NOLINT
void* alloc(size_t) noexcept override {
return const_cast<void*>(_constOp->get_data_ptr());
}
bool free(void*) noexcept override { // NOLINT
return true;
}
private:
std::shared_ptr<ngraph::op::Constant> _constOp;
};
enum BlobType { weights, biases };
inline Blob::Ptr shareWeights(const std::shared_ptr<ngraph::op::Constant>& constLayer) {
if (!constLayer)
IE_THROW() << "Cannot share weights! Constant operation is empty!";
auto dataPrecision = convertPrecision(constLayer->get_element_type());
size_t shapeSize = ngraph::shape_size(constLayer->get_shape());
constexpr size_t byte_size{8};
if (dataPrecision == Precision::BIN) {
shapeSize = (shapeSize + (byte_size - 1)) / byte_size;
}
TensorDesc td(dataPrecision, {shapeSize}, Layout::C);
auto blob = make_blob_with_precision(td, std::make_shared<ConstAllocatorWrapper>(constLayer));
blob->allocate();
return blob;
}
template <class T>
bool addBlob(const std::shared_ptr<ngraph::Node>& weightsNode, std::shared_ptr<T>& res, BlobType type) {
auto constWeights = ngraph::as_type_ptr<ngraph::op::Constant>(weightsNode);
if (constWeights) {
Blob::Ptr dataBlob = shareWeights(constWeights);
if (type == weights) {
res->blobs["weights"] = dataBlob;
res->_weights = dataBlob;
} else if (type == biases) {
res->blobs["biases"] = dataBlob;
res->_biases = dataBlob;
} else {
return false;
}
return true;
} else {
return false;
}
}
} // namespace details
} // namespace InferenceEngine
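A hedged sketch of how the helpers above might be used together: convert the ngraph function of a network into the legacy form and wrap a Constant's data into a Blob without copying. The function name is illustrative; ngraph::op::Constant::create comes from the public ngraph API.

#include <memory>
#include <vector>

inline void convertExample(const InferenceEngine::CNNNetwork& network) {
    using namespace InferenceEngine;
    // Build the legacy CNNNetworkImpl from the ngraph function of the network.
    auto legacy = details::convertFunctionToICNNNetwork(network.getFunction(), network,
                                                        /*keep_constant_inputs=*/true);
    // Wrap the data of a constant node as a Blob that shares the same memory.
    auto constant = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{4},
                                                 std::vector<float>{1.f, 2.f, 3.f, 4.f});
    Blob::Ptr weights = details::shareWeights(constant);
    (void)legacy;
    (void)weights;
}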

View File

@ -1,234 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header file for the CNNNetworkIterator class
*
* @file ie_cnn_network_iterator.hpp
*/
#pragma once
#include <legacy/ie_layers.h>
#include <deque>
#include <iterator>
#include <legacy/cnn_network_impl.hpp>
#include <list>
#include <unordered_set>
#include <utility>
#include "cpp/ie_cnn_network.h"
#include "ie_api.h"
#include "ie_locked_memory.hpp"
namespace InferenceEngine {
namespace details {
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief This class enables range loops for CNNNetwork objects
*/
class CNNNetworkIterator {
IE_SUPPRESS_DEPRECATED_START
std::list<CNNLayerPtr> nextLayersToVisit{};
InferenceEngine::CNNLayerPtr currentLayer = nullptr;
const ICNNNetwork* network = nullptr;
void init(const ICNNNetwork* net) {
network = net;
if (network == nullptr)
IE_THROW() << "ICNNNetwork object is nullptr";
OutputsDataMap outputs;
network->getOutputsInfo(outputs);
InputsDataMap inputs;
network->getInputsInfo(inputs);
auto get_consumers = [](const CNNLayerPtr& node) -> std::vector<CNNLayerPtr> {
std::vector<CNNLayerPtr> consumers;
for (const auto& output : node->outData) {
for (const auto& consumer : getInputTo(output)) {
consumers.push_back(consumer.second);
}
}
return consumers;
};
std::unordered_set<CNNLayer*> visited;
auto bfs = [&](const CNNLayerPtr& start_node, bool traverse_via_outputs = false) {
if (!start_node || visited.count(start_node.get()))
return;
std::deque<CNNLayerPtr> q;
q.push_front(start_node);
while (!q.empty()) {
auto node = q.front();
q.pop_front();
if (visited.insert(node.get()).second) {
nextLayersToVisit.push_front(node);
}
// Traverse via inputs
for (const auto& input : node->insData) {
auto locked_input = input.lock();
if (!locked_input) {
IE_THROW() << "insData for " << node->name << " is not valid.";
}
if (auto next_node = getCreatorLayer(locked_input).lock()) {
if (!visited.count(next_node.get())) {
// Check that all consumers were visited
bool all_consumers_used(true);
for (const auto& consumer : get_consumers(next_node)) {
if (!visited.count(consumer.get()))
all_consumers_used = false;
}
if (all_consumers_used) {
q.push_front(next_node);
}
}
}
}
// Traverse via outputs
if (traverse_via_outputs) {
for (const auto& consumer : get_consumers(node)) {
if (!visited.count(consumer.get())) {
q.push_front(consumer);
}
}
}
}
};
// Find all outputLayers
std::vector<CNNLayerPtr> outputLayers;
const auto* networkImpl = dynamic_cast<const CNNNetworkImpl*>(network);
if (networkImpl) {
for (const auto& node : networkImpl->allLayers()) {
if (get_consumers(node.second).empty())
outputLayers.emplace_back(node.second);
}
} else {
// For backward compatibility
for (const auto& out : outputs) {
outputLayers.emplace_back(getCreatorLayer(out.second).lock());
}
}
// First we run BFS starting from outputs, which provides a deterministic graph traversal
for (const auto& output : outputLayers) {
bfs(output);
}
if (!networkImpl) {
// For cases when graph has no outputs we start bfs from inputs to ensure topological sort
for (const auto& input : inputs) {
const auto data_ptr = input.second->getInputData();
for (const auto& consumer : getInputTo(data_ptr))
bfs(consumer.second, true);
}
}
currentLayer = nextLayersToVisit.front();
}
public:
/**
* iterator trait definitions
*/
typedef std::forward_iterator_tag iterator_category;
typedef CNNLayerPtr value_type;
typedef int difference_type;
typedef CNNLayerPtr pointer;
typedef CNNLayerPtr reference;
/**
* @brief Default constructor
*/
CNNNetworkIterator() = default;
/**
* @brief Constructor. Creates an iterator for specified CNNNetwork instance.
* @param network Network to iterate. Make sure the network object is not destroyed before iterator goes out of
* scope.
*/
explicit CNNNetworkIterator(const ICNNNetwork* network) {
init(network);
}
explicit CNNNetworkIterator(const CNNNetwork& network) {
const auto& inetwork = static_cast<const InferenceEngine::ICNNNetwork&>(network);
init(&inetwork);
}
/**
* @brief Performs pre-increment
* @return This CNNNetworkIterator instance
*/
CNNNetworkIterator& operator++() {
currentLayer = next();
return *this;
}
/**
* @brief Performs post-increment.
* Implementation does not follow the std interface since only move semantics is used
*/
void operator++(int) {
currentLayer = next();
}
/**
* @brief Checks if the given iterator is not equal to this one
* @param that Iterator to compare with
* @return true if the given iterator is not equal to this one, false - otherwise
*/
bool operator!=(const CNNNetworkIterator& that) const {
return !operator==(that);
}
/**
* @brief Gets const layer pointer referenced by this iterator
*/
const CNNLayerPtr& operator*() const {
if (nullptr == currentLayer) {
IE_THROW() << "iterator out of bound";
}
return currentLayer;
}
/**
* @brief Gets a layer pointer referenced by this iterator
*/
CNNLayerPtr& operator*() {
if (nullptr == currentLayer) {
IE_THROW() << "iterator out of bound";
}
return currentLayer;
}
/**
* @brief Compares the given iterator with this one
* @param that Iterator to compare with
* @return true if the given iterator is equal to this one, false - otherwise
*/
bool operator==(const CNNNetworkIterator& that) const {
return currentLayer == that.currentLayer &&
(network == that.network ||
((network == nullptr || that.network == nullptr) && currentLayer == nullptr));
}
private:
/**
* @brief implementation based on BFS
*/
CNNLayerPtr next() {
if (nextLayersToVisit.empty()) {
return nullptr;
}
nextLayersToVisit.pop_front();
return nextLayersToVisit.empty() ? nullptr : nextLayersToVisit.front();
}
IE_SUPPRESS_DEPRECATED_END
};
} // namespace details
} // namespace InferenceEngine
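A short sketch of the traversal the iterator enables, assuming a CNNNetwork obtained elsewhere; a default-constructed iterator serves as the end sentinel, and the function name is illustrative.

#include <iostream>

inline void printLayers(const InferenceEngine::CNNNetwork& network) {
    IE_SUPPRESS_DEPRECATED_START
    InferenceEngine::details::CNNNetworkIterator it(network), end;
    for (; it != end; ++it) {
        const InferenceEngine::CNNLayerPtr& layer = *it;
        std::cout << layer->name << " (" << layer->type << ")" << std::endl;
    }
    IE_SUPPRESS_DEPRECATED_END
}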

View File

@ -1,23 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief A header file for CNNNetwork tools
*
* @file ie_cnn_network_tools.h
*/
#pragma once
#include <legacy/ie_layers.h>
#include <vector>
#include "cpp/ie_cnn_network.h"
namespace InferenceEngine {
namespace details {
std::vector<CNNLayerPtr> CNNNetSortTopologically(const CNNNetwork& network);
} // namespace details
} // namespace InferenceEngine
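A minimal sketch of the sorting helper above; the loop body and function name are illustrative.

#include <iostream>
#include <vector>

inline void printTopologicalOrder(const InferenceEngine::CNNNetwork& network) {
    IE_SUPPRESS_DEPRECATED_START
    std::vector<InferenceEngine::CNNLayerPtr> sorted =
        InferenceEngine::details::CNNNetSortTopologically(network);
    for (const auto& layer : sorted) {
        std::cout << layer->name << std::endl;
    }
    IE_SUPPRESS_DEPRECATED_END
}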

View File

@ -1,735 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <functional>
#include <legacy/cnn_network_impl.hpp>
#include <legacy/layer_transform.hpp>
#include <list>
#include <map>
#include <memory>
#include <queue>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "cpp/ie_cnn_network.h"
#include "ie_algorithm.hpp"
IE_SUPPRESS_DEPRECATED_START
namespace InferenceEngine {
namespace details {
/**
 * @brief Iterates over all layers that follow a given CNNLayer; suitable for ranged loops over output layers
*/
class OutLayersIterator {
std::vector<DataPtr>::iterator dataCntIteratorCurrent;
std::vector<DataPtr>::iterator dataCntIteratorEnd;
using OutdataIterator = std::map<std::string, CNNLayerPtr>::iterator;
bool pointingToEnd = true;
OutdataIterator currentIterator;
public:
OutLayersIterator() = default;
static OutLayersIterator make_begin(std::vector<DataPtr>& origin) {
if (origin.empty()) {
return {};
}
OutLayersIterator it;
it.dataCntIteratorCurrent = origin.begin();
it.dataCntIteratorEnd = origin.end();
it.moveToNextNonEmptyData();
return it;
}
bool operator==(const OutLayersIterator& it) const {
if (pointingToEnd || it.pointingToEnd) {
return pointingToEnd && it.pointingToEnd;
}
return it.dataCntIteratorCurrent == dataCntIteratorCurrent && it.currentIterator == currentIterator;
}
bool operator!=(const OutLayersIterator& it) const {
return !this->operator==(it);
}
void operator++() {
if (dataCntIteratorCurrent == dataCntIteratorEnd) {
return;
}
if (pointingToEnd) {
return;
}
currentIterator++;
if (currentIterator != getInputTo(dataCntIteratorCurrent->get()).end()) {
return;
}
dataCntIteratorCurrent++;
moveToNextNonEmptyData();
}
CNNLayerPtr operator*() const {
return currentIterator->second;
}
protected:
void moveToNextNonEmptyData() {
pointingToEnd = true;
for (; dataCntIteratorCurrent != dataCntIteratorEnd; dataCntIteratorCurrent++) {
if (!getInputTo(dataCntIteratorCurrent->get()).empty()) {
currentIterator = getInputTo(dataCntIteratorCurrent->get()).begin();
pointingToEnd = false;
break;
}
}
}
};
class OutInfoWrapper {
CNNLayer* origin = nullptr;
public:
explicit OutInfoWrapper(CNNLayer* origin) : origin(origin) {}
OutLayersIterator begin() const {
return OutLayersIterator::make_begin(origin->outData);
}
OutLayersIterator end() const {
return {};
}
};
inline OutInfoWrapper default_order(CNNLayer* layer) {
return OutInfoWrapper(layer);
}
/**
 * @brief implementation of DFS with visited-node tracking to avoid visiting the same node twice
 * @param visited - map that stores visited layers
 * @param layer - current layer to start DFS from
 * @param visit - user callback invoked on each visited node
 * @param visitBefore - indicates whether the callback is invoked before all child nodes or after
 * @return false if a cycle is detected
*/
template <class T, class Ordering = std::function<OutInfoWrapper(CNNLayer*)>>
inline bool DFS(std::unordered_map<CNNLayer*, bool>& visited,
const InferenceEngine::CNNLayerPtr& layer,
const T& visit,
bool visitBefore,
const Ordering& order = &default_order) {
if (layer == nullptr) {
return true;
}
if (visitBefore)
visit(layer);
visited[layer.get()] = false;
for (auto outLayerPtr : order(layer.get())) {
auto i = visited.find(outLayerPtr.get());
if (i != visited.end()) {
/**
* cycle detected we entered still not completed node
*/
if (!i->second) {
return false;
}
continue;
}
if (!DFS(visited, outLayerPtr, visit, visitBefore, order)) {
return false;
}
}
if (!visitBefore)
visit(layer);
visited[layer.get()] = true;
return true;
}
/**
 * @brief implementation of DFS over the undirected graph: the next layers are not only children but also parents
 * @param visited - set that stores visited layers
 * @param layer - current layer to start UnorderedDFS from
 * @param visit - user callback invoked on each visited node
 * @param visitBefore - indicates whether the callback is invoked before all neighbour nodes or after
*/
template <class T>
inline void UnorderedDFS(std::unordered_set<CNNLayer*>& visited,
const InferenceEngine::CNNLayerPtr& layer,
const T& visit,
bool visitBefore) {
std::queue<InferenceEngine::CNNLayerPtr> layers;
auto cycleDFS = [&]() {
if (layers.empty())
return;
auto cnnLayer = layers.front();
layers.pop();
if (cnnLayer == nullptr) {
return;
}
if (visited.end() != visited.find(cnnLayer.get())) {
return;
}
if (visitBefore)
visit(cnnLayer);
visited.insert(cnnLayer.get());
// visit children
for (const auto& od : cnnLayer->outData) {
for (const auto& nl : getInputTo(od)) {
layers.push(nl.second);
}
}
// visit parents
for (size_t i = 0; i < cnnLayer->insData.size(); i++) {
auto& input = cnnLayer->insData[i];
if (!input.lock()) {
IE_THROW() << "Data " << i << " inserted into layer " << cnnLayer->name << " is nullptr";
} else {
auto creatorLayer = getCreatorLayer(input.lock()).lock();
if (creatorLayer) {
layers.push(creatorLayer);
}
}
}
if (!visitBefore)
visit(cnnLayer);
};
layers.push(layer);
while (!layers.empty()) {
cycleDFS();
}
}
/**
 * @brief implementation of BFS with visited-node tracking to avoid visiting the same node twice
 * @param layer - current layer to start BFS from
 * @param visit - user callback invoked on each visited node
 * @param maxDepth - maximum depth to traverse, -1 for unlimited
*/
template <class T>
inline void BFS(InferenceEngine::CNNLayerPtr layer, const T& visit, int maxDepth) {
std::set<InferenceEngine::CNNLayer*> visited;
std::list<InferenceEngine::CNNLayerPtr> nextLayers;
nextLayers.push_back(layer);
int layersOnLevel = 1;
for (; !nextLayers.empty() && maxDepth != 0;) {
visit(*nextLayers.begin());
for (auto& od : (*nextLayers.begin())->outData) {
for (auto nl : getInputTo(od)) {
if (visited.find(nl.second.get()) == visited.end()) {
nextLayers.push_back(nl.second);
visited.insert(nl.second.get());
}
}
}
nextLayers.pop_front();
// move to nextLayer
if (!--layersOnLevel) {
layersOnLevel = nextLayers.size();
maxDepth--;
}
}
}
} // namespace details
/**
* Generic DFS algorithm traverser
* @param layer - starting layer
* @param visit - callback to be called upon visiting
 * @param visitBefore - indicates whether the callback is invoked before all child nodes or after
*/
template <class T, class Ordering = std::function<details::OutInfoWrapper(CNNLayer*)>>
inline bool CNNNetDFS(const InferenceEngine::CNNLayerPtr& layer,
const T& visit,
bool visitBefore = true,
const Ordering& order = &details::default_order) {
if (layer == nullptr) {
return true;
}
std::unordered_map<CNNLayer*, bool> visited;
return details::DFS(visited, layer, visit, visitBefore, order);
}
/**
* DFS algorithm with multiple starting data
 * @param heads - starting data objects
 * @param visit - callback to be called upon visiting
 * @param visitBefore - indicates whether the callback is invoked before all child nodes or after
*/
template <class T>
inline bool CNNNetForestDFS(const std::vector<DataPtr>& heads, const T& visit, bool bVisitBefore) {
std::unordered_map<CNNLayer*, bool> visited;
for (const auto& in : heads) {
for (const auto& to : getInputTo(in)) {
if (visited.find(to.second.get()) != visited.end())
continue;
if (!details::DFS(visited, to.second, visit, bVisitBefore)) {
return false;
}
}
}
return true;
}
/**
* DFS algorithm with multiple starting nodes
 * @param heads - starting layers
 * @param visit - callback to be called upon visiting
 * @param visitBefore - indicates whether the callback is invoked before all child nodes or after
*/
template <class Forest, class T>
inline bool CNNNetForestDFS(const Forest& heads, const T& visit, bool bVisitBefore) {
if (heads.empty()) {
return true;
}
std::unordered_map<CNNLayer*, bool> visited;
for (auto& layer : heads) {
if (!details::DFS(visited, layer, visit, bVisitBefore)) {
return false;
}
}
return true;
}
/**
* DFS algorithm with multiple starting nodes
 * @param heads - starting layers
 * @param visit - callback to be called upon visiting
 * @param visitBefore - indicates whether the callback is invoked before all child nodes or after
 * @param order - callback that defines the traversal order of output layers
*/
template <class Ordering, class Forest, class T>
inline bool CNNNetForestDFS(const Forest& heads, const T& visit, bool bVisitBefore, const Ordering& order) {
if (heads.empty()) {
return true;
}
std::unordered_map<CNNLayer*, bool> visited;
for (auto& layer : heads) {
if (!details::DFS(visited, layer, visit, bVisitBefore, order)) {
return false;
}
}
return true;
}
/**
* Generic BFS algorithm traverser
* @param layer - starting layer
* @param visit - callback to be called upon visiting
*/
template <class T>
inline void CNNNetBFS(const InferenceEngine::CNNLayerPtr& layer, const T& visit) {
if (!layer) {
return;
}
details::BFS(layer, visit, -1);
}
/**
 * @brief checks whether the layer has a creator (previous) layer at the given input index
 * @param layer - layer to check
 * @param idx - index in the previous layer collection
*/
inline bool CNNNetHasPrevLayer(const InferenceEngine::CNNLayer* layer, int idx = 0) {
IE_ASSERT(layer != nullptr);
if (layer->insData.empty() || static_cast<int>(layer->insData.size()) <= idx) {
return false;
}
auto prevData = layer->insData[idx].lock();
return !!getCreatorLayer(prevData).lock();
}
/**
* @brief to allow storing of LayersSP in collections ordered by names
*/
class LayerNameLess {
public:
bool operator()(const CNNLayerPtr& lhs, const CNNLayerPtr& rhs) const {
return std::less<std::string>()(lhs->name, rhs->name);
}
};
using CNNLayerSet = std::set<CNNLayerPtr, LayerNameLess>;
/**
* @brief returns all layers that are input or memory
* @param network
* @return set of input layers
*/
inline CNNLayerSet CNNNetGetAllInputLayers(const CNNNetwork& network) {
InputsDataMap inputs = network.getInputsInfo();
OutputsDataMap outputs = network.getOutputsInfo();
std::vector<DataPtr> entryDataSet;
entryDataSet.reserve(inputs.size() + outputs.size());
for (const auto& kvp : inputs)
entryDataSet.push_back(kvp.second->getInputData());
for (const auto& kvp : outputs)
entryDataSet.push_back(kvp.second);
CNNLayerSet inputLayers;
std::unordered_set<CNNLayer*> allLayers;
if (entryDataSet.empty())
return inputLayers;
// define any layer connected to provided Data object (consumer or creator)
auto findConnectedLayer = [](const DataPtr& data) -> CNNLayerPtr {
auto consumerLayers = getInputTo(data);
if (!consumerLayers.empty())
return consumerLayers.begin()->second;
auto creator = getCreatorLayer(data).lock();
if (creator != nullptr)
return creator;
return nullptr;
};
for (const auto& data : entryDataSet) {
auto entryLayer = findConnectedLayer(data);
if (entryLayer == nullptr)
continue;
details::UnorderedDFS(
allLayers,
entryLayer,
[&inputLayers](const CNNLayerPtr& layer) {
if (layer->insData.empty()) {
inputLayers.insert(layer);
}
},
false);
}
return inputLayers;
}
inline CNNLayerSet CNNNetGetAllInputLayers(ICNNNetwork* network) {
std::shared_ptr<ICNNNetwork> pointer(network, [](ICNNNetwork* p) {});
return CNNNetGetAllInputLayers(CNNNetwork(pointer));
}
/**
 * @brief returns all layers that are input or memory; the search is started from an arbitrary location in the network
 * @param layer start layer
* @return set of input layers
*/
inline CNNLayerSet CNNNetGetAllInputLayers(CNNLayer* layer) {
CNNLayerSet inputLayers;
std::unordered_set<CNNLayer*> allLayers;
CNNLayerPtr layerPtr(layer, [](CNNLayer*) {});
details::UnorderedDFS(
allLayers,
layerPtr,
[&](CNNLayerPtr layer) {
if (layer->insData.empty()) {
inputLayers.insert(layer);
}
},
false);
return inputLayers;
}
/**
 * @brief Sorts the CNNNetwork graph in topological order, using a custom ordering when walking among child nodes
* @param network - input CNNNetwork
* @param ordering - callback that returns output layers for given CNNLayer pointer, see default_order function
* @return sorted CNNNetwork layers
*/
template <class LayerOrdering>
std::vector<CNNLayerPtr> CNNNetSortTopologicallyEx(const CNNNetwork& network, LayerOrdering ordering) {
std::vector<CNNLayerPtr> stackOfVisited;
bool res = CNNNetForestDFS(
CNNNetGetAllInputLayers(network),
[&](CNNLayerPtr current) {
stackOfVisited.push_back(current);
},
false,
ordering);
if (!res) {
IE_THROW() << "Sorting not possible, due to existed loop.";
}
std::reverse(std::begin(stackOfVisited), std::end(stackOfVisited));
return stackOfVisited;
}
using CNNNetPtr = std::shared_ptr<ICNNNetwork>;
using CNNNetCPtr = std::shared_ptr<const ICNNNetwork>;
/**
 * @brief deep copy of the entire network structure using a custom copier for layers
* @param input - source network
* @param cp - custom copier object, ex: [](CNNLayerPtr lp) { return injectData<EmptyStruct>(lp); }
* @return copied network
*/
template <class Copier>
inline CNNNetwork CNNNetCopy(const CNNNetwork& input, const Copier& cp) {
auto net = std::make_shared<details::CNNNetworkImpl>();
net->setName(input.getName());
// rest info is layer dependent so have to create graph clone
std::unordered_map<CNNLayer*, CNNLayerPtr> oldToNewLayers;
auto starters = CNNNetGetAllInputLayers(input);
// 1st pass node creation
bool res = CNNNetForestDFS(
starters,
[&](CNNLayerPtr current) {
auto newLayer = cp(current);
oldToNewLayers[current.get()] = newLayer;
net->addLayer(newLayer);
},
true);
if (!res) {
IE_THROW() << "Copying of network not possible, due to existed loop.";
}
// internal utility to locate out data idx in layer
auto findOutDataIdx = [&](DataPtr sourceData) {
int dataIdx = -1;
auto sourceLayer = getCreatorLayer(sourceData).lock();
if (!sourceLayer) {
IE_THROW() << "Data " << sourceData->getName() << " has no creator layer";
}
for (size_t j = 0; j < sourceLayer->outData.size(); j++) {
if (sourceData.get() == sourceLayer->outData[j].get()) {
dataIdx = static_cast<int>(j);
break;
}
}
IE_ASSERT(dataIdx != -1);
return dataIdx;
};
// compares data, for copied network and in old network
auto areEqualDatas = [&](DataPtr source, DataPtr target) {
if (source.get() == target.get()) {
return true;
}
// dims comparison -
// actual dims values might be incorrect due to a synthetic case,
// when getBatchSize() returns a value not reflected in the actual data
if (source->getTensorDesc().getDims().size() != target->getTensorDesc().getDims().size()) {
return false;
}
// name comparison
if (source->getName() != target->getName()) {
return false;
}
// inputTo layers are identical by design
return true;
};
// internal utility to locate input data idx in layer
auto findInsDataIdx = [&](DataPtr sourceData, CNNLayerPtr layer) {
int dataIdx = -1;
auto sourceLayerMap = getInputTo(sourceData);
for (auto& layersMapping : sourceLayerMap) {
if (layersMapping.second.get() != layer.get()) {
continue;
}
for (size_t j = 0; j < layer->insData.size(); j++) {
if (areEqualDatas(layer->insData[j].lock(), sourceData)) {
dataIdx = static_cast<int>(j);
}
}
if (dataIdx != -1) {
break;
}
}
IE_ASSERT(dataIdx != -1);
return dataIdx;
};
// 2nd pass edges creation
CNNNetForestDFS(
starters,
[&](CNNLayerPtr current) {
auto newLayer = oldToNewLayers[current.get()];
// remap output data
for (size_t i = 0; i != current->outData.size(); i++) {
getCreatorLayer(newLayer->outData[i]) = CNNLayerWeakPtr(newLayer);
// transfer data info for getData routine
net->getData(newLayer->outData[i]->getName()) = newLayer->outData[i];
for (auto inputTo = std::begin(getInputTo(newLayer->outData[i]));
inputTo != std::end(getInputTo(newLayer->outData[i]));
inputTo++) {
inputTo->second = oldToNewLayers[inputTo->second.get()];
}
}
// remap input data
for (size_t i = 0; i != current->insData.size(); i++) {
// found that data IDX
auto sourceData = current->insData[i].lock();
auto sourceLayer = getCreatorLayer(sourceData).lock();
if (!sourceLayer) {
IE_THROW() << "Data " << sourceData->getName() << " has no creator layer";
}
// find insData Entry in outData of sourceLayer
newLayer->insData[i] = oldToNewLayers[sourceLayer.get()]->outData[findOutDataIdx(sourceData)];
}
},
true);
// transfer input info
InputsDataMap inputsInfo = input.getInputsInfo();
std::set<DataPtr> insDatas;
for (auto&& info : inputsInfo) {
for (const auto& secondLayer : getInputTo(info.second->getInputData())) {
auto secondLayerNew = oldToNewLayers[secondLayer.second.get()];
InputInfo::Ptr infoNew = std::make_shared<InputInfo>();
infoNew->setInputData(
secondLayerNew->insData[findInsDataIdx(info.second->getInputData(), secondLayer.second)].lock());
infoNew->getPreProcess() = info.second->getPreProcess();
net->setInputInfo(infoNew);
}
}
// transfer output info
OutputsDataMap outmap = input.getOutputsInfo();
for (auto&& data : outmap) {
ResponseDesc dsc;
if (OK != net->addOutput(getCreatorLayer(data.second).lock()->name, findOutDataIdx(data.second), &dsc)) {
IE_THROW() << dsc.msg;
}
}
ResponseDesc dsc;
// transfer batch size
if (OK != net->setBatchSize(input.getBatchSize(), &dsc)) {
IE_THROW() << dsc.msg;
}
return CNNNetwork(net);
}
/**
* @brief deep copy of the entire network
* @param input
* @return
*/
inline CNNNetwork CNNNetCopy(const CNNNetwork& input) {
struct EmptyStruct {};
auto copier = [](CNNLayerPtr lp) {
return injectData<EmptyStruct>(lp);
};
return InferenceEngine::CNNNetCopy(input, copier);
}
namespace details {
/**
 * The structure wraps a network as lists of input and output data objects.
 * Each layer of the network is reachable by DFS started from the inputs.
 *
 * NB! The input collection may contain a "fake" data object which is not a
 * real input to the network, but just a holder that keeps "const" and "memory"
 * layers alive. Fake data objects point to layers with an empty creator field.
 * The fake data object always has the "UNSPECIFIED" precision attribute.
*/
struct CNNSubnet {
std::vector<DataPtr> inputs;
std::vector<DataPtr> outputs;
};
/**
 * @brief Detects all input data objects, not only those provided as entry points.
 * @param heads collection of some inputs into the graph
 * @return all input data objects including "fake" data (layer holders).
*/
inline std::vector<DataPtr> CNNSubnetGetAllInputs(const std::vector<DataPtr>& heads) {
CNNLayerSet inputLayers;
std::unordered_set<CNNLayer*> allLayers;
// Define all start layers
for (const auto& data : heads) {
auto& secondLayers = getInputTo(data);
if (secondLayers.empty())
continue;
details::UnorderedDFS(
allLayers,
secondLayers.begin()->second,
[&](CNNLayerPtr layer) {
if (layer->insData.empty()) {
inputLayers.insert(layer);
}
},
false);
}
std::vector<DataPtr> res = heads;
// Add fake input data pointing to layers that are not reachable
// from the heads (like const placeholders)
for (auto& starter : inputLayers) {
DataPtr holder(new Data(starter->name + ":input_holder", starter->precision));
getInputTo(holder)[starter->name] = starter;
res.push_back(holder);
}
return res;
}
/**
 * @brief Sorts the CNNSubnet graph representation in topological order
* @param subnet input object
* @return layer collection sorted in topological order
*/
inline std::vector<CNNLayerPtr> CNNSubnetSortTopologically(const CNNSubnet& subnet) {
std::vector<CNNLayerPtr> stackOfVisited;
bool res = CNNNetForestDFS(
CNNSubnetGetAllInputs(subnet.inputs),
[&](CNNLayerPtr current) {
stackOfVisited.push_back(current);
},
false);
if (!res) {
IE_THROW() << "Sorting not possible, due to existed loop.";
}
std::reverse(stackOfVisited.begin(), stackOfVisited.end());
return stackOfVisited;
}
} // namespace details
} // namespace InferenceEngine
IE_SUPPRESS_DEPRECATED_END
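A hedged sketch combining a few of the traversal helpers above: collect the input layers, run a post-order DFS from the first one, and make a deep copy of the graph. The function name is illustrative.

#include <iostream>

inline void traverseAndCopy(const InferenceEngine::CNNNetwork& network) {
    using namespace InferenceEngine;
    IE_SUPPRESS_DEPRECATED_START
    // All layers without input data (Input/Const/Memory holders).
    CNNLayerSet inputs = CNNNetGetAllInputLayers(network);
    if (!inputs.empty()) {
        // Visit every layer reachable from the first input; the callback fires after children (post-order).
        CNNNetDFS(
            *inputs.begin(),
            [](const CNNLayerPtr& layer) {
                std::cout << layer->name << std::endl;
            },
            /*visitBefore=*/false);
    }
    // Deep copy of the whole graph; blobs inside layers are shared by the default copier.
    CNNNetwork copy = CNNNetCopy(network);
    (void)copy;
    IE_SUPPRESS_DEPRECATED_END
}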

View File

@ -1,82 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief a header file with common functions for graph transformation
* @file graph_transformer.h
*/
#pragma once
#include <caseless.hpp>
#include <legacy/cnn_network_impl.hpp>
#include <map>
#include <string>
#include <vector>
namespace InferenceEngine {
/**
 * @brief Transformation that detects constant subgraphs of a legacy network and folds them into constant layers
*/
class ConstTransformer {
public:
explicit ConstTransformer(details::CNNNetworkImpl* _network);
/**
* @brief calculates const layers, combines const subgraph into a single const layers
*/
void foldConstSubgraphs();
/**
* @brief folds Const Subgraphs and removes second input of Reshape-like layers (Interp, Gather, Resample, ...)
*/
void fullTrim();
protected:
ConstTransformer(std::vector<DataPtr>& _inputs, std::vector<DataPtr>& _outputs);
/**
 * @brief collects all const layers, marking whether each one defines a shape (true for shape, false otherwise)
*/
virtual const std::map<std::string, bool> getConstLayers(const std::vector<CNNLayerPtr>& sortedLayers);
/**
* @brief TBD
*/
virtual const BlobMap getConstData(const std::map<std::string, bool>& constLayers,
const std::vector<CNNLayerPtr>& sortedLayers);
/**
* @brief TBD
*/
virtual std::vector<CNNLayerPtr> foldConstSubgraphsInternal(const std::map<std::string, bool>& constLayers,
const BlobMap& constData,
const std::vector<CNNLayerPtr>& sortedLayers);
/**
* @brief TBD
*/
virtual void trimShapeInputs(const std::vector<CNNLayerPtr>& constLayers, std::vector<CNNLayerPtr>& allLayers);
/**
* @brief TBD
*/
void cleanup();
private:
const details::caseless_set<std::string> shapeTaking = {"Reshape", "Resample", "Interp", "Squeeze", "Unsqueeze"};
details::CNNNetworkImpl* network;
std::vector<DataPtr> inputs;
std::vector<DataPtr> outputs;
/** data/layer collections to restore a valid state of the network if it was specified */
std::vector<DataPtr> data_to_remove;
std::vector<DataPtr> data_to_add;
std::vector<CNNLayerPtr> layer_to_remove;
std::vector<CNNLayerPtr> layer_to_add;
};
} // namespace InferenceEngine
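A hedged sketch of running the transformer on a legacy network implementation; CNNNetworkImpl comes from legacy/cnn_network_impl.hpp shown earlier, and the function name is illustrative.

inline void foldConstants(InferenceEngine::details::CNNNetworkImpl* legacyNetwork) {
    InferenceEngine::ConstTransformer transformer(legacyNetwork);
    // Fold constant subgraphs into single const layers.
    transformer.foldConstSubgraphs();
    // Alternatively, fullTrim() also drops shape-defining second inputs
    // of Reshape-like layers (Interp, Gather, Resample, ...):
    // transformer.fullTrim();
}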

View File

@ -1,49 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <xml_parse_utils.h>
#include <array>
#include <fstream>
namespace InferenceEngine {
namespace details {
inline size_t get_ir_version(pugi::xml_node& root) {
return pugixml::utils::GetUIntAttr(root, "version", 0);
}
/**
* @brief Extracts IR version from model stream
 * @param model Model stream
 * @return IR version, or 0 if the model does not represent an IR
*/
inline size_t get_ir_version(std::istream& model) {
std::array<char, 512> header = {};
model.seekg(0, model.beg);
model.read(header.data(), header.size());
model.clear();
model.seekg(0, model.beg);
pugi::xml_document doc;
auto res =
doc.load_buffer(header.data(), header.size(), pugi::parse_default | pugi::parse_fragment, pugi::encoding_utf8);
if (res == pugi::status_ok) {
pugi::xml_node root = doc.document_element();
std::string node_name = root.name();
std::transform(node_name.begin(), node_name.end(), node_name.begin(), ::tolower);
if (node_name == "net") {
return get_ir_version(root);
}
}
return 0;
}
} // namespace details
} // namespace InferenceEngine
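A small sketch of probing a model file for its IR version; a return value of 0 means the stream is not an OpenVINO IR. The function name is illustrative.

#include <fstream>
#include <string>

inline bool isIrV10OrNewer(const std::string& modelPath) {
    std::ifstream model(modelPath, std::ios::binary);
    if (!model)
        return false;
    const size_t version = InferenceEngine::details::get_ir_version(model);
    return version >= 10;
}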

File diff suppressed because it is too large

View File

@ -1,74 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <tuple>
#include "legacy/ie_util_internal.hpp"
namespace InferenceEngine {
class Paddings {
public:
PropertyVector<unsigned int> begin;
PropertyVector<unsigned int> end;
};
/**
* @brief gets padding with runtime type check
*/
Paddings getPaddingsImpl(const CNNLayer& layer);
/**
 * @brief checks that the given type is one of those specified in the variadic template list
* @tparam ...
*/
template <typename...>
struct is_one_of {
static constexpr bool value = false;
};
/**
 * @brief checks that the given type is one of those specified in the variadic template list
* @tparam ...
*/
template <typename F, typename S, typename... T>
struct is_one_of<F, S, T...> {
static constexpr bool value = std::is_same<F, S>::value || is_one_of<F, T...>::value;
};
IE_SUPPRESS_DEPRECATED_START
/**
 * @brief gets padding for layer types restricted at compile time
*/
template <class T>
inline typename std::enable_if<is_one_of<T,
DeformableConvolutionLayer,
DeconvolutionLayer,
ConvolutionLayer,
BinaryConvolutionLayer,
PoolingLayer>::value,
Paddings>::type
getPaddings(const T& layer) {
return getPaddingsImpl(layer);
}
/*********************************************
* TensorIterator Helpers section
*********************************************/
/**
 * @brief Calculates the number of iterations required for the provided TI layer
*
* @param ti TensorIterator layer to parse
* @return positive value in case of correct TI layer, -1 in case of inconsistency
*/
int getNumIteration(const TensorIterator& ti);
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine
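A hedged sketch of querying paddings for a 2D convolution layer with the helper above; indices 0 and 1 of the property vectors correspond to the X and Y axes, and the function name is illustrative.

#include <iostream>

IE_SUPPRESS_DEPRECATED_START
inline void printConvPaddings(const InferenceEngine::ConvolutionLayer& conv) {
    InferenceEngine::Paddings pads = InferenceEngine::getPaddings(conv);
    std::cout << "pad begin x/y: " << pads.begin[0] << "/" << pads.begin[1]
              << ", pad end x/y: " << pads.end[0] << "/" << pads.end[1] << std::endl;
}
IE_SUPPRESS_DEPRECATED_END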

View File

@ -1,142 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
/**
* @brief a header file for describing property style structure used by CNNLayers
*
* @file ie_layers_property.hpp
*/
#pragma once
#include <ie_common.h>
#include <vector>
namespace InferenceEngine {
constexpr const int MAX_DIMS_NUMBER = 12;
enum eDIMS_AXIS : uint8_t { X_AXIS = 0, Y_AXIS, Z_AXIS };
template <class T, int N = MAX_DIMS_NUMBER>
class PropertyVector {
T _axises[N] = {};
bool _allocated[N] = {};
size_t _length = 0;
public:
PropertyVector() = default;
PropertyVector(size_t len, T val) {
if (len > N) {
IE_THROW() << "Property size exceeed limit of: " << N;
}
for (size_t i = 0; i < len; i++) {
_axises[i] = val;
_allocated[i] = true;
}
_length = len;
}
explicit PropertyVector(const std::vector<T>& values) {
size_t i = 0;
for (const auto val : values) {
insert(i++, val);
}
}
PropertyVector(std::initializer_list<int> init_list) {
size_t i = 0;
for (const auto val : init_list) {
insert(i++, val);
}
}
/**
 * @brief allows access up to the capacity size
*
* @param index
* @return
*/
T& at(int index) {
if (index >= N) {
IE_THROW() << "Property index is out of bounds (" << index << "/" << N;
}
return _axises[index];
}
const T& operator[](size_t index) const {
if (index >= N || !_allocated[index]) {
IE_THROW() << "Property index (" << index << ") is out of bounds";
}
return _axises[index];
}
T& operator[](size_t index) {
if (index >= N || !_allocated[index]) {
IE_THROW() << "Property index (" << index << ") is out of bounds";
}
return _axises[index];
}
PropertyVector& operator=(const PropertyVector& src) {
if (this != &src) {
_length = src.size();
for (size_t i = 0; i < N; i++) {
_allocated[i] = src._allocated[i];
if (_allocated[i]) {
_axises[i] = src[i];
}
}
}
return *this;
}
bool operator==(const PropertyVector& src) const {
if (this == &src)
return true;
if (_length != src.size())
return false;
for (size_t i = 0; i < N; i++)
if ((_allocated[i] != src._allocated[i]) || (_allocated[i] && _axises[i] != src._axises[i]))
return false;
return true;
}
size_t size() const {
return _length;
}
void insert(size_t axis, const T& val) {
if (axis < N) {
if (!_allocated[axis]) {
_allocated[axis] = true;
_length++;
}
_axises[axis] = val;
} else {
IE_THROW() << "Layer Property insertion at(axis) should be in [0," << N << ")";
}
}
void remove(size_t axis) {
if (axis < N && _allocated[axis]) {
_allocated[axis] = false;
_length--;
}
}
void clear() {
for (int i = 0; i != N; i++) {
_allocated[i] = 0;
}
_length = 0u;
}
bool exist(size_t axis) const {
return (axis < N && _allocated[axis]);
}
};
} // namespace InferenceEngine
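A minimal sketch of how the fixed-capacity property vector above behaves; the values and function name are arbitrary.

#include <iostream>

inline void propertyVectorExample() {
    using namespace InferenceEngine;
    PropertyVector<unsigned int> strides(2, 1u);  // two axes, both initialized to 1
    strides.insert(Y_AXIS, 2u);                   // overwrite the Y axis value
    if (strides.exist(X_AXIS)) {
        std::cout << "stride x = " << strides[X_AXIS]
                  << ", stride y = " << strides[Y_AXIS]
                  << ", size = " << strides.size() << std::endl;
    }
}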

View File

@ -1,65 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp/ie_cnn_network.h>
#include <ie_api.h>
#include <ie_iextension.h>
#include <istream>
#include <string>
#include <vector>
namespace InferenceEngine {
/**
 * @brief IReader is an abstract interface for Inference Engine readers
*/
class IReader : public std::enable_shared_from_this<IReader> {
public:
/**
* @brief Checks that reader supports format of the model
* @param model stream with model
* @return true if format is supported
*/
virtual bool supportModel(std::istream& model) const = 0;
/**
* @brief Reads the model to CNNNetwork
* @param model stream with model
* @param exts vector with extensions
*
* @return CNNNetwork
*/
virtual CNNNetwork read(std::istream& model, const std::vector<IExtensionPtr>& exts) const = 0;
/**
* @brief Reads the model to CNNNetwork
* @param model stream with model
* @param weights stream with binary data
* @param exts vector with extensions
*
* @return CNNNetwork
*/
virtual CNNNetwork read(std::istream& model,
const Blob::CPtr& weights,
const std::vector<IExtensionPtr>& exts) const = 0;
/**
* @brief Returns all supported extensions for data files
*
* @return vector of file extensions, for example the reader for OpenVINO IR returns {"bin"}
*/
virtual std::vector<std::string> getDataFileExtensions() const = 0;
protected:
virtual ~IReader() = default;
};
/**
* @brief Creates the default instance of the reader
 * @param reader output parameter that receives the created reader instance
*/
INFERENCE_PLUGIN_API(void) CreateReader(std::shared_ptr<IReader>& reader);
} // namespace InferenceEngine

View File

@ -1,80 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cpp/ie_cnn_network.h>
#include <deque>
#include <functional>
#include <legacy/cnn_network_impl.hpp>
#include <legacy/details/ie_cnn_network_iterator.hpp>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_set>
#include <utility>
#include <vector>
namespace InferenceEngine {
/**
* @brief Creates data object copy unconnected to any graph
* @param source - source data object
* @return Shared pointer to new data object
*/
DataPtr cloneData(const Data& source);
IE_SUPPRESS_DEPRECATED_START
/**
 * @brief Creates a layer object copy, unconnected to any graph
* @param source - source layer object
* @return Shared pointer to new layer object
*/
CNNLayerPtr clonelayer(const CNNLayer& source);
/**
 * @brief Clones a selected set of nodes into a separate network;
 * only connections between the passed nodes are duplicated
 *
 * @param layers Layers to clone; must all be in the same network
*
* @return Cloned network
*/
InferenceEngine::details::CNNNetworkImplPtr cloneNet(const std::vector<InferenceEngine::CNNLayerPtr>& layers);
IE_SUPPRESS_DEPRECATED_END
/**
* @brief Clones the whole network without conversion to CNNNetworkImpl. All layers and data objects will be cloned
* @note Blobs inside layers are reused
* @param network A network to clone
* @return A cloned object
*/
InferenceEngine::CNNNetwork cloneNetwork(const InferenceEngine::CNNNetwork& network);
/**
* @brief Clones the whole network. All layers and data objects will be cloned
* @note Blobs inside layers are reused
* @param network A network to clone
* @return A cloned object
*/
InferenceEngine::details::CNNNetworkImplPtr cloneNet(const InferenceEngine::CNNNetwork& network);
using ordered_properties = std::vector<std::pair<std::string, std::string>>;
using printer_callback =
std::function<void(const InferenceEngine::CNNLayerPtr, ordered_properties&, ordered_properties&)>;
/**
* @brief Visualize network in GraphViz (.dot) format and write to output stream
*
* @param network - graph to visualize
* @param out - output stream for saving graph
 * @param layer_cb - callback function that is called for every printed layer node
*/
void saveGraphToDot(const InferenceEngine::CNNNetwork& network, std::ostream& out, printer_callback layer_cb = nullptr);
} // namespace InferenceEngine
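A hedged sketch that clones a network and dumps its topology in GraphViz format using the helpers above; the function name is illustrative.

#include <fstream>
#include <string>

inline void dumpTopology(const InferenceEngine::CNNNetwork& network, const std::string& dotPath) {
    // The clone keeps the graph structure; blobs inside layers are reused.
    InferenceEngine::CNNNetwork copy = InferenceEngine::cloneNetwork(network);
    std::ofstream out(dotPath);
    // The optional callback can append extra printed properties; nullptr keeps the defaults.
    InferenceEngine::saveGraphToDot(copy, out, nullptr);
}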

View File

@ -1,227 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <legacy/ie_layers.h>
#include <memory>
#include <tuple>
#include <utility>
namespace InferenceEngine {
IE_SUPPRESS_DEPRECATED_START
namespace details {
template <class T, class InjectType>
class LayerInjector : public T {
public:
InjectType injected;
explicit LayerInjector(const T& base) : T(base) {}
};
using AllLayers = std::tuple<SelectLayer*,
DeformableConvolutionLayer*,
DeconvolutionLayer*,
ConvolutionLayer*,
TopKLayer*,
PoolingLayer*,
FullyConnectedLayer*,
GemmLayer*,
PadLayer*,
GatherLayer*,
StridedSliceLayer*,
ShuffleChannelsLayer*,
DepthToSpaceLayer*,
SpaceToDepthLayer*,
SparseFillEmptyRowsLayer*,
SparseSegmentReduceLayer*,
ExperimentalSparseWeightedReduceLayer*,
SparseToDenseLayer*,
BucketizeLayer*,
ReverseSequenceLayer*,
RangeLayer*,
FillLayer*,
BroadcastLayer*,
ConcatLayer*,
SplitLayer*,
NormLayer*,
SoftMaxLayer*,
GRNLayer*,
MVNLayer*,
ReLULayer*,
EltwiseLayer*,
CropLayer*,
ReshapeLayer*,
TileLayer*,
ScaleShiftLayer*,
PReLULayer*,
PowerLayer*,
BatchNormalizationLayer*,
ClampLayer*,
TensorIterator*,
LSTMCell*,
GRUCell*,
RNNCell*,
RNNSequenceLayer*,
QuantizeLayer*,
BinaryConvolutionLayer*,
WeightableLayer*,
OneHotLayer*,
MathLayer*,
ReduceLayer*,
UniqueLayer*,
NonMaxSuppressionLayer*,
ScatterUpdateLayer*,
ScatterElementsUpdateLayer*,
ExperimentalDetectronPriorGridGeneratorLayer*,
ExperimentalDetectronGenerateProposalsSingleImageLayer*,
ExperimentalDetectronTopKROIs*,
CNNLayer*>;
template <class Visitor, std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type visitActualLayer(std::tuple<Tp...>&& t,
const CNNLayer& sourceLayer,
const Visitor& v) {}
template <class Visitor, std::size_t I = 0, typename... Tp>
inline typename std::enable_if < I<sizeof...(Tp), void>::type visitActualLayer(std::tuple<Tp...>&& t,
const CNNLayer& sourceLayer,
const Visitor& visitor) {
using EType = typename std::tuple_element<I, std::tuple<Tp...>>::type;
auto casted = dynamic_cast<EType>(const_cast<CNNLayer*>(&sourceLayer));
if (casted != nullptr) {
// means no need to handle further layers
if (visitor(casted)) {
return;
}
}
visitActualLayer<Visitor, I + 1, Tp...>(std::move(t), sourceLayer, visitor);
}
template <class InjectedType, std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type injectHelper(std::tuple<Tp...>& t,
const CNNLayer& sourceLayer,
CNNLayerPtr& targetLayer,
const InjectedType& value) {}
template <class InjectedType, std::size_t I = 0, typename... Tp>
inline typename std::enable_if < I<sizeof...(Tp), void>::type injectHelper(std::tuple<Tp...>& t,
const CNNLayer& sourceLayer,
CNNLayerPtr& target,
const InjectedType& value) {
if (target) {
return;
}
using EType = typename std::tuple_element<I, std::tuple<Tp...>>::type;
auto casted = dynamic_cast<EType>(const_cast<CNNLayer*>(&sourceLayer));
if (casted != nullptr) {
auto layerWithInjectedData =
std::make_shared<LayerInjector<typename std::remove_pointer<EType>::type, InjectedType>>(*casted);
// copy outdata
for (auto&& data : layerWithInjectedData->outData) {
data = std::make_shared<Data>(*data.get());
}
layerWithInjectedData->injected = value;
target = layerWithInjectedData;
}
injectHelper<InjectedType, I + 1, Tp...>(t, sourceLayer, target, value);
}
template <class InjectedType, std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type locateInjected(std::tuple<Tp...>& t,
const CNNLayer& sourceLayer,
InjectedType*& value) {}
template <class InjectedType, std::size_t I = 0, typename... Tp>
inline typename std::enable_if < I<sizeof...(Tp), void>::type locateInjected(std::tuple<Tp...>& t,
const CNNLayer& sourceLayer,
InjectedType*& value) {
if (value != nullptr) {
return;
}
using EType = typename std::tuple_element<I, std::tuple<Tp...>>::type;
auto injectedLayer = dynamic_cast<LayerInjector<typename std::remove_pointer<EType>::type, InjectedType>*>(
const_cast<CNNLayer*>(&sourceLayer));
if (injectedLayer != nullptr) {
value = &injectedLayer->injected;
}
locateInjected<InjectedType, I + 1, Tp...>(t, sourceLayer, value);
}
} // namespace details
/**
 * @brief creates a copy of the source layer with arbitrary injected data
* @tparam InjectType data type to be injected
* @param sourceLayer
* @param value injected value
* @return newly created layer with injected data
*/
template <class InjectType>
inline CNNLayerPtr injectData(const CNNLayer& sourceLayer, const InjectType& value = InjectType()) {
details::AllLayers layers;
CNNLayerPtr targetLayer;
details::injectHelper(layers, sourceLayer, targetLayer, value);
return targetLayer;
}
template <class InjectType>
inline CNNLayerPtr injectData(CNNLayerPtr sourceLayer, const InjectType& value = InjectType()) {
return injectData(*sourceLayer.get(), value);
}
/**
 * @brief invokes the given transformer on the source layer, dispatched by the layer's concrete type
 * @tparam Transformer callable invoked with a pointer to the concrete layer type
 * @param sourceLayer layer to transform
 * @param transformer callable to apply; returning true stops further type matching
*/
template <class Transformer>
inline void transformLayer(const CNNLayer& sourceLayer, const Transformer& transformer) {
details::visitActualLayer<Transformer>(std::move(details::AllLayers()), sourceLayer, transformer);
}
template <class Transformer>
inline void transformLayer(CNNLayerPtr sourceLayer, const Transformer& transformer) {
transformLayer(*sourceLayer.get(), transformer);
}
/**
 * @brief gets a pointer to the injected data
 * @tparam InjectType
 * @param sourceLayer
 * @return pointer to the previously injected data of type InjectType, or nullptr if nothing was injected
*/
template <class InjectType>
inline InjectType* getInjectedData(const CNNLayer& sourceLayer) {
details::AllLayers layers;
InjectType* injected = nullptr;
details::locateInjected(layers, sourceLayer, injected);
return injected;
}
template <class InjectType>
inline InjectType* getInjectedData(CNNLayerPtr sourceLayer) {
return getInjectedData<InjectType>(*sourceLayer.get());
}
IE_SUPPRESS_DEPRECATED_END
} // namespace InferenceEngine
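A hedged sketch of the injection helpers above: attach a small payload to a copy of a layer, read it back, and dispatch on the concrete layer type with transformLayer. The payload struct and function name are illustrative; a visitor returning true stops further type matching.

#include <iostream>

IE_SUPPRESS_DEPRECATED_START
struct ExecOrder {
    int index;
};

inline void tagLayer(const InferenceEngine::CNNLayerPtr& layer) {
    using namespace InferenceEngine;
    // Create a copy of the layer that carries an ExecOrder payload.
    CNNLayerPtr tagged = injectData<ExecOrder>(layer, ExecOrder{7});
    // Read the payload back; nullptr is returned when nothing was injected.
    if (ExecOrder* order = getInjectedData<ExecOrder>(tagged)) {
        std::cout << "injected index: " << order->index << std::endl;
    }
    // Dispatch on the actual layer type; the lambda is instantiated for every known layer type.
    transformLayer(tagged, [](auto* concrete) {
        std::cout << "matched type: " << concrete->type << std::endl;
        return true;
    });
}
IE_SUPPRESS_DEPRECATED_END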

View File

@ -1,107 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include "cpp/ie_cnn_network.h"
#include "legacy/graph_tools.hpp"
namespace InferenceEngine {
namespace NetPass {
IE_SUPPRESS_DEPRECATED_START
/**
 * Tries to detect an LSTM Sequence pattern inside TensorIterator layers and convert them
 *
 * @param net network to modify
 * @return true if all TensorIterator layers were converted
*/
bool CombineRNNSeq(CNNNetwork& net);
bool CombineRNNSeq(TensorIterator::Body& net);
/**
* Returns a vector of the topologically sorted layers from
* the passed TI layer body.
*
* @param body TI body
* @return vector of layer objects
*/
std::vector<CNNLayerPtr> TIBodySortTopologically(const TensorIterator::Body& body);
/**
 * Checks whether the provided layer contains an internal attribute such as a subnet/subgraph
 *
 * @param layer layer to check
* @return true if layer has subnet
*/
bool HasInternalSubnet(const CNNLayerPtr& layer);
/**
* Extract internal subnet from layer
*
 * All internal layers are returned by reference. Any further modification of the subnet
 * will affect the original layer state.
 *
 * @param layer layer to process
* @return internal subnet
*/
details::CNNSubnet GetInternalSubnet(const CNNLayerPtr& layer);
/**
* Unroll all present Tensor Iterators
*
* @param net network to modify
 * @return true if all TensorIterator layers were unrolled successfully
*/
bool UnrollTI(CNNNetwork& net);
/**
* Unroll all RNN specific layers by predicate
*
* Will be applied to all RNNSeq and RNNCell layers
*
* @param net network to modify
* @param pred predicate to mark layer to unroll
 * @return true if all RNN layers were unrolled successfully
*/
bool UnrollRNN_if(CNNNetwork& net, std::function<bool(const RNNCellBase&)> pred);
/**
 * Constructs a copy of the provided TensorIterator body. Names are changed by adding a suffix if one is provided.
 *
 * @param body body to copy from
 * @param suffix optional attribute; if provided, it is added to the name of each layer/data object
 * @return copy of the body. Each layer/data object is newly created; const blob objects are inherited from
 *         the original body.
*/
TensorIterator::Body CopyTIBody(const TensorIterator::Body& body, std::string suffix = std::string());
bool UnrollRNN_if(TensorIterator::Body& net, std::function<bool(const RNNCellBase&)> pred);
IE_SUPPRESS_DEPRECATED_END
/**
* Precision conversion pass
*
 * Converts all tensors that have the specified precision, including
 * const blobs and intermediate tensors. It does not check layer semantics and may break
 * the correctness of the topology.
 *
 * It also removes redundant Convert layers if they appear.
 *
 * @param net network to apply the conversion to
 * @param from precision of the tensors that require conversion
 * @param to resulting precision of the tensors
*/
void ConvertPrecision(CNNNetwork& net, Precision from, Precision to);
void ConvertIOPrecision(CNNNetwork& net, Precision from, Precision to);
} // namespace NetPass
} // namespace InferenceEngine
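A hedged sketch of a typical legacy pre-processing sequence built from the passes above; the function name and the exact order of passes are illustrative.

inline void prepareForLegacyPlugin(InferenceEngine::CNNNetwork& network) {
    using namespace InferenceEngine;
    // Fuse TensorIterator bodies back into RNN sequence layers where possible.
    NetPass::CombineRNNSeq(network);
    // Unroll any TensorIterator layers that are still present.
    NetPass::UnrollTI(network);
    // Convert all FP16 tensors, including const blobs, to FP32.
    NetPass::ConvertPrecision(network, Precision::FP16, Precision::FP32);
}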

View File

@ -1,143 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <memory>
#include <vector>
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class ConvolutionIE : public Op {
public:
OPENVINO_OP("ConvolutionIE", "util");
/// \brief Constructs a batched convolution operation.
ConvolutionIE() = default;
/// \brief Constructs a batched convolution operation.
///
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// \param strides The strides.<br>
/// `[f]`
/// \param dilations The dilations.<br>
/// `[f]`
/// \param pads_begin The beginning of padding shape.<br>
/// `[f]`
/// \param pads_end The end of padding shape.<br>
/// `[f]`
/// \param auto_pad The pad type for automatically computing padding sizes.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
///
ConvolutionIE(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const element::Type output_type,
const size_t& group = 1,
const PadType& auto_pad = PadType::EXPLICIT);
ConvolutionIE(const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& bias,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const element::Type output_type,
const size_t& group = 1,
const PadType& auto_pad = PadType::EXPLICIT);
// KMB compilation support
ConvolutionIE(const Output<Node>& data_batch,
const Output<Node>& filters,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const size_t& group = 1,
const PadType& auto_pad = PadType::EXPLICIT);
// KMB compilation support
ConvolutionIE(const Output<Node>& data_batch,
const Output<Node>& filters,
const Output<Node>& bias,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const size_t& group = 1,
const PadType& auto_pad = PadType::EXPLICIT);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The strides.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative).
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative).
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_adding_above(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The pad type for convolution.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The groups for convolution.
const size_t& get_group() const {
return m_group;
}
void set_group(const size_t& group) {
m_group = group;
}
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
size_t m_group;
element::Type m_output_type;
};
} // namespace op
} // namespace ngraph

View File

@ -1,33 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <vector>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class CropIE : public Op {
public:
OPENVINO_OP("CropIE", "legacy");
CropIE(const Output<Node>& data1, std::vector<int64_t> axes, std::vector<int64_t> dim, std::vector<int64_t> offset);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
std::vector<int64_t> axes, dim, offset;
};
} // namespace op
} // namespace ngraph

View File

@ -1,109 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <memory>
#include <vector>
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class DeconvolutionIE : public Op {
public:
OPENVINO_OP("DeconvolutionIE", "util");
DeconvolutionIE() = default;
DeconvolutionIE(const Output<Node>& data,
const Output<Node>& filters,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const element::Type output_type,
const size_t& group = 1,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {},
const std::shared_ptr<Node>& output_shape = nullptr);
DeconvolutionIE(const Output<Node>& data,
const Output<Node>& filters,
const Output<Node>& bias,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
const CoordinateDiff& pads_end,
const element::Type output_type,
const size_t& group = 1,
const PadType& auto_pad = PadType::EXPLICIT,
const CoordinateDiff& output_padding = {},
const std::shared_ptr<Node>& output_shape = nullptr);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
/// \return The strides from the forward prop.
const Strides& get_strides() const {
return m_strides;
}
void set_strides(const Strides& strides) {
m_strides = strides;
}
/// \return The dilations from the forward prop.
const Strides& get_dilations() const {
return m_dilations;
}
void set_dilations(const Strides& dilations) {
m_dilations = dilations;
}
/// \return The padding-below sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_begin() const {
return m_pads_begin;
}
void set_pads_begin(const CoordinateDiff& pads_begin) {
m_pads_begin = pads_begin;
}
/// \return The padding-above sizes (possibly negative) from the forward prop.
const CoordinateDiff& get_pads_end() const {
return m_pads_end;
}
void set_pads_end(const CoordinateDiff& pads_end) {
m_pads_end = pads_end;
}
/// \return The auto pad.
const PadType& get_auto_pad() const {
return m_auto_pad;
}
void set_auto_pad(const PadType& auto_pad) {
m_auto_pad = auto_pad;
}
/// \return The group
const size_t& get_group() const {
return m_group;
}
void set_group(const size_t& group) {
m_group = group;
}
bool visit_attributes(AttributeVisitor& visitor) override;
protected:
Strides m_strides;
Strides m_dilations;
CoordinateDiff m_pads_begin;
CoordinateDiff m_pads_end;
PadType m_auto_pad;
size_t m_group;
CoordinateDiff m_output_padding;
std::shared_ptr<Node> m_output_shape;
element::Type m_output_type;
};
} // namespace op
} // namespace ngraph

View File

@ -1,56 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/op/op.hpp"
enum class ELTWISE_TYPE { Sum, Prod, Max, Sub, Min, Div };
namespace ngraph {
namespace op {
class Eltwise : public Op {
public:
OPENVINO_OP("Eltwise", "legacy");
Eltwise(const Output<Node>& data1,
const Output<Node>& data2,
const ELTWISE_TYPE eltwise_type,
const element::Type output_type = element::undefined);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
ELTWISE_TYPE eltwise_type;
private:
ELTWISE_TYPE type_from_string(const std::string& eltwise_type) const {
return as_enum<ELTWISE_TYPE>(eltwise_type);
}
element::Type m_output_type;
};
} // namespace op
std::ostream& operator<<(std::ostream& s, const ELTWISE_TYPE& type);
} // namespace ngraph
namespace ov {
template <>
class AttributeAdapter<ELTWISE_TYPE> : public EnumAttributeAdapterBase<ELTWISE_TYPE> {
public:
AttributeAdapter(ELTWISE_TYPE& value) : EnumAttributeAdapterBase<ELTWISE_TYPE>(value) {}
OPENVINO_RTTI("AttributeAdapter<ELTWISE_TYPE>");
};
} // namespace ov

View File

@ -1,54 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
/// \brief Operator performing Matrix Multiplication.
class FullyConnected : public Op {
public:
OPENVINO_OP("FullyConnected", "legacy");
FullyConnected() = default;
/// \brief Constructs a FullyConnected operation.
///
/// \param A Matrix A
/// \param B Matrix B
/// \param C Matrix C
FullyConnected(const Output<Node>& A,
const Output<Node>& B,
const Output<Node>& C,
const Shape& output_shape,
const element::Type output_type = element::undefined);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
size_t get_out_size() const {
return m_output_size;
}
element::Type get_output_type() const {
return m_output_type;
}
private:
size_t m_output_size = 0;
Shape m_output_shape = {};
element::Type m_output_type;
};
} // namespace op
} // namespace ngraph
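A minimal construction sketch may help clarify the A/B/C wiring documented above. It assumes, for illustration only, that this header is installed as legacy/ngraph_ops/fully_connected.hpp, that B carries weights of shape [out, in], and that C carries a bias of shape [out]; none of that is stated in this file.

#include <legacy/ngraph_ops/fully_connected.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <memory>
#include <vector>

// Hypothetical example: builds FullyConnected(A, B, C) with an explicit output shape.
inline std::shared_ptr<ngraph::op::FullyConnected> make_fc_example() {
    auto A = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128});
    auto B = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{64, 128},
                                              std::vector<float>(64 * 128, 0.0f));
    auto C = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{64},
                                              std::vector<float>(64, 0.0f));
    return std::make_shared<ngraph::op::FullyConnected>(A, B, C, ngraph::Shape{1, 64});
}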

View File

@ -1,39 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <vector>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class GatherIE : public Op {
public:
OPENVINO_OP("GatherIE", "legacy");
GatherIE() = default;
GatherIE(const Output<Node>& params, const Output<Node>& indices, int64_t axis);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
int64_t get_axis() const {
return m_axis;
}
void set_axis(int64_t axis) {
m_axis = axis;
}
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
protected:
int64_t m_axis;
};
} // namespace op
} // namespace ngraph

View File

@ -1,40 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class GatherTreeIE : public Op {
public:
OPENVINO_OP("GatherTreeIE", "legacy");
GatherTreeIE() = default;
/// \param step_ids Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
/// indices for each step
/// \param parent_idx Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH] with
/// parent beam indices
/// \param max_seq_len Tensor of shape [BATCH_SIZE] with maximum lengths for each
/// sequence in the batch
/// \param end_token Tensor of shape [MAX_TIME, BATCH_SIZE, BEAM_WIDTH]
GatherTreeIE(const Output<Node>& step_ids,
const Output<Node>& parent_idx,
const Output<Node>& max_seq_len,
const Output<Node>& end_token);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
} // namespace op
} // namespace ngraph

View File

@ -1,69 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class GRUCellIE : public Op {
public:
OPENVINO_OP("GRUCellIE", "legacy");
GRUCellIE(const Output<Node>& X,
const Output<Node>& H_t,
const Output<Node>& WR,
const Output<Node>& B,
size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool linear_before_reset);
GRUCellIE() = delete;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
std::size_t get_hidden_size() {
return m_hidden_size;
}
const std::vector<std::string>& get_activations() {
return m_activations;
}
const std::vector<float>& get_activations_alpha() {
return m_activations_alpha;
}
const std::vector<float>& get_activations_beta() {
return m_activations_beta;
}
float get_clip() {
return m_clip;
}
bool get_linear_before_reset() const {
return m_linear_before_reset;
}
bool visit_attributes(AttributeVisitor& visitor) override;
protected:
int64_t m_hidden_size{};
std::vector<std::string> m_activations;
std::vector<float> m_activations_alpha;
std::vector<float> m_activations_beta;
float m_clip;
bool m_linear_before_reset;
};
} // namespace op
} // namespace ngraph

View File

@ -1,71 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/op/op.hpp"
#include "ngraph/opsets/opset4.hpp"
namespace ngraph {
namespace op {
class GRUSequenceIE : public ngraph::op::util::RNNCellBase {
public:
OPENVINO_OP("GRUSequenceIE", "legacy");
GRUSequenceIE(const Output<Node>& X,
const Output<Node>& H_t,
const Output<Node>& seg_lengths,
const Output<Node>& WR,
const Output<Node>& B,
size_t hidden_size,
op::RecurrentSequenceDirection direction,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
bool linear_before_reset,
int64_t seq_axis = 1);
GRUSequenceIE() = delete;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
std::size_t get_hidden_size() {
return m_hidden_size;
}
const std::vector<std::string>& get_activations() {
return m_activations;
}
const std::vector<float>& get_activations_alpha() {
return m_activations_alpha;
}
const std::vector<float>& get_activations_beta() {
return m_activations_beta;
}
float get_clip() {
return m_clip;
}
bool visit_attributes(AttributeVisitor& visitor) override;
protected:
op::RecurrentSequenceDirection m_direction;
bool m_linear_before_reset;
int64_t m_seq_axis;
};
} // namespace op
} // namespace ngraph

View File

@ -1,48 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class HardSigmoid_IE : public Op {
public:
OPENVINO_OP("HardSigmoid_IE", "legacy");
HardSigmoid_IE() = default;
HardSigmoid_IE(const Output<Node>& arg, float alpha, float beta);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
float get_alpha() const {
return m_alpha;
}
void set_alpha(float alpha) {
m_alpha = alpha;
}
float get_beta() const {
return m_beta;
}
void set_beta(float beta) {
m_beta = beta;
}
protected:
float m_alpha;
float m_beta;
};
} // namespace op
} // namespace ngraph

View File

@ -1,80 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <limits>
#include <memory>
#include <string>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
struct InterpolateIEAttrs {
int height = -1;
int width = -1;
float zoom_factor = 0;
float shrink_factor = 0;
float scale_factor = 1.0;
bool align_corners = true;
bool antialias = true;
std::string mode = "";
int pad_beg = 0;
int pad_end = 0;
};
class Interp : public Op {
public:
OPENVINO_OP("Interp", "legacy");
Interp(const Output<Node>& image, const InterpolateIEAttrs& attrs);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
InterpolateIEAttrs get_attrs() {
return m_attrs;
}
private:
InterpolateIEAttrs m_attrs;
};
struct ResampleIEAttrs {
bool antialias = true;
int64_t factor = 0;
std::string mode = "";
};
class ResampleV2 : public Op {
public:
OPENVINO_OP("ResampleV2", "legacy");
ResampleV2(const Output<Node>& image, const Output<Node>& output_shape, const ResampleIEAttrs& attrs);
ResampleV2(const Output<Node>& image, const ResampleIEAttrs& attrs);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
ResampleIEAttrs get_attrs() {
return m_attrs;
}
private:
ResampleIEAttrs m_attrs;
};
} // namespace op
} // namespace ngraph

View File

@ -1,69 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class LRN_IE : public Op {
public:
OPENVINO_OP("LRN_IE", "legacy");
LRN_IE() = default;
LRN_IE(const Output<Node>& arg, double alpha, double beta, double bias, size_t size, std::string region);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
double get_alpha() const {
return m_alpha;
}
void set_alpha(double alpha) {
m_alpha = alpha;
}
double get_beta() const {
return m_beta;
}
void set_beta(double beta) {
m_beta = beta;
}
double get_bias() const {
return m_bias;
}
void set_bias(double bias) {
m_bias = bias;
}
size_t get_nsize() const {
return m_size;
}
void set_nsize(size_t size) {
m_size = size;
}
std::string get_region() const {
return m_region;
}
void set_region(std::string region) {
m_region = region;
}
protected:
double m_alpha;
double m_beta;
double m_bias;
size_t m_size;
std::string m_region;
};
} // namespace op
} // namespace ngraph

View File

@ -1,65 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class LSTMCellIE : public Op {
public:
OPENVINO_OP("LSTMCellIE", "legacy");
LSTMCellIE(const Output<Node>& X,
const Output<Node>& H_t,
const Output<Node>& C_t,
const Output<Node>& WR,
const Output<Node>& B,
size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip);
LSTMCellIE() = delete;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
std::size_t get_hidden_size() {
return m_hidden_size;
}
const std::vector<std::string>& get_activations() {
return m_activations;
}
const std::vector<float>& get_activations_alpha() {
return m_activations_alpha;
}
const std::vector<float>& get_activations_beta() {
return m_activations_beta;
}
float get_clip() {
return m_clip;
}
bool visit_attributes(AttributeVisitor& visitor) override;
protected:
int64_t m_hidden_size{};
std::vector<std::string> m_activations;
std::vector<float> m_activations_alpha;
std::vector<float> m_activations_beta;
float m_clip;
};
} // namespace op
} // namespace ngraph

View File

@ -1,53 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/op/op.hpp"
#include "ngraph/opsets/opset4.hpp"
namespace ngraph {
namespace op {
class LSTMSequenceIE : public ngraph::op::util::RNNCellBase {
public:
OPENVINO_OP("LSTMSequenceIE", "legacy");
LSTMSequenceIE() = delete;
LSTMSequenceIE(const Output<Node>& X,
const Output<Node>& H_t,
const Output<Node>& C_t,
const Output<Node>& seq_lengths,
const Output<Node>& WR,
const Output<Node>& B,
size_t hidden_size,
ngraph::op::RecurrentSequenceDirection lstm_direction,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
int64_t seq_len = 1);
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
ngraph::op::RecurrentSequenceDirection get_direction() {
return m_direction;
}
bool visit_attributes(AttributeVisitor& visitor) override;
protected:
ngraph::op::RecurrentSequenceDirection m_direction;
int64_t m_seq_axis;
};
} // namespace op
} // namespace ngraph

View File

@ -1,101 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class NonMaxSuppressionIE;
class NonMaxSuppressionIE2;
class NonMaxSuppressionIE3;
} // namespace op
} // namespace ngraph
class ngraph::op::NonMaxSuppressionIE : public Op {
public:
OPENVINO_OP("NonMaxSuppressionIE", "legacy");
NonMaxSuppressionIE(const Output<Node>& boxes,
const Output<Node>& scores,
const Output<Node>& max_output_boxes_per_class,
const Output<Node>& iou_threshold,
const Output<Node>& score_threshold,
int center_point_box,
bool sort_result_descending,
const ngraph::element::Type& output_type = ngraph::element::i64);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
int m_center_point_box;
bool m_sort_result_descending = true;
element::Type m_output_type;
};
class ngraph::op::NonMaxSuppressionIE2 : public NonMaxSuppressionIE {
public:
OPENVINO_OP("NonMaxSuppressionIE2", "legacy");
NonMaxSuppressionIE2(const Output<Node>& boxes,
const Output<Node>& scores,
const Output<Node>& max_output_boxes_per_class,
const Output<Node>& iou_threshold,
const Output<Node>& score_threshold,
int center_point_box,
bool sort_result_descending,
const ngraph::element::Type& output_type = ngraph::element::i64);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
};
class ngraph::op::NonMaxSuppressionIE3 : public Op {
public:
OPENVINO_OP("NonMaxSuppressionIE3", "legacy");
NonMaxSuppressionIE3(const Output<Node>& boxes,
const Output<Node>& scores,
const Output<Node>& max_output_boxes_per_class,
const Output<Node>& iou_threshold,
const Output<Node>& score_threshold,
int center_point_box,
bool sort_result_descending,
const ngraph::element::Type& output_type = ngraph::element::i64);
NonMaxSuppressionIE3(const Output<Node>& boxes,
const Output<Node>& scores,
const Output<Node>& max_output_boxes_per_class,
const Output<Node>& iou_threshold,
const Output<Node>& score_threshold,
const Output<Node>& soft_nms_sigma,
int center_point_box,
bool sort_result_descending,
const ngraph::element::Type& output_type = ngraph::element::i64);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
int m_center_point_box;
bool m_sort_result_descending = true;
element::Type m_output_type;
private:
int64_t max_boxes_output_from_input() const;
};

View File

@ -1,52 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class NormalizeIE : public Op {
public:
OPENVINO_OP("NormalizeIE", "legacy");
NormalizeIE() = default;
NormalizeIE(const Output<Node>& data,
const Output<Node>& weights,
float eps,
bool across_spatial,
bool channel_shared,
const ngraph::element::Type output_type);
float get_eps() const {
return m_eps;
}
bool get_channel_shared() const {
return m_channel_shared;
}
bool get_across_spatial() const {
return m_across_spatial;
}
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
protected:
float m_eps;
bool m_across_spatial;
bool m_channel_shared;
ngraph::element::Type m_output_type;
};
} // namespace op
} // namespace ngraph

View File

@ -1,58 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/op/one_hot.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class OneHotIE;
} // namespace op
} // namespace ngraph
class ngraph::op::OneHotIE : public Op {
public:
OPENVINO_OP("OneHotIE", "legacy");
explicit OneHotIE(const Output<ngraph::Node>& input,
int axis,
int depth,
float on_value,
float off_value,
element::Type type);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
int get_axis() {
return m_axis;
}
int get_depth() {
return m_depth;
}
float get_on_value() {
return m_on_value;
}
float get_off_value() {
return m_off_value;
}
private:
element::Type m_type;
int m_axis;
int m_depth;
float m_off_value = 0.0;
float m_on_value = 0.0;
};

View File

@ -1,56 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/pad.hpp"
namespace ngraph {
namespace op {
class PadIE : public Op {
public:
OPENVINO_OP("PadIE", "legacy");
explicit PadIE(const std::shared_ptr<op::v1::Pad>& pad);
PadIE(const Output<ngraph::Node>& input,
PadMode pad_mode,
CoordinateDiff pads_begin,
CoordinateDiff pads_end,
Shape output_shape,
float pad_value);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
PadMode get_pad_mode() {
return m_pad_mode;
}
CoordinateDiff get_pads_begin() {
return m_pads_begin;
}
CoordinateDiff get_pads_end() {
return m_pads_end;
}
float get_pad_value() {
return m_pad_value;
}
private:
PadMode m_pad_mode;
CoordinateDiff m_pads_begin, m_pads_end;
Shape m_output_shape;
float m_pad_value = 0;
};
} // namespace op
} // namespace ngraph

View File

@ -1,39 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class PowerIE : public Op {
public:
OPENVINO_OP("PowerIE", "legacy");
PowerIE() = default;
PowerIE(const Output<Node>& data_batch,
const float power,
const float scale,
const float shift,
const element::Type output_type = element::undefined);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
float scale, power, shift;
private:
element::Type m_output_type;
};
} // namespace op
} // namespace ngraph

View File

@ -1,44 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/op/proposal.hpp>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class ProposalIE : public Op {
public:
OPENVINO_OP("ProposalIE", "legacy");
// \brief Constructs a Proposal operation
//
// \param class_probs Class probability scores
// \param class_bbox_deltas Class prediction bbox_deltas
// \param image_shape Shape of image
// \param attrs Proposal op attributes
ProposalIE(const Output<Node>& class_probs,
const Output<Node>& class_bbox_deltas,
const Output<Node>& image_shape,
const ProposalAttrs& attrs);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
const ProposalAttrs& get_attrs() const {
return m_attrs;
}
private:
ProposalAttrs m_attrs;
};
} // namespace op
} // namespace ngraph

View File

@ -1,42 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class ReLUIE : public Op {
public:
OPENVINO_OP("ReLUIE", "legacy");
ReLUIE(const Output<Node>& data, const float& negative_slope, const element::Type output_type);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
float get_slope() {
return m_negative_slope;
}
element::Type get_output_type() const {
return m_output_type;
}
private:
float m_negative_slope;
element::Type m_output_type;
};
} // namespace op
} // namespace ngraph

View File

@ -1,64 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class RNNCellIE : public Op {
public:
OPENVINO_OP("RNNCellIE", "legacy");
RNNCellIE(const Output<Node>& X,
const Output<Node>& H_t,
const Output<Node>& WR,
const Output<Node>& B,
size_t hidden_size,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip);
RNNCellIE() = delete;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
std::size_t get_hidden_size() {
return m_hidden_size;
}
const std::vector<std::string>& get_activations() {
return m_activations;
}
const std::vector<float>& get_activations_alpha() {
return m_activations_alpha;
}
const std::vector<float>& get_activations_beta() {
return m_activations_beta;
}
float get_clip() {
return m_clip;
}
bool visit_attributes(AttributeVisitor& visitor) override;
protected:
int64_t m_hidden_size{};
std::vector<std::string> m_activations;
std::vector<float> m_activations_alpha;
std::vector<float> m_activations_beta;
float m_clip;
};
} // namespace op
} // namespace ngraph

View File

@ -1,68 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include <vector>
#include "ngraph/op/op.hpp"
#include "ngraph/opsets/opset4.hpp"
namespace ngraph {
namespace op {
class RNNSequenceIE : public ngraph::op::util::RNNCellBase {
public:
OPENVINO_OP("RNNSequenceIE", "legacy");
RNNSequenceIE(const Output<Node>& X,
const Output<Node>& H_t,
const Output<Node>& seq_lengths,
const Output<Node>& WR,
const Output<Node>& B,
size_t hidden_size,
op::RecurrentSequenceDirection direction,
const std::vector<std::string>& activations,
const std::vector<float>& activations_alpha,
const std::vector<float>& activations_beta,
float clip,
int64_t seq_axis = 1);
RNNSequenceIE() = delete;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void validate_and_infer_types() override;
std::size_t get_hidden_size() {
return m_hidden_size;
}
const std::vector<std::string>& get_activations() {
return m_activations;
}
const std::vector<float>& get_activations_alpha() {
return m_activations_alpha;
}
const std::vector<float>& get_activations_beta() {
return m_activations_beta;
}
float get_clip() {
return m_clip;
}
bool visit_attributes(AttributeVisitor& visitor) override;
protected:
op::RecurrentSequenceDirection m_direction;
int64_t m_seq_axis;
};
} // namespace op
} // namespace ngraph

View File

@ -1,35 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class ScaleShiftIE : public Op {
public:
OPENVINO_OP("ScaleShiftIE", "legacy");
ScaleShiftIE(const Output<Node>& data_batch,
const Output<Node>& weights,
const Output<Node>& bias,
const element::Type output_type = element::undefined);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
private:
element::Type output_type;
};
} // namespace op
} // namespace ngraph

View File

@ -1,30 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class SeluIE : public Op {
public:
OPENVINO_OP("SeluIE", "legacy");
SeluIE(const Output<Node>& input, const float alpha, const float gamma);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
float gamma, alpha;
};
} // namespace op
} // namespace ngraph

View File

@ -1,32 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class SwishIE : public Op {
public:
OPENVINO_OP("SwishIE", "legacy");
explicit SwishIE(const Output<Node>& input, float alpha = 1.0);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
void set_alpha(float alpha);
float get_alpha() const;
protected:
float m_alpha;
};
} // namespace op
} // namespace ngraph

View File

@ -1,30 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include "ngraph/op/op.hpp"
namespace ngraph {
namespace op {
class TileIE : public Op {
public:
OPENVINO_OP("TileIE", "legacy");
TileIE(const Output<Node>& data1, const int64_t axis, const int64_t tiles);
void validate_and_infer_types() override;
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
int64_t axis, tiles;
};
} // namespace op
} // namespace ngraph

View File

@ -1,54 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <string>
#include "ngraph/op/op.hpp"
#include "ngraph/op/topk.hpp"
namespace ngraph {
namespace op {
class TopKIE : public Op {
public:
OPENVINO_OP("TopKIE", "legacy");
TopKIE(const Output<Node>& data,
const Output<Node>& k,
const int64_t axis,
const ngraph::op::TopKMode mode,
const ngraph::op::TopKSortType sort,
const element::Type& index_element_type = element::i32);
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
int64_t get_axis() {
return m_axis;
}
ngraph::op::TopKMode get_mode() {
return m_mode;
}
ngraph::op::TopKSortType get_sort_type() {
return m_sort_type;
}
bool visit_attributes(AttributeVisitor& visitor) override;
private:
int64_t m_axis;
ngraph::op::TopKMode m_mode;
ngraph::op::TopKSortType m_sort_type;
ngraph::element::Type m_index_element_type;
};
} // namespace op
} // namespace ngraph

View File

@ -1,58 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <functional>
#include <memory>
#include <ngraph/ngraph.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include "legacy/ngraph_ops/convolution_ie.hpp"
#include "legacy/ngraph_ops/deconvolution_ie.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/group_conv.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/rt_info.hpp"
namespace ngraph {
namespace pass {
class ConvFusion;
class ConvAddFusion;
class ConvMultiplyFusion;
class DeconvAddFusion;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvAddFusion : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvAddFusion", "0");
ConvAddFusion();
};
class ngraph::pass::ConvMultiplyFusion : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvMultiplyFusion", "0");
ConvMultiplyFusion();
};
class ngraph::pass::DeconvAddFusion : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("DeconvAddFusion", "0");
DeconvAddFusion();
};
class ngraph::pass::ConvFusion : public ov::pass::GraphRewrite {
public:
OPENVINO_RTTI("ConvFusion", "0");
ConvFusion() {
add_matcher<ngraph::pass::ConvAddFusion>();
add_matcher<ngraph::pass::ConvMultiplyFusion>();
add_matcher<ngraph::pass::DeconvAddFusion>();
}
};

View File

@ -1,40 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertLSTMCellMatcher;
class ConvertGRUCellMatcher;
class ConvertRNNCellMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertLSTMCellMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertLSTMCellMatcher", "0");
ConvertLSTMCellMatcher();
};
class ngraph::pass::ConvertGRUCellMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertGRUCellMatcher", "0");
ConvertGRUCellMatcher();
};
class ngraph::pass::ConvertRNNCellMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertRNNCellMatcher", "0");
ConvertRNNCellMatcher();
};

View File

@ -1,59 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertConvolutions;
class ConvertConvolution;
class ConvertGroupConvolution;
class ConvertDeconvolution;
class ConvertGroupDeconvolution;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertConvolution : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertConvolution", "0");
ConvertConvolution();
};
class ngraph::pass::ConvertGroupConvolution : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertGroupConvolution", "0");
ConvertGroupConvolution();
};
class ngraph::pass::ConvertDeconvolution : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertDeconvolution", "0");
ConvertDeconvolution();
};
class ngraph::pass::ConvertGroupDeconvolution : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertGroupDeconvolution", "0");
ConvertGroupDeconvolution();
};
class ngraph::pass::ConvertConvolutions : public ov::pass::GraphRewrite {
public:
OPENVINO_RTTI("ConvertConvolutions", "0");
ConvertConvolutions() {
add_matcher<ngraph::pass::ConvertConvolution>();
add_matcher<ngraph::pass::ConvertGroupConvolution>();
add_matcher<ngraph::pass::ConvertDeconvolution>();
add_matcher<ngraph::pass::ConvertGroupDeconvolution>();
}
};

View File

@ -1,39 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <legacy/ngraph_ops/gather_ie.hpp>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
#include "ngraph/op/constant.hpp"
#include "ngraph/op/gather.hpp"
#include "ngraph/op/squeeze.hpp"
#include "ngraph/op/unsqueeze.hpp"
namespace ngraph {
namespace pass {
class ConvertGatherToGatherIEMatcher;
} // namespace pass
} // namespace ngraph
/*
 * Description:
 *     This transformation converts opset1::Gather to the legacy GatherIE.
 *     GatherIE takes the axis as a value; if the indices input has an empty shape (scalar),
 *     we unsqueeze the indices input and squeeze the GatherIE output.
 */
class ngraph::pass::ConvertGatherToGatherIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertGatherToGatherIEMatcher", "0");
ConvertGatherToGatherIEMatcher();
};
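A rough sketch of the scalar-indices handling described in the comment above; this is a hypothetical illustration helper, not the matcher implementation, and it assumes the standard Gather output layout when choosing the squeeze axis.

#include <legacy/ngraph_ops/gather_ie.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <memory>
#include <vector>

// Scalar indices are unsqueezed to shape [1], GatherIE is applied, and the
// resulting size-1 dimension at the gather axis is squeezed back out.
inline ngraph::Output<ngraph::Node> gather_ie_with_scalar_indices(
        const ngraph::Output<ngraph::Node>& params,
        const ngraph::Output<ngraph::Node>& scalar_indices,
        int64_t axis) {
    auto unsqueeze_axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1},
                                                           std::vector<int64_t>{0});
    auto indices_1d = std::make_shared<ngraph::opset1::Unsqueeze>(scalar_indices, unsqueeze_axis);
    auto gather = std::make_shared<ngraph::op::GatherIE>(params, indices_1d, axis);
    auto squeeze_axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1},
                                                         std::vector<int64_t>{axis});
    return std::make_shared<ngraph::opset1::Squeeze>(gather, squeeze_axis);
}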

View File

@ -1,27 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <legacy/ngraph_ops/gather_tree_ie.hpp>
#include <memory>
#include <ngraph/op/gather_tree.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertGatherTreeToGatherTreeIEMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertGatherTreeToGatherTreeIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertGatherTreeToGatherTreeIEMatcher", "0");
ConvertGatherTreeToGatherTreeIEMatcher();
};

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertHardSigmoidToLegacyMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertHardSigmoidToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertHardSigmoidToLegacyMatcher", "0");
ConvertHardSigmoidToLegacyMatcher();
};

View File

@ -1,28 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <map>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <set>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertInterpolateToInterpOrResampleMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertInterpolateToInterpOrResampleMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertInterpolateToInterpOrResampleMatcher", "0");
ConvertInterpolateToInterpOrResampleMatcher();
};

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertLRNToLegacyMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertLRNToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertLRNToLegacyMatcher", "0");
ConvertLRNToLegacyMatcher();
};

View File

@ -1,46 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <algorithm>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertMatMulToFCorGemm;
class ConvertMatMulToFC;
class ConvertMatMulToGemm;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertMatMulToFC : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertMatMulToFC", "0");
ConvertMatMulToFC();
};
class ngraph::pass::ConvertMatMulToGemm : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertMatMulToGemm", "0");
ConvertMatMulToGemm();
};
class ngraph::pass::ConvertMatMulToFCorGemm : public ov::pass::GraphRewrite {
public:
OPENVINO_RTTI("ConvertMatMulToFCorGemm", "0");
ConvertMatMulToFCorGemm() {
add_matcher<ngraph::pass::ConvertMatMulToFC>();
add_matcher<ngraph::pass::ConvertMatMulToGemm>();
}
};

View File

@ -1,38 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertMulAddToScaleShiftOrPower;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertMulAddToScaleShiftOrPower : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertMulAddToScaleShiftOrPower", "0");
ConvertMulAddToScaleShiftOrPower();
};
enum class CONVERSION_RESULT { SCALE_SHIFT, POWER, NONE };
/*
 * The check_constant function checks how the given constant performs an elementwise
 * operation with the given input shape. CONVERSION_RESULT has several values:
 *     SCALE_SHIFT - the constant applies per-channel only
 *     POWER       - the constant applies as a single value
 *     NONE        - the default return value
 */
CONVERSION_RESULT
check_constant(const std::shared_ptr<ngraph::op::Constant>& constant, const ngraph::PartialShape& shape);
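A short usage sketch; the transformation header path is an assumption for illustration. Internally the pass relies on check_constant to choose between a per-channel ScaleShiftIE and a single-value PowerIE.

#include <legacy/transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.hpp>
#include <ngraph/pass/manager.hpp>
#include <memory>

// Registers the legacy Mul/Add folding pass and runs it on a function.
inline void fold_mul_add_to_legacy(const std::shared_ptr<ngraph::Function>& f) {
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::ConvertMulAddToScaleShiftOrPower>();
    manager.run_passes(f);
}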

View File

@ -1,25 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertMulOrAddFinally;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertMulOrAddFinally : public ov::pass::GraphRewrite {
public:
OPENVINO_RTTI("ConvertMulOrAddFinally", "0");
ConvertMulOrAddFinally();
};

View File

@ -1,29 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <transformations_visibility.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertNMS5ToLegacyMatcher;
} // namespace pass
} // namespace ngraph
/*
 * Description:
 *     Converts NMS-5 (opset5::NonMaxSuppression) directly to the internal legacy NMS operation.
 */
class ngraph::pass::ConvertNMS5ToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertNMS5ToLegacyMatcher", "0");
ConvertNMS5ToLegacyMatcher(bool force_i32_output_type = true);
};
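A brief registration sketch showing how the force_i32_output_type flag from the constructor above could be passed through; the header path is an assumption for illustration.

#include <legacy/transformations/convert_opset1_to_legacy/convert_nms_5_to_legacy.hpp>
#include <ngraph/pass/manager.hpp>
#include <memory>

// Keeps the original output element type instead of forcing i32.
inline void convert_nms5_to_legacy(const std::shared_ptr<ngraph::Function>& f) {
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::ConvertNMS5ToLegacyMatcher>(/*force_i32_output_type=*/false);
    manager.run_passes(f);
}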

View File

@ -1,34 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertNMSToNMSIEMatcher;
} // namespace pass
} // namespace ngraph
/*
 * Description:
 *     This transformation converts opset1::NonMaxSuppression to the legacy NonMaxSuppressionIE.
 *     NonMaxSuppressionIE takes the max_output_boxes_per_class, iou_threshold and score_threshold
 *     inputs as 1D tensors, whereas the original operation requires scalars, so Unsqueeze
 *     operations are inserted for these inputs.
 */
class ngraph::pass::ConvertNMSToNMSIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertNMSToNMSIEMatcher", "0");
ConvertNMSToNMSIEMatcher();
};
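A rough illustration of the scalar-to-1D adjustment described in the comment above; this is a hypothetical helper, not the pass itself.

#include <ngraph/opsets/opset1.hpp>
#include <memory>
#include <vector>

// Wraps a scalar threshold input in an Unsqueeze so that NonMaxSuppressionIE
// receives it as a 1D tensor of shape [1].
inline ngraph::Output<ngraph::Node> nms_scalar_to_1d(const ngraph::Output<ngraph::Node>& scalar_input) {
    auto axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1},
                                                 std::vector<int64_t>{0});
    return std::make_shared<ngraph::opset1::Unsqueeze>(scalar_input, axis);
}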

View File

@ -1,32 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertNormalizeL2WithMulToNormalizeIE;
class ConvertNormalizeL2ToLegacyMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertNormalizeL2WithMulToNormalizeIE : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertNormalizeL2WithMulToNormalizeIE", "0");
ConvertNormalizeL2WithMulToNormalizeIE();
};
class ngraph::pass::ConvertNormalizeL2ToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertNormalizeL2ToLegacyMatcher", "0");
ConvertNormalizeL2ToLegacyMatcher();
};

View File

@ -1,31 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertOneHotToOneHotIEMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertOneHotToOneHotIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertOneHotToOneHotIEMatcher", "0");
ConvertOneHotToOneHotIEMatcher();
void detect_output_type(const std::shared_ptr<Function>& f);
private:
element::Type m_output_type = element::Type_t::f32;
};

View File

@ -1,25 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertOpSet1ToLegacy;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertOpSet1ToLegacy : public ov::pass::ModelPass {
public:
OPENVINO_RTTI("ConvertOpSet1ToLegacy", "0");
bool run_on_model(const std::shared_ptr<ngraph::Function>& m) override;
};

View File

@ -1,30 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <legacy/ngraph_ops/pad_ie.hpp>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
#include "ngraph/op/constant.hpp"
#include "ngraph/op/lrn.hpp"
namespace ngraph {
namespace pass {
class ConvertPadToLegacyMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertPadToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertPadToLegacyMatcher", "0");
ConvertPadToLegacyMatcher();
};

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertPowerToPowerIEMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertPowerToPowerIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertPowerToPowerIEMatcher", "0");
ConvertPowerToPowerIEMatcher();
};

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertPReLUToReLUIE;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertPReLUToReLUIE : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertPReLUToReLUIE", "0");
ConvertPReLUToReLUIE();
};

View File

@ -1,32 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertProposalToLegacyMatcher;
class ConvertProposal4ToLegacyMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertProposal4ToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertProposal4ToLegacyMatcher", "0");
ConvertProposal4ToLegacyMatcher();
};
class ngraph::pass::ConvertProposalToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertProposalToLegacyMatcher", "0");
ConvertProposalToLegacyMatcher();
};

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertSeluToSeluIEMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertSeluToSeluIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertSeluToSeluIEMatcher", "0");
ConvertSeluToSeluIEMatcher();
};

View File

@ -1,61 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertLSTMSequenceMatcher;
class ConvertGRUSequenceMatcher;
class ConvertRNNSequenceMatcher;
} // namespace pass
} // namespace ngraph
/**
 * @ingroup ie_transformation_common_api
 * @brief Converts LSTMSequence to the legacy LSTMSequenceIE.
 * The SequenceIE op does not use the seq_length input or the num_direction (direction) attribute.
 * We squeeze the num_direction dimension from all corresponding inputs and unsqueeze it back after the SequenceIE op.
 */
class ngraph::pass::ConvertLSTMSequenceMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertLSTMSequenceMatcher", "0");
ConvertLSTMSequenceMatcher();
};
/**
 * @ingroup ie_transformation_common_api
 * @brief Converts GRUSequence to the legacy GRUSequenceIE.
 * The SequenceIE op does not use the seq_length input or the num_direction (direction) attribute.
 * We squeeze the num_direction dimension from all corresponding inputs and unsqueeze it back after the SequenceIE op.
 */
class ngraph::pass::ConvertGRUSequenceMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertGRUSequenceMatcher", "0");
ConvertGRUSequenceMatcher();
};
/**
 * @ingroup ie_transformation_common_api
 * @brief Converts RNNSequence to the legacy RNNSequenceIE.
 * The SequenceIE op does not use the seq_length input or the num_direction (direction) attribute.
 * We squeeze the num_direction dimension from all corresponding inputs and unsqueeze it back after the SequenceIE op.
 */
class ngraph::pass::ConvertRNNSequenceMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertRNNSequenceMatcher", "0");
ConvertRNNSequenceMatcher();
};
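A minimal sketch of the num_direction handling described in the comments above; these are hypothetical helpers, and the direction axis of the state inputs is assumed to be 1, as in the opset sequence ops.

#include <ngraph/opsets/opset1.hpp>
#include <memory>
#include <vector>

// Drops the assumed num_direction dimension (axis 1) from a state input
// before the *SequenceIE op is created...
inline ngraph::Output<ngraph::Node> drop_direction_dim(const ngraph::Output<ngraph::Node>& state) {
    auto axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1},
                                                 std::vector<int64_t>{1});
    return std::make_shared<ngraph::opset1::Squeeze>(state, axis);
}

// ...and restores it on the corresponding output afterwards.
inline ngraph::Output<ngraph::Node> restore_direction_dim(const ngraph::Output<ngraph::Node>& seq_output) {
    auto axis = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1},
                                                 std::vector<int64_t>{1});
    return std::make_shared<ngraph::opset1::Unsqueeze>(seq_output, axis);
}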

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertSqrtToPowerIEMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertSqrtToPowerIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertSqrtToPowerIEMatcher", "0");
ConvertSqrtToPowerIEMatcher();
};

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <algorithm>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertStridedSliceToCropMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertStridedSliceToCropMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertStridedSliceToCropMatcher", "0");
ConvertStridedSliceToCropMatcher();
};

View File

@ -1,24 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertSwishToSwishIEMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertSwishToSwishIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertSwishToSwishIEMatcher", "0");
ConvertSwishToSwishIEMatcher();
};

View File

@ -1,25 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertTileToLegacyMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertTileToLegacyMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertTileToLegacyMatcher", "0");
ConvertTileToLegacyMatcher();
};

View File

@ -1,26 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_api.h>
#include <memory>
#include <ngraph/pass/graph_rewrite.hpp>
#include <string>
#include <vector>
namespace ngraph {
namespace pass {
class ConvertTopKToTopKIEMatcher;
} // namespace pass
} // namespace ngraph
class ngraph::pass::ConvertTopKToTopKIEMatcher : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ConvertTopKToTopKIEMatcher", "0");
ConvertTopKToTopKIEMatcher();
};

Some files were not shown because too many files have changed in this diff.