diff --git a/.ci/azure/linux_ngraph_onnx.yml b/.ci/azure/linux_ngraph_onnx.yml index 5521d224630..8218a0874cd 100644 --- a/.ci/azure/linux_ngraph_onnx.yml +++ b/.ci/azure/linux_ngraph_onnx.yml @@ -72,7 +72,7 @@ jobs: workingDirectory: $(WORK_DIR) displayName: 'Install dependencies' - - script: ngraph/python/tests/test_onnx/model_zoo_preprocess.sh -d $(MODELS_DIR)/models_data -o -s "$(ONNX_MODEL_ZOO_SHA)" + - script: runtime/bindings/python/tests/test_onnx/model_zoo_preprocess.sh -d $(MODELS_DIR)/models_data -o -s "$(ONNX_MODEL_ZOO_SHA)" displayName: 'Update models' condition: ne(variables['BUILD_TYPE'], 'Debug') diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index 3a190c42769..dae27a71177 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -74,8 +74,8 @@ RUN cmake .. \ RUN make -j $(nproc) install # Run tests via tox -WORKDIR /openvino/ngraph/python -ENV ngraph_DIR=/openvino/dist/deployment_tools/ngraph +WORKDIR /openvino/runtime/bindings/python +ENV OpenVINO_DIR=/openvino/dist/deployment_tools/inference_engine/share ENV LD_LIBRARY_PATH=/openvino/dist/deployment_tools/ngraph/lib ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/lib/python_api/python3.8:${PYTHONPATH} CMD tox diff --git a/.ci/openvino-onnx/Jenkinsfile b/.ci/openvino-onnx/Jenkinsfile index 2849579dcdb..5581c7c2ea8 100644 --- a/.ci/openvino-onnx/Jenkinsfile +++ b/.ci/openvino-onnx/Jenkinsfile @@ -94,7 +94,7 @@ def prepare_repository(String workdir) { def updateModels() { sh """ - ./ngraph/python/tests/test_onnx/model_zoo_preprocess.sh -d ${HOME}/ONNX_CI/models_data -o -s ${ONNX_MODEL_ZOO_SHA} + ./runtime/bindings/python/tests/test_onnx/model_zoo_preprocess.sh -d ${HOME}/ONNX_CI/models_data -o -s ${ONNX_MODEL_ZOO_SHA} """ } diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 7d450a95183..cc8124c21c7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,7 +4,7 @@ version: 2 updates: # Enable version updates for nGraph Python API - package-ecosystem: pip - directory: "/ngraph/python" + directory: "/runtime/bindings/python" schedule: interval: weekly day: monday diff --git a/.gitmodules b/.gitmodules index 095f3968264..8569ecbb958 100644 --- a/.gitmodules +++ b/.gitmodules @@ -44,8 +44,8 @@ [submodule "thirdparty/protobuf"] path = thirdparty/protobuf/protobuf url = https://github.com/protocolbuffers/protobuf.git -[submodule "ngraph/python/pybind11"] - path = ngraph/python/pybind11 +[submodule "runtime/bindings/python/thirdparty/pybind11"] + path = runtime/bindings/python/thirdparty/pybind11 url = https://github.com/pybind/pybind11.git [submodule "thirdparty/ittapi/ittapi"] path = thirdparty/ittapi/ittapi diff --git a/CMakeLists.txt b/CMakeLists.txt index 2cec8d2d5e8..e5ee874a7ac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,6 +33,7 @@ message (STATUS "CMAKE_BUILD_TYPE ...................... 
" ${CMAKE_BUILD_TYPE}) # remove file with exported developer targets to force its regeneration file(REMOVE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake") +file(REMOVE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") foreach(component IN LISTS openvino_export_components) file(REMOVE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake") unset(${component} CACHE) @@ -83,6 +84,8 @@ include(cmake/test_model_zoo.cmake) add_subdirectory(thirdparty) add_subdirectory(openvino) add_subdirectory(ngraph) + +add_subdirectory(runtime) add_subdirectory(inference-engine) # for Template plugin diff --git a/README.md b/README.md index c445e5209b1..7c4b3cdd3a7 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # OpenVINO™ Toolkit -[![Stable release](https://img.shields.io/badge/version-2021.4-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2021.4) +[![Stable release](https://img.shields.io/badge/version-2021.4.1-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2021.4.1) [![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE) ![GitHub branch checks state](https://img.shields.io/github/checks-status/openvinotoolkit/openvino/master?label=GitHub%20checks) ![Azure DevOps builds (branch)](https://img.shields.io/azure-devops/build/openvinoci/b2bab62f-ab2f-4871-a538-86ea1be7d20f/13?label=Public%20CI) diff --git a/cmake/templates/InferenceEngineConfig.cmake.in b/cmake/templates/InferenceEngineConfig.cmake.in index 43408483f9a..c5b95bd75ef 100644 --- a/cmake/templates/InferenceEngineConfig.cmake.in +++ b/cmake/templates/InferenceEngineConfig.cmake.in @@ -111,6 +111,25 @@ _ie_find_dependency(ngraph if(NOT TARGET inference_engine) set(_ie_as_external_package ON) include("${CMAKE_CURRENT_LIST_DIR}/InferenceEngineTargets.cmake") + + # create targets with old names for compatibility + if(TARGET IE::runtime AND NOT TARGET IE::inference_engine) + add_library(IE::inference_engine INTERFACE IMPORTED) + set_target_properties(IE::inference_engine PROPERTIES + INTERFACE_LINK_LIBRARIES IE::runtime) + endif() + + if(TARGET IE::core AND NOT TARGET ngraph::ngraph) + add_library(IE::ngraph INTERFACE IMPORTED) + set_target_properties(IE::ngraph PROPERTIES + INTERFACE_LINK_LIBRARIES IE::core) + endif() + + if(TARGET IE::runtime::c AND NOT TARGET IE::inference_engine_c_api) + add_library(IE::inference_engine_c_api INTERFACE IMPORTED) + set_target_properties(IE::inference_engine_c_api PROPERTIES + INTERFACE_LINK_LIBRARIES IE::runtime::c) + endif() endif() # mark components as available diff --git a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in index 72af5ca89ca..cdb02f8b8af 100644 --- a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in +++ b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in @@ -44,10 +44,17 @@ find_dependency(InferenceEngine NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) +find_dependency(OpenVINO + PATHS "${CMAKE_CURRENT_LIST_DIR}" + NO_CMAKE_FIND_ROOT_PATH + NO_DEFAULT_PATH) + # WA for cmake: it exports ngraph as IE::ngraph in the IE export list # while we already have ngraph export in its own export list as ngraph::ngraph -set_property(TARGET ngraph::ngraph PROPERTY IMPORTED_GLOBAL TRUE) -add_library(IE::ngraph ALIAS ngraph::ngraph) +if(TARGET ngraph::ngraph AND NOT TARGET IE::ngraph) + add_library(IE::ngraph INTERFACE IMPORTED) + set_target_properties(IE::ngraph PROPERTIES INTERFACE_LINK_LIBRARIES ngraph::ngraph) +endif() 
foreach(component @openvino_export_components@) include("${CMAKE_CURRENT_LIST_DIR}/${component}_dev_targets.cmake") diff --git a/cmake/templates/OpenVINOConfig-version.cmake.in b/cmake/templates/OpenVINOConfig-version.cmake.in new file mode 100644 index 00000000000..4e42995a830 --- /dev/null +++ b/cmake/templates/OpenVINOConfig-version.cmake.in @@ -0,0 +1,21 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(PACKAGE_VERSION_MAJOR @IE_VERSION_MAJOR@) +set(PACKAGE_VERSION_MINOR @IE_VERSION_MINOR@) +set(PACKAGE_VERSION_PATCH @IE_VERSION_PATCH@) +set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}") + +set(PACKAGE_VERSION_EXACT False) +set(PACKAGE_VERSION_COMPATIBLE False) + +if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT True) + set(PACKAGE_VERSION_COMPATIBLE True) +endif() + +if(PACKAGE_FIND_VERSION_MAJOR EQUAL PACKAGE_VERSION_MAJOR AND + PACKAGE_FIND_VERSION VERSION_LESS PACKAGE_VERSION) + set(PACKAGE_VERSION_COMPATIBLE True) +endif() diff --git a/cmake/templates/OpenVINOConfig.cmake.in b/cmake/templates/OpenVINOConfig.cmake.in new file mode 100644 index 00000000000..14fc57b36c2 --- /dev/null +++ b/cmake/templates/OpenVINOConfig.cmake.in @@ -0,0 +1,203 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# +# FindOpenVINO +# ------ +# +# Provides the OpenVINO runtime for model creation and inference, and frontend libraries +# to convert models from framework-specific formats. +# +# The following components are supported: +# +# * `Runtime`: OpenVINO C++ and C Core & Inference Runtime, frontend manager +# * `ONNX`: OpenVINO ONNX frontend +# * `PaddlePaddle`: OpenVINO PaddlePaddle frontend +# +# If no components are specified, the `Runtime` component is provided: +# +# find_package(OpenVINO REQUIRED) # only Runtime component +# +# If specific components are required: +# +# find_package(OpenVINO REQUIRED COMPONENTS Runtime ONNX) +# +# Imported Targets: +# ------ +# +# Runtime targets: +# +# `openvino::runtime` +# The OpenVINO C++ Core & Inference Runtime +# +# `openvino::runtime::c` +# The OpenVINO C Inference Runtime +# +# `openvino::core` +# The OpenVINO C++ Core Runtime +# +# Frontend-specific targets: +# +# `openvino::frontend::manager` +# OpenVINO frontend manager +# +# `openvino::frontend::onnx` +# ONNX FrontEnd target (optional) +# +# `openvino::frontend::paddlepaddle` +# PaddlePaddle FrontEnd target (optional) +# +# Result variables: +# ------ +# +# The module sets the following variables in your project: +# +# `OpenVINO_FOUND` +# System has OpenVINO Runtime installed +# +# `OpenVINO_Runtime_FOUND` +# OpenVINO C++ Core & Inference Runtime is available +# +# `OpenVINO_Frontend_ONNX_FOUND` +# OpenVINO ONNX frontend is available +# +# `OpenVINO_Frontend_PaddlePaddle_FOUND` +# OpenVINO PaddlePaddle frontend is available +# +# OpenVINO version variables: +# +# `OpenVINO_VERSION_MAJOR` +# Major version component +# +# `OpenVINO_VERSION_MINOR` +# Minor version component +# +# `OpenVINO_VERSION_PATCH` +# Patch version component +# + +@PACKAGE_INIT@ + +# +# Common functions +# + +if(NOT DEFINED CMAKE_FIND_PACKAGE_NAME) + set(CMAKE_FIND_PACKAGE_NAME OpenVINO) + set(_need_package_name_reset ON) +endif() + +# we have to use our own version of find_dependency because we need to support CMake 3.7 +macro(_ov_find_dependency dep) + set(cmake_fd_quiet_arg) + if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY) + set(cmake_fd_quiet_arg QUIET) + endif() +
set(cmake_fd_required_arg) + if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED) + set(cmake_fd_required_arg REQUIRED) + endif() + + get_property(cmake_fd_alreadyTransitive GLOBAL PROPERTY + _CMAKE_${dep}_TRANSITIVE_DEPENDENCY) + + find_package(${dep} ${ARGN} + ${cmake_fd_quiet_arg} + ${cmake_fd_required_arg}) + + if(NOT DEFINED cmake_fd_alreadyTransitive OR cmake_fd_alreadyTransitive) + set_property(GLOBAL PROPERTY _CMAKE_${dep}_TRANSITIVE_DEPENDENCY TRUE) + endif() + + if(NOT ${dep}_FOUND) + set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "${CMAKE_FIND_PACKAGE_NAME} could not be found because dependency ${dep} could not be found.") + set(${CMAKE_FIND_PACKAGE_NAME}_FOUND False) + return() + endif() + + set(cmake_fd_required_arg) + set(cmake_fd_quiet_arg) +endmacro() + +function(_ov_target_no_deprecation_error) + if(NOT MSVC) + if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") + set(flags "-diag-warning=1786") + else() + set(flags "-Wno-error=deprecated-declarations") + endif() + if(CMAKE_CROSSCOMPILING) + set_target_properties(${ARGV} PROPERTIES + INTERFACE_LINK_OPTIONS "-Wl,--allow-shlib-undefined") + endif() + + set_target_properties(${ARGV} PROPERTIES INTERFACE_COMPILE_OPTIONS ${flags}) + endif() +endfunction() + +# +# OpenVINO config +# + +# need to store the current PACKAGE_PREFIX_DIR, because it's overwritten by the sub-package one +set(_ov_package_prefix_dir "${PACKAGE_PREFIX_DIR}") + +set(THREADING "@THREADING@") +if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND NOT TBB_FOUND) + set_and_check(_tbb_dir "@PACKAGE_IE_TBB_DIR@") + _ov_find_dependency(TBB + COMPONENTS tbb tbbmalloc + CONFIG + PATHS ${TBBROOT}/cmake + ${_tbb_dir} + NO_CMAKE_FIND_ROOT_PATH + NO_DEFAULT_PATH) +endif() + +if(NOT TARGET inference_engine) + set(_ov_as_external_package ON) + include("${CMAKE_CURRENT_LIST_DIR}/OpenVINOTargets.cmake") +endif() + +# +# Components +# + +set(${CMAKE_FIND_PACKAGE_NAME}_Runtime_FOUND ON) + +set(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND @NGRAPH_ONNX_FRONTEND_ENABLE@) +set(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@) + +set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_ONNX_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND}) +set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_PaddlePaddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND}) + +# if no components are specified, only Runtime is provided +if(NOT ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS) + set(${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS Runtime) +endif() + +# +# Apply common functions +# + +foreach(target openvino::runtime openvino::runtime::c openvino::core + openvino::frontend::manager openvino::frontend::onnx + openvino::frontend::paddlepaddle) + if(TARGET ${target} AND _ov_as_external_package) + _ov_target_no_deprecation_error(${target}) + endif() +endforeach() +unset(_ov_as_external_package) + +# restore PACKAGE_PREFIX_DIR +set(PACKAGE_PREFIX_DIR ${_ov_package_prefix_dir}) +unset(_ov_package_prefix_dir) + +check_required_components(${CMAKE_FIND_PACKAGE_NAME}) + +unset(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND) +unset(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND) + +if(_need_package_name_reset) + unset(CMAKE_FIND_PACKAGE_NAME) + unset(_need_package_name_reset) +endif() diff --git a/cmake/templates/ngraphConfig.cmake.in b/cmake/templates/ngraphConfig.cmake.in index 1c17cbeb4cc..52a107da536 100644 --- a/cmake/templates/ngraphConfig.cmake.in +++ b/cmake/templates/ngraphConfig.cmake.in @@ -39,6 +39,31 @@ if(NOT TARGET ngraph) include("${CMAKE_CURRENT_LIST_DIR}/ngraphTargets.cmake") + + # create targets with old names for
compatibility + if(TARGET ngraph::core AND NOT TARGET ngraph::ngraph) + add_library(ngraph::ngraph INTERFACE IMPORTED) + set_target_properties(ngraph::ngraph PROPERTIES + INTERFACE_LINK_LIBRARIES ngraph::core) + endif() + + if(TARGET ngraph::frontend::manager AND NOT TARGET ngraph::frontend_manager) + add_library(ngraph::frontend_manager INTERFACE IMPORTED) + set_target_properties(ngraph::frontend_manager PROPERTIES + INTERFACE_LINK_LIBRARIES ngraph::frontend::manager) + endif() + + if(TARGET ngraph::frontend::onnx AND NOT TARGET ngraph::onnx_ngraph_frontend) + add_library(ngraph::onnx_ngraph_frontend INTERFACE IMPORTED) + set_target_properties(ngraph::onnx_ngraph_frontend PROPERTIES + INTERFACE_LINK_LIBRARIES ngraph::frontend::onnx) + endif() + + if(TARGET ngraph::frontend::paddlepaddle AND NOT TARGET ngraph::paddlepaddle_ngraph_frontend) + add_library(ngraph::paddlepaddle_ngraph_frontend INTERFACE IMPORTED) + set_target_properties(ngraph::paddlepaddle_ngraph_frontend PROPERTIES + INTERFACE_LINK_LIBRARIES ngraph::frontend::paddlepaddle) + endif() endif() set(ngraph_ngraph_FOUND ON) @@ -46,19 +71,18 @@ set(NGRAPH_LIBRARIES ngraph::ngraph) set(ngraph_onnx_ngraph_frontend_FOUND @NGRAPH_ONNX_FRONTEND_ENABLE@) -# ngraph::onnx_importer target and variables are deprecated set(ngraph_onnx_importer_FOUND @NGRAPH_ONNX_FRONTEND_ENABLE@) if(ngraph_onnx_importer_FOUND) set(ONNX_IMPORTER_LIBRARIES ngraph::onnx_ngraph_frontend) + # ngraph::onnx_importer target and variables are deprecated + # but need to create a dummy target for BW compatibility if(NOT TARGET ngraph::onnx_importer) add_library(ngraph::onnx_importer INTERFACE IMPORTED) set_target_properties(ngraph::onnx_importer PROPERTIES - INTERFACE_LINK_LIBRARIES ngraph::onnx_ngraph_frontend - ) + INTERFACE_LINK_LIBRARIES ngraph::onnx_ngraph_frontend) endif() endif() set(ngraph_paddlepaddle_frontend_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@) -set(ir_frontend_FOUND @IR_FRONTEND_ENABLE@) check_required_components(ngraph) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 4d3135903de..6eac6cc9ecb 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -9,20 +9,12 @@ if(NOT ENABLE_DOCKER) add_subdirectory(snippets) - # Detect nGraph - find_package(ngraph QUIET - PATHS "${CMAKE_BINARY_DIR}/ngraph" - NO_DEFAULT_PATH) - if(NOT ngraph_FOUND) - set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph) - endif() - - # Detect InferenceEngine - find_package(InferenceEngine QUIET + # Detect OpenVINO + find_package(OpenVINO QUIET PATHS "${CMAKE_BINARY_DIR}" NO_DEFAULT_PATH) - if(NOT InferenceEngine_FOUND) - set(InferenceEngine_DIR ${CMAKE_BINARY_DIR}) + if(NOT OpenVINO_FOUND) + set(OpenVINO_DIR ${CMAKE_BINARY_DIR}) endif() if(NGRAPH_ONNX_FRONTEND_ENABLE) @@ -72,7 +64,7 @@ function(build_docs) set(C_API "${IE_SOURCE_DIR}/ie_bridges/c/include") set(PLUGIN_API_DIR "${DOCS_BUILD_DIR}/IE_PLUGIN_DG") set(NGRAPH_DIR "${OpenVINO_SOURCE_DIR}/ngraph") - set(NGRAPH_PY_DIR "${NGRAPH_DIR}/python/src/ngraph/") + set(NGRAPH_PY_DIR "${OpenVINO_SOURCE_DIR}/runtime/bindings/python/src/compatibility/ngraph/") set(NGRAPH_CPP_DIR "${NGRAPH_DIR}/core/include/" "${NGRAPH_DIR}/frontend/onnx_import/include") # Preprocessing scripts diff --git a/docs/IE_DG/Extensibility_DG/Building.md b/docs/IE_DG/Extensibility_DG/Building.md index d1f62cb53a8..be93c5a06d3 100644 --- a/docs/IE_DG/Extensibility_DG/Building.md +++ b/docs/IE_DG/Extensibility_DG/Building.md @@ -14,6 +14,6 @@ To build an extension library, run the commands below: $ cd template_extension $ mkdir build $ cd build -$ cmake 
-DInferenceEngine_DIR=[IE_DIR] -Dngraph_DIR=[NGRAPH_DIR] ../ +$ cmake -DOpenVINO_DIR=[OpenVINO_DIR] ../ $ cmake --build . ``` diff --git a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md index a9a9841cac4..eb7183f0dc2 100644 --- a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md +++ b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md @@ -50,8 +50,8 @@ The example below demonstrates how to unregister an operator from the destructor ## Requirements for Building with CMake -A program that uses the `register_operator` functionality requires `ngraph::ngraph` and `ngraph::onnx_ngraph_frontend` libraries in addition to the Inference Engine. -The `onnx_ngraph_frontend` is a component of the `ngraph` package , so `find_package(ngraph REQUIRED COMPONENTS onnx_ngraph_frontend)` can find both. +A program that uses the `register_operator` functionality requires the `openvino::core` and `openvino::frontend::onnx` libraries in addition to the OpenVINO Inference Runtime. +The ONNX frontend is a component of the `OpenVINO` package, so `find_package(OpenVINO REQUIRED COMPONENTS ONNX)` can find both. Those libraries need to be passed to the `target_link_libraries` command in the CMakeLists.txt file. See CMakeLists.txt below for reference: diff --git a/docs/IE_DG/Integrate_with_customer_application_new_API.md b/docs/IE_DG/Integrate_with_customer_application_new_API.md index 9e5ac71189c..044c1c62ad9 100644 --- a/docs/IE_DG/Integrate_with_customer_application_new_API.md +++ b/docs/IE_DG/Integrate_with_customer_application_new_API.md @@ -193,11 +193,10 @@ build/ - build directory ``` cmake cmake_minimum_required(VERSION 3.0.0) project(project_name) -find_package(ngraph REQUIRED) -find_package(InferenceEngine REQUIRED) +find_package(OpenVINO REQUIRED) find_package(OpenCV REQUIRED) add_executable(${PROJECT_NAME} src/main.cpp) -target_link_libraries(${PROJECT_NAME} PRIVATE ${InferenceEngine_LIBRARIES} ${OpenCV_LIBS} ${NGRAPH_LIBRARIES}) +target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime ${OpenCV_LIBS}) ``` 3. **To build your project** using CMake with the default build tools currently available on your machine, execute the following commands: > **NOTE**: Make sure you set environment variables first by running `/bin/setupvars.sh` (or setupvars.bat for Windows)`. Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls. diff --git a/docs/doxygen/ngraph_cpp_api.config b/docs/doxygen/ngraph_cpp_api.config index e289689d315..2b0b50b6d70 100644 --- a/docs/doxygen/ngraph_cpp_api.config +++ b/docs/doxygen/ngraph_cpp_api.config @@ -29,7 +29,8 @@ FILE_PATTERNS = *.cpp \ LAYOUT_FILE = "@NGRAPH_CPP_LAYOUT_BUILD@" INPUT = "@NGRAPH_DIR@/core/include/" \ - "@NGRAPH_DIR@/frontend/onnx_import/include" + "@NGRAPH_DIR@/frontend/onnx/frontend/include/" \ + "@NGRAPH_DIR@/frontend/paddlepaddle/frontend/include/" HTML_OUTPUT = "@NGRAPH_CPP_OUTPUT@" diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index 047107467c2..748ff27e752 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -67,10 +67,6 @@ limitations under the License.
- - - - diff --git a/docs/install_guides/PAC_Configure.md b/docs/install_guides/PAC_Configure.md deleted file mode 100644 index 7f4f46ee18f..00000000000 --- a/docs/install_guides/PAC_Configure.md +++ /dev/null @@ -1,21 +0,0 @@ -# Configuration Guide for Intel® Distribution of OpenVINO™ toolkit 2020.4 and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA on CentOS or Ubuntu* {#openvino_docs_install_guides_PAC_Configure} - -## Product Change Notice -Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA - - - - - - - - - - -
Change Notice Begins: July 2020
Change Date: October 2020
- -Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. - -Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. - -For documentation for previous releases of Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_PAC_Configure.html) and lower. diff --git a/docs/install_guides/PAC_Configure_2018R5.md b/docs/install_guides/PAC_Configure_2018R5.md deleted file mode 100644 index 1378c0c6f2c..00000000000 --- a/docs/install_guides/PAC_Configure_2018R5.md +++ /dev/null @@ -1,247 +0,0 @@ -# Configuration Guide for Intel® Distribution of OpenVINO™ toolkit 2018R5 and the Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX on CentOS* {#openvino_docs_install_guides_PAC_Configure_2018R5} - -## Get Started - -The following describes the set-up of the Intel® Distribution of OpenVINO™ toolkit on CentOS* 7.4. This is based upon a completely fresh install of CentOS 7.4 with developer tools included. This document was written for the Intel® Distribution of OpenVINO™ toolkit 2018 R5 release and may be largely applicable for later versions. Official Intel® documentation for the install process can be found in the following locations and it is highly recommended that these are read, especially for new users. This document serves as a guide, and in some cases, adds additional detail, specifically for an install with `sudo` privileges on CentOS 7.4. - -[Intel® Acceleration Stack for FPGAs Quick Start Guide](https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug-qs-ias-v1-1.pdf) - -[OpenCL™ on Intel® PAC Quick Start Guide](https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug-qs-ias-opencl-a10-v1-1.pdf) - -[Installing the Intel® Distribution of OpenVINO™ toolkit for Linux*](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html) - -(Optional): Install NTFS support for transferring large installers if already downloaded on another machine. -```sh -sudo yum -y install epel-release -``` -```sh -sudo yum -y install ntfs-3g -``` - -## Install Intel® PAC and the Intel® Programmable Acceleration Card Stack - -1. Download version 1.1 of the Acceleration Stack for Runtime from the [Intel FPGA Acceleration Hub](https://www.altera.com/solutions/acceleration-hub/downloads.html). -This downloads as `a10_gx_pac_ias_1_1_pv_rte_installer.tar.gz`. Let it download to `~/Downloads`. - -2. Create a new directory to install to: -```sh -mkdir -p ~/tools/intelrtestack -``` - -3. 
Untar and launch the installer: -```sh -cd ~/Downloads -``` -```sh -tar xf a10_gx_pac_ias_1_1_pv_rte_installer.tar.gz -``` -```sh -cd a10_gx_pac_ias_1_1_pv_rte_installer -``` -```sh -sudo ./setup.sh -``` - -4. Select **Y** to install OPAE and accept license and when asked, specify `~/tools/intelrtestack` as the install path. During the installation there should be a message stating the directory already exists as it was created in the first command above. Select Y to install to this directory. If this message is not seen, it suggests that there was a typo when entering the install location. - -5. Tools are installed to the following directories: - * `Intel® Quartus® software Programmer: ~/tools/inteltrestack/intelFPGA_pro/qprogrammer` - * `OpenCL™ Run Time Environment: ~/tools/intelrtestack/intelFPGA_pro/aclrte-linux64` - * `Intel® Acceleration Stack for FPGAs: ~/tools/intelrtestack/a10_gx_pac_ias_1_1_pv` - -6. Install E10/E40 Software Patch -```sh -source ~/tools/intelrtestack/init_env.sh -``` -```sh -cd $OPAE_PLATFORM_ROOT/hw -``` -```sh -sudo wget https://www.intel.com/content/dam/altera-www/global/en_US/others/solutions/acceleration-hub/a10_gx_pac_ias_1_1_pv_eth.patch -``` -```sh -sudo patch -s -p0 < a10_gx_pac_ias_1_1_pv_eth.patch -``` - -7. Check the version of the FPGA Interface Manager firmware on the PAC board. -```sh -sudo fpgainfo fme -``` - -8. If the reported `Pr Interface Id` is not `9926ab6d-6c92-5a68-aabc-a7d84c545738` then follow the instructions in section 4 of the [Intel® Acceleration Stack for FPGAs Quick Start Guide](https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug-qs-ias-v1-1.pdf) to update the FME. - -9. Run the built in self-test to verify operation of the Acceleration Stack and Intel® PAC in a non-virtualized environment. -```sh -sudo sh -c "echo 20 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" -``` -```sh -sudo fpgabist $OPAE_PLATFORM_ROOT/hw/samples/nlb_mode_3/bin/nlb_mode_3.gbs -``` - -## Extract and Verify the Intel® Acceleration Stack for FPGAs OpenCL™ BSP - -1. Extract the BSP -```sh -cd $OPAE_PLATFORM_ROOT/opencl -``` -```sh -sudo tar xf opencl_bsp.tar.gz -``` - -2. Create an initialization script `~/init_openvino.sh` with the following content that can be run upon opening a new terminal or rebooting. This will source the script ran above as well as setting up the OpenCL™ environment. -```sh -source \$HOME/tools/intelrtestack/init_env.sh -``` -```sh -export CL_CONTEXT_COMPILER_MODE_ALTERA=3 -``` -```sh -export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -``` -```sh -export INTELFPGAOCLSDKROOT="\$HOME/tools/intelrtestack/intelFPGA_pro/aclrte-linux64" -``` -```sh -export ALTERAOCLSDKROOT="\$INTELFPGAOCLSDKROOT" -``` -```sh -export AOCL_BOARD_PACKAGE_ROOT="\$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" -``` -```sh -\$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh``` -```sh -source $INTELFPGAOCLSDKROOT/init_opencl.sh -``` - -3. Source the script: -```sh -source ~/init_openvino.sh -``` - -4. Some of the settings made in the child scripts need a reboot to take effect. Reboot the machine and source the script again. Note that this script should be sourced each time a new terminal is opened for use with the Intel® Acceleration Stack for FPGAs and Intel® Distribution of OpenVINO™ toolkit. -```sh -source ~/init_openvino.sh -``` - -5. Install the OpenCL™ driver: -```sh -cd ~ -``` -```sh -sudo -E ./tools/intelrtestack/intelFPGA_pro/aclrte-linux64/bin/aocl install -``` -Select **Y** when asked to install the BSP. 
Note that the following warning can be safely ignored. -```sh -WARNING: install not implemented. Please refer to DCP Quick Start User Guide. -``` - -6. Program the Intel® PAC board with a pre-compiled `.aocx` file (OpenCL™ based FPGA bitstream). -```sh -cd \$OPAE_PLATFORM_ROOT/opencl -``` -```sh -aocl program acl0 hello_world.aocx -``` - -7. Build and run the Hello World application: -```sh -sudo tar xf exm_opencl_hello_world_x64_linux.tgz -``` -```sh -sudo chmod -R a+w hello_world -``` -```sh -cd hello_world -``` -```sh -make -``` -```sh -cp ../hello_world.aocx ./bin -``` -```sh -./bin/host -``` - -## Add Intel® Distribution of OpenVINO™ toolkit with FPGA Support to Environment Variables - -1. To run the Intel® Distribution of OpenVINO™ toolkit, add the last four commands to the `~/init_openvino.sh` script. The previous content is shown as well. -```sh -source \$HOME/tools/intelrtestack/init_env.sh -export CL_CONTEXT_COMPILER_MODE_ALTERA=3 -export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -export INTELFPGAOCLSDKROOT="\$HOME/tools/intelrtestack/intelFPGA_pro/aclrte-linux64" -export ALTERAOCLSDKROOT="\$INTELFPGAOCLSDKROOT" -export AOCL_BOARD_PACKAGE_ROOT="\$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" -\$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh -source $INTELFPGAOCLSDKROOT/init_opencl.sh -export IE_INSTALL="/opt/intel/openvino/deployment_tools" -source \$IE_INSTALL/../bin/setupvars.sh -export PATH="\$PATH:\$HOME/inference_engine_samples/intel64/Release" -alias mo="python3.6 \$IE_INSTALL/model_optimizer/mo.py" -``` - -2. Source the script -```sh -source ~/init_openvino.sh -``` - -## Program a Bitstream - -The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with a SqueezeNet model. - -> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions. For example, you cannot use the `1-0-1_RC_FP16_Generic` bitstream, when the Intel® Distribution of OpenVINO™ toolkit supports the `2-0-1_RC_FP16_Generic bitstream`. - -There are different folders for each FPGA card type which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package. -For the Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX, the pre-trained bitstreams are in the `/opt/intel/openvino/bitstreams/a10_dcp_bitstreams` directory. This example uses a SqueezeNet bitstream with low precision for the classification sample. - -Program the bitstream for Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX: -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_dcp_bitstreams/5-0_RC_FP11_SqueezeNet.aocx -``` - -## Use the Intel® Distribution of OpenVINO™ toolkit - -1. Run inference with the Intel® Distribution of OpenVINO™ toolkit independent of the demo scripts using the SqueezeNet model that was download by the scripts. For convenience, copy the necessary files to a local directory. If the workstation has been rebooted or a new terminal is opened, source the script above first. -```sh -mkdir ~/openvino_test -``` -```sh -cd ~/openvino_test -``` -```sh -cp ~/openvino_models/classification/squeezenet/1.1/caffe/squeezenet1.1.* . -``` -```sh -cp ~/openvino_models/ir/squeezenet1.1/squeezenet1.1.labels . -``` - -2. 
Note that the `squeezenet1.1.labels` file contains the classes used by ImageNet and is included here so that the inference results show text rather than classification numbers. Convert the model with the [Model Optimizer](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html). Note that the command below uses the alias defined in the script above and is not referred to in other documentation. -```sh -mo --input_model squeezenet1.1.caffemodel -``` - -3. Now run Inference on the CPU using one of the built in Inference Engine samples: -```sh -classification_sample_async -m squeezenet1.1.xml -i $IE_INSTALL/demo/car.png -``` - -4. Add the `-d` option to run on FPGA: -```sh -classification_sample_async -m squeezenet1.1.xml -i $IE_INSTALL/demo/car.png -d HETERO:FPGA,CPU -``` - -5. Increase the number of iterations with the `-ni` option to reduce the impact of initialization: -```sh -classification_sample_async -m squeezenet1.1.xml -i $IE_INSTALL/demo/car.png -d HETERO:FPGA,CPU -ni 100 -``` - -Congratulations, You are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA. - -## Additional Resources - -Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit) - -Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org) - -Inference Engine FPGA plugin documentation: [https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html) diff --git a/docs/install_guides/PAC_Configure_2019RX.md b/docs/install_guides/PAC_Configure_2019RX.md deleted file mode 100644 index 150ca475d65..00000000000 --- a/docs/install_guides/PAC_Configure_2019RX.md +++ /dev/null @@ -1,247 +0,0 @@ -# Configuration Guide for Intel® Distribution of OpenVINO™ toolkit 2019R1/2019R2/2019R3/2020.1 and the Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX on CentOS or Ubuntu* {#openvino_docs_install_guides_PAC_Configure_2019RX} - -## Get Started - -The following describes the set-up of the Intel® Distribution of OpenVINO™ toolkit on CentOS* 7.4 or Ubuntu* 16.04, kernel 4.15. This is based upon a completely fresh install of the OS with developer tools included. This document was written for the Intel® Distribution of OpenVINO™ toolkit 2019 release 1, 2, and 3 and may be largely applicable for later versions. Official Intel® documentation for the install process can be found in the following locations and it is highly recommended that these are read, especially for new users. This document serves as a guide, and in some cases, adds additional detail where necessary. - -[Intel® Acceleration Stack for FPGAs Quick Start Guide](https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug-qs-ias-v1-1.pdf) - -[OpenCL™ on Intel® PAC Quick Start Guide](https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug-qs-ias-opencl-a10-v1-1.pdf) - -[Installing the Intel® Distribution of OpenVINO™ toolkit for Linux*](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html) - -(Optional): Install NTFS support for transferring large installers if already downloaded on another machine. 
-```sh -sudo yum -y install epel-release -``` -```sh -sudo yum -y install ntfs-3g -``` - -## Install Intel® PAC and the Intel® Programmable Acceleration Card Stack - -1. Download version 1.2 of the Acceleration Stack for Runtime from the [Intel FPGA Acceleration Hub](https://www.altera.com/solutions/acceleration-hub/downloads.html). -This downloads as `a10_gx_pac_ias_1_2_pv_rte_installer.tar.gz`. Let it download to `~/Downloads`. - -2. Create a new directory to install to: -```sh -mkdir -p ~/tools/intelrtestack -``` - -3. Untar and launch the installer: -```sh -cd ~/Downloads -``` -```sh -tar xf a10_gx_pac_ias_1_2_pv_rte_installer.tar.gz -``` -```sh -cd a10_gx_pac_ias_1_2_pv_rte_installer -``` -```sh -./setup.sh -``` - -4. Select **Y** to install OPAE and accept license and when asked, specify `/home//tools/intelrtestack` as the absolute install path. During the installation there should be a message stating the directory already exists as it was created in the first command above. Select **Y** to install to this directory. If this message is not seen, it suggests that there was a typo when entering the install location. - -5. Tools are installed to the following directories: - * OpenCL™ Runtime Environment: `~/tools/intelrtestack/opencl_rte/aclrte-linux64` - * Intel® Acceleration Stack for FPGAs: `~/tools/intelrtestack/a10_gx_pac_ias_1_2_pv` - -7. Check the version of the FPGA Interface Manager firmware on the PAC board. -```sh -sudo fpgainfo fme -``` - -8. If the reported `Pr Interface Id` is not `69528db6-eb31-577a-8c36-68f9faa081f6` then follow the instructions in section 4 of the [Intel® Acceleration Stack for FPGAs Quick Start Guide](https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug-qs-ias-v1-2.pdf) to update the FME. - -9. Run the built in self-test to verify operation of the Acceleration Stack and Intel® PAC in a non-virtualized environment. -```sh -sudo sh -c "echo 20 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" -``` -```sh -source ~/tools/intelrtestack/init_env.sh -``` -```sh -sudo fpgabist $OPAE_PLATFORM_ROOT/hw/samples/nlb_mode_3/bin/nlb_mode_3.gbs -``` - -## Verify the Intel® Acceleration Stack for FPGAs OpenCL™ BSP - -1. Remove any previous FCD files that may be from previous installations of hardware in the `/opt/Intel/OpenCL/Boards/` directory: -```sh -cd /opt/Intel/OpenCL/Boards -sudo rm -rf *.fcd -``` - -2. Install `lsb_release` on your system if you are using CentOS: -```sh -sudo yum install redhat-lsb-core -``` - -3. Create an initialization script `~/init_openvino.sh` with the following content that can be run upon opening a new terminal or rebooting. This will source the script ran above as well as setting up the OpenCL™ environment. -```sh -source $HOME/tools/intelrtestack/init_env.sh -``` -```sh -export CL_CONTEXT_COMPILER_MODE_ALTERA=3 -``` -```sh -export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -``` -```sh -export INTELFPGAOCLSDKROOT="/opt/altera/aocl-pro-rte/aclrte-linux64" -``` -```sh -export ALTERAOCLSDKROOT="$INTELFPGAOCLSDKROOT" -``` -```sh -export AOCL_BOARD_PACKAGE_ROOT="$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" -``` -```sh -$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh -``` -```sh -source $INTELFPGAOCLSDKROOT/init_opencl.sh -``` - -4. Source the script: -```sh -source ~/init_openvino.sh -``` - -5. Some of the settings made in the child scripts need a reboot to take effect. Reboot the machine and source the script again. 
Note that this script should be sourced each time a new terminal is opened for use with the Intel® Acceleration Stack for FPGAs and Intel® Distribution of OpenVINO™ toolkit. -```sh -source ~/init_openvino.sh -``` - -6. Install the OpenCL™ driver: -```sh -cd ~ -``` -```sh -sudo -E ./tools/intelrtestack/opencl_rte/aclrte-linux64/bin/aocl install -``` -Select **Y** when asked to install the BSP. Note that the following warning can be safely ignored. -```sh -WARNING: install not implemented. Please refer to DCP Quick Start User Guide. -``` - -7. Program the Intel® PAC board with a pre-compiled `.aocx` file (OpenCL™ based FPGA bitstream). -```sh -cd $OPAE_PLATFORM_ROOT/opencl -``` -```sh -aocl program acl0 hello_world.aocx -``` - -8. Build and run the Hello World application: -```sh -sudo tar xf exm_opencl_hello_world_x64_linux.tgz -``` -```sh -sudo chmod -R a+w hello_world -``` -```sh -cd hello_world -``` -```sh -make -``` -```sh -cp ../hello_world.aocx ./bin -``` -```sh -./bin/host -``` - -## Add Intel® Distribution of OpenVINO™ toolkit with FPGA Support to Environment Variables - -1. To run the Intel® Distribution of OpenVINO™ toolkit, add the last four commands to the `~/init_openvino.sh` script. The previous content is shown as well. -```sh -source $HOME/tools/intelrtestack/init_env.sh -export CL_CONTEXT_COMPILER_MODE_ALTERA=3 -export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3 -export INTELFPGAOCLSDKROOT="/opt/altera/aocl-pro-rte/aclrte-linux64" -export ALTERAOCLSDKROOT="$INTELFPGAOCLSDKROOT" -export AOCL_BOARD_PACKAGE_ROOT="$OPAE_PLATFORM_ROOT/opencl/opencl_bsp" -$AOCL_BOARD_PACKAGE_ROOT/linux64/libexec/setup_permissions.sh -source $INTELFPGAOCLSDKROOT/init_opencl.sh -export IE_INSTALL="/opt/intel/openvino/deployment_tools" -source $IE_INSTALL/../bin/setupvars.sh -export PATH="$PATH:$HOME/inference_engine_samples_build/intel64/Release" -alias mo="python3.6 $IE_INSTALL/model_optimizer/mo.py" -``` -For Ubuntu systems, it is recommended to use python3.5 above instead of python3.6. - -2. Source the script -```sh -source ~/init_openvino.sh -``` - -## Program a Bitstream - -The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with a SqueezeNet model. - -> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions. For example, you cannot use the `1-0-1_RC_FP16_Generic` bitstream, when the Intel® Distribution of OpenVINO™ toolkit supports the `2-0-1_RC_FP16_Generic bitstream`. - -There are different folders for each FPGA card type which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package. -For the Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX, the pre-trained bitstreams are in the `/opt/intel/openvino/bitstreams/a10_dcp_bitstreams` directory. This example uses a SqueezeNet bitstream with low precision for the classification sample. - -Program the bitstream for Intel® Programmable Acceleration Card with Intel® Arria® 10 FPGA GX. 
-For R1: -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_dcp_bitstreams/2019R1_RC_FP11_ResNet_SqueezeNet_VGG.aocx -``` -Or for R2: -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_dcp_bitstreams/2019R2_RC_FP11_ResNet_SqueezeNet_VGG.aocx -``` -Or for R3: -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_dcp_bitstreams/2019R3_PV_RC_FP11_InceptionV1_ResNet_SqueezeNet_TinyYolo_VGG.aocx -``` -Or for 2020.1: -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_dcp_bitstreams/2019R4_RC_FP11_ResNet_SqueezeNet_TinyYolo.aocx -``` - -## Use the Intel® Distribution of OpenVINO™ toolkit - -1. Run inference with the Intel® Distribution of OpenVINO™ toolkit independent of the demo scripts using the SqueezeNet model that was download by the scripts. For convenience, copy the necessary files to a local directory. If the workstation has been rebooted or a new terminal is opened, source the script above first. -```sh -mkdir ~/openvino_test -``` -```sh -cd ~/openvino_test -``` -```sh -cp ~/openvino_models/models/public/squeezenet1.1/squeezenet1.1.* . -``` -```sh -cp ~/openvino_models/ir/public/squeezenet1.1/FP16/squeezenet1.1.labels . -``` - -2. Note that the `squeezenet1.1.labels` file contains the classes used by ImageNet and is included here so that the inference results show text rather than classification numbers. Convert the model with the [Model Optimizer](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html). Note that the command below uses the alias defined in the script above and is not referred to in other documentation. -```sh -mo --input_model squeezenet1.1.caffemodel -``` - -3. Now run Inference on the CPU using one of the built in Inference Engine samples: -```sh -classification_sample_async -m squeezenet1.1.xml -i $IE_INSTALL/demo/car.png -``` - -4. Add the `-d` option to run on FPGA: -```sh -classification_sample_async -m squeezenet1.1.xml -i $IE_INSTALL/demo/car.png -d HETERO:FPGA,CPU -``` - -Congratulations, You are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA. -## Additional Resources - -Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit) - -Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org) - -Inference Engine FPGA plugin documentation: [https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html) diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure.md b/docs/install_guides/VisionAcceleratorFPGA_Configure.md deleted file mode 100644 index 07ff86e8fb8..00000000000 --- a/docs/install_guides/VisionAcceleratorFPGA_Configure.md +++ /dev/null @@ -1,21 +0,0 @@ -# Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit 2020.4 and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG2 (IEI's Mustang-F100-A10) on Linux* {#openvino_docs_install_guides_VisionAcceleratorFPGA_Configure} - -## Product Change Notice -Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA - - - - - - - - - - -
Change Notice Begins: July 2020
Change Date: October 2020
- -Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. - -Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates. - -For documentation for previous releases of Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_VisionAcceleratorFPGA_Configure.html) and lower. diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md deleted file mode 100644 index 328c824fa35..00000000000 --- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2018R5.md +++ /dev/null @@ -1,330 +0,0 @@ -# Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit 2018R5 and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (IEI's Mustang-F100-A10) on Linux* {#openvino_docs_install_guides_VisionAcceleratorFPGA_Configure_2018R5} - -> **NOTES:** -> * For a first-time installation, use all steps. -> * Use steps 1 and 2 only after receiving a new FPGA card. -> * Repeat steps 2-5 when installing a new version of the Intel® Distribution of OpenVINO™ toolkit. -> * Use steps 3-5 when a Neural Network topology used by an Intel® Distribution of OpenVINO™ toolkit application changes. - -## 1. Configure and Install the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA - -1. Download `fpga_support_files.tgz` from the [Intel Registration Center](http://registrationcenter-download.intel.com/akdlm/irc_nas/12954/fpga_support_files.tgz). The files in this `.tgz` archive are required to ensure your FPGA card and the Intel® Distribution of OpenVINO™ toolkit work correctly. - -2. Go to the directory where you downloaded the `fpga_support_files.tgz` archive. - -3. Unpack the `.tgz` file: -```sh -tar -xvzf fpga_support_files.tgz -``` -A directory named `fpga_support_files` is created. - -4. Go to the `fpga_support_files` directory: -```sh -cd fpga_support_files -``` - -5. Source `setup_env.sh` to set your environment variables: -```sh -source /home//Downloads/fpga_support_files/setup_env.sh -``` - -6. Configure the FPGA Driver Blacklist: -```sh -sudo mv config/blacklist-altera-cvp.conf /etc/modprobe.d -``` - -7. Switch to superuser: -```sh -sudo su -``` - -8. Use the `setup_env.sh` script from `fpga_support_files.tgz` to set your environment variables: -```sh -source /home//Downloads/fpga_support_files/setup_env.sh -``` - -9. Change directory to `Downloads/fpga_support_files/`: -```sh -cd /home//Downloads/fpga_support_files/ -``` - -10. Run the FPGA dependencies script, which allows OpenCL to support Ubuntu* and recent kernels: -```sh -./install_openvino_fpga_dependencies.sh -``` - -11. 
When asked, select the FPGA card, Intel® GPU, and Intel® Neural Compute Stick 2, then you can install the correct dependencies. - -12. If you installed the 4.14 kernel as part of the installation script, you will need to reboot the machine and select the new kernel in the Ubuntu (grub) boot menu. You will also need to rerun `setup_env.sh` to set up your environmental variables again. - -13. Install OpenCL™ devices. Enter **Y** when prompted to install: -```sh -aocl install -``` - -14. Reboot the machine: -```sh -reboot -``` - -15. Use the `setup_env.sh` script from `fpga_support_files.tgz` to set your environment variables: -```sh -source /home//Downloads/fpga_support_files/setup_env.sh -``` - -16. Run `aocl diagnose`: -```sh -aocl diagnose -``` -Your screen displays `DIAGNOSTIC_PASSED`. - -## 2. Set Up the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA for 2018R5 - -For the 2018R5 release, the Intel® Distribution of OpenVINO™ toolkit introduced a new board support package (BSP) `a10_1150_sg1` for the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA, which is included into the `fpga_support_files.tgz` archive. To program the bitstreams for the Intel® Distribution of OpenVINO™ toolkit R5, you need to program the BSP into the board using the USB blaster. - -> **NOTE**: These steps apply only if you update to the Intel® Distribution of OpenVINO™ toolkit R5. Otherwise, you can skip them. - -1. Go to the `config` folder of the `fpga_support_files` directory where the `a10_1150_sg1` is located: -```sh -cd /home//Downloads/fpga_support_files/config/ -``` - -2. Copy the `a10_1150_sg1` folder to the `board` directory: -```sh -sudo cp -rf a10_1150_sg1 /opt/altera/aocl-pro-rte/aclrte-linux64/board/ -``` - -3. Convert the BSP files from DOS to UNIX: -```sh -sudo chmod +x a10_1150_sg1 -find a10_1150_sg1 -type f -print0 | xargs -0 dos2unix -``` - -4. Set up the USB Blaster: - - 1. Connect the cable between the board and the host system. Use the letter codes in the diagram below for the connection points: - - 2. Connect the B end of the cable to point B on the board. - - 3. Connect the F end of the cable to point F on the FPGA download cable. - - 4. From point F end of the cable to point F on the FPGA download cable, the connection is as shown: -![](../img/VisionAcceleratorJTAG.png) - -5. Source the `setup_env.sh` script from the `fpga_support_files` to set up the environment variables: -```sh -source /home//Downloads/fpga_support_files/setup_env.sh -``` - -6. Update the Intel® FPGA Download Cable rules to program the board without root permissions and to flash the initialization bitstreams so that the Intel® FPGA Download Cable can communicate with the board: -```sh -sudo cp config/51-usbblaster.rules /etc/udev/rules.d -``` - -7. Load the USB rules: -```sh -sudo udevadm control --reload-rules && udevadm trigger -``` - -8. Unplug and re-plug the Intel® FPGA Download Cable to enable JTAG connection. - -9. Run `jtagconfig` to ensure that your Intel FPGA Download Cable driver is ready to use: -```sh -jtagconfig -``` -Your output is similar to: -```sh -1) USB-Blaster [1-6] -02E660DD 10AX115H1(.|E2|ES)/10AX115H2/.. -``` - -10. Download [Intel® Quartus® Prime Software Lite Edition 17.1](http://fpgasoftware.intel.com/17.1/?edition=lite). Install the Intel® Quartus® Prime Software Lite to the `/home//intelFPGA/17.1` directory. 
-> **NOTE**: You will need the complete the Intel® Quartus® Prime Software Lite version when you want to program the `boardtest_1ddr_top.aocx` into the flash for permanent availability. - -11. Export the Intel® Quartus® Prime Software Lite environment variable: -```sh -export QUARTUS_ROOTDIR=/home//intelFPGA/17.1/quartus -``` - -12. Use `jtagconfig` to slow the clock: -```sh -jtagconfig --setparam 1 JtagClock 6M -``` - -13. (OPTIONAL) Confirm the clock is set to 6M: -```sh -jtagconfig --getparam 1 JtagClock -``` -You should see the following: -```sh -6M -``` - -14. Go to `/opt/altera/aocl-pro-rte/aclrte-linux64/board/a10_1150_sg1/bringup`, where `boardtest_1ddr_top.aocx `is located: -```sh -cd /opt/altera/aocl-pro-rte/aclrte-linux64/board/a10_1150_sg1/bringup -``` - -15. Program the `boardtest_1ddr_top.aocx` file to the flash to be made permanently available even after power cycle: -```sh -aocl flash acl0 boardtest_1ddr_top.aocx -``` -> **NOTE**: You will need the USB Blaster for this. - -16. Reboot the host system. - -17. Check if the host system recognizes the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA board. Confirm you can detect the PCIe card: -```sh -lspci | grep -i Altera -``` -Your output is similar to: -```sh -01:00.0 Processing accelerators: Altera Corporation Device 2494 (rev 01) -``` - -18. Source the `setup_env.sh` script from the `fpga_support_files` directory to setup the environment variables: -```sh -source /home//Downloads/fpga_support_file/setup_env.sh -``` - -19. Uninstall the previous BSP before installing the OpenCL drivers for the R5 BSP: -```sh -aocl uninstall /opt/altera/aocl-pro-rte/aclrte-linux64/board// -``` - -20. Export and source the environment script: -```sh -export AOCL_BOARD_PACKAGE_ROOT=/opt/altera/aocl-pro-rte/aclrte-linux64/board/a10_1150_sg1 -``` -```sh -source /opt/altera/aocl-pro-rte/aclrte-linux64/init_opencl.sh -``` - -21. Install OpenCL™ devices: -```sh -aocl install -``` - -22. Run the `diagnose` command: -```sh -aocl diagnose -``` -You should see `DIAGNOSTIC_PASSED` before proceeding to the next steps. - -## 3. Program a Bitstream - -The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with a SqueezeNet model that you used the Model Optimizer to convert in the steps before. - -> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions of the Intel® Distribution of OpenVINO™ toolkit. For example, you cannot use the `1-0-1_A10DK_FP16_Generic` bitstream, when the Intel® Distribution of OpenVINO™ toolkit supports the `2-0-1_A10DK_FP16_Generic` bitstream. - -Depending on how many bitstreams you selected, there are different folders for each FPGA card type which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package: - -1. For the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA the pre-trained bistreams are in `/opt/intel/openvino/bitstreams/a10_vision_design_bitstreams`. This example uses a SqueezeNet bitstream with low precision for the classification sample. - -2. Rerun the environment setup script: -```sh -source /home//Downloads/fpga_support_files/setup_env.sh -``` - -3. Change to your home directory: -```sh -cd /home/ -``` - -4. 
Program the bitstream for the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA: -```sh -aocl program acl0 /opt/intel/openvino/bitstreams/a10_vision_design_bitstreams/5-0_PL1_FP11_SqueezeNet.aocx -``` - -### Optional Steps to Flash the FPGA Card - -> **NOTE**: -> - To avoid having to reprogram the board after a power down, a bitstream will be programmed to permanent memory on the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA. This will take about 20 minutes. -> - The following steps 1-5 need to be done only once for a new Intel® Arria 10 FPGA card. - -1. Plug in the micro USB cable to the card and your host system. - -2. Run `jtagconfig` to ensure that the cable is properly inserted: -```sh -jtagconfig -``` - -3. Use `jtagconfig` to slow the clock: -```sh -jtagconfig --setparam 1 JtagClock 6M -``` - -4. Store the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA bitstream on the board: -```sh -aocl flash acl0 /opt/intel/openvino/bitstreams/a10_vision_design_bitstreams/5-0_PL1_FP11_SqueezeNet.aocx -``` -Your output is similar to: -```sh -USB-BlasterII [1-14] -02E660DD 10AX115H1(.|E2|ES)/10AX115H2/.. -020A40DD 5M(1270ZF324|2210Z)/EPM2210 -``` - -## 4. Setup a Neural Network Model for FPGA - -In this section, you will create an FP16 model suitable for hardware accelerators. For more information, see the [FPGA plugin](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html) section in the Inference Engine Developer Guide. - - -1. Create a directory for the FP16 SqueezeNet Model: -```sh -mkdir /home//squeezenet1.1_FP16 -``` - -2. Go to `/home//squeezenet1.1_FP16`: -```sh -cd /home//squeezenet1.1_FP16 -``` - -3. Use the Model Optimizer to convert an FP16 SqueezeNet Caffe* model into an optimized Intermediate Representation (IR): -```sh -python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model /home//openvino_models/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir . -``` - -4. The `squeezenet1.1.labels` file contains the classes `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers. Copy `squeezenet1.1.labels` to the your optimized model location: -```sh -cp /home//openvino_models/ir/squeezenet1.1/FP32/squeezenet1.1.labels . -``` - -5. Copy a sample image to the release directory. You will use this with your optimized model: -```sh -sudo cp /opt/intel/openvino/deployment_tools/demo/car.png ~/inference_engine_samples/intel64/Release -``` - -## 5. Run a Sample Application - -1. Go to the samples directory -```sh -cd /home//inference_engine_samples/intel64/Release -``` - -2. Use an Inference Engine sample to run a sample application on the CPU: -```sh -./classification_sample_async -i car.png -m ~/openvino_models/ir/squeezenet1.1/FP32/squeezenet1.1.xml -``` -Note the CPU throughput in Frames Per Second (FPS). This tells you how quickly the inference is done on the hardware. Now run the inference using the FPGA. - -3. Add the `-d` option to target the FPGA: -```sh -./classification_sample_async -i car.png -m ~/squeezenet1.1_FP16/squeezenet1.1.xml -d HETERO:FPGA,CPU -``` -The throughput on FPGA is listed and may show a lower FPS. This is due to the initialization time. To account for that, the next step increases the iterations to get a better sense of the speed the FPGA can run inference at. - -4. 
-4. Use `-ni` to increase the number of iterations. This option reduces the initialization impact:
-```sh
-./classification_sample_async -i car.png -m ~/squeezenet1.1_FP16/squeezenet1.1.xml -d HETERO:FPGA,CPU -ni 100
-```
-
-Congratulations, you are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA.
-
-## Additional Resources
-
-Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
-
-Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org/](https://docs.openvinotoolkit.org/)
-
-Inference Engine FPGA plugin documentation: [https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html)
diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md
deleted file mode 100644
index 8de131e8c45..00000000000
--- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R1.md
+++ /dev/null
@@ -1,281 +0,0 @@
-# Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit 2019R1 and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA (IEI's Mustang-F100-A10) on Linux* {#openvino_docs_install_guides_VisionAcceleratorFPGA_Configure_2019R1}
-
-> **NOTES:**
-> * For a first-time installation, use all steps.
-> * Use step 1 only after receiving a new FPGA card.
-> * Repeat steps 2-4 when installing a new version of the Intel® Distribution of OpenVINO™ toolkit.
-> * Use steps 3-4 when a Neural Network topology used by an Intel® Distribution of OpenVINO™ toolkit application changes.
-
-## 1. Configure and Set Up the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA
-
-For the 2019R1.x releases, the Intel® Distribution of OpenVINO™ toolkit introduced a new board support package (BSP), `a10_1150_sg1`, for the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA, which is included in the `fpga_support_files.tgz` archive below. To program the bitstreams for the Intel® Distribution of OpenVINO™ toolkit 2019R1.x, you need to program the BSP into the board using the USB Blaster.
-
-1. Download [Intel® Quartus® Prime Programmer and Tools Standard Edition 18.1](http://fpgasoftware.intel.com/18.1/?edition=standard&platform=linux&download_manager=direct#tabs-4). Install the Intel® Quartus® Prime Programmer and Tools Software to the `/home//intelFPGA/18.1` directory.
-
-2. Download `fpga_support_files.tgz` from the [Intel Registration Center](http://registrationcenter-download.intel.com/akdlm/irc_nas/12954/fpga_support_files.tgz) to the `~/Downloads` directory. The files in this `.tgz` archive are required to ensure your FPGA card and the Intel® Distribution of OpenVINO™ toolkit work correctly.
-
-3. Go to the directory where you downloaded the `fpga_support_files.tgz` archive.
-
-4. Unpack the `.tgz` file:
-```sh
-tar -xvzf fpga_support_files.tgz
-```
-A directory named `fpga_support_files` is created.
-
-5. Go to the `fpga_support_files` directory:
-```sh
-cd fpga_support_files
-```
-
-6. Switch to superuser:
-```sh
-sudo su
-```
-
-7. Use the `setup_env.sh` script from `fpga_support_files.tgz` to set your environment variables:
-```sh
-source /home//Downloads/fpga_support_files/setup_env.sh
-```
-
-8. Uninstall any previous BSP before installing the 2019R1.x OpenCL BSP:
-```sh
-aocl uninstall /opt/altera/aocl-pro-rte/aclrte-linux64/board//
-```
-
-9. Change directory to `Downloads/fpga_support_files/`:
-```sh
-cd /home//Downloads/fpga_support_files/
-```
-
-10. Run the FPGA dependencies script, which allows OpenCL to support Ubuntu* and recent kernels:
-```sh
-./install_openvino_fpga_dependencies.sh
-```
-
-11. When asked, select the appropriate hardware accelerators you plan to use so it installs the correct dependencies.
-
-12. If you installed the 4.14 kernel as part of the installation script, you will need to reboot the machine and select the new kernel in the Ubuntu (grub) boot menu. You will also need to rerun `setup_env.sh` to set up your environment variables again.
-
-13. Export the Intel® Quartus® Prime Programmer environment variable:
-```sh
-export QUARTUS_ROOTDIR=/home//intelFPGA/18.1/qprogrammer
-```
-
-14. Set up the USB Blaster:
-
-    1. Connect the cable between the board and the host system. Use the letter codes in the diagram below for the connection points:
-
-    2. Connect the B end of the cable to point B on the board.
-
-    3. Connect the F end of the cable to point F on the FPGA download cable.
-
-    4. The resulting connection between the board and the FPGA download cable is as shown:
-![](../img/VisionAcceleratorJTAG.png)
-
-15. Run `jtagconfig` to ensure that your Intel FPGA Download Cable driver is ready to use:
-```sh
-jtagconfig
-```
-Your output is similar to:
-```sh
-1) USB-Blaster [1-6]
-02E660DD 10AX115H1(.|E2|ES)/10AX115H2/..
-```
-
-16. Use `jtagconfig` to slow the clock. The message "No parameter named JtagClock" can be safely ignored.
-```sh
-jtagconfig --setparam 1 JtagClock 6M
-```
-
-17. (OPTIONAL) Confirm the clock is set to 6M:
-```sh
-jtagconfig --getparam 1 JtagClock
-```
-You should see the following:
-```sh
-6M
-```
-
-18. Go to `/opt/altera/aocl-pro-rte/aclrte-linux64/board/a10_1150_sg1/bringup`, where `sg1_boardtest_2ddr_base.sof` is located:
-```sh
-cd /opt/altera/aocl-pro-rte/aclrte-linux64/board/a10_1150_sg1/bringup
-```
-
-19. Program the new sof file to the board:
-```sh
-quartus_pgm -c 1 -m JTAG -o "p;sg1_boardtest_2ddr_base.sof"
-```
-
-20. Soft reboot:
-```sh
-sudo reboot
-```
-
-21. Open up a new terminal and restore sudo access and the environment variables:
-```sh
-sudo su
-source /home//Downloads/fpga_support_files/setup_env.sh
-```
-
-22. Install OpenCL™ devices. Enter **Y** when prompted to install:
-```sh
-aocl install
-```
-
-23. Reboot the machine:
-```sh
-reboot
-```
-
-24. Open up a new terminal and restore sudo access and the environment variables:
-```sh
-sudo su
-source /home//Downloads/fpga_support_files/setup_env.sh
-export QUARTUS_ROOTDIR=/home//intelFPGA/18.1/qprogrammer
-```
-
-25. Run `aocl diagnose`:
-```sh
-aocl diagnose
-```
-Your screen displays `DIAGNOSTIC_PASSED`.
-
-26. Use `jtagconfig` to slow the clock. The message "No parameter named JtagClock" can be safely ignored.
-```sh
-jtagconfig --setparam 1 JtagClock 6M
-```
-
-27. Go to `/opt/intel/openvino/bitstreams/a10_vision_design_bitstreams/`, where `2019R1_PL1_FP11_ResNet_SqueezeNet_VGG.aocx` is located:
-```sh
-cd /opt/intel/openvino/bitstreams/a10_vision_design_bitstreams/
-```
-
-28. Program the `2019R1_PL1_FP11_ResNet_SqueezeNet_VGG.aocx` file to the flash so that it remains permanently available even after a power cycle:
-```sh
-aocl flash acl0 2019R1_PL1_FP11_ResNet_SqueezeNet_VGG.aocx
-```
-> **NOTE**: You will need the USB Blaster for this.
-
-29. Hard reboot the host system, including powering it off.
-
-30. Soft reboot the host system to ensure the new PCIe device is detected properly:
-```sh
-reboot
-```
-
-31. Open up a new terminal and restore sudo access and the environment variables:
-```sh
-sudo su
-source /home//Downloads/fpga_support_files/setup_env.sh
-```
-
-32. Check if the host system recognizes the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA board. Confirm you can detect the PCIe card:
-```sh
-lspci | grep -i Altera
-```
-Your output is similar to:
-```sh
-01:00.0 Processing accelerators: Altera Corporation Device 2494 (rev 01)
-```
-
-33. Run `aocl diagnose`:
-```sh
-aocl diagnose
-```
-You should see `DIAGNOSTIC_PASSED` before proceeding to the next steps.
-
-## 2. Program a Bitstream
-
-The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with the SqueezeNet model that you converted with the Model Optimizer in the steps before.
-
-> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions of the Intel® Distribution of OpenVINO™ toolkit. For example, you cannot use the `1-0-1_A10DK_FP16_Generic` bitstream when the Intel® Distribution of OpenVINO™ toolkit supports the `2-0-1_A10DK_FP16_Generic` bitstream.
-
-Depending on how many bitstreams you selected, there are different folders for each FPGA card type, which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package:
-
-1. For the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA, the pre-trained bitstreams are in `/opt/intel/openvino/bitstreams/a10_vision_design_bitstreams`. This example uses a SqueezeNet bitstream with low precision for the classification sample.
-
-2. Rerun the environment setup script:
-```sh
-source /home//Downloads/fpga_support_files/setup_env.sh
-```
-
-3. Change to your home directory:
-```sh
-cd /home/
-```
-
-4. Program the bitstream for the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA:
-```sh
-aocl program acl0 /opt/intel/openvino/bitstreams/a10_vision_design_bitstreams/2019R1_PL1_FP11_ResNet_SqueezeNet_VGG.aocx
-```
-
-### Steps to Flash the FPGA Card
-
-> **NOTE**:
-> - To avoid having to reprogram the board after a power down, a bitstream will be programmed to permanent memory on the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA. This will take about 20 minutes.
-> - The steps can be followed in the [Configure and Set Up the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA](#1-configure-and-setup-the-intel-vision-accelerator-design-with-an-intel-arria-10-fpga) section of this guide from steps 14-18 and 28-36.
-
-
-## 3. Set Up a Neural Network Model for FPGA
-
-In this section, you will create an FP16 model suitable for hardware accelerators. For more information, see the [FPGA plugin](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html) section in the Inference Engine Developer Guide.
-
-
-1. Create a directory for the FP16 SqueezeNet Model:
-```sh
-mkdir /home//squeezenet1.1_FP16
-```
-
-2. Go to `/home//squeezenet1.1_FP16`:
-```sh
-cd /home//squeezenet1.1_FP16
-```
-
-3. Use the Model Optimizer to convert the FP32 SqueezeNet Caffe* model into an FP16 optimized Intermediate Representation (IR). The model files were downloaded when you ran the Image Classification verification script while [installing the Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support](installing-openvino-linux-fpga.md). To convert, run the Model Optimizer script with the following arguments:
-```sh
-python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model /home//openvino_models/models/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
-```
-
-4. The `squeezenet1.1.labels` file contains the classes that `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers. Copy `squeezenet1.1.labels` to your optimized model location:
-```sh
-cp /home//openvino_models/ir/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.labels .
-```
-
-5. Copy a sample image to the release directory. You will use this with your optimized model:
-```sh
-sudo cp /opt/intel/openvino/deployment_tools/demo/car.png ~/inference_engine_samples_build/intel64/Release
-```
-
-## 4. Run a Sample Application
-
-1. Go to the samples directory:
-```sh
-cd /home//inference_engine_samples_build/intel64/Release
-```
-
-2. Use an Inference Engine sample to run a sample application on the CPU:
-```sh
-./classification_sample_async -i car.png -m ~/openvino_models/ir/FP32/classification/squeezenet/1.1/caffe/squeezenet1.1.xml
-```
-Note the CPU throughput in Frames Per Second (FPS). This tells you how quickly the inference is done on the hardware. Now run the inference using the FPGA.
-
-3. Add the `-d` option to target the FPGA (for a Python equivalent, see the sketch at the end of this guide):
-```sh
-./classification_sample_async -i car.png -m ~/squeezenet1.1_FP16/squeezenet1.1.xml -d HETERO:FPGA,CPU
-```
-The throughput on FPGA is listed and may show a lower FPS. This is due to the initialization time. To account for that, the next step increases the iterations to get a better sense of the speed at which the FPGA can run inference.
-
-4. Use `-ni` to increase the number of iterations. This option reduces the initialization impact:
-```sh
-./classification_sample_async -i car.png -m ~/squeezenet1.1_FP16/squeezenet1.1.xml -d HETERO:FPGA,CPU -ni 100
-```
-
-Congratulations, you are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA.
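
For readers who prefer Python over the C++ sample, the sketch below performs the same CPU-versus-FPGA classification through the `openvino.inference_engine` API. It is a minimal illustration, assuming the FP16 IR from the previous section and `car.png` are in the current directory; the file names here are placeholders, not part of the original guide.

```python
import cv2
import numpy as np
from openvino.inference_engine import IECore

# Read the FP16 SqueezeNet IR produced in the previous section
# (paths are illustrative placeholders).
ie = IECore()
net = ie.read_network(model='squeezenet1.1.xml', weights='squeezenet1.1.bin')
input_blob = next(iter(net.input_info))

# Resize the demo image to the network input shape (NCHW).
n, c, h, w = net.input_info[input_blob].input_data.shape
image = cv2.resize(cv2.imread('car.png'), (w, h)).transpose((2, 0, 1))

# 'HETERO:FPGA,CPU' runs supported layers on the FPGA and falls back
# to the CPU for the rest; pass 'CPU' instead to compare throughput.
exec_net = ie.load_network(network=net, device_name='HETERO:FPGA,CPU')
result = exec_net.infer({input_blob: image[np.newaxis, :]})
print('Top-1 class id:', int(np.argmax(next(iter(result.values())))))
```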
-
-## Additional Resources
-
-Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
-
-Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org/](https://docs.openvinotoolkit.org/)
-
-Inference Engine FPGA plugin documentation: [https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html)
diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
deleted file mode 100644
index 06d8ebbc869..00000000000
--- a/docs/install_guides/VisionAcceleratorFPGA_Configure_2019R3.md
+++ /dev/null
@@ -1,281 +0,0 @@
-# Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit 2019R3 and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG1 and SG2 (IEI's Mustang-F100-A10) on Linux* {#openvino_docs_install_guides_VisionAcceleratorFPGA_Configure_2019R3}
-
-## 1. Configure and Set Up the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA
-
-1. Download [Intel® Quartus® Prime Programmer and Tools Standard Edition 18.1](http://fpgasoftware.intel.com/18.1/?edition=standard&platform=linux&download_manager=direct#tabs-4). Install the Intel® Quartus® Prime Programmer and Tools Software to the `/home//intelFPGA/18.1` directory.
-
-2. Download `fpga_support_files.tgz` from the [Intel Registration Center](http://registrationcenter-download.intel.com/akdlm/irc_nas/12954/fpga_support_files.tgz) to the `~/Downloads` directory. The files in this `.tgz` archive are required to ensure your FPGA card and the Intel® Distribution of OpenVINO™ toolkit work correctly.
-
-3. Go to the directory where you downloaded the `fpga_support_files.tgz` archive.
-
-4. Unpack the `.tgz` file:
-```sh
-tar -xvzf fpga_support_files.tgz
-```
-A directory named `fpga_support_files` is created.
-
-5. Switch to superuser:
-```sh
-sudo su
-```
-
-6. Change directory to `Downloads/fpga_support_files/`:
-```sh
-cd /home//Downloads/fpga_support_files/
-```
-
-7. Copy the USB Blaster rules file:
-```sh
-cp config/51-usbblaster.rules /etc/udev/rules.d
-udevadm control --reload-rules
-udevadm trigger
-```
-
-8. Copy the aocl fixes for the latest kernels:
-```sh
-cp fixes/Command.pm /opt/altera/aocl-pro-rte/aclrte-linux64/share/lib/perl/acl/
-cp config/blacklist-altera-cvp.conf /etc/modprobe.d/
-```
-
-9. Copy the flash files so that a full Quartus installation is not needed:
-```sh
-cp -r config/aocl_flash/linux64/* /home//intelFPGA/18.1/qprogrammer/linux64
-```
-
-10. Unpack the BSP for your appropriate Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG1 or SG2:
-```sh
-cd /opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/BSP/
-tar -xvzf a10_1150_sg<#>_r3.tgz
-chmod -R 755 /opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams
-```
-> **NOTE**: If you do not know which version of the board you have, refer to the product label on the fan cover side or check the product SKU: Mustang-F100-A10-R10 => SG1; Mustang-F100-A10E-R10 => SG2.
-
-11. Create an initialization script `/home//init_openvino.sh` with the following content, which can be run upon opening a new terminal or rebooting. It sets up the required environment variables.
-```sh
-export AOCL_BOARD_PACKAGE_ROOT=/opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/BSP/a10_1150_sg<#>
-export QUARTUS_ROOTDIR=/home//intelFPGA/18.1/qprogrammer
-export PATH=$PATH:/opt/altera/aocl-pro-rte/aclrte-linux64/bin:/opt/altera/aocl-pro-rte/aclrte-linux64/host/linux64/bin:/home//intelFPGA/18.1/qprogrammer/bin
-export INTELFPGAOCLSDKROOT=/opt/altera/aocl-pro-rte/aclrte-linux64
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$AOCL_BOARD_PACKAGE_ROOT/linux64/lib
-export CL_CONTEXT_COMPILER_MODE_INTELFPGA=3
-source /opt/altera/aocl-pro-rte/aclrte-linux64/init_opencl.sh
-source /opt/intel/openvino/bin/setupvars.sh
-```
-
-12. Source the script:
-```sh
-source /home//init_openvino.sh
-```
-
-13. Uninstall any previous BSP before installing the 2019R3 OpenCL BSP:
-```sh
-aocl uninstall /opt/altera/aocl-pro-rte/aclrte-linux64/board//
-```
-
-14. Set up the USB Blaster:
-
-    1. Connect the cable between the board and the host system. Use the letter codes in the diagram below for the connection points:
-
-    2. Connect the B end of the cable to point B on the board.
-
-    3. Connect the F end of the cable to point F on the FPGA download cable.
-
-    4. The resulting connection between the board and the FPGA download cable is as shown:
-![](../img/VisionAcceleratorJTAG.png)
-
-15. Run `jtagconfig` to ensure that your Intel FPGA Download Cable driver is ready to use:
-```sh
-jtagconfig
-```
-Your output is similar to:
-```sh
-1) USB-Blaster [1-6]
-02E660DD 10AX115H1(.|E2|ES)/10AX115H2/..
-```
-
-16. Use `jtagconfig` to slow the clock. The message "No parameter named JtagClock" can be safely ignored.
-```sh
-jtagconfig --setparam 1 JtagClock 6M
-```
-
-17. (OPTIONAL) Confirm the clock is set to 6M:
-```sh
-jtagconfig --getparam 1 JtagClock
-```
-You should see the following:
-```sh
-6M
-```
-
-18. Go to `/opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/BSP/a10_1150_sg<#>/bringup`, where `sg<#>_boardtest_2ddr_base.sof` is located:
-```sh
-cd /opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/BSP/a10_1150_sg<#>/bringup
-```
-
-19. Program the new sof file to the board:
-```sh
-quartus_pgm -c 1 -m JTAG -o "p;sg<#>_boardtest_2ddr_base.sof"
-```
-
-20. Soft reboot:
-```sh
-reboot
-```
-
-21. Source the environment variable script you made:
-```sh
-sudo su
-source /home//init_openvino.sh
-```
-
-22. Install OpenCL™ devices. Enter **Y** when prompted to install:
-```sh
-aocl install
-```
-
-23. Reboot the machine:
-```sh
-reboot
-```
-
-24. Source the environment variable script you made:
-```sh
-sudo su
-source /home//init_openvino.sh
-```
-
-25. Run `aocl diagnose`:
-```sh
-aocl diagnose
-```
-Your screen displays `DIAGNOSTIC_PASSED`.
-
-26. Use `jtagconfig` to slow the clock. The message "No parameter named JtagClock" can be safely ignored.
-```sh
-jtagconfig --setparam 1 JtagClock 6M
-```
-
-27. Go to `/opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/`, where `2019R3_PV_PL<#>_FP11_InceptionV1_SqueezeNet.aocx` is located:
-```sh
-cd /opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/
-```
-
-28. Program the `2019R3_PV_PL<#>_FP11_InceptionV1_SqueezeNet.aocx` file to the flash so that it remains permanently available even after a power cycle:
-```sh
-aocl flash acl0 2019R3_PV_PL<#>_FP11_InceptionV1_SqueezeNet.aocx
-```
-> **NOTE**: You will need the USB Blaster for this.
-
-29. Hard reboot the host system, including powering it off.
-
-30. Source the environment variable script you made:
-```sh
-sudo su
-source /home//init_openvino.sh
-```
-
-31. Check if the host system recognizes the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA board. Confirm you can detect the PCIe card:
-```sh
-lspci | grep -i Altera
-```
-Your output is similar to:
-```sh
-01:00.0 Processing accelerators: Altera Corporation Device 2494 (rev 01)
-```
-
-32. Run `aocl diagnose`:
-```sh
-aocl diagnose
-```
-You should see `DIAGNOSTIC_PASSED` before proceeding to the next steps.
-
-## 2. Program a Bitstream
-
-The bitstream you program should correspond to the topology you want to deploy. In this section, you program a SqueezeNet bitstream and deploy the classification sample with the SqueezeNet model that you converted with the Model Optimizer in the steps before.
-
-> **IMPORTANT**: Only use bitstreams from the installed version of the Intel® Distribution of OpenVINO™ toolkit. Bitstreams from older versions of the Intel® Distribution of OpenVINO™ toolkit are incompatible with later versions of the Intel® Distribution of OpenVINO™ toolkit. For example, you cannot use the `1-0-1_A10DK_FP16_Generic` bitstream when the Intel® Distribution of OpenVINO™ toolkit supports the `2-0-1_A10DK_FP16_Generic` bitstream.
-
-Depending on how many bitstreams you selected, there are different folders for each FPGA card type, which were downloaded in the Intel® Distribution of OpenVINO™ toolkit package:
-
-1. For the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA SG1 or SG2, the pre-trained bitstreams are in `/opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/`. This example uses a SqueezeNet bitstream with low precision for the classification sample.
-
-2. Source the environment variable script you made:
-```sh
-source /home//init_openvino.sh
-```
-
-3. Change to your home directory:
-```sh
-cd /home/
-```
-
-4. Program the bitstream for the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA SG1 or SG2:
-```sh
-aocl program acl0 /opt/intel/openvino/bitstreams/a10_vision_design_sg<#>_bitstreams/2019R3_PV_PL<#>_FP11_InceptionV1_SqueezeNet.aocx
-```
-
-### Steps to Flash the FPGA Card
-
-> **NOTE**:
-> - To avoid having to reprogram the board after a power down, a bitstream will be programmed to permanent memory on the Intel® Vision Accelerator Design with Intel® Arria® 10 FPGA. This will take about 20 minutes.
-> - Follow the corresponding steps earlier in this guide to do this.
-
-
-## 3. Set Up a Neural Network Model for FPGA
-
-In this section, you will create an FP16 model suitable for hardware accelerators. For more information, see the [FPGA plugin](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html) section in the Inference Engine Developer Guide.
-
-
-1. Create a directory for the FP16 SqueezeNet Model:
-```sh
-mkdir ~/squeezenet1.1_FP16
-```
-
-2. Go to `~/squeezenet1.1_FP16`:
-```sh
-cd ~/squeezenet1.1_FP16
-```
-
-3. Use the Model Optimizer to convert the FP32 SqueezeNet Caffe* model into an FP16 optimized Intermediate Representation (IR). The model files were downloaded when you ran the Image Classification verification script while [installing the Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support](installing-openvino-linux-fpga.md). To convert, run the Model Optimizer script with the following arguments:
-```sh
-python3 /opt/intel/openvino/deployment_tools/model_optimizer/mo.py --input_model ~/openvino_models/models/FP16/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir .
-```
-
-4. The `squeezenet1.1.labels` file contains the classes that `ImageNet` uses. This file is included so that the inference results show text instead of classification numbers. Copy `squeezenet1.1.labels` to your optimized model location:
-```sh
-cp ~/openvino_models/ir/FP16/public/squeezenet1.1/squeezenet1.1.labels .
-```
-
-5. Copy a sample image to the release directory. You will use this with your optimized model:
-```sh
-cp /opt/intel/openvino/deployment_tools/demo/car.png ~/inference_engine_samples_build/intel64/Release
-```
-
-## 4. Run a Sample Application
-
-1. Go to the samples directory:
-```sh
-cd ~/inference_engine_samples_build/intel64/Release
-```
-
-2. Use an Inference Engine sample to run a sample application on the CPU:
-```sh
-./classification_sample_async -i car.png -m ~/openvino_models/ir/FP16/public/squeezenet1.1/squeezenet1.1.xml
-```
-Note the CPU throughput in Frames Per Second (FPS). This tells you how quickly the inference is done on the hardware. Now run the inference using the FPGA.
-
-3. Add the `-d` option to target the FPGA:
-```sh
-./classification_sample_async -i car.png -m ~/openvino_models/ir/FP16/public/squeezenet1.1/squeezenet1.1.xml -d HETERO:FPGA,CPU
-```
-The throughput on FPGA is listed and may show a lower FPS. This may be due to the initialization time. To account for that, increase the number of iterations or the batch size when deploying, to get a better sense of the speed at which the FPGA can run inference.
-
-Congratulations, you are done with the Intel® Distribution of OpenVINO™ toolkit installation for FPGA.
-
-## Additional Resources
-
-Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
-
-Intel® Distribution of OpenVINO™ toolkit documentation: [https://docs.openvinotoolkit.org/](https://docs.openvinotoolkit.org/)
-
-Inference Engine FPGA plugin documentation: [https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html](https://docs.openvinotoolkit.org/latest/_docs_IE_DG_supported_plugins_FPGA.html)
diff --git a/docs/install_guides/VisionAcceleratorFPGA_Configure_Windows.md b/docs/install_guides/VisionAcceleratorFPGA_Configure_Windows.md
deleted file mode 100644
index c7025807452..00000000000
--- a/docs/install_guides/VisionAcceleratorFPGA_Configure_Windows.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Configuration Guide for the Intel® Distribution of OpenVINO™ toolkit and the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA SG2 (IEI's Mustang-F100-A10) on Windows* {#openvino_docs_install_guides_VisionAcceleratorFPGA_Configure_Windows}
-
-## Product Change Notice
-Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA
-
-<table>
-  <tr><td>Change Notice Begins</td><td>July 2020</td></tr>
-  <tr><td>Change Date</td><td>October 2020</td></tr>
-</table>
-
-Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA.
-
-The Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates.
-
-For documentation for previous releases of Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_VisionAcceleratorFPGA_Configure_Windows.html) and lower.
diff --git a/docs/onnx_custom_op/CMakeLists.txt b/docs/onnx_custom_op/CMakeLists.txt
index 09d6635ee92..5403489794e 100644
--- a/docs/onnx_custom_op/CMakeLists.txt
+++ b/docs/onnx_custom_op/CMakeLists.txt
@@ -7,11 +7,11 @@ set(CMAKE_CXX_STANDARD 11)
 
 set(TARGET_NAME "onnx_custom_op")
 
-find_package(ngraph REQUIRED COMPONENTS onnx_ngraph_frontend)
+find_package(OpenVINO REQUIRED COMPONENTS ONNX)
 
 add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp onnx_custom_op.hpp)
 
-target_link_libraries(${TARGET_NAME} PUBLIC ngraph::ngraph ngraph::onnx_ngraph_frontend)
+target_link_libraries(${TARGET_NAME} PUBLIC openvino::core openvino::frontend::onnx)
 # [cmake:onnx_custom_op]
 
 # Enable code style check
diff --git a/docs/ops/movement/GatherTree_1.md b/docs/ops/movement/GatherTree_1.md
index e39d616d36a..5c4441b37b3 100644
--- a/docs/ops/movement/GatherTree_1.md
+++ b/docs/ops/movement/GatherTree_1.md
@@ -2,63 +2,60 @@
 
 **Versioned name**: *GatherTree-1*
 
-**Category**: Beam search post-processing
+**Category**: *Data movement*
 
 **Short description**: Generates the complete beams from the ids per each step and the parent beam ids.
 
 **Detailed description**
 
-The GatherTree operation implements the same algorithm as the [GatherTree operation in TensorFlow](https://www.tensorflow.org/addons/api_docs/python/tfa/seq2seq/gather_tree).
+*GatherTree* operation reorders the token IDs of the input tensor `step_ids`, which holds the IDs for each step of beam search, based on the input tensor `parent_ids`, which holds the parent beam IDs. For a given beam, past the time step containing the first decoded `end_token`, all values are filled in with `end_token`.
-Pseudo code:
+The algorithm in pseudocode is as follows:
 
 ```python
-final_idx[ :, :, :] = end_token
+final_ids[ :, :, :] = end_token
 for batch in range(BATCH_SIZE):
     for beam in range(BEAM_WIDTH):
         max_sequence_in_beam = min(MAX_TIME, max_seq_len[batch])
 
-        parent = parent_idx[max_sequence_in_beam - 1, batch, beam]
+        parent = parent_ids[max_sequence_in_beam - 1, batch, beam]
 
-        final_idx[max_sequence_in_beam - 1, batch, beam] = step_idx[max_sequence_in_beam - 1, batch, beam]
+        final_ids[max_sequence_in_beam - 1, batch, beam] = step_ids[max_sequence_in_beam - 1, batch, beam]
 
         for level in reversed(range(max_sequence_in_beam - 1)):
-            final_idx[level, batch, beam] = step_idx[level, batch, parent]
+            final_ids[level, batch, beam] = step_ids[level, batch, parent]
 
-            parent = parent_idx[level, batch, parent]
+            parent = parent_ids[level, batch, parent]
 
         # For a given beam, past the time step containing the first decoded end_token
        # all values are filled in with end_token.
         finished = False
         for time in range(max_sequence_in_beam):
             if(finished):
-                final_idx[time, batch, beam] = end_token
-            elif(final_idx[time, batch, beam] == end_token):
+                final_ids[time, batch, beam] = end_token
+            elif(final_ids[time, batch, beam] == end_token):
                 finished = True
 ```
 
-Element data types for all input tensors should match each other.
+*GatherTree* operation is equivalent to the [GatherTree operation in TensorFlow](https://www.tensorflow.org/addons/api_docs/python/tfa/seq2seq/gather_tree).
 
-**Attributes**: *GatherTree* has no attributes
+**Attributes**: *GatherTree* operation has no attributes.
 
 **Inputs**
 
-* **1**: `step_ids` -- a tensor of shape `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]` of type *T* with indices from per each step. **Required.**
-
-* **2**: `parent_idx` -- a tensor of shape `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]` of type *T* with parent beam indices. **Required.**
-
-* **3**: `max_seq_len` -- a tensor of shape `[BATCH_SIZE]` of type *T* with maximum lengths for each sequence in the batch. **Required.**
-
-* **4**: `end_token` -- a scalar tensor of type *T* with value of the end marker in a sequence. **Required.**
-
+* **1**: `step_ids` - Indices for each step. A tensor of type *T* and rank 3. Layout is `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]`. **Required.**
+* **2**: `parent_ids` - Parent beam indices. A tensor of type *T* and rank 3. Layout is `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]`. **Required.**
+* **3**: `max_seq_len` - Maximum lengths for each sequence in the batch. A tensor of type *T* and rank 1. Layout is `[BATCH_SIZE]`. **Required.**
+* **4**: `end_token` - Value of the end marker in a sequence. A scalar of type *T*. **Required.**
+* **Note**: Inputs should contain integer values only.
 
 **Outputs**
 
-* **1**: `final_idx` -- a tensor of shape `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]` of type *T*.
+* **1**: `final_ids` - The reordered token IDs, based on the `parent_ids` input. A tensor of type *T* and rank 3. Layout is `[MAX_TIME, BATCH_SIZE, BEAM_WIDTH]`.
 
 **Types**
 
-* *T*: `float32` or `int32`; `float32` should have integer values only.
+* *T*: any supported numeric type.
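
To make the specification concrete, below is a minimal NumPy sketch of the reference algorithm above (an illustration only, not the OpenVINO implementation). The sample input is taken from the `gather_tree.cpp` reference test added elsewhere in this change, so the expected output can be checked directly.

```python
import numpy as np

def gather_tree(step_ids, parent_ids, max_seq_len, end_token):
    # step_ids, parent_ids: [MAX_TIME, BATCH_SIZE, BEAM_WIDTH];
    # max_seq_len: [BATCH_SIZE]; end_token: scalar.
    max_time, batch_size, beam_width = step_ids.shape
    final_ids = np.full_like(step_ids, end_token)
    for batch in range(batch_size):
        max_seq_in_beam = min(max_time, int(max_seq_len[batch]))
        if max_seq_in_beam < 1:
            continue  # nothing to gather for an empty sequence
        for beam in range(beam_width):
            # Walk the beam tree backwards from the last valid time step.
            parent = parent_ids[max_seq_in_beam - 1, batch, beam]
            final_ids[max_seq_in_beam - 1, batch, beam] = \
                step_ids[max_seq_in_beam - 1, batch, beam]
            for level in reversed(range(max_seq_in_beam - 1)):
                final_ids[level, batch, beam] = step_ids[level, batch, parent]
                parent = parent_ids[level, batch, parent]
            # Past the first decoded end_token, fill with end_token.
            finished = False
            for time in range(max_seq_in_beam):
                if finished:
                    final_ids[time, batch, beam] = end_token
                elif final_ids[time, batch, beam] == end_token:
                    finished = True
    return final_ids

# Sample from the reference test added in this change:
step_ids = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1]).reshape(4, 1, 3)
parent_ids = np.array([0, 0, 0, 0, 1, 1, 2, 1, 2, -1, -1, -1]).reshape(4, 1, 3)
print(gather_tree(step_ids, parent_ids, np.array([3]), 10).ravel())
# -> [ 2  2  2  6  5  6  7  8  9 10 10 10]
```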
**Example** diff --git a/docs/template_extension/CMakeLists.txt b/docs/template_extension/CMakeLists.txt index 230323768e0..90a9e886b35 100644 --- a/docs/template_extension/CMakeLists.txt +++ b/docs/template_extension/CMakeLists.txt @@ -7,30 +7,28 @@ set(CMAKE_CXX_STANDARD 11) set(TARGET_NAME "template_extension") -find_package(ngraph REQUIRED OPTIONAL_COMPONENTS onnx_ngraph_frontend) -find_package(InferenceEngine REQUIRED) +find_package(OpenVINO REQUIRED COMPONENTS Runtime OPTIONAL_COMPONENTS ONNX) find_package(OpenCV QUIET COMPONENTS core) set(SRC cpu_kernel.cpp extension.cpp op.cpp) -if (OpenCV_FOUND) +if(OpenCV_FOUND) set(SRC ${SRC} fft_kernel.cpp fft_op.cpp) endif() add_library(${TARGET_NAME} MODULE ${SRC}) -if (OpenCV_FOUND) +if(OpenCV_FOUND) target_compile_definitions(${TARGET_NAME} PRIVATE OPENCV_IMPORT_ENABLED) target_link_libraries(${TARGET_NAME} PRIVATE opencv_core) endif() target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_API) -target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine - ${NGRAPH_LIBRARIES}) +target_link_libraries(${TARGET_NAME} PRIVATE openvino::core openvino::runtime) -if (ngraph_onnx_ngraph_frontend_FOUND) - target_link_libraries(${TARGET_NAME} PRIVATE ngraph::onnx_ngraph_frontend) - target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_ONNX_FRONTEND_ENABLED) +if(OpenVINO_Frontend_ONNX_FOUND) + target_link_libraries(${TARGET_NAME} PRIVATE openvino::frontend::onnx) + target_compile_definitions(${TARGET_NAME} PRIVATE OPENVINO_ONNX_FRONTEND_ENABLED) endif() # [cmake:extension] diff --git a/docs/template_extension/extension.cpp b/docs/template_extension/extension.cpp index d3be82d1120..140d9086ea4 100644 --- a/docs/template_extension/extension.cpp +++ b/docs/template_extension/extension.cpp @@ -11,7 +11,7 @@ # include "fft_op.hpp" #endif #include -#ifdef NGRAPH_ONNX_FRONTEND_ENABLED +#ifdef OPENVINO_ONNX_FRONTEND_ENABLED # include #endif @@ -24,7 +24,7 @@ using namespace TemplateExtension; //! [extension:ctor] Extension::Extension() { -#ifdef NGRAPH_ONNX_FRONTEND_ENABLED +#ifdef OPENVINO_ONNX_FRONTEND_ENABLED ngraph::onnx_import::register_operator(Operation::type_info.name, 1, "custom_domain", @@ -49,12 +49,12 @@ Extension::Extension() { //! [extension:dtor] Extension::~Extension() { -#ifdef NGRAPH_ONNX_FRONTEND_ENABLED +#ifdef OPENVINO_ONNX_FRONTEND_ENABLED ngraph::onnx_import::unregister_operator(Operation::type_info.name, 1, "custom_domain"); # ifdef OPENCV_IMPORT_ENABLED ngraph::onnx_import::unregister_operator(FFTOp::type_info.name, 1, "custom_domain"); # endif // OPENCV_IMPORT_ENABLED -#endif // NGRAPH_ONNX_FRONTEND_ENABLED +#endif // OPENVINO_ONNX_FRONTEND_ENABLED } //! 
[extension:dtor] diff --git a/docs/template_plugin/tests/functional/op_reference/gather_tree.cpp b/docs/template_plugin/tests/functional/op_reference/gather_tree.cpp new file mode 100644 index 00000000000..412848f3a9e --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/gather_tree.cpp @@ -0,0 +1,100 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include + +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ngraph; +using namespace InferenceEngine; + +namespace { +struct GatherTreeParams { + template + GatherTreeParams(const ngraph::Shape inShape, std::vector stepIds, const std::vector parentIds, + const std::vector maxSeqLen, const std::vector endToken, std::vector output) : + stepIdsTensor(inShape, element::from(), stepIds), parentIdsTensor(inShape, element::from(), parentIds), + maxSeqLenTensor(ngraph::Shape{inShape[1]}, element::from(), maxSeqLen), endTokenTensor(ngraph::Shape{}, element::from(), endToken), + expectedTensor(inShape, element::from(), output) {} + Tensor stepIdsTensor; + Tensor parentIdsTensor; + Tensor maxSeqLenTensor; + Tensor endTokenTensor; + Tensor expectedTensor; +}; + +class ReferenceGatherTreeTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params); + inputData = {params.stepIdsTensor.data, params.parentIdsTensor.data, params.maxSeqLenTensor.data, params.endTokenTensor.data}; + refOutData = {params.expectedTensor.data}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "iType=" << param.stepIdsTensor.type << "_"; + result << "iShape=" << param.stepIdsTensor.shape; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const GatherTreeParams& params) { + const auto stepIds = std::make_shared(params.stepIdsTensor.type, params.stepIdsTensor.shape); + const auto parentIds = std::make_shared(params.parentIdsTensor.type, params.parentIdsTensor.shape); + const auto maxSeqLen = std::make_shared(params.maxSeqLenTensor.type, params.maxSeqLenTensor.shape); + const auto endToken = std::make_shared(params.endTokenTensor.type, params.endTokenTensor.shape); + const auto gatherTree = std::make_shared(stepIds, parentIds, maxSeqLen, endToken); + return std::make_shared(NodeVector {gatherTree}, ParameterVector {stepIds, parentIds, maxSeqLen, endToken}); + } +}; + +TEST_P(ReferenceGatherTreeTest, CompareWithRefs) { + Exec(); +} + +template +std::vector generateGatherTreeParams() { + using T = typename element_type_traits::value_type; + std::vector gatherTreeParams { + GatherTreeParams(Shape{4, 1, 3}, + std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1}, + std::vector{0, 0, 0, 0, 1, 1, 2, 1, 2, -1, -1, -1}, + std::vector{3}, + std::vector{10}, + std::vector{2, 2, 2, 6, 5, 6, 7, 8, 9, 10, 10, 10}), + GatherTreeParams(Shape{2, 2, 2}, + std::vector{1, 2, 3, 4, 5, 6, 7, 8}, + std::vector{0, 0, 0, 0, 0, 0, 0, 0}, + std::vector{2, 4}, + std::vector{0}, + std::vector{1, 1, 3, 3, 5, 6, 7, 8}) + }; + return gatherTreeParams; +} + +std::vector generateGatherTreeCombinedParams() { + const std::vector> gatherTreeTypeParams { + generateGatherTreeParams(), + generateGatherTreeParams()}; + std::vector combinedParams; + + for (const auto& params : gatherTreeTypeParams) { + combinedParams.insert(combinedParams.end(), 
params.begin(), params.end()); + } + return combinedParams; +} + +INSTANTIATE_TEST_SUITE_P(smoke_GatherTree_With_Hardcoded_Refs, ReferenceGatherTreeTest, + testing::ValuesIn(generateGatherTreeCombinedParams()), ReferenceGatherTreeTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/inference-engine/ie_bridges/c/src/CMakeLists.txt b/inference-engine/ie_bridges/c/src/CMakeLists.txt index a0e1b3469c9..5cf46942f50 100644 --- a/inference-engine/ie_bridges/c/src/CMakeLists.txt +++ b/inference-engine/ie_bridges/c/src/CMakeLists.txt @@ -10,11 +10,11 @@ file(GLOB HEADERS ${InferenceEngine_C_API_SOURCE_DIR}/include/*.h) # create library add_library(${TARGET_NAME} SHARED ${HEADERS} ${SOURCES}) +add_library(openvino::runtime::c ALIAS ${TARGET_NAME}) target_link_libraries(${TARGET_NAME} PRIVATE inference_engine) target_include_directories(${TARGET_NAME} PUBLIC - $ $) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) @@ -29,6 +29,10 @@ ie_add_vs_version_file(NAME ${TARGET_NAME} export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake") +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME runtime::c) +export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: + APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") + # install ie_cpack_add_component(core_c DEPENDS core) @@ -37,7 +41,14 @@ ie_cpack_add_component(core_c_dev DEPENDS core_c) install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core_c - LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c) + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c + INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie) + +install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core_c + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c + INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie) install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/ DESTINATION ${IE_CPACK_IE_DIR}/include/ie diff --git a/inference-engine/ie_bridges/python/CMakeLists.txt b/inference-engine/ie_bridges/python/CMakeLists.txt index a88b1017a12..b5e535f8740 100644 --- a/inference-engine/ie_bridges/python/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/CMakeLists.txt @@ -8,11 +8,8 @@ cmake_minimum_required (VERSION 3.13) # Set the project name project (ie_python_api) -if(DEFINED OpenVINO_SOURCE_DIR) - set(InferenceEngine_LIBRARIES inference_engine) -else() +if(NOT DEFINED OpenVINO_SOURCE_DIR) find_package(InferenceEngineDeveloperPackage REQUIRED) - set(InferenceEngine_LIBRARIES IE::inference_engine) endif() option(ENABLE_CONDA_FOLDER "Create output folder with conda python bindings" OFF) diff --git a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py index 40499546891..47fc46353b5 100755 --- a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py +++ b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py @@ -11,6 +11,7 @@ from functools import reduce import cv2 import ngraph +from ngraph.opset1 import max_pool import numpy as np from 
openvino.inference_engine import IECore, IENetwork @@ -85,7 +86,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function: add_1_node = ngraph.add(conv_1_node, add_1_kernel) # maxpool 1 - maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin, padding_end, [2, 2], 'ceil', None) + maxpool_1_node = max_pool(add_1_node, [2, 2], padding_begin, padding_end, [2, 2], 'ceil') # convolution 2 conv_2_kernel_shape, conv_2_kernel_length = shape_and_length([50, 20, 5, 5]) @@ -104,7 +105,7 @@ def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function: add_2_node = ngraph.add(conv_2_node, add_2_kernel) # maxpool 2 - maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin, padding_end, [2, 2], 'ceil', None) + maxpool_2_node = max_pool(add_2_node, [2, 2], padding_begin, padding_end, [2, 2], 'ceil') # reshape 1 reshape_1_dims, reshape_1_length = shape_and_length([2]) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt index cfab4f2d907..5fcdd37c790 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt @@ -31,7 +31,7 @@ foreach(PYX_FILE IN LISTS PYX_SOURCES) ov_python_disable_intel_warnings(${PYX_NAME}) add_dependencies(${TARGET_NAME} ${PYX_NAME}) target_include_directories(${PYX_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") - target_link_libraries(${PYX_NAME} PRIVATE ${InferenceEngine_LIBRARIES}) + target_link_libraries(${PYX_NAME} PRIVATE openvino::runtime) list(APPEND INSTALLED_TARGETS ${PYX_NAME}) ie_python_minimal_api(${PYX_NAME}) endforeach() @@ -53,7 +53,7 @@ python_disable_deprecated_warnings() ie_python_minimal_api(${TARGET_NAME}) target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") -target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES}) +target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) # Compatibility with python 2.7 which has deprecated "register" specifier if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp index 21ba2b7dbe5..d72465341b2 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp @@ -4,10 +4,8 @@ #include "ie_api_impl.hpp" -#include - -#include "ie_iinfer_request.hpp" #include "ie_plugin_config.hpp" +#include "ngraph/partial_shape.hpp" const std::string EXPORTED_NETWORK_NAME = "undefined"; std::map precision_map = {{"FP32", InferenceEngine::Precision::FP32}, diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp index 028bbd3ad74..6d479784d14 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp @@ -4,12 +4,9 @@ #pragma once -#include - #include #include #include -#include #include #include #include @@ -23,6 +20,7 @@ #include #include "Python.h" +#include "ie_core.hpp" typedef std::chrono::high_resolution_clock Time; typedef std::chrono::nanoseconds ns; diff --git 
a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt index 512b1662be5..5aeb5224aa1 100644 --- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt @@ -30,14 +30,14 @@ if(COMMAND ie_add_vs_version_file) endif() if(TARGET offline_transformations) - list(APPEND InferenceEngine_LIBRARIES offline_transformations) + list(APPEND link_libraries offline_transformations) else() - list(APPEND InferenceEngine_LIBRARIES IE::offline_transformations) + list(APPEND link_libraries IE::offline_transformations) endif() target_include_directories(${TARGET_NAME} SYSTEM PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inference_engine") target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") -target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES}) +target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime ${link_libraries}) # Compatibility with python 2.7 which has deprecated "register" specifier if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") diff --git a/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt index 9d3e1e0ffc0..cb071162e35 100644 --- a/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt @@ -30,14 +30,14 @@ if(COMMAND ie_add_vs_version_file) endif() if(TARGET commonTestUtils) - list(APPEND InferenceEngine_LIBRARIES commonTestUtils) + list(APPEND link_libraries commonTestUtils) else() - list(APPEND InferenceEngine_LIBRARIES IE::commonTestUtils) + list(APPEND link_libraries IE::commonTestUtils) endif() target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}") target_include_directories(${TARGET_NAME} SYSTEM PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inference_engine") -target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES}) +target_link_libraries(${TARGET_NAME} PRIVATE ${link_libraries}) # Compatibility with python 2.7 which has deprecated "register" specifier if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") diff --git a/inference-engine/samples/CMakeLists.txt b/inference-engine/samples/CMakeLists.txt index 3e42fa84f2e..b78e32b2fa7 100644 --- a/inference-engine/samples/CMakeLists.txt +++ b/inference-engine/samples/CMakeLists.txt @@ -33,11 +33,8 @@ endif() if(OpenVINO_SOURCE_DIR) # in case if samples are built from IE repo - set(IE_MAIN_SAMPLES_DIR ${OpenVINO_SOURCE_DIR}) - # hint for find_package(InferenceEngine in the samples folder) - set(InferenceEngine_DIR "${CMAKE_BINARY_DIR}") - # hint for find_package(ngraph in the samples folder) - set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph) + set(IE_MAIN_SAMPLES_DIR "${OpenVINO_SOURCE_DIR}") + set(OpenVINO_DIR "${CMAKE_BINARY_DIR}") else() # in case if samples are built out of IE repo set(IE_MAIN_SAMPLES_DIR ${CMAKE_CURRENT_BINARY_DIR}) @@ -128,8 +125,8 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/utils") add_subdirectory(common/utils) endif() -# format reader must be added after find_package(InferenceEngine) to get -# exactly the same OpenCV_DIR path which was used for the InferenceEngine build +# format reader must be added after find_package(OpenVINO) to get +# exactly the same OpenCV_DIR path which was used for the OpenVINO build 
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/format_reader") add_subdirectory(common/format_reader) elseif(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/opencv_c_wrapper") @@ -209,14 +206,13 @@ macro(ie_add_sample) set(folder_name c_samples) endif() + find_package(OpenVINO REQUIRED COMPONENTS Runtime) if(c_sample) - set(ie_component inference_engine_c_api) + set(ov_link_libraries openvino::runtime::c) else() - set(ie_component inference_engine) + set(ov_link_libraries openvino::runtime) endif() - find_package(InferenceEngine REQUIRED COMPONENTS ${ie_component}) - set_target_properties(${IE_SAMPLE_NAME} PROPERTIES FOLDER ${folder_name} COMPILE_PDB_NAME ${IE_SAMPLE_NAME}) @@ -225,7 +221,7 @@ macro(ie_add_sample) endif() target_include_directories(${IE_SAMPLE_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../common") - target_link_libraries(${IE_SAMPLE_NAME} PRIVATE ${OpenCV_LIBRARIES} ${InferenceEngine_LIBRARIES} + target_link_libraries(${IE_SAMPLE_NAME} PRIVATE ${OpenCV_LIBRARIES} ${ov_link_libraries} ${IE_SAMPLE_DEPENDENCIES}) if(NOT c_sample) target_link_libraries(${IE_SAMPLE_NAME} PRIVATE gflags) diff --git a/inference-engine/samples/common/utils/CMakeLists.txt b/inference-engine/samples/common/utils/CMakeLists.txt index 9bdc0516af8..534ab26160d 100644 --- a/inference-engine/samples/common/utils/CMakeLists.txt +++ b/inference-engine/samples/common/utils/CMakeLists.txt @@ -12,9 +12,9 @@ set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "src") target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include") -find_package(InferenceEngine REQUIRED COMPONENTS inference_engine) +find_package(OpenVINO REQUIRED COMPONENTS Runtime) -target_link_libraries(${TARGET_NAME} PUBLIC ${InferenceEngine_LIBRARIES} gflags) +target_link_libraries(${TARGET_NAME} PUBLIC openvino::runtime gflags) if(COMMAND add_clang_format_target) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) diff --git a/inference-engine/samples/hello_reshape_ssd/CMakeLists.txt b/inference-engine/samples/hello_reshape_ssd/CMakeLists.txt index 5c47b6e2fe8..5be6137ab2a 100644 --- a/inference-engine/samples/hello_reshape_ssd/CMakeLists.txt +++ b/inference-engine/samples/hello_reshape_ssd/CMakeLists.txt @@ -7,6 +7,3 @@ ie_add_sample(NAME hello_reshape_ssd HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/reshape_ssd_extension.hpp" DEPENDENCIES ie_samples_utils OPENCV_DEPENDENCIES core imgproc imgcodecs) - -find_package(ngraph REQUIRED) -target_link_libraries(hello_reshape_ssd PRIVATE ${NGRAPH_LIBRARIES}) diff --git a/inference-engine/samples/ngraph_function_creation_sample/CMakeLists.txt b/inference-engine/samples/ngraph_function_creation_sample/CMakeLists.txt index 37fd62c2ecc..4a9b406d6bb 100644 --- a/inference-engine/samples/ngraph_function_creation_sample/CMakeLists.txt +++ b/inference-engine/samples/ngraph_function_creation_sample/CMakeLists.txt @@ -8,6 +8,3 @@ ie_add_sample(NAME ngraph_function_creation_sample SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/ngraph_function_creation_sample.hpp" DEPENDENCIES format_reader ie_samples_utils) - -find_package(ngraph REQUIRED) -target_link_libraries(${TARGET_NAME} PRIVATE ${NGRAPH_LIBRARIES}) diff --git a/inference-engine/samples/object_detection_sample_ssd/CMakeLists.txt b/inference-engine/samples/object_detection_sample_ssd/CMakeLists.txt index 46dbb6bfe23..af58af8d6b2 100644 --- a/inference-engine/samples/object_detection_sample_ssd/CMakeLists.txt +++ b/inference-engine/samples/object_detection_sample_ssd/CMakeLists.txt @@ 
-6,6 +6,3 @@ ie_add_sample(NAME object_detection_sample_ssd SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/object_detection_sample_ssd.h" DEPENDENCIES format_reader ie_samples_utils) - -find_package(ngraph REQUIRED) -target_link_libraries(object_detection_sample_ssd PRIVATE ${NGRAPH_LIBRARIES}) diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index e79a5709366..f0cb4a1348e 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -120,12 +120,12 @@ ie_faster_build(${TARGET_NAME}_obj ) target_compile_definitions(${TARGET_NAME}_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API - $ - $) + $ + $) -target_include_directories(${TARGET_NAME}_obj SYSTEM PRIVATE $ +target_include_directories(${TARGET_NAME}_obj SYSTEM PRIVATE $ $ - $ + $ $) target_include_directories(${TARGET_NAME}_obj PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src" @@ -150,6 +150,8 @@ add_library(${TARGET_NAME} SHARED ${vs_version_file} $) +add_library(openvino::runtime ALIAS ${TARGET_NAME}) + add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${IE_STATIC_DEPENDENT_FILES} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${PUBLIC_HEADERS}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} @@ -166,14 +168,12 @@ if (TBBBIND_2_4_FOUND) endif() target_link_libraries(${TARGET_NAME} PRIVATE pugixml::static openvino::itt ${CMAKE_DL_LIBS} Threads::Threads - ngraph::frontend_manager::static inference_engine_transformations + frontend_manager::static inference_engine_transformations PUBLIC ngraph) target_include_directories(${TARGET_NAME} INTERFACE $ $ - $ - $ PRIVATE $ $) @@ -209,7 +209,7 @@ if(WIN32) set_target_properties(${TARGET_NAME}_s PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}_s) endif() -target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} ngraph ngraph::frontend_manager::static +target_link_libraries(${TARGET_NAME}_s PRIVATE openvino::itt ${CMAKE_DL_LIBS} ngraph frontend_manager::static inference_engine_transformations pugixml::static) target_compile_definitions(${TARGET_NAME}_s PUBLIC USE_STATIC_IE) @@ -223,8 +223,11 @@ set_target_properties(${TARGET_NAME} ${TARGET_NAME}_obj ${TARGET_NAME}_s # Export for build tree -export(TARGETS ngraph ${TARGET_NAME} NAMESPACE IE:: - APPEND FILE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake") +export(TARGETS ${TARGET_NAME} NAMESPACE IE:: + APPEND FILE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake") + +export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: + APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") # Export for developer package @@ -281,7 +284,17 @@ install(DIRECTORY "${PUBLIC_HEADERS_DIR}" DESTINATION ${IE_CPACK_IE_DIR} install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core - LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core + INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie) + +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME runtime) +install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core + INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include + # TODO: remove later once samples are updated + 
${IE_CPACK_IE_DIR}/include/ie) install(FILES $/plugins.xml DESTINATION ${IE_CPACK_RUNTIME_PATH} @@ -304,6 +317,12 @@ install(EXPORT InferenceEngineTargets DESTINATION ${IE_CPACK_IE_DIR}/share COMPONENT core_dev) +install(EXPORT OpenVINOTargets + FILE OpenVINOTargets.cmake + NAMESPACE openvino:: + DESTINATION ${IE_CPACK_IE_DIR}/share + COMPONENT core_dev) + set(IE_NGRAPH_DIR "${CMAKE_BINARY_DIR}/ngraph") set(IE_INCLUDE_DIR "${PUBLIC_HEADERS_DIR}/ie") set(IE_PARALLEL_CMAKE "${InferenceEngine_SOURCE_DIR}/cmake/ie_parallel.cmake") @@ -313,6 +332,11 @@ configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceE INSTALL_DESTINATION "${CMAKE_INSTALL_PREFIX}" PATH_VARS ${PATH_VARS}) +configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig.cmake.in" + "${CMAKE_BINARY_DIR}/OpenVINOConfig.cmake" + INSTALL_DESTINATION "${CMAKE_INSTALL_PREFIX}" + PATH_VARS ${PATH_VARS}) + set(IE_INCLUDE_DIR "include/ie") set(IE_NGRAPH_DIR "../ngraph/cmake") set(IE_TBB_DIR "${IE_TBB_DIR_INSTALL}") @@ -323,12 +347,23 @@ configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceE INSTALL_DESTINATION share PATH_VARS ${PATH_VARS}) +configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig.cmake.in" + "${CMAKE_BINARY_DIR}/share/OpenVINOConfig.cmake" + INSTALL_DESTINATION share + PATH_VARS ${PATH_VARS}) + configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in" - "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" - @ONLY) + "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" @ONLY) +configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig-version.cmake.in" + "${CMAKE_BINARY_DIR}/OpenVINOConfig-version.cmake" @ONLY) install(FILES "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" "${InferenceEngine_SOURCE_DIR}/cmake/ie_parallel.cmake" DESTINATION ${IE_CPACK_IE_DIR}/share COMPONENT core_dev) + +install(FILES "${CMAKE_BINARY_DIR}/share/OpenVINOConfig.cmake" + "${CMAKE_BINARY_DIR}/OpenVINOConfig-version.cmake" + DESTINATION ${IE_CPACK_IE_DIR}/share + COMPONENT core_dev) diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/common.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/common.hpp index c2dea4c2304..b8f9872308b 100644 --- a/inference-engine/src/inference_engine/include/openvino/runtime/common.hpp +++ b/inference-engine/src/inference_engine/include/openvino/runtime/common.hpp @@ -13,7 +13,8 @@ #include #include -namespace InferenceEngine {}; +namespace InferenceEngine {} + namespace ov { namespace ie = InferenceEngine; namespace runtime { diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/executable_network.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/executable_network.hpp new file mode 100644 index 00000000000..77acd1ea52d --- /dev/null +++ b/inference-engine/src/inference_engine/include/openvino/runtime/executable_network.hpp @@ -0,0 +1,147 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +/** + * @brief A header file that provides ExecutableNetwork class + * + * @file openvino/runtime/executable_network.hpp + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "ie_parameter.hpp" +#include "openvino/core/function.hpp" +#include "openvino/runtime/infer_request.hpp" +#include "openvino/runtime/remote_context.hpp" + +namespace 
InferenceEngine {
+class IExecutableNetworkInternal;
+class RemoteContext;
+}  // namespace InferenceEngine
+
+namespace ov {
+namespace runtime {
+
+class Core;
+
+/**
+ * @brief This is an interface of an executable network
+ */
+class INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) {
+    std::shared_ptr _so;
+    std::shared_ptr _impl;
+
+    /**
+     * @brief Constructs ExecutableNetwork from the initialized std::shared_ptr
+     * @param so Plugin to use. This is required to ensure that ExecutableNetwork can work properly even if plugin
+     * object is destroyed.
+     * @param impl Initialized shared pointer
+     */
+    ExecutableNetwork(const std::shared_ptr& so,
+                      const std::shared_ptr& impl);
+    friend class ov::runtime::Core;
+
+public:
+    /**
+     * @brief A default constructor.
+     */
+    ExecutableNetwork() = default;
+
+    /**
+     * @brief Get executable graph information from a device
+     *
+     * @return Function containing Executable Graph Info
+     */
+    std::shared_ptr get_runtime_function() const;
+
+    /**
+     * @brief Get parameters of executable graph function
+     *
+     * @return vector of parameter nodes
+     */
+    ParameterVector get_parameters() const;
+
+    /**
+     * @brief Get results of executable graph function
+     *
+     * @return vector of result nodes
+     */
+    ResultVector get_results() const;
+
+    /**
+     * @brief Creates an inference request object used to infer the network.
+     *
+     * The created request has allocated input and output blobs (that can be changed later).
+     *
+     * @return InferRequest object
+     */
+    InferRequest create_infer_request();
+
+    /**
+     * @brief Exports the current executable network.
+     *
+     * @see Core::ImportNetwork
+     *
+     * @param networkModel Network model output stream
+     */
+    void export_model(std::ostream& networkModel);
+
+    /**
+     * @brief Sets configuration for current executable network
+     *
+     * @param config Map of pairs: (config parameter name, config parameter value)
+     */
+    void set_config(const ie::ParamMap& config);
+
+    /** @brief Gets configuration for current executable network.
+     *
+     * The method extracts information that affects executable network execution.
+     * The list of supported configuration values can be extracted via
+     * ExecutableNetwork::get_metric with the SUPPORTED_CONFIG_KEYS key, but some of these keys cannot be changed
+     * dynamically, e.g. DEVICE_ID cannot be changed if an executable network has already been compiled for a
+     * particular device.
+     *
+     * @param name config key, can be found in ie_plugin_config.hpp
+     * @return Configuration parameter value
+     */
+    ie::Parameter get_config(const std::string& name) const;
+
+    /**
+     * @brief Gets a general runtime metric for an executable network.
+     *
+     * It can be the network name, the actual device ID on
+     * which the executable network is running, or any other property that cannot be changed dynamically.
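+     *
+     * A minimal usage sketch, assuming the ov::runtime::Core API shown elsewhere in this
+     * patch and the standard EXEC_NETWORK_METRIC_KEY macro from ie_plugin_config.hpp
+     * (the model and device arguments below are placeholders):
+     * @code
+     * ov::runtime::Core core;
+     * auto exec = core.compile_model(function, "CPU");
+     * auto name = exec.get_metric(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME)).as<std::string>();
+     * @endcode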
+ * + * @param name metric name to request + * @return Metric parameter value + */ + ie::Parameter get_metric(const std::string& name) const; + + /** + * @brief Returns pointer to plugin-specific shared context + * on remote accelerator device that was used to create this ExecutableNetwork + * @return A context + */ + std::shared_ptr get_context() const; + + /** + * @brief Checks if current ExecutableNetwork object is not initialized + * @return true if current ExecutableNetwork object is not initialized, false - otherwise + */ + bool operator!() const noexcept; + + /** + * @brief Checks if current ExecutableNetwork object is initialized + * @return true if current ExecutableNetwork object is initialized, false - otherwise + */ + explicit operator bool() const noexcept; +}; + +} // namespace runtime +} // namespace ov diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp index d5993d9a09c..25d7ab60f47 100644 --- a/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp +++ b/inference-engine/src/inference_engine/include/openvino/runtime/infer_request.hpp @@ -24,6 +24,9 @@ class Blob; namespace ov { namespace runtime { + +class ExecutableNetwork; + /** * @brief This is an interface of asynchronous infer request * @@ -40,7 +43,7 @@ class INFERENCE_ENGINE_API_CLASS(InferRequest) { * @param impl Initialized shared pointer */ InferRequest(const std::shared_ptr& so, const std::shared_ptr& impl); - friend class ExecutableNetwork; + friend class ov::runtime::ExecutableNetwork; public: /** diff --git a/inference-engine/src/inference_engine/src/cpp/ie_executable_network.cpp b/inference-engine/src/inference_engine/src/cpp/ie_executable_network.cpp index 0ddd99db096..ed13b4a22f2 100644 --- a/inference-engine/src/inference_engine/src/cpp/ie_executable_network.cpp +++ b/inference-engine/src/inference_engine/src/cpp/ie_executable_network.cpp @@ -9,6 +9,7 @@ #include "cpp_interfaces/interface/ie_iremote_context.hpp" #include "ie_common.h" #include "ie_executable_network_base.hpp" +#include "openvino/runtime/executable_network.hpp" namespace InferenceEngine { @@ -18,7 +19,7 @@ namespace InferenceEngine { try { \ __VA_ARGS__; \ } catch (...) 
{ \ - details::Rethrow(); \ + InferenceEngine::details::Rethrow(); \ } ExecutableNetwork::ExecutableNetwork(const details::SharedObjectLoader& so, const IExecutableNetworkInternal::Ptr& impl) @@ -55,9 +56,10 @@ ExecutableNetwork::operator IExecutableNetwork::Ptr() { std::vector ExecutableNetwork::QueryState() { std::vector controller; - EXEC_NET_CALL_STATEMENT(for (auto&& state - : _impl->QueryState()) { - controller.emplace_back(VariableState{_so, state}); + EXEC_NET_CALL_STATEMENT({ + for (auto&& state : _impl->QueryState()) { + controller.emplace_back(VariableState{_so, state}); + } }); return controller; } @@ -106,3 +108,58 @@ ExecutableNetwork::operator bool() const noexcept { return !!_impl; } } // namespace InferenceEngine + +namespace ov { +namespace runtime { +ExecutableNetwork::ExecutableNetwork(const std::shared_ptr& so, + const std::shared_ptr& impl) + : _so{so}, + _impl{impl} { + IE_ASSERT(_impl != nullptr); +} + +std::shared_ptr ExecutableNetwork::get_runtime_function() const { + EXEC_NET_CALL_STATEMENT(return std::const_pointer_cast(_impl->GetExecGraphInfo())); +} + +ParameterVector ExecutableNetwork::get_parameters() const { + EXEC_NET_CALL_STATEMENT(return _impl->GetExecGraphInfo()->get_parameters()); +} + +ResultVector ExecutableNetwork::get_results() const { + EXEC_NET_CALL_STATEMENT(return _impl->GetExecGraphInfo()->get_results()); +} + +InferRequest ExecutableNetwork::create_infer_request() { + EXEC_NET_CALL_STATEMENT(return {_so, _impl->CreateInferRequest()}); +} + +void ExecutableNetwork::export_model(std::ostream& networkModel) { + EXEC_NET_CALL_STATEMENT(_impl->Export(networkModel)); +} + +void ExecutableNetwork::set_config(const ie::ParamMap& config) { + EXEC_NET_CALL_STATEMENT(_impl->SetConfig(config)); +} + +ie::Parameter ExecutableNetwork::get_config(const std::string& name) const { + EXEC_NET_CALL_STATEMENT(return _impl->GetConfig(name)); +} + +ie::Parameter ExecutableNetwork::get_metric(const std::string& name) const { + EXEC_NET_CALL_STATEMENT(return _impl->GetMetric(name)); +} + +std::shared_ptr ExecutableNetwork::get_context() const { + EXEC_NET_CALL_STATEMENT(return _impl->GetContext()); +} + +bool ExecutableNetwork::operator!() const noexcept { + return !_impl; +} + +ExecutableNetwork::operator bool() const noexcept { + return !!_impl; +} +} // namespace runtime +} // namespace ov \ No newline at end of file diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt index 9de8bf16910..bfc5c11129a 100644 --- a/inference-engine/src/legacy_api/CMakeLists.txt +++ b/inference-engine/src/legacy_api/CMakeLists.txt @@ -42,10 +42,10 @@ target_include_directories(${TARGET_NAME}_obj PRIVATE ${IE_MAIN_SOURCE_DIR}/src/inference_engine/src # For CNNNetworkNGraphImpl $ $ - $ + $ $) -target_compile_definitions(${TARGET_NAME}_obj PRIVATE $) +target_compile_definitions(${TARGET_NAME}_obj PRIVATE $) target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::itt) diff --git a/inference-engine/src/low_precision_transformations/src/concat.cpp b/inference-engine/src/low_precision_transformations/src/concat.cpp index ae95a834d7d..da040d1f897 100644 --- a/inference-engine/src/low_precision_transformations/src/concat.cpp +++ b/inference-engine/src/low_precision_transformations/src/concat.cpp @@ -182,18 +182,36 @@ bool ConcatTransformation::canBeTransformed(const TransformationContext& context return false; } - const auto axis = concat->get_axis(); - const auto outPShape = concat->get_output_partial_shape(0); - const size_t 
normalizedAxis = ngraph::normalize_axis(concat->get_friendly_name(), axis, outPShape.rank()); + const auto& axis = concat->get_axis(); + const auto& outPShape = concat->get_output_partial_shape(0); + const auto& outRank = outPShape.rank(); + if (outRank.is_dynamic()) { + return false; + } + + const size_t normalizedAxis = ngraph::normalize_axis(concat->get_friendly_name(), axis, outRank); if (normalizedAxis != 1ul) { return false; } - if (outPShape.rank().is_dynamic() || outPShape[normalizedAxis].is_dynamic()) { + if (outPShape[normalizedAxis].is_dynamic()) { return false; } + auto checkConstShape = [&normalizedAxis, &outRank](const std::shared_ptr& constant) { + const size_t rankValue = outRank.get_length(); + Shape constantShape = constant->get_shape(); + + while (constantShape.size() < rankValue) { + constantShape.insert(constantShape.begin(), 1ul); + } + + const auto dqDimensionsCount = std::count_if(constantShape.begin(), constantShape.end(), [](size_t elem) { return elem > 1; }); + const bool dqOnlyByConcatAxis = (dqDimensionsCount == 0) || (dqDimensionsCount == 1 && constantShape[normalizedAxis] != 1ul); + return dqOnlyByConcatAxis; + }; + element::Type precision; for (size_t i = 0ul; i < concat->get_input_size(); i++) { const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(concat, i); @@ -201,6 +219,11 @@ bool ConcatTransformation::canBeTransformed(const TransformationContext& context return false; } + if (((dequantization.subtract != nullptr) && (!checkConstShape(dequantization.subtractConstant))) || + ((dequantization.multiply != nullptr) && (!checkConstShape(dequantization.multiplyConstant)))) { + return false; + } + if (precision == element::undefined) { precision = dequantization.data.get_element_type(); } else if (precision != dequantization.data.get_element_type()) { diff --git a/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp b/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp index 14a0104a46c..aecd9c072ae 100644 --- a/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp +++ b/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp @@ -34,7 +34,7 @@ FakeQuantizeDequantization::FakeQuantizeDequantization( } bool FakeQuantizeDequantization::empty() const { - return (convert == nullptr) && (subtract == nullptr) && (multiply == nullptr); + return (subtract == nullptr) && (multiply == nullptr); } bool FakeQuantizeDequantization::multiplyHasZeroOrDenormal() const { diff --git a/inference-engine/src/low_precision_transformations/src/multiply.cpp b/inference-engine/src/low_precision_transformations/src/multiply.cpp index cf15d24dc02..ea713ee6b27 100644 --- a/inference-engine/src/low_precision_transformations/src/multiply.cpp +++ b/inference-engine/src/low_precision_transformations/src/multiply.cpp @@ -159,17 +159,17 @@ bool MultiplyTransformation::canBeTransformed(const TransformationContext& conte FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(layer, 0ul); FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(layer, 1ul); - if ((dequantization1.data.get_node() == nullptr) || - (dequantization1.empty() && !ov::is_type(dequantization1.data.get_node_shared_ptr()) && - !ov::is_type(dequantization2.data.get_node_shared_ptr()))) { + if (dequantization1.data.get_node() == nullptr || dequantization2.data.get_node() == 
nullptr) { return false; } - if ((dequantization2.data.get_node() == nullptr) || - (dequantization2.empty() && !ov::is_type(dequantization2.data.get_node_shared_ptr()) && - !ov::is_type(dequantization1.data.get_node_shared_ptr()))) { + const bool nonConstantData = !ov::is_type(dequantization1.data.get_node_shared_ptr()) && + !ov::is_type(dequantization2.data.get_node_shared_ptr()); + + if (((dequantization1.empty() || dequantization2.empty()) && nonConstantData)) { return false; } + return EltwiseBaseTransformation::canBeTransformed(context, layer); } diff --git a/inference-engine/src/low_precision_transformations/src/mvn.cpp b/inference-engine/src/low_precision_transformations/src/mvn.cpp index 093747a68b6..34f43655604 100644 --- a/inference-engine/src/low_precision_transformations/src/mvn.cpp +++ b/inference-engine/src/low_precision_transformations/src/mvn.cpp @@ -78,8 +78,6 @@ bool MVNTransformation::canBeTransformed(const TransformationContext& context, s } } - bool isScalarScales = NetworkHelper::isScalarLike(dequantization.multiplyConstant); - AxisSet reduction_axes; if (ov::is_type(mvn)) { reduction_axes = ov::as_type_ptr(mvn)->get_reduction_axes(); @@ -104,6 +102,7 @@ bool MVNTransformation::canBeTransformed(const TransformationContext& context, s } } + bool isScalarScales = NetworkHelper::isScalarLike(dequantization.multiplyConstant); return perTensor && isScalarScales; } @@ -127,9 +126,9 @@ bool MVNTransformation::transform(TransformationContext &context, ngraph::patter FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(mvn); const auto scalesConst = dequantization.multiplyConstant; + const auto type = scalesConst->get_element_type(); - auto newScalesConst = dequantization.multiplyConstant; - const auto type = scalesConst->get_output_element_type(0); + auto newScalesConst = scalesConst; if (normalizeVariance) { switch (type) { case ngraph::element::Type_t::f16: { @@ -145,6 +144,7 @@ bool MVNTransformation::transform(TransformationContext &context, ngraph::patter } } } + std::shared_ptr newMVN; if (ov::is_type(mvn)) { newMVN = mvn->copy_with_new_inputs({dequantization.data}); diff --git a/inference-engine/src/low_precision_transformations/src/reshape.cpp b/inference-engine/src/low_precision_transformations/src/reshape.cpp index da44763ba0d..ee8e02e1045 100644 --- a/inference-engine/src/low_precision_transformations/src/reshape.cpp +++ b/inference-engine/src/low_precision_transformations/src/reshape.cpp @@ -83,7 +83,7 @@ void reshapeDequantizationConstant(const std::shared_ptr& resha Shape newOperationConstantBroadcastedShape = constant->get_shape(); // add dimensions to broadcast values if (newOperationConstantBroadcastedShape.size() == 2ul) { - newOperationConstantBroadcastedShape.push_back(dimensionsToBroadcast); + newOperationConstantBroadcastedShape[0] = dimensionsToBroadcast; } else { newOperationConstantBroadcastedShape[2] = dimensionsToBroadcast; } diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp index 200bd778586..a2c1c9818ea 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp @@ -910,7 +910,7 @@ void MKLDNNGraph::SortTopologically() { for (int i = 0; i < node->parentEdges.size(); i++) { auto edge = node->getParentEdgeAt(i); int port = edge->getOutputNum(); - if (!res[port]) + if (port < port_num && !res[port]) res[port] = edge; else res.push_back(edge); @@ -924,7 +924,7 @@ void 
MKLDNNGraph::SortTopologically() {
     for (int i = 0; i < node->childEdges.size(); i++) {
         auto edge = node->getChildEdgeAt(i);
         int port = edge->getInputNum();
-        if (!res[port])
+        if (port < port_num && !res[port])
             res[port] = edge;
         else
             res.push_back(edge);
diff --git a/inference-engine/src/transformations/CMakeLists.txt b/inference-engine/src/transformations/CMakeLists.txt
index bf4a4f31d6a..59af88ea36e 100644
--- a/inference-engine/src/transformations/CMakeLists.txt
+++ b/inference-engine/src/transformations/CMakeLists.txt
@@ -53,3 +53,15 @@ install(TARGETS ${TARGET_NAME} EXPORT InferenceEngineTargets
         RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
         ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
         LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
+
+install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets
+        RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
+        ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
+        LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
+
+# Because inference_engine_transformations is exported in multiple export lists,
+# it needs to be exported in each list it is used in.
+install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets
+        RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
+        ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
+        LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
\ No newline at end of file
diff --git a/inference-engine/src/transformations/include/ngraph_ops/type_relaxed.hpp b/inference-engine/src/transformations/include/ngraph_ops/type_relaxed.hpp
index acc7682bb02..ee7a9e4fb04 100644
--- a/inference-engine/src/transformations/include/ngraph_ops/type_relaxed.hpp
+++ b/inference-engine/src/transformations/include/ngraph_ops/type_relaxed.hpp
@@ -11,6 +11,7 @@
 #include
+#include
 #include "ngraph/op/op.hpp"

 namespace ngraph {
@@ -84,6 +85,7 @@ protected:
     // to infer output data types
     element::TypeVector m_input_data_types;
     element::TypeVector m_output_data_types;
+    element::TypeVector m_original_output_data_types;
 };

 /// Set another type for a specified output for the period of time when an instance of the class exists.
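The evaluate() override added in the next hunk casts inputs back to the operation's original types, runs the base op's evaluate, and converts the results to the overridden output types; this is what lets ConstantFolding fold TypeRelaxed nodes (see the ConstantFoldingCheck tests later in this patch). A minimal sketch of that usage, built only from APIs that appear in this patch:

    // Relax Equal so it reports u8 instead of boolean on output 0.
    auto c1 = ngraph::opset1::Constant::create(ngraph::element::i32, ngraph::Shape{}, {2});
    auto c2 = ngraph::opset1::Constant::create(ngraph::element::i32, ngraph::Shape{}, {2});
    auto equal = ngraph::opset1::Equal(c1, c2);
    auto relaxed = std::make_shared<ngraph::op::TypeRelaxed<ngraph::opset1::Equal>>(
        equal, ngraph::element::TypeVector{}, ngraph::element::TypeVector{ngraph::element::u8});
    auto f = std::make_shared<ngraph::Function>(ngraph::OutputVector{relaxed},
                                                ngraph::ParameterVector{});
    // With the new evaluate(), this folds the relaxed Equal into a u8 Constant.
    ngraph::pass::ConstantFolding().run_on_function(f);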
@@ -161,6 +163,7 @@ public: } void validate_and_infer_types() override; + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; @@ -170,6 +173,61 @@ private: } }; +template +bool TypeRelaxed::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { + std::shared_ptr convert; + HostTensorVector casted_inputs(BaseOp::get_input_size()); + for (size_t i = 0; i < BaseOp::get_input_size(); ++i) { + const auto expected_input_type = get_origin_input_type(i); + + if (inputs[i]->get_element_type() == expected_input_type || expected_input_type == element::undefined) { + casted_inputs[i] = inputs[i]; + } else { + if (convert == nullptr) { + convert = std::make_shared(); + } + + convert->set_destination_type(expected_input_type); + casted_inputs[i] = std::make_shared(expected_input_type, inputs[i]->get_shape()); + if (!convert->evaluate({ casted_inputs[i] }, { inputs[i] })) { + return false; + } + } + } + + HostTensorVector original_outputs(BaseOp::get_output_size()); + for (size_t i = 0; i < BaseOp::get_output_size(); ++i) { + const auto expected_output_type = get_overridden_output_type(i); + if (expected_output_type == element::undefined || expected_output_type == m_original_output_data_types[i]) { + original_outputs[i] = outputs[i]; + } else { + original_outputs[i] = std::make_shared(m_original_output_data_types[i], BaseOp::get_output_partial_shape(i)); + } + } + + if (!BaseOp::evaluate(original_outputs, casted_inputs)) { + return false; + } + + for (size_t i = 0; i < BaseOp::get_output_size(); ++i) { + const auto expected_output_type = get_overridden_output_type(i); + + if (expected_output_type != element::undefined && original_outputs[i]->get_element_type() != expected_output_type) { + if (convert == nullptr) { + convert = std::make_shared(); + } + + convert->set_destination_type(expected_output_type); + const auto casted_output = std::make_shared(expected_output_type, original_outputs[i]->get_shape()); + if (!convert->evaluate({ outputs[i] }, { original_outputs[i] })) { + return false; + } + } + } + + return true; +} + template void TypeRelaxed::validate_and_infer_types() { // Remember all input data types @@ -195,6 +253,14 @@ void TypeRelaxed::validate_and_infer_types() { BaseOp::get_input_tensor(i).set_tensor_type(old_input_types[i], BaseOp::get_input_partial_shape(i)); } + if (m_original_output_data_types.empty()) { + m_original_output_data_types = element::TypeVector(BaseOp::get_output_size()); + } + + // Save inferred output types + for (size_t i = 0; i < BaseOp::get_output_size(); ++i) { + m_original_output_data_types[i] = BaseOp::get_output_element_type(i); + } // Override (some) output types for (size_t i = 0; i < BaseOp::get_output_size(); ++i) { diff --git a/inference-engine/src/vpu/graph_transformer/src/utils/runtime_graph.cpp b/inference-engine/src/vpu/graph_transformer/src/utils/runtime_graph.cpp index ed67650de9c..73934b614d4 100644 --- a/inference-engine/src/vpu/graph_transformer/src/utils/runtime_graph.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/utils/runtime_graph.cpp @@ -127,7 +127,8 @@ std::map extractMeta(const StageMetaInfo& stageMeta) { if (stageMeta.execOrder < 0 || stageMeta.execTime == 0) { serializationInfo[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed"; } else { - serializationInfo[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(stageMeta.execTime); + int execTimeMcs = stageMeta.execTime 
* 1000; // ms to mcs + serializationInfo[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(execTimeMcs); } std::stringstream layoutStream; int ind = 0; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp index 529cd008547..82ce2c47332 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp @@ -62,7 +62,6 @@ public: operationType(std::move(operationType)) {} }; - ngraph::element::Type precision; bool broadcast; int constInput; TestTransformationParams params; @@ -169,7 +168,6 @@ const std::vector> inputSh const std::vector testValuesWithoutConstantBranches = { // Multiply with zero on the first branch { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -192,7 +190,6 @@ const std::vector testValuesWithoutConstantBranches }, // Multiply with zero on the second branch { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -245,7 +242,6 @@ const std::vector testValuesWithoutConstantBranches // \ / // Add { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -298,7 +294,6 @@ const std::vector testValuesWithoutConstantBranches // \ / // Add { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -328,7 +323,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -350,7 +344,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -372,7 +365,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -394,7 +386,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -419,7 +410,6 @@ const std::vector testValuesWithoutConstantBranches // I8 + broadcast { - ngraph::element::f32, true, -1, LayerTransformation::createParamsU8I8(), @@ -441,7 +431,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, true, -1, LayerTransformation::createParamsU8I8(), @@ -463,7 +452,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, true, -1, LayerTransformation::createParamsU8I8(), @@ -485,7 +473,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, true, -1, LayerTransformation::createParamsU8I8(), @@ -507,7 +494,6 @@ const std::vector testValuesWithoutConstantBranches "" }, { - ngraph::element::f32, true, -1, LayerTransformation::createParamsU8I8(), @@ -531,7 +517,6 @@ const std::vector testValuesWithoutConstantBranches // convolution before FQ (choose that branch) { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -554,7 +539,6 @@ const std::vector testValuesWithoutConstantBranches }, // convolution with multiple consumers before FQ ( FP32 on other branch due to possible quantize fusing ) { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -577,7 +561,6 @@ const std::vector testValuesWithoutConstantBranches }, // group convolution before FQ (choose that branch) { - ngraph::element::f32, false, -1, 
LayerTransformation::createParamsU8I8(), @@ -645,7 +628,6 @@ const std::vector testValuesWithFirstConstantBranch // \ / // Multiply { - ngraph::element::f32, false, 0, LayerTransformation::createParamsU8I8(), @@ -722,7 +704,6 @@ const std::vector testValuesWithSecondConstantBranc // \ / // Multiply { - ngraph::element::f32, false, 1, LayerTransformation::createParamsU8I8(), @@ -772,7 +753,6 @@ const std::vector> inputSh const std::vector specialTestValues = { // constant input: Add -> Subtract { - ngraph::element::f32, false, 1, LayerTransformation::createParamsU8I8(), @@ -797,7 +777,6 @@ const std::vector specialTestValues = { // constant input: Add -> Subtract { - ngraph::element::f32, false, 0, LayerTransformation::createParamsU8I8(), @@ -839,7 +818,6 @@ const std::vector> inputSh const std::vector specialTestValues = { { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -888,7 +866,6 @@ const std::vector> inputSh const std::vector testValues = { { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -910,7 +887,6 @@ const std::vector testValues = { "" }, { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -933,7 +909,6 @@ const std::vector testValues = { }, // multiply with zero { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -982,7 +957,6 @@ const std::vector> inputSh const std::vector testValues = { { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -1004,7 +978,6 @@ const std::vector testValues = { "" }, { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -1027,7 +1000,6 @@ const std::vector testValues = { }, // multiply with zero { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8(), @@ -1077,7 +1049,6 @@ const std::vector> inputSh const std::vector testValues = { // FP32 model, quantized branch: 1 { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8().setUpdatePrecisions(false), @@ -1100,7 +1071,6 @@ const std::vector testValues = { }, // FP32 model, quantized branch: 0 { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8().setUpdatePrecisions(false), @@ -1123,7 +1093,6 @@ const std::vector testValues = { }, // INT8 model (FQ decomposition before LPT), quantized branch: 1 { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8().setUpdatePrecisions(false), @@ -1146,7 +1115,6 @@ const std::vector testValues = { }, // INT8 model (FQ decomposition before LPT), quantized branch: 0 { - ngraph::element::f32, false, -1, LayerTransformation::createParamsU8I8().setUpdatePrecisions(false), diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp index 6a67202125c..38502dc6834 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp @@ -931,6 +931,30 @@ const std::vector testValues = { {{0.1f, 0.01f, 0.1f}, ngraph::element::f32, {1, 3}} } } + }, + // U8: without subtract 2D -> 2D + { + { Dimension::dynamic(), 2 }, + { -1, 6 }, + LayerTransformation::createParamsU8I8(), + { + ngraph::element::u8, + { + {ngraph::element::f32}, + {}, + {{0.1f, 0.02f}, ngraph::element::f32, {1, 2}} + } + }, + { + ngraph::element::u8, + {{}, {}, {}}, + 
ngraph::element::u8, + { + {ngraph::element::f32}, + {}, + {{0.1f, 0.02f, 0.1f, 0.02f, 0.1f, 0.02f}, ngraph::element::f32, {1, 6}} + } + } } }; diff --git a/inference-engine/tests/functional/inference_engine/ov_executable_network_test.cpp b/inference-engine/tests/functional/inference_engine/ov_executable_network_test.cpp new file mode 100644 index 00000000000..7c4e3ce7d26 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/ov_executable_network_test.cpp @@ -0,0 +1,49 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +using namespace ::testing; +using namespace std; + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedExportStream) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.export_model(std::cout), InferenceEngine::NotAllocated); +} + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetFunction) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.get_runtime_function(), InferenceEngine::NotAllocated); +} + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetParameters) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.get_parameters(), InferenceEngine::NotAllocated); +} + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetResults) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.get_results(), InferenceEngine::NotAllocated); +} + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedSetConfig) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.set_config({{}}), InferenceEngine::NotAllocated); +} + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetConfig) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.get_config({}), InferenceEngine::NotAllocated); +} + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetMetric) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.get_metric({}), InferenceEngine::NotAllocated); +} + +TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetContext) { + ov::runtime::ExecutableNetwork exec; + ASSERT_THROW(exec.get_context(), InferenceEngine::NotAllocated); +} \ No newline at end of file diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/gather_tree.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/gather_tree.cpp new file mode 100644 index 00000000000..9e80930a703 --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/gather_tree.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "shared_test_classes/single_layer/gather_tree.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +TEST_P(GatherTreeLayerTest, Serialize) { + Serialize(); +} + +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::I32 +}; + +const std::vector> inputShapes = { {5, 1, 10}, {1, 1, 10}, {20, 1, 10}, {20, 20, 10} }; + +const std::vector secondaryInputTypes = { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER +}; + +INSTANTIATE_TEST_SUITE_P(smoke_GatherTree_Serialization, GatherTreeLayerTest, + ::testing::Combine( + ::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(secondaryInputTypes), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + 
::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + GatherTreeLayerTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/inference_engine/transformations/type_relaxed_tests.cpp b/inference-engine/tests/functional/inference_engine/transformations/type_relaxed_tests.cpp index 44a2a74f20b..8cd8894bf66 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/type_relaxed_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/type_relaxed_tests.cpp @@ -5,6 +5,7 @@ #include #include "common_test_utils/test_common.hpp" #include +#include #include @@ -295,4 +296,75 @@ TEST_F(TypeRelaxedTests, OneOutputMultipleInputPorts) { relaxed_op->validate_and_infer_types(); ASSERT_EQ(param1->output(0).get_element_type(), element::i64); } -} \ No newline at end of file +} + +TEST_F(TypeRelaxedTests, ConstantFoldingCheck) { + std::shared_ptr f; + { + auto const1 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, { 2 }); + auto const2 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, { 2 }); + auto equal = ngraph::opset1::Equal(const1, const2); + auto relaxed_equal = make_shared>(equal, TypeVector{}, TypeVector{ element::u8 }); + + f = make_shared(ngraph::OutputVector{ relaxed_equal }, ngraph::ParameterVector{}); + + ASSERT_NO_THROW(ngraph::pass::ConstantFolding().run_on_function(f)); + auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); + ASSERT_TRUE(ngraph::is_type(layer_before_result)); + } +} + +TEST_F(TypeRelaxedTests, ConstantFoldingCheck1) { + std::shared_ptr f; + { + auto const1 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, { 2 }); + auto const2 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, { 2 }); + auto equal = ngraph::opset1::Equal(const1, const2); + auto relaxed_equal = make_shared>(equal, TypeVector{}, TypeVector{ element::boolean }); + + f = make_shared(ngraph::OutputVector{ relaxed_equal }, ngraph::ParameterVector{}); + + ASSERT_NO_THROW(ngraph::pass::ConstantFolding().run_on_function(f)); + auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); + ASSERT_TRUE(ngraph::is_type(layer_before_result)); + } +} + +TEST_F(TypeRelaxedTests, ConstantFoldingCheck2) { + std::shared_ptr f; + { + auto const1 = ngraph::opset1::Constant::create(element::u8, ngraph::Shape{}, { 2 }); + auto const2 = ngraph::opset1::Constant::create(element::i8, ngraph::Shape{}, { 2 }); + + auto original_input_types = TypeVector{ element::i32, element::i32 }; + auto relaxed_equal = std::make_shared>( + ngraph::element::TypeVector{ element::i32, element::i32 }, + ngraph::element::TypeVector{ element::u8 }, + ngraph::op::TemporaryReplaceOutputType(const1, element::i32).get(), + ngraph::op::TemporaryReplaceOutputType(const2, element::i32).get()); + + f = make_shared(ngraph::OutputVector{ relaxed_equal }, ngraph::ParameterVector{}); + + ASSERT_NO_THROW(ngraph::pass::ConstantFolding().run_on_function(f)); + auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); + ASSERT_TRUE(ngraph::is_type(layer_before_result)); + } +} + +TEST_F(TypeRelaxedTests, ConstantFoldingCheck3) { + std::shared_ptr f; + { + auto const1 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, { 2 }); + auto const2 = ngraph::opset1::Constant::create(element::i32, ngraph::Shape{}, { 2 }); + auto equal = 
ngraph::opset1::Equal(const1, const2); + + auto original_input_types = TypeVector{ element::f32, element::f32 }; + auto relaxed_equal = make_shared>(equal, original_input_types, TypeVector{ element::u8 }); + + f = make_shared(ngraph::OutputVector{ relaxed_equal }, ngraph::ParameterVector{}); + + ASSERT_NO_THROW(ngraph::pass::ConstantFolding().run_on_function(f)); + auto layer_before_result = f->get_result()->get_input_node_shared_ptr(0); + ASSERT_TRUE(ngraph::is_type(layer_before_result)); + } +} diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gather_tree.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gather_tree.cpp index 0076fd7b8fe..6aa1a2b0608 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gather_tree.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gather_tree.cpp @@ -23,7 +23,7 @@ const std::vector secondaryInputTypes = { ngraph::helpers::InputLayerType::PARAMETER }; -INSTANTIATE_TEST_SUITE_P(Basic_smoke, GatherTreeLayerTest, +INSTANTIATE_TEST_SUITE_P(smoke_GatherTree, GatherTreeLayerTest, ::testing::Combine( ::testing::ValuesIn(inputShapes), ::testing::ValuesIn(secondaryInputTypes), diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_core_integration.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_core_integration.hpp index 30b3b5873a8..689143d7a92 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/ov_core_integration.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/ov_core_integration.hpp @@ -440,6 +440,36 @@ TEST_P(OVClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) { ASSERT_NO_THROW(ie.compile_model(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}})); } +TEST_P(OVClassNetworkTestP, LoadNetworkCreateDefaultExecGraphResult) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + auto ie = createCoreWithTemplate(); + auto net = ie.compile_model(actualNetwork, deviceName); + auto exec_function = net.GetExecGraphInfo().getFunction(); + ASSERT_NE(nullptr, exec_function); + auto actual_parameters = exec_function->get_parameters(); + auto actual_results = exec_function->get_results(); + auto expected_parameters = actualNetwork->get_parameters(); + auto expected_results = actualNetwork->get_results(); + ASSERT_EQ(expected_parameters.size(), actual_parameters.size()); + for (std::size_t i = 0; i < expected_parameters.size(); ++i) { + auto expected_element_type = expected_parameters[i]->get_output_element_type(0); + auto actual_element_type = actual_parameters[i]->get_output_element_type(0); + ASSERT_EQ(expected_element_type, actual_element_type) << "For index: " << i; + auto expected_shape = expected_parameters[i]->get_output_shape(0); + auto actual_shape = actual_parameters[i]->get_output_shape(0); + ASSERT_EQ(expected_shape, actual_shape) << "For index: " << i; + } + ASSERT_EQ(expected_results.size(), actual_results.size()); + for (std::size_t i = 0; i < expected_results.size(); ++i) { + auto expected_element_type = expected_results[i]->get_input_element_type(0); + auto actual_element_type = actual_results[i]->get_input_element_type(0); + ASSERT_EQ(expected_element_type, actual_element_type) << "For index: " << i; + auto expected_shape = expected_results[i]->get_input_shape(0); + auto actual_shape = actual_results[i]->get_input_shape(0); + 
ASSERT_EQ(expected_shape, actual_shape) << "For index: " << i;
+    }
+}
+
 //
 // ImportExportNetwork
 //
diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/softsign.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/softsign.cpp
index d38d2d8e914..33734061ef3 100644
--- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/softsign.cpp
+++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/softsign.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
+#include

 #include "shared_test_classes/subgraph/softsign.hpp"
 #include "ngraph_functions/builders.hpp"
@@ -37,8 +37,13 @@ void SoftsignTest::SetUp() {
     auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
     auto abs = std::make_shared(params[0]);
-    auto add = std::make_shared(abs, 1, 1, 1);
-    auto power = std::make_shared(add, -1, 1, 0);
+
+    auto const_1 = ngraph::opset1::Constant::create(ngPrc, ngraph::Shape{}, {1});
+    auto const_neg_1 = ngraph::opset1::Constant::create(ngPrc, ngraph::Shape{}, {-1});
+
+    auto add = std::make_shared(abs, const_1);
+    auto power = std::make_shared(add, const_neg_1);
+
     auto mul = std::make_shared(power, params[0]);
     ngraph::ResultVector results{ std::make_shared(mul) };
     function = std::make_shared(results, params, "SoftSignTest");
diff --git a/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp b/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp
index 8aa53a14fe2..fb79a20a785 100644
--- a/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp
+++ b/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp
@@ -96,17 +96,20 @@ public:
     /// Returns user context handle which was used to create the engine
     virtual void* get_user_context() const = 0;

-    /// Returns the maximum amount of GPU memory that engine allocated in current process
+    /// Returns the total maximum amount of GPU memory allocated by the engine in the current process for all allocation types
     uint64_t get_max_used_device_memory() const;

-    /// Returns the amount of GPU memory currently used by the engine
-    uint64_t get_used_device_memory() const;
+    /// Returns the maximum amount of GPU memory allocated by the engine in the current process for the specified allocation @p type
+    uint64_t get_max_used_device_memory(allocation_type type) const;

-    /// Adds @p bytes count to currently used memory size
-    void add_memory_used(uint64_t bytes);
+    /// Returns the amount of GPU memory of the specified allocation @p type that is currently used by the engine
+    uint64_t get_used_device_memory(allocation_type type) const;

-    /// Subtracts @p bytes count from currently used memory size
-    void subtract_memory_used(uint64_t bytes);
+    /// Adds @p bytes count to the currently used memory size of the specified allocation @p type
+    void add_memory_used(uint64_t bytes, allocation_type type);
+
+    /// Subtracts @p bytes count from the currently used memory size of the specified allocation @p type
+    void subtract_memory_used(uint64_t bytes, allocation_type type);

     /// Returns true if USM is enabled in engine config and device/driver supports required features
     bool use_unified_shared_memory() const;
@@ -142,8 +145,8 @@ protected:
     const device::ptr _device;
     engine_configuration _configuration;

-    std::atomic memory_usage = {0};
-    std::atomic peak_memory_usage = {0};
+    std::map> memory_usage_map;
+    std::map> peak_memory_usage_map;
 };

 }  // namespace cldnn
diff --git a/inference-engine/thirdparty/clDNN/runtime/engine.cpp b/inference-engine/thirdparty/clDNN/runtime/engine.cpp
index 976e7bae595..3738ec2ae9d 100644 --- a/inference-engine/thirdparty/clDNN/runtime/engine.cpp +++ b/inference-engine/thirdparty/clDNN/runtime/engine.cpp @@ -120,22 +120,51 @@ memory_ptr engine::share_surface(const layout& layout, shared_surface surf, uint #endif // _WIN32 uint64_t engine::get_max_used_device_memory() const { - return peak_memory_usage.load(); + uint64_t total_peak_memory_usage {0}; + for (auto const& m : peak_memory_usage_map) { + total_peak_memory_usage += m.second.load(); + } + return total_peak_memory_usage; } -uint64_t engine::get_used_device_memory() const { - return memory_usage.load(); +uint64_t engine::get_max_used_device_memory(allocation_type type) const { + uint64_t peak_memory_usage {0}; + auto iter = peak_memory_usage_map.find(type); + if (iter != peak_memory_usage_map.end()) { + peak_memory_usage = iter->second.load(); + } + return peak_memory_usage; } -void engine::add_memory_used(size_t bytes) { - memory_usage += bytes; - if (memory_usage > peak_memory_usage) { - peak_memory_usage = memory_usage.load(); +uint64_t engine::get_used_device_memory(allocation_type type) const { + uint64_t memory_usage {0}; + auto iter = memory_usage_map.find(type); + if (iter != memory_usage_map.end()) { + memory_usage = iter->second.load(); + } + return memory_usage; +} + +void engine::add_memory_used(size_t bytes, allocation_type type) { + if (!memory_usage_map.count(type) && !peak_memory_usage_map.count(type)) { + static std::mutex m; + std::lock_guard guard(m); + memory_usage_map[type] = 0; + peak_memory_usage_map[type] = 0; + } + memory_usage_map[type] += bytes; + if (memory_usage_map[type] > peak_memory_usage_map[type]) { + peak_memory_usage_map[type] = memory_usage_map[type].load(); } } -void engine::subtract_memory_used(size_t bytes) { - memory_usage -= bytes; +void engine::subtract_memory_used(size_t bytes, allocation_type type) { + auto iter = memory_usage_map.find(type); + if (iter != memory_usage_map.end()) { + memory_usage_map[type] -= bytes; + } else { + throw std::runtime_error("Attempt to free unallocated memory"); + } } std::shared_ptr engine::create(engine_types engine_type, diff --git a/inference-engine/thirdparty/clDNN/runtime/memory.cpp b/inference-engine/thirdparty/clDNN/runtime/memory.cpp index 80a6ee980ed..9a22d3a2ae9 100644 --- a/inference-engine/thirdparty/clDNN/runtime/memory.cpp +++ b/inference-engine/thirdparty/clDNN/runtime/memory.cpp @@ -20,27 +20,25 @@ namespace cldnn { memory::memory(engine* engine, const layout& layout, allocation_type type, bool reused) : _engine(engine), _layout(layout), _bytes_count(_layout.bytes_count()), _type(type), _reused(reused) { if (!_reused && _engine) { - _engine->add_memory_used(_bytes_count); - } - - GPU_DEBUG_GET_INSTANCE(debug_config); - GPU_DEBUG_IF(debug_config->verbose >= 1) { - GPU_DEBUG_COUT << "Allocate " << _bytes_count << " bytes of " << type << " allocation type" - << " (current=" << _engine->get_used_device_memory() << ";" - << " max=" << _engine->get_max_used_device_memory() << ")" << std::endl; + _engine->add_memory_used(_bytes_count, type); + GPU_DEBUG_GET_INSTANCE(debug_config); + GPU_DEBUG_IF(debug_config->verbose >= 1) { + GPU_DEBUG_COUT << "Allocate " << _bytes_count << " bytes of " << type << " allocation type" + << " (current=" << _engine->get_used_device_memory(type) << ";" + << " max=" << _engine->get_max_used_device_memory(type) << ")" << std::endl; + } } } memory::~memory() { if (!_reused && _engine) { - _engine->subtract_memory_used(_bytes_count); - } - - 
GPU_DEBUG_GET_INSTANCE(debug_config); - GPU_DEBUG_IF(debug_config->verbose >= 1) { - GPU_DEBUG_COUT << "Free " << _bytes_count << " bytes" - << " (current=" << _engine->get_used_device_memory() << ";" - << " max=" << _engine->get_max_used_device_memory() << ")" << std::endl; + _engine->subtract_memory_used(_bytes_count, _type); + GPU_DEBUG_GET_INSTANCE(debug_config); + GPU_DEBUG_IF(debug_config->verbose >= 1) { + GPU_DEBUG_COUT << "Free " << _bytes_count << " bytes of " << _type << " allocation type" + << " (current=" << _engine->get_used_device_memory(_type) << ";" + << " max=" << _engine->get_max_used_device_memory(_type) << ")" << std::endl; + } } } diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp index 4582f2ad063..e5e8bd01f09 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp @@ -403,7 +403,7 @@ TEST(memory_pool, shared_mem_pool_diff_batches) { network network_second(*engine, topo, bo); network_second.set_input_data("input", input_1); auto outputs_second = network_second.execute(); - EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)3928); + EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)4328); } TEST(memory_pool, shared_dep_two_output) { diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index 618f988a4c8..b00272ac7e2 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -52,11 +52,9 @@ extensions/back/ResultNormalizer.py extensions/back/ResultRename.py extensions/back/ReverseInputChannels.py extensions/back/RNNSequenceTypeRename.py -extensions/back/ScalarConstNormalize.py extensions/back/SelectBroadcast.py extensions/back/ShapeOfConstFolding.py extensions/back/ShuffleChannelPatternOptimization.py -extensions/back/ShufflenetReLUReorder.py extensions/back/SpecialNodesFinalization.py extensions/back/StridedSliceMasksNormalizer.py extensions/back/TopKNormalizer.py diff --git a/model-optimizer/extensions/analysis/tf_od_api.py b/model-optimizer/extensions/analysis/tf_od_api.py index 8fe6d78964a..214f8963694 100644 --- a/model-optimizer/extensions/analysis/tf_od_api.py +++ b/model-optimizer/extensions/analysis/tf_od_api.py @@ -16,62 +16,73 @@ class TensorFlowObjectDetectionAPIAnalysis(AnalyzeAction): """ graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - model_scopes = [('MaskRCNN', ['Preprocessor', - 'FirstStageFeatureExtractor', - 'SecondStageFeatureExtractor', - 'SecondStageBoxPredictor', - 'SecondStageBoxPredictor_1', - 'SecondStageFeatureExtractor_1', - ]), - ('RFCN', ['Preprocessor', - 'FirstStageFeatureExtractor', - 'SecondStageFeatureExtractor', - 'SecondStageBoxPredictor', - 'SecondStageBoxPredictor/map', - 'SecondStageBoxPredictor/map_1', - 'SecondStagePostprocessor', - ]), - ('FasterRCNN', ['Preprocessor', - 'FirstStageFeatureExtractor', - 'SecondStageFeatureExtractor', - 'SecondStageBoxPredictor', - 'SecondStagePostprocessor', - ]), - ('SSD', ['Preprocessor', - 'FeatureExtractor', - 'Postprocessor', - ]), - ] - file_patterns = {'MaskRCNN': 'mask_rcnn_support.*\\.json', 'RFCN': 'rfcn_support.*\\.json', 'FasterRCNN': 'faster_rcnn_support.*\\.json', 'SSD': 'ssd.*_support.*\\.json', } + model_scopes = {'MaskRCNN': (['FirstStageFeatureExtractor', + 'SecondStageFeatureExtractor', + 'SecondStageBoxPredictor', + 'SecondStageBoxPredictor_1', + 
'SecondStageFeatureExtractor_1', + ],), + 'RFCN': (['FirstStageFeatureExtractor', + 'SecondStageFeatureExtractor', + 'SecondStageBoxPredictor', + 'SecondStageBoxPredictor/map', + 'SecondStageBoxPredictor/map_1', + 'SecondStagePostprocessor', + ],), + 'FasterRCNN': (['FirstStageFeatureExtractor', + 'SecondStageFeatureExtractor', + 'SecondStageBoxPredictor', + 'SecondStagePostprocessor', + ], + ['FirstStageRPNFeatures', + 'FirstStageBoxPredictor', + 'SecondStagePostprocessor', + 'mask_rcnn_keras_box_predictor', + ],), + 'SSD': ([('FeatureExtractor', 'ssd_mobile_net_v2keras_feature_extractor', + 'ssd_mobile_net_v1fpn_keras_feature_extractor', + 'ssd_mobile_net_v2fpn_keras_feature_extractor', 'ResNet50V1_FPN', 'ResNet101V1_FPN', + 'ResNet152V1_FPN' + ), + 'Postprocessor'] + ), + } def analyze(self, graph: Graph): - if any([name not in graph.nodes() for name in ['image_tensor', 'detection_classes', 'detection_boxes', - 'detection_scores']]): + tf_1_names = ['image_tensor', 'detection_classes', 'detection_boxes', 'detection_scores', + ('Preprocessor', 'map')] + tf_1_cond = all([graph_contains_scope(graph, scope) for scope in tf_1_names]) + + tf_2_names = ['input_tensor', 'output_control_node', 'Identity_', ('Preprocessor', 'map')] + tf_2_cond = all([graph_contains_scope(graph, scope) for scope in tf_2_names]) + + if not tf_1_cond and not tf_2_cond: log.debug('The model does not contain nodes that must exist in the TF OD API models') return None, None - for flavor, scopes in __class__.model_scopes: - if all([graph_contains_scope(graph, scope) for scope in scopes]): - result = dict() - result['flavor'] = flavor - result['mandatory_parameters'] = {'tensorflow_use_custom_operations_config': - files_by_pattern(get_mo_root_dir() + '/extensions/front/tf', - __class__.file_patterns[flavor], - add_prefix=True), - 'tensorflow_object_detection_api_pipeline_config': None, - } - message = "Your model looks like TensorFlow Object Detection API Model.\n" \ - "Check if all parameters are specified:\n" \ - "\t--tensorflow_use_custom_operations_config\n" \ - "\t--tensorflow_object_detection_api_pipeline_config\n" \ - "\t--input_shape (optional)\n" \ - "\t--reverse_input_channels (if you convert a model to use with the Inference Engine sample applications)\n" \ - "Detailed information about conversion of this model can be found at\n" \ - "https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models.html" - return {'model_type': {'TF_OD_API': result}}, message + for flavor, scopes_tuple in self.model_scopes.items(): + for scopes in scopes_tuple: + if all([graph_contains_scope(graph, scope) for scope in scopes]): + result = dict() + result['flavor'] = flavor + result['mandatory_parameters'] = {'tensorflow_use_custom_operations_config': + files_by_pattern(get_mo_root_dir() + '/extensions/front/tf', + __class__.file_patterns[flavor], + add_prefix=True), + 'tensorflow_object_detection_api_pipeline_config': None, + } + message = "Your model looks like TensorFlow Object Detection API Model.\n" \ + "Check if all parameters are specified:\n" \ + "\t--transformations_config\n" \ + "\t--tensorflow_object_detection_api_pipeline_config\n" \ + "\t--input_shape (optional)\n" \ + "\t--reverse_input_channels (if you convert a model to use with the Inference Engine sample applications)\n" \ + "Detailed information about conversion of this model can be found at\n" \ + 
"https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models.html" + return {'model_type': {'TF_OD_API': result}}, message return None, None diff --git a/model-optimizer/extensions/back/ReduceMerge.py b/model-optimizer/extensions/back/ReduceMerge.py index 418f3a18ffc..3ec28a86535 100644 --- a/model-optimizer/extensions/back/ReduceMerge.py +++ b/model-optimizer/extensions/back/ReduceMerge.py @@ -5,7 +5,6 @@ import logging as log import numpy as np -from extensions.back.ScalarConstNormalize import ScalarNormalize from extensions.ops.ReduceOps import reduce_map from mo.back.replacement import BackReplacementPattern from mo.front.common.partial_infer.utils import int64_array @@ -23,9 +22,6 @@ class ReduceMerge(BackReplacementPattern): enabled = True force_clean_up = True - def run_before(self): - return [ScalarNormalize] - @staticmethod def fuse_reduces(first_reduce, second_reduce): first_reduce_name = first_reduce.soft_get('name', first_reduce.id) diff --git a/model-optimizer/extensions/back/ScalarConstNormalize.py b/model-optimizer/extensions/back/ScalarConstNormalize.py deleted file mode 100644 index 923f68b8929..00000000000 --- a/model-optimizer/extensions/back/ScalarConstNormalize.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from extensions.back.ReshapeMutation import ReshapeMutation -from mo.back.replacement import BackReplacementPattern -from mo.front.common.partial_infer.utils import int64_array -from mo.front.tf.graph_utils import create_op_node_with_second_input -from mo.graph.graph import Graph -from mo.ops.reshape import Reshape - - -# Temporary nGraph workaround. TODO: REMOVE -class ScalarNormalize(BackReplacementPattern): - enabled = False - force_clean_up = True - - def run_before(self): - return [ReshapeMutation] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', type='Const'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - if node.value.ndim == 0: - reshape = create_op_node_with_second_input(graph, Reshape, int64_array([1]), - {'name': node.id + '/Dims'}) - node.out_port(0).get_connection().set_source(reshape.out_port(0)) - node.out_port(0).connect(reshape.in_port(0)) - reshape.infer(reshape) diff --git a/model-optimizer/extensions/back/ShufflenetReLUReorder.py b/model-optimizer/extensions/back/ShufflenetReLUReorder.py deleted file mode 100644 index 89cd9d76297..00000000000 --- a/model-optimizer/extensions/back/ShufflenetReLUReorder.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from mo.back.replacement import BackReplacementPattern -from mo.graph.graph import Graph - - -class ShufflenetReLUReorder(BackReplacementPattern): - """ - This pass is workaround for GPU plugin - """ - enabled = False - - def pattern(self): - return dict( - nodes=[ - ('relu', dict(kind='op', type='ReLU')), - ('relu_data', dict(kind='data')), - ('reshape1', dict(kind='op', type='Reshape')), - ('reshape1_data', dict(kind='data')), - ('transpose', dict(kind='op', type='Transpose')), - ('transpose_data', dict(kind='data')), - ('reshape2', dict(kind='op', type='Reshape')), - ('reshape2_data', dict(kind='data')), - ('conv', dict(kind='op', type='Convolution')) - ], - edges=[('relu', 'relu_data'), - ('relu_data', 'reshape1'), - ('reshape1', 'reshape1_data'), - ('reshape1_data', 
'transpose'), - ('transpose', 'transpose_data'), - ('transpose_data', 'reshape2'), - ('reshape2', 'reshape2_data'), - ('reshape2_data', 'conv'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - relu = match['relu'] - reshape1 = match['reshape1'] - reshape2_data = match['reshape2_data'] - conv = match['conv'] - - if np.max(conv.pad) == 0: - return - - relu_input = relu.in_node() - - # Disconnect InputData-x->ReLU->Data-x->Reshape1 - edge_attrs = graph.get_edge_data(relu.out_node().id, reshape1.id)[0] - graph.remove_edge(relu_input.id, relu.id) - graph.remove_edge(relu.out_node().id, reshape1.id) - - # Connect InputData-->Reshape1 - graph.add_edges_from([(relu_input.id, reshape1.id, edge_attrs)]) - - # Insert ReLU: Reshape2Data->ReLU->Data->Convolution - edge_attrs = graph.get_edge_data(reshape2_data.id, conv.id)[0] - graph.remove_edge(reshape2_data.id, conv.id) - graph.add_edges_from([(reshape2_data.id, relu.id, {'in': 0}), (relu.out_node().id, conv.id, edge_attrs)]) diff --git a/model-optimizer/extensions/back/TopKNormalizer.py b/model-optimizer/extensions/back/TopKNormalizer.py index 6fc3f33e3a9..90c43d641a1 100644 --- a/model-optimizer/extensions/back/TopKNormalizer.py +++ b/model-optimizer/extensions/back/TopKNormalizer.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 from extensions.back.Reshape0DToSqueeze import Reshape0DToSqueeze -from extensions.back.ScalarConstNormalize import ScalarNormalize from mo.back.replacement import BackReplacementPattern from mo.front.common.partial_infer.utils import int64_array from mo.front.tf.graph_utils import create_op_node_with_second_input @@ -23,9 +22,6 @@ class TopKNormalizer(BackReplacementPattern): """ enabled = True - def run_after(self): - return [ScalarNormalize] - def run_before(self): return [Reshape0DToSqueeze] diff --git a/model-optimizer/mo/middle/passes/convert_data_type.py b/model-optimizer/mo/middle/passes/convert_data_type.py index 85ccbe85813..c4133df2ecb 100644 --- a/model-optimizer/mo/middle/passes/convert_data_type.py +++ b/model-optimizer/mo/middle/passes/convert_data_type.py @@ -38,10 +38,11 @@ SUPPORTED_DATA_TYPES = { 'I32': (np.int32, 'I32', 'i32'), 'I64': (np.int64, 'I64', 'i64'), 'int8': (np.int8, 'I8', 'i8'), - 'uint8': (np.uint8, 'U8', 'u8'), 'int32': (np.int32, 'I32', 'i32'), 'int64': (np.int64, 'I64', 'i64'), 'bool': (np.bool, 'BOOL', 'boolean'), + 'uint8': (np.uint8, 'U8', 'u8'), + 'uint32': (np.uint32, 'U32', 'u32'), 'uint64': (np.uint64, 'U64', 'u64'), # custom types diff --git a/model-optimizer/mo/utils/model_analysis.py b/model-optimizer/mo/utils/model_analysis.py index 2049f8b41ec..4fe47637b7b 100644 --- a/model-optimizer/mo/utils/model_analysis.py +++ b/model-optimizer/mo/utils/model_analysis.py @@ -112,13 +112,14 @@ class AnalysisCollectorAnchor(AnalyzeAction): pass -def graph_contains_scope(graph: Graph, scope: str): +def graph_contains_scope(graph: Graph, scope: [str, tuple]): """ - Checks whether the graph contains node(s) which name starts with "scope" string. + Checks whether the graph contains node(s) which name includes "scope" string. 
:param graph: graph to check - :param scope: string defining the scope + :param scope: string or tuple with strings defining the scope :return: the result of the check (True/False) """ - if scope[-1] != '/': - scope += '/' - return any([node.soft_get('name').startswith(scope) for node in graph.get_op_nodes()]) + if type(scope) is str: + return any([node.soft_get('name').find(scope) != -1 for node in graph.get_op_nodes()]) + else: + return any([graph_contains_scope(graph, s) for s in scope]) diff --git a/model-optimizer/unit_tests/extensions/back/ShufflenetReLUReorder_test.py b/model-optimizer/unit_tests/extensions/back/ShufflenetReLUReorder_test.py deleted file mode 100644 index 5b154f22c9d..00000000000 --- a/model-optimizer/unit_tests/extensions/back/ShufflenetReLUReorder_test.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from extensions.back.ShufflenetReLUReorder import ShufflenetReLUReorder -from mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the -# dictionary with node attributes. -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ReLU - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'}, - 'relu_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Reshape layers - 'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'reshape_3': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'reshape_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Transpose layer - 'transpose_1': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose'}, - 'transpose_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Conv layer - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2d'}, - 'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'}, -} - - -class ShufflenetReLUReorderTests(unittest.TestCase): - def test_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - ('relu_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1': {'pad': np.array([1, 1])} - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 
'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - ('relu_1_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - }) - - pattern = ShufflenetReLUReorder() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_2_neg(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - }) - - pattern = ShufflenetReLUReorder() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt index 232ca18727e..0017302b851 100644 --- a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt +++ b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/CMakeLists.txt @@ -15,7 +15,7 @@ add_library(${TARGET_FE_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS}) target_include_directories(${TARGET_FE_NAME} PRIVATE ".") -target_link_libraries(${TARGET_FE_NAME} PRIVATE ngraph::frontend_manager::static) +target_link_libraries(${TARGET_FE_NAME} PRIVATE frontend_manager::static) target_link_libraries(${TARGET_FE_NAME} PUBLIC ngraph PRIVATE ngraph::builder) add_clang_format_target(${TARGET_FE_NAME}_clang 
FOR_TARGETS ${TARGET_FE_NAME}) diff --git a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt index 750a0fcb473..ca12d2e0c44 100644 --- a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt +++ b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt @@ -30,7 +30,7 @@ source_group("src" FILES ${PYBIND_FE_SRC}) pybind11_add_module(${PYBIND_FE_NAME} MODULE ${PYBIND_FE_SRC}) -target_link_libraries(${PYBIND_FE_NAME} PRIVATE ngraph::ngraph ngraph::frontend_manager::static) +target_link_libraries(${PYBIND_FE_NAME} PRIVATE ngraph frontend_manager::static) target_link_libraries(${PYBIND_FE_NAME} PRIVATE ${TARGET_FE_NAME}) add_dependencies(${PYBIND_FE_NAME} ${TARGET_FE_NAME}) diff --git a/ngraph/CMakeLists.txt b/ngraph/CMakeLists.txt index 6ef6b3cdeb6..a5573427a89 100644 --- a/ngraph/CMakeLists.txt +++ b/ngraph/CMakeLists.txt @@ -30,7 +30,3 @@ add_subdirectory(core) add_subdirectory(frontend) add_subdirectory(test) - -if(ENABLE_PYTHON) - add_subdirectory(python) -endif() diff --git a/ngraph/core/CMakeLists.txt b/ngraph/core/CMakeLists.txt index 70be17b1281..076958a3be4 100644 --- a/ngraph/core/CMakeLists.txt +++ b/ngraph/core/CMakeLists.txt @@ -71,25 +71,52 @@ set_source_files_properties("${CMAKE_CURRENT_SOURCE_DIR}/src/pass/convert_precis # Defines macro in C++ to load backend plugin target_include_directories(ngraph PUBLIC $ - $ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src ${CMAKE_CURRENT_BINARY_DIR}/include) -#Add an alias so that library can be used inside the build tree, e.g. when testing +# Add an alias so that library can be used inside the build tree, e.g. when testing add_library(ngraph::ngraph ALIAS ngraph) +add_library(openvino::core ALIAS ngraph) target_link_libraries(ngraph PRIVATE ${CMAKE_DL_LIBS}) +#----------------------------------------------------------------------------------------------- +# Export for build tree +#----------------------------------------------------------------------------------------------- + +export(TARGETS ngraph NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") + +# WA for cmake +export(TARGETS ngraph NAMESPACE IE:: + APPEND FILE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake") + +set_target_properties(ngraph PROPERTIES EXPORT_NAME core) +export(TARGETS ngraph NAMESPACE openvino:: + APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") + #----------------------------------------------------------------------------------------------- # Installation logic... 
#----------------------------------------------------------------------------------------------- -export(TARGETS ngraph NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") - install(TARGETS ngraph EXPORT ngraphTargets RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + INCLUDES DESTINATION ${NGRAPH_INSTALL_INCLUDE}) + +# because ngraph is exported in multiple export list +# it needs to be exported in each list it's used +install(TARGETS ngraph EXPORT InferenceEngineTargets + RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + INCLUDES DESTINATION ${NGRAPH_INSTALL_INCLUDE}) + +install(TARGETS ngraph EXPORT OpenVINOTargets + RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + INCLUDES DESTINATION ${NGRAPH_INSTALL_INCLUDE}) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ DESTINATION ${NGRAPH_INSTALL_INCLUDE} diff --git a/ngraph/core/reference/src/runtime/reference/gather_tree.cpp b/ngraph/core/reference/src/runtime/reference/gather_tree.cpp index de021bc963f..66e6476cf2b 100644 --- a/ngraph/core/reference/src/runtime/reference/gather_tree.cpp +++ b/ngraph/core/reference/src/runtime/reference/gather_tree.cpp @@ -72,11 +72,12 @@ void runtime::reference::gather_tree(const char* step_ids, throw ngraph_error("max_seq_len must have size of BATCH_SIZE"); } - NGRAPH_SUPPRESS_DEPRECATED_START - ngraph::CoordinateTransform cordinate_transform(step_ids_shape); + const auto in_strides = row_major_strides(step_ids_shape); + ngraph::CoordinateTransformBasic cordinate_transform(step_ids_shape); for (const auto& coord : cordinate_transform) { - memcpy(out + cordinate_transform.index(coord) * elem_size, end_token, elem_size); + const auto out_idx = std::inner_product(coord.begin(), coord.end(), in_strides.begin(), 0); + memcpy(out + out_idx * elem_size, end_token, elem_size); } for (size_t batch = 0; batch < batch_size; ++batch) { @@ -87,31 +88,35 @@ void runtime::reference::gather_tree(const char* step_ids, continue; } - auto offset = cordinate_transform.index({max_seq_in_beam - 1, batch, beam}) * elem_size; - + const auto coord = Coordinate({max_seq_in_beam - 1, batch, beam}); + const auto offset = std::inner_product(coord.begin(), coord.end(), in_strides.begin(), 0) * elem_size; memcpy(out + offset, step_ids + offset, elem_size); size_t parent = _asIndex(parent_ids + offset, element_type); for (size_t level = max_seq_in_beam - 1; level-- > 0;) { - memcpy(out + cordinate_transform.index({level, batch, beam}) * elem_size, - step_ids + cordinate_transform.index({level, batch, parent}) * elem_size, - elem_size); + const auto coord_beam = Coordinate({level, batch, beam}); + const auto out_idx = std::inner_product(coord_beam.begin(), coord_beam.end(), in_strides.begin(), 0); - parent = - _asIndex(parent_ids + cordinate_transform.index({level, batch, parent}) * elem_size, element_type); + const auto coord_parent = Coordinate({level, batch, parent}); + const auto step_ids_idx = + std::inner_product(coord_parent.begin(), coord_parent.end(), in_strides.begin(), 0); + + memcpy(out + out_idx * elem_size, step_ids + step_ids_idx * elem_size, elem_size); + + 
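// Editor's note (illustration, not part of the patch): with
// in_strides = row_major_strides(step_ids_shape), the flat offset of a
// coordinate c is inner_product(c, in_strides) = sum_i c[i] * in_strides[i].
// For a {MAX_TIME, BATCH_SIZE, BEAM_WIDTH} shape the strides are
// {BATCH_SIZE * BEAM_WIDTH, BEAM_WIDTH, 1}, so {level, batch, parent} maps to
// level * BATCH_SIZE * BEAM_WIDTH + batch * BEAM_WIDTH + parent, the same
// value the deprecated CoordinateTransform::index() returned for the default
// (non-sliced) transform.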
parent = _asIndex(parent_ids + step_ids_idx * elem_size, element_type); } bool finished = false; for (size_t time = 0; time < max_seq_in_beam; ++time) { + const auto out_coord = Coordinate({time, batch, beam}); + const auto out_idx = std::inner_product(out_coord.begin(), out_coord.end(), in_strides.begin(), 0); if (finished) { - memcpy(out + cordinate_transform.index({time, batch, beam}) * elem_size, end_token, elem_size); - } else if (_asIndex(out + cordinate_transform.index({time, batch, beam}) * elem_size, element_type) == - _asIndex(end_token, element_type)) { + memcpy(out + out_idx * elem_size, end_token, elem_size); + } else if (_asIndex(out + out_idx * elem_size, element_type) == _asIndex(end_token, element_type)) { finished = true; } } } } - NGRAPH_SUPPRESS_DEPRECATED_END } diff --git a/ngraph/core/src/interval.cpp b/ngraph/core/src/interval.cpp index 469132f3576..d9aa326772d 100644 --- a/ngraph/core/src/interval.cpp +++ b/ngraph/core/src/interval.cpp @@ -21,7 +21,17 @@ Interval::value_type clip_times(Interval::value_type a, Interval::value_type b) } } Interval::value_type clip_add(Interval::value_type a, Interval::value_type b) { - return (a == Interval::s_max || b == Interval::s_max) ? Interval::s_max : a + b; + if (a == Interval::s_max || b == Interval::s_max) { + return Interval::s_max; + } + + // check overflow without undefined behavior: a + b <= max + const static auto max = std::numeric_limits::max(); + if (b > (max - a)) { + return Interval::s_max; + } + + return a + b; } Interval::value_type clip_minus(Interval::value_type a, Interval::value_type b) { if (a <= b) { diff --git a/ngraph/core/src/op/gather_tree.cpp b/ngraph/core/src/op/gather_tree.cpp index 994e27be700..956a88d738c 100644 --- a/ngraph/core/src/op/gather_tree.cpp +++ b/ngraph/core/src/op/gather_tree.cpp @@ -33,35 +33,68 @@ bool ngraph::op::v1::GatherTree::visit_attributes(AttributeVisitor& visitor) { void op::v1::GatherTree::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_GatherTree_validate_and_infer_types); - const auto& step_ids_rank = get_input_partial_shape(0); - const auto& parent_idx_rank = get_input_partial_shape(1); - const auto& max_seq_len_rank = get_input_partial_shape(2); - const auto& end_token_rank = get_input_partial_shape(3); - - NODE_VALIDATION_CHECK(this, - step_ids_rank.rank().is_dynamic() || step_ids_rank.rank().get_length() == 3, - "step_ids input rank must equal to 3 (step_ids rank: ", - step_ids_rank.rank().get_length(), - ")"); - - NODE_VALIDATION_CHECK(this, - parent_idx_rank.rank().is_dynamic() || parent_idx_rank.rank().get_length() == 3, - "parent_idx input rank must equal to 3 (parent_idx rank: ", - parent_idx_rank.rank().get_length(), - ")"); - - NODE_VALIDATION_CHECK(this, - max_seq_len_rank.rank().is_dynamic() || max_seq_len_rank.rank().get_length() == 1, - "max_seq_len input rank must equal to 1 (max_seq_len rank: ", - max_seq_len_rank.rank().get_length(), - ")"); - - NODE_VALIDATION_CHECK(this, - end_token_rank.rank().is_dynamic() || end_token_rank.rank().get_length() == 0, - "end_token input rank must be scalar (end_token rank: ", - end_token_rank.rank().get_length(), - ")"); const auto& step_ids_et = get_input_element_type(0); - set_output_type(0, step_ids_et, step_ids_rank); + const auto& parent_idx_et = get_input_element_type(1); + const auto& max_seq_len_et = get_input_element_type(2); + const auto& end_token_et = get_input_element_type(3); + + element::Type result_et; + NODE_VALIDATION_CHECK(this, + element::Type::merge(result_et, step_ids_et, parent_idx_et) && + 
element::Type::merge(result_et, result_et, max_seq_len_et) && + element::Type::merge(result_et, result_et, end_token_et), + "Inputs must have the same element type. Got: step_ids (", + step_ids_et, + "), parent_idx_et (", + parent_idx_et, + "), max_seq_len (", + max_seq_len_et, + "), end_token (", + end_token_et, + ")"); + + NODE_VALIDATION_CHECK(this, + result_et.is_real() || result_et.is_integral_number(), + "Element type of inputs must be numeric. Got: ", + result_et); + + const auto& step_ids_pshape = get_input_partial_shape(0); + const auto& parent_idx_pshape = get_input_partial_shape(1); + const auto& max_seq_len_pshape = get_input_partial_shape(2); + const auto& end_token_pshape = get_input_partial_shape(3); + + PartialShape result_pshape{PartialShape::dynamic()}; + NODE_VALIDATION_CHECK(this, + PartialShape::merge_into(result_pshape, step_ids_pshape) && + PartialShape::merge_into(result_pshape, parent_idx_pshape) && + result_pshape.rank().compatible(3), + "step_ids and parent_idx inputs must have the same shape with rank 3. Got: ", + step_ids_pshape, + " and ", + parent_idx_pshape, + ", respectively"); + + NODE_VALIDATION_CHECK(this, + max_seq_len_pshape.rank().compatible(1), + "max_seq_len input must have rank 1. Got: ", + max_seq_len_pshape); + + if (result_pshape.rank().is_static() && max_seq_len_pshape.rank().is_static()) { + NODE_VALIDATION_CHECK(this, + Dimension::merge(result_pshape[1], result_pshape[1], max_seq_len_pshape[0]), + "Number of elements of max_seq_len input must match BATCH_SIZE dimension of " + "step_ids/parent_idx inputs. Got: ", + result_pshape[1], + " and ", + max_seq_len_pshape[0], + ", respectively"); + } + + NODE_VALIDATION_CHECK(this, + end_token_pshape.rank().compatible(0), + "end_token input must be scalar. Got: ", + end_token_pshape); + + set_output_type(0, result_et, result_pshape); } diff --git a/ngraph/core/src/op/tile.cpp b/ngraph/core/src/op/tile.cpp index 49cf2530ab8..0dbffbc08bf 100644 --- a/ngraph/core/src/op/tile.cpp +++ b/ngraph/core/src/op/tile.cpp @@ -73,7 +73,7 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, const HostTens const auto& data = inputs[0]; const auto& axis = inputs[1]; auto& output = outputs[0]; - auto repeats_val = read_vector(axis); + auto repeats_val = read_index_vector(axis); auto repeats_rank = repeats_val.size(); ov::StaticShape data_shape = data->get_shape(); auto data_rank = data_shape.size(); diff --git a/ngraph/frontend/frontend_manager/CMakeLists.txt b/ngraph/frontend/frontend_manager/CMakeLists.txt index cdb93d5fc0f..df6cbe07a09 100644 --- a/ngraph/frontend/frontend_manager/CMakeLists.txt +++ b/ngraph/frontend/frontend_manager/CMakeLists.txt @@ -17,7 +17,7 @@ source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) # Static library add_library(${TARGET_NAME}_static STATIC ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) -add_library(ngraph::${TARGET_NAME}::static ALIAS ${TARGET_NAME}_static) +add_library(${TARGET_NAME}::static ALIAS ${TARGET_NAME}_static) target_link_libraries(${TARGET_NAME}_static PRIVATE ${CMAKE_DL_LIBS} PUBLIC ngraph) target_include_directories(${TARGET_NAME}_static PUBLIC ${FRONTEND_INCLUDE_DIR}) target_include_directories(${TARGET_NAME}_static PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) @@ -28,8 +28,10 @@ target_compile_definitions(${TARGET_NAME}_static PUBLIC USE_STATIC_FRONTEND_MANA add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) add_library(ngraph::${TARGET_NAME} ALIAS ${TARGET_NAME}) 
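# Editor's sketch (not part of the patch): how a downstream project might
# consume the openvino::frontend::manager alias introduced just below,
# assuming an installed package that exports OpenVINOTargets.cmake; the
# consumer project and target names here are hypothetical.
#
#   find_package(OpenVINO REQUIRED)
#   add_executable(fe_demo main.cpp)
#   target_link_libraries(fe_demo PRIVATE openvino::frontend::manager)
#
# Inside this repository the same spelling resolves through the ALIAS target,
# so consumers can use one name in both the build tree and the install tree.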
+add_library(openvino::frontend::manager ALIAS ${TARGET_NAME}) -target_include_directories(${TARGET_NAME} PUBLIC $ +target_include_directories(${TARGET_NAME} PUBLIC + $ $) target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_DL_LIBS} PUBLIC ngraph) @@ -53,9 +55,17 @@ install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::manager) +install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets + RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/frontend_manager - DESTINATION ${FRONTEND_INSTALL_INCLUDE} - COMPONENT ngraph_dev - FILES_MATCHING PATTERN "*.hpp") + DESTINATION ${FRONTEND_INSTALL_INCLUDE} + COMPONENT ngraph_dev + FILES_MATCHING PATTERN "*.hpp") export(TARGETS ${TARGET_NAME} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") +export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: + APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") diff --git a/ngraph/frontend/ir/CMakeLists.txt b/ngraph/frontend/ir/CMakeLists.txt index 181bfda9d62..5ec3a540361 100644 --- a/ngraph/frontend/ir/CMakeLists.txt +++ b/ngraph/frontend/ir/CMakeLists.txt @@ -23,12 +23,11 @@ add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_P ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} INCLUDE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include" ADDITIONAL_INCLUDE_DIRECTORIES - $) + $) target_include_directories(${TARGET_NAME} PUBLIC $ - $ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src ${CMAKE_CURRENT_BINARY_DIR}) @@ -40,22 +39,14 @@ if(COMMAND ie_add_vs_version_file) FILEDESCRIPTION "FrontEnd to load and convert IR file format") endif() -target_link_libraries(${TARGET_NAME} PRIVATE ngraph::frontend_manager::static - PRIVATE ngraph::builder inference_engine_transformations +target_link_libraries(${TARGET_NAME} PRIVATE frontend_manager::static + ngraph::builder inference_engine_transformations inference_engine pugixml::static inference_engine_plugin_api) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS}) -install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets +install(TARGETS ${TARGET_NAME} RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) - -install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/ir_frontend - DESTINATION ${FRONTEND_INSTALL_INCLUDE} - COMPONENT ngraph_dev - FILES_MATCHING PATTERN "*.hpp" -) - -export(TARGETS ${TARGET_NAME} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") diff --git a/ngraph/frontend/onnx/frontend/CMakeLists.txt b/ngraph/frontend/onnx/frontend/CMakeLists.txt index ad830d0ddbc..20a773650d8 100644 --- a/ngraph/frontend/onnx/frontend/CMakeLists.txt +++ b/ngraph/frontend/onnx/frontend/CMakeLists.txt @@ -2,6 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # +set(TARGET_NAME onnx_ngraph_frontend) + set(ONNX_OPSET_VERSION 13 CACHE INTERNAL "Supported version of ONNX operator set") set(ONNX_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) @@ -27,35 +29,41 @@ source_group("include" FILES ${LIBRARY_HEADERS}) source_group("public include" FILES 
${LIBRARY_PUBLIC_HEADERS}) # Create shared library -add_library(onnx_ngraph_frontend SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) -add_library(ngraph::onnx_ngraph_frontend ALIAS onnx_ngraph_frontend) +add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) +add_library(openvino::frontend::onnx ALIAS ${TARGET_NAME}) -add_clang_format_target(onnx_ngraph_frontend_clang FOR_TARGETS onnx_ngraph_frontend) +add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) -ov_ncc_naming_style(FOR_TARGET onnx_ngraph_frontend +ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} INCLUDE_DIRECTORY "${ONNX_FRONTEND_INCLUDE_DIR}" DEFINITIONS $ ADDITIONAL_INCLUDE_DIRECTORIES - $) + $) if(COMMAND ie_add_vs_version_file) - ie_add_vs_version_file(NAME onnx_ngraph_frontend + ie_add_vs_version_file(NAME ${TARGET_NAME} FILEDESCRIPTION "nGraph ONNX frontend library") endif() -target_link_libraries(onnx_ngraph_frontend PUBLIC ngraph PRIVATE frontend_manager ngraph::builder onnx_common inference_engine_transformations) +target_link_libraries(${TARGET_NAME} PUBLIC ngraph PRIVATE frontend_manager ngraph::builder onnx_common inference_engine_transformations) -target_include_directories(onnx_ngraph_frontend PUBLIC $ +target_include_directories(${TARGET_NAME} PUBLIC $ $) -target_include_directories(onnx_ngraph_frontend PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) +target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) -target_compile_definitions(onnx_ngraph_frontend PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) +target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) if(NGRAPH_USE_PROTOBUF_LITE) - target_compile_definitions(onnx_ngraph_frontend PRIVATE NGRAPH_USE_PROTOBUF_LITE) + target_compile_definitions(${TARGET_NAME} PRIVATE NGRAPH_USE_PROTOBUF_LITE) endif() -install(TARGETS onnx_ngraph_frontend EXPORT ngraphTargets +install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets + RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::onnx) +install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) @@ -66,4 +74,6 @@ install(DIRECTORY ${ONNX_FRONTEND_INCLUDE_DIR}/onnx_frontend COMPONENT ngraph_dev FILES_MATCHING PATTERN "*.hpp") -export(TARGETS onnx_ngraph_frontend NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") +export(TARGETS ${TARGET_NAME} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") +export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: + APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") diff --git a/ngraph/frontend/onnx/frontend/src/edge_mapper.cpp b/ngraph/frontend/onnx/frontend/src/edge_mapper.cpp index ee2bb58d5b6..4c6132dd78e 100644 --- a/ngraph/frontend/onnx/frontend/src/edge_mapper.cpp +++ b/ngraph/frontend/onnx/frontend/src/edge_mapper.cpp @@ -217,6 +217,24 @@ bool onnx_editor::EdgeMapper::is_correct_tensor_name(const std::string& name) co return false; } +std::vector onnx_editor::EdgeMapper::get_input_ports(const EditorNode& node) const { + NGRAPH_CHECK(is_correct_and_unambiguous_node(node), + "The node with name: " + (node.m_node_name.empty() ? 
"not_given" : node.m_node_name) + + ", output_name: " + (node.m_output_name.empty() ? "not_given" : node.m_output_name) + + " is ambiguous"); + const auto node_index = find_node_indexes(node.m_node_name, node.m_output_name)[0]; + return m_node_inputs[node_index]; +} + +std::vector onnx_editor::EdgeMapper::get_output_ports(const EditorNode& node) const { + NGRAPH_CHECK(is_correct_and_unambiguous_node(node), + "The node with name: " + (node.m_node_name.empty() ? "not_given" : node.m_node_name) + + ", output_name: " + (node.m_output_name.empty() ? "not_given" : node.m_output_name) + + " is ambiguous"); + const auto node_index = find_node_indexes(node.m_node_name, node.m_output_name)[0]; + return m_node_outputs[node_index]; +} + std::string onnx_editor::EdgeMapper::get_source_tensor_name(const InputEdge& edge) const { if (edge.m_node_idx >= 0 && edge.m_node_idx < static_cast(m_node_inputs.size()) && edge.m_port_idx >= 0 && edge.m_port_idx < static_cast(m_node_inputs[edge.m_node_idx].size())) { diff --git a/ngraph/frontend/onnx/frontend/src/edge_mapper.hpp b/ngraph/frontend/onnx/frontend/src/edge_mapper.hpp index df57fbe87b4..fb16d147ade 100644 --- a/ngraph/frontend/onnx/frontend/src/edge_mapper.hpp +++ b/ngraph/frontend/onnx/frontend/src/edge_mapper.hpp @@ -98,6 +98,20 @@ public: /// bool is_correct_tensor_name(const std::string& name) const; + /// \brief Get names of input ports of given node. + /// + /// \param node An EditorNode helper structure created based on a node name + /// or a node output name. + /// + std::vector get_input_ports(const EditorNode& node) const; + + /// \brief Get names of output ports of given node. + /// + /// \param node An EditorNode helper structure created based on a node name + /// or a node output name. + /// + std::vector get_output_ports(const EditorNode& node) const; + /// \brief Get name of the tensor which is the source of the input edge. /// /// \note Empty string is returned if the tensor name is not found. diff --git a/ngraph/frontend/onnx/frontend/src/editor.cpp b/ngraph/frontend/onnx/frontend/src/editor.cpp index 0b1ca6d8b2e..6551543d5f1 100644 --- a/ngraph/frontend/onnx/frontend/src/editor.cpp +++ b/ngraph/frontend/onnx/frontend/src/editor.cpp @@ -449,6 +449,16 @@ bool onnx_editor::ONNXModelEditor::is_correct_tensor_name(const std::string& nam return m_pimpl->m_edge_mapper.is_correct_tensor_name(name); } +std::vector onnx_editor::ONNXModelEditor::get_input_ports(const EditorNode& node) const { + update_mapper_if_needed(); + return m_pimpl->m_edge_mapper.get_input_ports(node); +} + +std::vector onnx_editor::ONNXModelEditor::get_output_ports(const EditorNode& node) const { + update_mapper_if_needed(); + return m_pimpl->m_edge_mapper.get_output_ports(node); +} + std::shared_ptr onnx_editor::ONNXModelEditor::decode() { return onnx_import::detail::decode_to_framework_nodes(m_pimpl->m_model_proto, m_model_path); } diff --git a/ngraph/frontend/onnx/frontend/src/editor.hpp b/ngraph/frontend/onnx/frontend/src/editor.hpp index 0605eb03bb2..66d4d7e5ff0 100644 --- a/ngraph/frontend/onnx/frontend/src/editor.hpp +++ b/ngraph/frontend/onnx/frontend/src/editor.hpp @@ -204,6 +204,20 @@ public: /// bool is_correct_tensor_name(const std::string& name) const; + /// \brief Get names of input ports of given node. + /// + /// \param node An EditorNode helper structure created based on a node name + /// or a node output name. + /// + std::vector get_input_ports(const EditorNode& node) const; + + /// \brief Get names of output ports of given node. 
+ /// + /// \param node An EditorNode helper structure created based on a node name + /// or a node output name. + /// + std::vector get_output_ports(const EditorNode& node) const; + /// \brief Returns a nGraph function based on edited model /// decoded to framework nodes /// diff --git a/ngraph/frontend/onnx/frontend/src/input_model.cpp b/ngraph/frontend/onnx/frontend/src/input_model.cpp index 9db743a74b9..f9e2808fb44 100644 --- a/ngraph/frontend/onnx/frontend/src/input_model.cpp +++ b/ngraph/frontend/onnx/frontend/src/input_model.cpp @@ -52,9 +52,17 @@ std::vector InputModelONNX::get_outputs() const { } Place::Ptr InputModelONNX::get_place_by_tensor_name(const std::string& tensor_name) const { - NGRAPH_CHECK(m_editor->is_correct_tensor_name(tensor_name), - "The tensor with name: " + tensor_name + " does not exist in the graph"); - return std::make_shared(tensor_name, m_editor); + if (m_editor->is_correct_tensor_name(tensor_name)) { + return std::make_shared(tensor_name, m_editor); + } + return nullptr; +} + +Place::Ptr InputModelONNX::get_place_by_operation_name(const std::string& operation_name) const { + if (m_editor->is_correct_and_unambiguous_node(operation_name)) { + return std::make_shared(onnx_editor::EditorNode{operation_name}, m_editor); + } + return nullptr; } Place::Ptr InputModelONNX::get_place_by_operation_name_and_input_port(const std::string& operation_name, diff --git a/ngraph/frontend/onnx/frontend/src/input_model.hpp b/ngraph/frontend/onnx/frontend/src/input_model.hpp index 2ed2ff0e8e4..ae838f5b5e3 100644 --- a/ngraph/frontend/onnx/frontend/src/input_model.hpp +++ b/ngraph/frontend/onnx/frontend/src/input_model.hpp @@ -25,6 +25,7 @@ public: std::vector get_inputs() const override; std::vector get_outputs() const override; Place::Ptr get_place_by_tensor_name(const std::string& tensor_name) const override; + Place::Ptr get_place_by_operation_name(const std::string& operation_name) const override; Place::Ptr get_place_by_operation_name_and_input_port(const std::string& operation_name, int input_port_index) override; void set_partial_shape(Place::Ptr place, const ngraph::PartialShape& shape) override; diff --git a/ngraph/frontend/onnx/frontend/src/place.cpp b/ngraph/frontend/onnx/frontend/src/place.cpp index 64c623c2957..21cd516701e 100644 --- a/ngraph/frontend/onnx/frontend/src/place.cpp +++ b/ngraph/frontend/onnx/frontend/src/place.cpp @@ -133,3 +133,59 @@ bool PlaceTensorONNX::is_equal_data(Place::Ptr another) const { return is_equal(another) || (is_input() ? 
false : get_producing_port()->is_equal(another)) || eq_to_consuming_port(another); } + +PlaceOpONNX::PlaceOpONNX(const onnx_editor::EditorNode& node, std::shared_ptr editor) + : m_node{node}, + m_editor{editor} {} + +std::vector PlaceOpONNX::get_names() const { + return {m_node.m_node_name}; +} + +Place::Ptr PlaceOpONNX::get_output_port() const { + if (m_editor->get_output_ports(m_node).size() == 1) { + return get_output_port(0); + } + return nullptr; +} + +Place::Ptr PlaceOpONNX::get_output_port(int output_port_index) const { + if (output_port_index < m_editor->get_output_ports(m_node).size()) { + const auto output_edge = m_editor->find_output_edge(m_node, onnx_editor::EditorOutput{output_port_index}); + return std::make_shared(output_edge, m_editor); + } + return nullptr; +} + +Place::Ptr PlaceOpONNX::get_output_port(const std::string& output_port_name) const { + const auto output_ports = m_editor->get_output_ports(m_node); + if (std::count(std::begin(output_ports), std::end(output_ports), output_port_name) == 1) { + const auto output_edge = m_editor->find_output_edge(m_node, onnx_editor::EditorOutput{output_port_name}); + return std::make_shared(output_edge, m_editor); + } + return nullptr; +} + +Place::Ptr PlaceOpONNX::get_input_port() const { + if (m_editor->get_input_ports(m_node).size() == 1) { + return get_input_port(0); + } + return nullptr; +} + +Place::Ptr PlaceOpONNX::get_input_port(int input_port_index) const { + if (input_port_index < m_editor->get_input_ports(m_node).size()) { + const auto input_edge = m_editor->find_input_edge(m_node, onnx_editor::EditorInput{input_port_index}); + return std::make_shared(input_edge, m_editor); + } + return nullptr; +} + +Place::Ptr PlaceOpONNX::get_input_port(const std::string& input_name) const { + const auto input_ports = m_editor->get_input_ports(m_node); + if (std::count(std::begin(input_ports), std::end(input_ports), input_name) == 1) { + const auto input_edge = m_editor->find_input_edge(m_node, onnx_editor::EditorInput{input_name}); + return std::make_shared(input_edge, m_editor); + } + return nullptr; +} diff --git a/ngraph/frontend/onnx/frontend/src/place.hpp b/ngraph/frontend/onnx/frontend/src/place.hpp index a236d8506e9..3aae4e8f0c4 100644 --- a/ngraph/frontend/onnx/frontend/src/place.hpp +++ b/ngraph/frontend/onnx/frontend/src/place.hpp @@ -76,6 +76,24 @@ private: std::string m_name; std::shared_ptr m_editor; }; + +class PlaceOpONNX : public Place { +public: + PlaceOpONNX(const onnx_editor::EditorNode& node, std::shared_ptr editor); + std::vector get_names() const override; + + Place::Ptr get_output_port() const override; + Place::Ptr get_output_port(int output_port_index) const override; + Place::Ptr get_output_port(const std::string& output_port_name) const override; + + Place::Ptr get_input_port() const override; + Place::Ptr get_input_port(int input_port_index) const override; + Place::Ptr get_input_port(const std::string& input_name) const override; + +private: + onnx_editor::EditorNode m_node; + std::shared_ptr m_editor; +}; } // namespace frontend } // namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_common/CMakeLists.txt b/ngraph/frontend/onnx/onnx_common/CMakeLists.txt index ec31841e12c..bc63b28893d 100644 --- a/ngraph/frontend/onnx/onnx_common/CMakeLists.txt +++ b/ngraph/frontend/onnx/onnx_common/CMakeLists.txt @@ -16,7 +16,6 @@ source_group("include" FILES ${PUBLIC_HEADERS} ${PUBLIC_HEADERS}) # Create static library add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) 
-add_library(ngraph::onnx_common ALIAS ${TARGET_NAME}) # TODO Add handling ie_faster_build diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index 75a99e1e385..e0d43583988 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -49,13 +49,14 @@ set_source_files_properties(${PROTO_SRCS} ${PROTO_HDRS} PROPERTIES COMPILE_OPTIO # Create shared library add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS} ${PROTO_SRCS} ${PROTO_HDRS}) +add_library(openvino::frontend::paddlepaddle ALIAS ${TARGET_NAME}) add_dependencies(${TARGET_NAME} paddlepaddle_ngraph_frontend_proto) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} INCLUDE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include" ADDITIONAL_INCLUDE_DIRECTORIES - $) + $) target_include_directories(${TARGET_NAME} PUBLIC @@ -75,7 +76,7 @@ endif() link_system_libraries(${TARGET_NAME} PRIVATE ${Protobuf_LITE_LIBRARIES}) -target_link_libraries(${TARGET_NAME} PRIVATE ngraph::frontend_manager::static +target_link_libraries(${TARGET_NAME} PRIVATE frontend_manager::static PRIVATE ngraph::builder inference_engine_transformations) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} @@ -86,10 +87,17 @@ install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) +set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::paddlepaddle) +install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets + RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph + LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) + install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/paddlepaddle_frontend DESTINATION ${FRONTEND_INSTALL_INCLUDE} COMPONENT ngraph_dev - FILES_MATCHING PATTERN "*.hpp" -) + FILES_MATCHING PATTERN "*.hpp") export(TARGETS ${TARGET_NAME} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") +export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: + APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") diff --git a/ngraph/python/pybind11 b/ngraph/python/pybind11 deleted file mode 160000 index 8de7772cc72..00000000000 --- a/ngraph/python/pybind11 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 8de7772cc72daca8e947b79b83fea46214931604 diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index c682537d532..f2d9417b1b2 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -270,6 +270,7 @@ set(SRC visitors/op/floor_mod.cpp visitors/op/floor.cpp visitors/op/gather.cpp + visitors/op/gather_tree.cpp visitors/op/gelu.cpp visitors/op/greater_equal.cpp visitors/op/greater.cpp diff --git a/ngraph/test/intervals.cpp b/ngraph/test/intervals.cpp index 7f1e383cdd4..314e8248e3e 100644 --- a/ngraph/test/intervals.cpp +++ b/ngraph/test/intervals.cpp @@ -143,3 +143,20 @@ TEST(intervals, sets) { } EXPECT_TRUE(Interval(min_int, max_int) == a_int_b); } + +TEST(intervals, corner_cases) { + Interval::value_type max = numeric_limits::max(); + Interval almost_max(0, max - 10); + Interval dynamic(0, max); + Interval zero(0, 0); + + EXPECT_TRUE(almost_max + almost_max == dynamic); + EXPECT_TRUE(dynamic + almost_max == dynamic); + EXPECT_TRUE(almost_max + dynamic == dynamic); + EXPECT_TRUE(dynamic - almost_max == dynamic); + + EXPECT_TRUE(dynamic * almost_max == dynamic); + EXPECT_TRUE(almost_max * dynamic == dynamic); + 
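// Editor's note (illustration, not part of the patch): clip_add() now
// saturates instead of overflowing. With max = Interval::s_max, evaluating
// (max - 10) + (max - 10) trips the "b > max - a" guard and returns s_max,
// so almost_max + almost_max widens to the fully dynamic interval [0, s_max]
// rather than wrapping around, which the surrounding assertions verify.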
    EXPECT_TRUE(zero * almost_max == zero);
+    EXPECT_TRUE(almost_max * zero == zero);
+}
diff --git a/ngraph/test/onnx/onnx_editor.cpp b/ngraph/test/onnx/onnx_editor.cpp
index 590dc7e9350..96034029677 100644
--- a/ngraph/test/onnx/onnx_editor.cpp
+++ b/ngraph/test/onnx/onnx_editor.cpp
@@ -1347,3 +1347,60 @@ NGRAPH_TEST(onnx_editor, is_correct_tensor_name) {
     EXPECT_FALSE(editor.is_correct_tensor_name("not_existed"));
     EXPECT_FALSE(editor.is_correct_tensor_name(""));
 }
+
+NGRAPH_TEST(onnx_editor, get_input_ports) {
+    ONNXModelEditor editor{file_util::path_join(SERIALIZED_ZOO, "onnx/model_editor/subgraph_extraction_tests.onnx")};
+
+    const auto ports_1 = editor.get_input_ports(EditorNode{"relu1_name"});
+    EXPECT_EQ(ports_1.size(), 1);
+    EXPECT_EQ(ports_1[0], "in1");
+    const auto ports_2 = editor.get_input_ports(EditorNode{"split_name"});
+    EXPECT_EQ(ports_2.size(), 1);
+    EXPECT_EQ(ports_2[0], "add2");
+    const auto ports_3 = editor.get_input_ports(EditorNode{EditorOutput{"add2"}});
+    EXPECT_EQ(ports_3.size(), 2);
+    EXPECT_EQ(ports_3[0], "relu1");
+    EXPECT_EQ(ports_3[1], "add1");
+    try {
+        editor.get_input_ports(EditorNode{"add_ambiguous_name"});
+    } catch (const std::exception& e) {
+        std::string msg{e.what()};
+        EXPECT_TRUE(msg.find("The node with name: add_ambiguous_name, output_name: not_given is ambiguous") !=
+                    std::string::npos);
+    }
+    try {
+        editor.get_input_ports(EditorNode{""});
+    } catch (const std::exception& e) {
+        std::string msg{e.what()};
+        EXPECT_TRUE(msg.find("The node with name: not_given, output_name: not_given is ambiguous") !=
+                    std::string::npos);
+    }
+}
+NGRAPH_TEST(onnx_editor, get_output_ports) {
+    ONNXModelEditor editor{file_util::path_join(SERIALIZED_ZOO, "onnx/model_editor/subgraph_extraction_tests.onnx")};
+
+    const auto ports_1 = editor.get_output_ports(EditorNode{"relu1_name"});
+    EXPECT_EQ(ports_1.size(), 1);
+    EXPECT_EQ(ports_1[0], "relu1");
+    const auto ports_2 = editor.get_output_ports(EditorNode{"split_name"});
+    EXPECT_EQ(ports_2.size(), 2);
+    EXPECT_EQ(ports_2[0], "split1");
+    EXPECT_EQ(ports_2[1], "split2");
+    const auto ports_3 = editor.get_output_ports(EditorNode{EditorOutput{"add2"}});
+    EXPECT_EQ(ports_3.size(), 1);
+    EXPECT_EQ(ports_3[0], "add2");
+    try {
+        editor.get_output_ports(EditorNode{"add_ambiguous_name"});
+    } catch (const std::exception& e) {
+        std::string msg{e.what()};
+        EXPECT_TRUE(msg.find("The node with name: add_ambiguous_name, output_name: not_given is ambiguous") !=
+                    std::string::npos);
+    }
+    try {
+        editor.get_output_ports(EditorNode{""});
+    } catch (const std::exception& e) {
+        std::string msg{e.what()};
+        EXPECT_TRUE(msg.find("The node with name: not_given, output_name: not_given is ambiguous") !=
+                    std::string::npos);
+    }
+}
diff --git a/ngraph/test/type_prop/gather_tree.cpp b/ngraph/test/type_prop/gather_tree.cpp
index aa3ce09cc5f..da9a87ad202 100644
--- a/ngraph/test/type_prop/gather_tree.cpp
+++ b/ngraph/test/type_prop/gather_tree.cpp
@@ -2,6 +2,9 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include <algorithm>
+#include <vector>
+
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "util/type_prop.hpp"
@@ -9,78 +12,280 @@
 using namespace std;
 using namespace ngraph;
+namespace {
+constexpr size_t step_ids_input_idx = 0;
+constexpr size_t parent_idx_input_idx = 1;
+constexpr size_t max_seq_len_input_idx = 2;
+constexpr size_t end_token_input_idx = 3;
+constexpr size_t gather_tree_required_inputs = 4;
+struct GatherTreeInputInfo {
+    element::Type in_et;
+    PartialShape in_pshape;
+};
+
+using GatherTreeInputParams = std::array<GatherTreeInputInfo, gather_tree_required_inputs>;
+
+std::shared_ptr<Node> makeGatherTreeOp(const GatherTreeInputParams& p) {
+    if (p.size() != gather_tree_required_inputs) {
+        throw runtime_error("GatherTree requires 4 inputs");
+    }
+    auto step_ids = make_shared<op::Parameter>(p.at(step_ids_input_idx).in_et, p.at(step_ids_input_idx).in_pshape);
+    auto parent_idx =
+        make_shared<op::Parameter>(p.at(parent_idx_input_idx).in_et, p.at(parent_idx_input_idx).in_pshape);
+    auto max_seq_len =
+        make_shared<op::Parameter>(p.at(max_seq_len_input_idx).in_et, p.at(max_seq_len_input_idx).in_pshape);
+    auto end_token = make_shared<op::Parameter>(p.at(end_token_input_idx).in_et, p.at(end_token_input_idx).in_pshape);
+    return make_shared<op::v1::GatherTree>(step_ids, parent_idx, max_seq_len, end_token);
+}
+}  // namespace
+
+TEST(type_prop, gather_tree_invalid_input_element_type) {
+    Shape scalar_shape{};
+    Shape vector_shape{2};
+    Shape tensor_shape{1, 2, 3};
+
+    element::Type input_et = element::boolean;
+    GatherTreeInputParams params{GatherTreeInputInfo{input_et, tensor_shape},
+                                 GatherTreeInputInfo{input_et, tensor_shape},
+                                 GatherTreeInputInfo{input_et, vector_shape},
+                                 GatherTreeInputInfo{input_et, scalar_shape}};
+    try {
+        auto gather_tree = makeGatherTreeOp(params);
+        FAIL() << "Invalid element types for inputs not detected";
+    } catch (const NodeValidationFailure& error) {
+        EXPECT_HAS_SUBSTRING(error.what(), "Element type of inputs must be numeric.");
+    } catch (...) {
+        FAIL() << "Element type check for inputs failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, gather_tree_incompatible_input_element_types) {
+    element::Type float_et = element::f32;
+    element::Type integer_et = element::i32;
+
+    Shape scalar_shape{};
+    Shape vector_shape{2};
+    Shape tensor_shape{1, 2, 3};
+
+    vector<GatherTreeInputParams> test_cases = {// step_ids input has incompatible element type
+                                                GatherTreeInputParams{GatherTreeInputInfo{integer_et, tensor_shape},
+                                                                      GatherTreeInputInfo{float_et, tensor_shape},
+                                                                      GatherTreeInputInfo{float_et, vector_shape},
+                                                                      GatherTreeInputInfo{float_et, scalar_shape}},
+                                                // parent_idx input has incompatible element type
+                                                GatherTreeInputParams{GatherTreeInputInfo{float_et, tensor_shape},
+                                                                      GatherTreeInputInfo{integer_et, tensor_shape},
+                                                                      GatherTreeInputInfo{float_et, vector_shape},
+                                                                      GatherTreeInputInfo{float_et, scalar_shape}},
+                                                // max_seq_len input has incompatible element type
+                                                GatherTreeInputParams{GatherTreeInputInfo{float_et, tensor_shape},
+                                                                      GatherTreeInputInfo{float_et, tensor_shape},
+                                                                      GatherTreeInputInfo{integer_et, vector_shape},
+                                                                      GatherTreeInputInfo{float_et, scalar_shape}},
+                                                // end_token input has incompatible element type
+                                                GatherTreeInputParams{GatherTreeInputInfo{float_et, tensor_shape},
+                                                                      GatherTreeInputInfo{float_et, tensor_shape},
+                                                                      GatherTreeInputInfo{float_et, vector_shape},
+                                                                      GatherTreeInputInfo{integer_et, scalar_shape}}};
+
+    for (const auto& test_case : test_cases) {
+        try {
+            auto gather_tree = makeGatherTreeOp(test_case);
+            FAIL() << "Incompatible element types for inputs not detected";
+        } catch (const NodeValidationFailure& error) {
+            EXPECT_HAS_SUBSTRING(error.what(), "Inputs must have the same element type.");
+        } catch (...)
{ + FAIL() << "Element type check for inputs failed for unexpected reason"; + } + } +} + +TEST(type_prop, gather_tree_input_element_types) { + Shape scalar_shape{}; + Shape vector_shape{2}; + Shape tensor_shape{1, 2, 3}; + + std::vector element_types{element::u4, + element::u8, + element::u16, + element::u32, + element::i8, + element::i16, + element::i32, + element::i64, + element::f32, + element::f64, + element::u32}; + std::vector test_cases; + std::for_each(std::begin(element_types), std::end(element_types), [&](element::Type et) { + GatherTreeInputParams params{GatherTreeInputInfo{et, tensor_shape}, + GatherTreeInputInfo{et, tensor_shape}, + GatherTreeInputInfo{et, vector_shape}, + GatherTreeInputInfo{et, scalar_shape}}; + test_cases.insert(test_cases.end(), params); + }); + for (const auto& test_case : test_cases) { + try { + EXPECT_NO_THROW(makeGatherTreeOp(test_case)); + } catch (...) { + FAIL() << "Inputs element type validation check failed for unexpected reason"; + } + } +} + +TEST(type_prop, gather_tree_invalid_step_ids_and_parent_idx_input_shapes) { + element::Type et = element::f32; + + Shape scalar_shape{}; + PartialShape vector_shape{Dimension()}; + + std::vector> input_shapes = { + {PartialShape{1}, PartialShape{1, 2, 3}}, + {PartialShape{1, 2, 3}, PartialShape{3, 3, 3, 3}}, + {PartialShape{Dimension(), Dimension(), 3}, PartialShape::dynamic(4)}, + {PartialShape::dynamic(2), PartialShape::dynamic()}, + {PartialShape{1, 2, 3}, PartialShape{Dimension(), Dimension(3, 5), 3}}}; + std::vector test_cases; + std::for_each(std::begin(input_shapes), std::end(input_shapes), [&](std::pair shapes) { + GatherTreeInputParams params{GatherTreeInputInfo{et, shapes.first}, + GatherTreeInputInfo{et, shapes.second}, + GatherTreeInputInfo{et, vector_shape}, + GatherTreeInputInfo{et, scalar_shape}}; + test_cases.insert(test_cases.end(), params); + }); + for (const auto& test_case : test_cases) { + try { + auto gather_tree = makeGatherTreeOp(test_case); + FAIL() << "Incompatible shapes for inputs step_ids and parent_idx not detected"; + } catch (const NodeValidationFailure& error) { + EXPECT_HAS_SUBSTRING(error.what(), "step_ids and parent_idx inputs must have the same shape with rank 3."); + } catch (...) { + FAIL() << "Shape check for step_ids and parent_idx inputs failed for unexpected reason"; + } + } +} + +TEST(type_prop, gather_tree_invalid_max_seq_len_rank) { + element::Type et = element::f32; + + Shape tensor_shape{1, 2, 3}; + Shape scalar_shape{}; + + std::vector max_seq_len_shapes = {{}, {Dimension(), 1}, PartialShape::dynamic(3), {1, 2, 3, 4}}; + + std::vector test_cases; + std::for_each(std::begin(max_seq_len_shapes), std::end(max_seq_len_shapes), [&](PartialShape shape) { + GatherTreeInputParams params{GatherTreeInputInfo{et, tensor_shape}, + GatherTreeInputInfo{et, tensor_shape}, + GatherTreeInputInfo{et, shape}, + GatherTreeInputInfo{et, scalar_shape}}; + test_cases.insert(test_cases.end(), params); + }); + for (const auto& test_case : test_cases) { + try { + auto gather_tree = makeGatherTreeOp(test_case); + FAIL() << "Invalid shapes for max_seq_len input not detected"; + } catch (const NodeValidationFailure& error) { + EXPECT_HAS_SUBSTRING(error.what(), "max_seq_len input must have rank 1."); + } catch (...) 
{ + FAIL() << "Shape check for max_seq_len input failed for unexpected reason"; + } + } +} + +TEST(type_prop, gather_tree_incompatible_step_ids_and_max_seq_len_shapes) { + element::Type et = element::f32; + + Shape scalar_shape{}; + + std::vector> input_shapes = { + {PartialShape{1, 2, 3}, PartialShape{4}}, + {PartialShape{Dimension(), 2, 3}, PartialShape{Dimension(3, 6)}}}; + std::vector test_cases; + std::for_each(std::begin(input_shapes), std::end(input_shapes), [&](std::pair shapes) { + GatherTreeInputParams params{GatherTreeInputInfo{et, shapes.first}, + GatherTreeInputInfo{et, shapes.first}, + GatherTreeInputInfo{et, shapes.second}, + GatherTreeInputInfo{et, scalar_shape}}; + test_cases.insert(test_cases.end(), params); + }); + for (const auto& test_case : test_cases) { + try { + auto gather_tree = makeGatherTreeOp(test_case); + FAIL() << "Incompatible shapes for inputs step_ids and max_seq_len not detected"; + } catch (const NodeValidationFailure& error) { + EXPECT_HAS_SUBSTRING(error.what(), + "Number of elements of max_seq_len input must match BATCH_SIZE dimension of " + "step_ids/parent_idx inputs."); + } catch (...) { + FAIL() << "Shape check for step_ids and max_seq_len inputs failed for unexpected reason"; + } + } +} + TEST(type_prop, gather_tree_output_shape) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); + element::Type et = element::f32; + Shape scalar_shape{}; - auto gather_tree = make_shared(step_ids, parent_idx, max_seq_len, end_token); + std::vector> input_shapes = { + {PartialShape{1, 2, 3}, PartialShape{2}}, + {PartialShape{1, 2, 3}, PartialShape::dynamic(1)}, + {PartialShape{Dimension(), 2, Dimension()}, PartialShape{2}}, + { + PartialShape::dynamic(3), + PartialShape{4}, + }, + {PartialShape{Dimension(), Dimension(3, 5), Dimension()}, PartialShape{Dimension(1, 3)}}, + {PartialShape::dynamic(), PartialShape::dynamic()}}; + std::vector test_cases; + std::for_each(std::begin(input_shapes), std::end(input_shapes), [&](std::pair shapes) { + GatherTreeInputParams params{GatherTreeInputInfo{et, shapes.first}, + GatherTreeInputInfo{et, shapes.first}, + GatherTreeInputInfo{et, shapes.second}, + GatherTreeInputInfo{et, scalar_shape}}; + test_cases.insert(test_cases.end(), params); + }); + for (const auto& test_case : test_cases) { + try { + auto gather_tree = makeGatherTreeOp(test_case); - ASSERT_EQ(gather_tree->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(gather_tree->get_output_element_type(0), element::i64); -} - -TEST(type_prop, gather_tree_pooling_step_ids_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); - try { - auto gather_tree = make_shared(step_ids, parent_idx, max_seq_len, end_token); - // Should have thrown, so fail if it didn't - FAIL() << "Ivalid step_ids input rank not detected"; - } catch (const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), std::string("step_ids input rank must equal to 3 (step_ids rank: 4)")); - } catch (...) 
{ - FAIL() << "Deduced type check failed for unexpected reason"; + PartialShape result_shape{test_case.at(step_ids_input_idx).in_pshape}; + PartialShape max_seq_len_shape{test_case.at(max_seq_len_input_idx).in_pshape}; + if (result_shape.rank().is_static() && max_seq_len_shape.rank().is_static()) { + result_shape[1] = result_shape[1] & max_seq_len_shape[0]; + } + ASSERT_EQ(gather_tree->get_output_partial_shape(0), result_shape); + ASSERT_EQ(gather_tree->get_output_element_type(0), et); + } catch (...) { + FAIL() << "Output shape check failed for unexpected reason"; + } } } -TEST(type_prop, gather_tree_parent_idx_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); - try { - auto gather_tree = make_shared(step_ids, parent_idx, max_seq_len, end_token); - // Should have thrown, so fail if it didn't - FAIL() << "Ivalid parent_idx input rank not detected"; - } catch (const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), std::string("parent_idx input rank must equal to 3 (parent_idx rank: 4)")); - } catch (...) { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} +TEST(type_prop, gather_tree_invalid_end_token_rank) { + element::Type et = element::f32; -TEST(type_prop, gather_tree_max_seq_len_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1, 2}); - auto end_token = make_shared(element::i64, Shape{}); - try { - auto gather_tree = make_shared(step_ids, parent_idx, max_seq_len, end_token); - // Should have thrown, so fail if it didn't - FAIL() << "Ivalid parent_idx input rank not detected"; - } catch (const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), std::string("max_seq_len input rank must equal to 1 (max_seq_len rank: 2)")); - } catch (...) { - FAIL() << "Deduced type check failed for unexpected reason"; - } -} + Shape tensor_shape{1, 2, 3}; + Shape vector_shape{2}; -TEST(type_prop, gather_tree_end_token_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{1}); - try { - auto gather_tree = make_shared(step_ids, parent_idx, max_seq_len, end_token); - // Should have thrown, so fail if it didn't - FAIL() << "Ivalid end_token input rank not detected"; - } catch (const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), std::string("end_token input rank must be scalar (end_token rank: 1)")); - } catch (...) 
{
-        FAIL() << "Deduced type check failed for unexpected reason";
+    std::vector<PartialShape> end_token_shapes = {{3}, {Dimension(), 1}, PartialShape::dynamic(3), {1, 2, 3, 4}};
+
+    std::vector<GatherTreeInputParams> test_cases;
+    std::for_each(std::begin(end_token_shapes), std::end(end_token_shapes), [&](PartialShape shape) {
+        GatherTreeInputParams params{GatherTreeInputInfo{et, tensor_shape},
+                                     GatherTreeInputInfo{et, tensor_shape},
+                                     GatherTreeInputInfo{et, vector_shape},
+                                     GatherTreeInputInfo{et, shape}};
+        test_cases.insert(test_cases.end(), params);
+    });
+    for (const auto& test_case : test_cases) {
+        try {
+            auto gather_tree = makeGatherTreeOp(test_case);
+            FAIL() << "Invalid shapes for end_token input not detected";
+        } catch (const NodeValidationFailure& error) {
+            EXPECT_HAS_SUBSTRING(error.what(), "end_token input must be scalar.");
+        } catch (...) {
+            FAIL() << "Shape check for end_token input failed for unexpected reason";
+        }
     }
 }
diff --git a/ngraph/test/visitors/op/gather_tree.cpp b/ngraph/test/visitors/op/gather_tree.cpp
new file mode 100644
index 00000000000..7a89584199a
--- /dev/null
+++ b/ngraph/test/visitors/op/gather_tree.cpp
@@ -0,0 +1,28 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "util/visitor.hpp"
+
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
+TEST(attributes, gather_tree_op) {
+    NodeBuilder::get_ops().register_factory<opset1::GatherTree>();
+
+    auto step_ids = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
+    auto parent_idx = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
+    auto max_seq_len = std::make_shared<op::Parameter>(element::f32, Shape{2});
+    auto end_token = std::make_shared<op::Parameter>(element::f32, Shape{});
+
+    auto gather_tree = std::make_shared<opset1::GatherTree>(step_ids, parent_idx, max_seq_len, end_token);
+    NodeBuilder builder(gather_tree);
+
+    const auto expected_attr_count = 0;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+}
\ No newline at end of file
diff --git a/runtime/CMakeLists.txt b/runtime/CMakeLists.txt
new file mode 100644
index 00000000000..88bc7064127
--- /dev/null
+++ b/runtime/CMakeLists.txt
@@ -0,0 +1,7 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+if(ENABLE_PYTHON)
+    add_subdirectory(bindings/python)
+endif()
diff --git a/runtime/bindings/python/.clang-format b/runtime/bindings/python/.clang-format
new file mode 100644
index 00000000000..ebe747b7838
--- /dev/null
+++ b/runtime/bindings/python/.clang-format
@@ -0,0 +1,28 @@
+BasedOnStyle: Google
+IndentWidth: 4
+UseTab: Never
+ColumnLimit: 120
+
+Language: Cpp
+Standard: Cpp11
+
+AccessModifierOffset: -4
+AlignConsecutiveMacros: true
+AllowAllArgumentsOnNextLine: false
+AllowAllConstructorInitializersOnNextLine: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLambdasOnASingleLine: Empty
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakBeforeMultilineStrings: false
+BinPackArguments: false
+BinPackParameters: false
+CommentPragmas: '^#'
+DerivePointerAlignment: false
+FixNamespaceComments: true
+IndentCaseLabels: false
+IndentPPDirectives: AfterHash
+ForEachMacros:
+  - foreach
+  - FOREACH_CHILD
diff --git a/ngraph/python/.gitignore b/runtime/bindings/python/.gitignore
similarity index 100%
rename from ngraph/python/.gitignore
rename to runtime/bindings/python/.gitignore
diff --git
a/ngraph/python/BUILDING.md b/runtime/bindings/python/BUILDING.md similarity index 100% rename from ngraph/python/BUILDING.md rename to runtime/bindings/python/BUILDING.md diff --git a/runtime/bindings/python/CMakeLists.txt b/runtime/bindings/python/CMakeLists.txt new file mode 100644 index 00000000000..4e1b972c17a --- /dev/null +++ b/runtime/bindings/python/CMakeLists.txt @@ -0,0 +1,25 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +cmake_minimum_required (VERSION 3.13) + +project(OpenVINOPython DESCRIPTION "OpenVINO Runtime Python bindings") + +set(LIBRARY_OUTPUT_DIRECTORY_BIN ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}) + +add_subdirectory(thirdparty/pybind11 EXCLUDE_FROM_ALL) +add_subdirectory(src/compatibility) + +if(NGRAPH_UNIT_TEST_ENABLE) + add_subdirectory(tests/mock/mock_py_ngraph_frontend) + add_dependencies(_pyngraph mock_py_ngraph_frontend) + set_target_properties(mock_py_ngraph_frontend PROPERTIES + LIBRARY_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} + ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} + COMPILE_PDB_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} + PDB_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN}) + + add_subdirectory(tests/mock/pyngraph_fe_mock_api) + add_dependencies(_pyngraph pybind_mock_frontend) +endif() diff --git a/ngraph/python/requirements.txt b/runtime/bindings/python/requirements.txt similarity index 100% rename from ngraph/python/requirements.txt rename to runtime/bindings/python/requirements.txt diff --git a/ngraph/python/requirements_test.txt b/runtime/bindings/python/requirements_test.txt similarity index 100% rename from ngraph/python/requirements_test.txt rename to runtime/bindings/python/requirements_test.txt diff --git a/ngraph/python/setup.py b/runtime/bindings/python/setup.py similarity index 94% rename from ngraph/python/setup.py rename to runtime/bindings/python/setup.py index 5aae74ceb1e..0cd8167f263 100644 --- a/ngraph/python/setup.py +++ b/runtime/bindings/python/setup.py @@ -18,9 +18,8 @@ from distutils.command.build import build as _build __version__ = os.environ.get("NGRAPH_VERSION", "0.0.0.dev0") PYNGRAPH_ROOT_DIR = os.path.abspath(os.path.dirname(__file__)) -NGRAPH_ROOT_DIR = os.path.normpath(os.path.join(PYNGRAPH_ROOT_DIR, "..")) -OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYNGRAPH_ROOT_DIR, "../..")) -# Change current working dircectory to ngraph/python +OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYNGRAPH_ROOT_DIR, "../../..")) +# Change current working directory to runtime/bindings/python os.chdir(PYNGRAPH_ROOT_DIR) NGRAPH_LIBS = ["ngraph", "onnx_ngraph_frontend"] @@ -145,16 +144,17 @@ class BuildCMakeExt(build_ext): os.makedirs(build_dir, exist_ok=True) os.makedirs(extension_path.parent.absolute(), exist_ok=True) - # If ngraph_DIR is not set try to build from OpenVINO root + # If OpenVINO_DIR is set, try to build Python only, + # otherwise build from scratch using OpenVINO root root_dir = OPENVINO_ROOT_DIR bin_dir = os.path.join(OPENVINO_ROOT_DIR, "bin") - if os.environ.get("ngraph_DIR") is not None: + if os.environ.get("OpenVINO_DIR") is not None: root_dir = PYNGRAPH_ROOT_DIR bin_dir = build_dir self.announce("Configuring cmake project", level=3) ext_args = self.cmake_args.split() if self.cmake_args else [] - self.spawn(["cmake", "-H" + root_dir, "-B" + self.build_temp, + self.spawn(["cmake", "-S" + root_dir, "-B" + self.build_temp, "-DCMAKE_BUILD_TYPE={}".format(self.config), "-DENABLE_PYTHON=ON", "-DNGRAPH_ONNX_FRONTEND_ENABLE=ON"] + ext_args) @@ -181,8 +181,8 @@ 
class InstallCMakeLibs(install_lib): self.announce("Adding library files", level=3) root_dir = os.path.join(OPENVINO_ROOT_DIR, "bin") - if os.environ.get("ngraph_DIR") is not None: - root_dir = pathlib.Path(os.environ["ngraph_DIR"]) / ".." + if os.environ.get("OpenVINO_DIR") is not None: + root_dir = pathlib.Path(PYNGRAPH_ROOT_DIR) lib_ext = "" if "linux" in sys.platform: @@ -217,7 +217,7 @@ setup( url="https://github.com/openvinotoolkit/openvino", license="License :: OSI Approved :: Apache Software License", ext_modules=[CMakeExtension(name="_pyngraph")], - package_dir={"": "src"}, + package_dir={"": "src/compatibility"}, packages=packages, install_requires=requirements, data_files=data_files, diff --git a/runtime/bindings/python/src/compatibility/CMakeLists.txt b/runtime/bindings/python/src/compatibility/CMakeLists.txt new file mode 100644 index 00000000000..d3272c94ca4 --- /dev/null +++ b/runtime/bindings/python/src/compatibility/CMakeLists.txt @@ -0,0 +1,5 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +add_subdirectory(pyngraph)
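A note on the setup.py hunks above: the build-mode switch moves from ngraph_DIR to OpenVINO_DIR. When OpenVINO_DIR is exported, setup.py configures CMake from the bindings directory alone; otherwise it configures the whole tree from the OpenVINO root. An illustrative sketch of the two invocations (the install prefix below is hypothetical, not a documented location):

import os
import subprocess
import sys

bindings_dir = "runtime/bindings/python"

# Bindings-only build: OpenVINO_DIR points at an existing OpenVINO CMake package.
env = dict(os.environ, OpenVINO_DIR="/opt/openvino/deployment_tools/inference_engine/share")
subprocess.check_call([sys.executable, "setup.py", "bdist_wheel"], cwd=bindings_dir, env=env)

# Full build: without OpenVINO_DIR, setup.py runs cmake -S <repo root> -B <build dir> itself.
subprocess.check_call([sys.executable, "setup.py", "bdist_wheel"], cwd=bindings_dir)

diff --git a/ngraph/python/src/ngraph/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/__init__.py diff --git a/ngraph/python/src/ngraph/exceptions.py b/runtime/bindings/python/src/compatibility/ngraph/exceptions.py similarity index 100% rename from ngraph/python/src/ngraph/exceptions.py rename to runtime/bindings/python/src/compatibility/ngraph/exceptions.py diff --git a/ngraph/python/src/ngraph/frontend/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/frontend/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/frontend/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/frontend/__init__.py diff --git a/ngraph/python/src/ngraph/helpers.py b/runtime/bindings/python/src/compatibility/ngraph/helpers.py similarity index 100% rename from ngraph/python/src/ngraph/helpers.py rename to runtime/bindings/python/src/compatibility/ngraph/helpers.py diff --git a/ngraph/python/src/ngraph/impl/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/impl/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/impl/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/impl/__init__.py diff --git a/ngraph/python/src/ngraph/impl/op/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/impl/op/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/impl/op/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/impl/op/__init__.py diff --git a/ngraph/python/src/ngraph/impl/op/util/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/impl/op/util/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/impl/op/util/__init__.py diff --git a/ngraph/python/src/ngraph/impl/passes/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/impl/passes/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/impl/passes/__init__.py diff --git a/ngraph/python/src/ngraph/opset1/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset1/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/opset1/__init__.py rename to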
runtime/bindings/python/src/compatibility/ngraph/opset1/__init__.py diff --git a/ngraph/python/src/ngraph/opset1/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset1/ops.py similarity index 100% rename from ngraph/python/src/ngraph/opset1/ops.py rename to runtime/bindings/python/src/compatibility/ngraph/opset1/ops.py diff --git a/ngraph/python/src/ngraph/opset2/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset2/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/opset2/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/opset2/__init__.py diff --git a/ngraph/python/src/ngraph/opset2/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset2/ops.py similarity index 100% rename from ngraph/python/src/ngraph/opset2/ops.py rename to runtime/bindings/python/src/compatibility/ngraph/opset2/ops.py diff --git a/ngraph/python/src/ngraph/opset3/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset3/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/opset3/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/opset3/__init__.py diff --git a/ngraph/python/src/ngraph/opset3/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset3/ops.py similarity index 100% rename from ngraph/python/src/ngraph/opset3/ops.py rename to runtime/bindings/python/src/compatibility/ngraph/opset3/ops.py diff --git a/ngraph/python/src/ngraph/opset4/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset4/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/opset4/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/opset4/__init__.py diff --git a/ngraph/python/src/ngraph/opset4/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset4/ops.py similarity index 100% rename from ngraph/python/src/ngraph/opset4/ops.py rename to runtime/bindings/python/src/compatibility/ngraph/opset4/ops.py diff --git a/ngraph/python/src/ngraph/opset5/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset5/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/opset5/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/opset5/__init__.py diff --git a/ngraph/python/src/ngraph/opset5/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset5/ops.py similarity index 100% rename from ngraph/python/src/ngraph/opset5/ops.py rename to runtime/bindings/python/src/compatibility/ngraph/opset5/ops.py diff --git a/ngraph/python/src/ngraph/opset6/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset6/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/opset6/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/opset6/__init__.py diff --git a/ngraph/python/src/ngraph/opset6/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset6/ops.py similarity index 100% rename from ngraph/python/src/ngraph/opset6/ops.py rename to runtime/bindings/python/src/compatibility/ngraph/opset6/ops.py diff --git a/ngraph/python/src/ngraph/opset7/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset7/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/opset7/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/opset7/__init__.py diff --git a/ngraph/python/src/ngraph/opset7/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset7/ops.py similarity index 100% rename from ngraph/python/src/ngraph/opset7/ops.py rename to 
runtime/bindings/python/src/compatibility/ngraph/opset7/ops.py diff --git a/ngraph/python/src/ngraph/opset8/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/opset8/__init__.py similarity index 99% rename from ngraph/python/src/ngraph/opset8/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/opset8/__init__.py index d294788c282..9b8d8c12d32 100644 --- a/ngraph/python/src/ngraph/opset8/__init__.py +++ b/runtime/bindings/python/src/compatibility/ngraph/opset8/__init__.py @@ -84,7 +84,7 @@ from ngraph.opset4.ops import lstm_cell from ngraph.opset1.ops import lstm_sequence from ngraph.opset1.ops import matmul from ngraph.opset8.ops import matrix_nms -from ngraph.opset1.ops import max_pool +from ngraph.opset8.ops import max_pool from ngraph.opset1.ops import maximum from ngraph.opset1.ops import minimum from ngraph.opset4.ops import mish diff --git a/ngraph/python/src/ngraph/opset8/ops.py b/runtime/bindings/python/src/compatibility/ngraph/opset8/ops.py similarity index 81% rename from ngraph/python/src/ngraph/opset8/ops.py rename to runtime/bindings/python/src/compatibility/ngraph/opset8/ops.py index 82e2a24446b..072433c387e 100644 --- a/ngraph/python/src/ngraph/opset8/ops.py +++ b/runtime/bindings/python/src/compatibility/ngraph/opset8/ops.py @@ -276,3 +276,58 @@ def gather( "batch_dims": batch_dims } return _get_node_factory_opset8().create("Gather", inputs, attributes) + + +@nameable_op +def max_pool( + data: NodeInput, + strides: List[int], + dilations: List[int], + pads_begin: List[int], + pads_end: List[int], + kernel_shape: TensorShape, + rounding_type: str = "floor", + auto_pad: Optional[str] = None, + index_element_type: Optional[str] = "i64", + axis: Optional[int] = 0, + name: Optional[str] = None, +) -> Node: + """Perform max pooling operation and return both values and indices of the selected elements. + + @param data: The node providing input data. + @param strides: The distance (in pixels) to slide the filter on the feature map + over the axes. + @param dilations: The dilation of filter elements (distance between elements). + @param pads_begin: The number of pixels to add at the beginning along each axis. + @param pads_end: The number of pixels to add at the end along each axis. + @param kernel_shape: The pooling operation kernel shape. + @param rounding_type: Determines the rounding scheme used when computing the output shape. + Acceptable values are: ['floor', 'ceil']. Defaults to 'floor'. + @param auto_pad: Determines how the padding is calculated. Acceptable values: + [None, 'same_upper', 'same_lower', 'valid']. Defaults to None. + @param index_element_type: The data type used for the indices output of this operator. + Defaults to i64. + @param axis: The first dimension in the data shape used to determine the maximum + returned index value. The value is the product of all dimensions + starting at the provided axis. Defaults to 0. + @param name: The optional name for the created output node. + + @return The new node performing max pooling operation.
+ """ + if auto_pad is None: + auto_pad = "explicit" + return _get_node_factory_opset8().create( + "MaxPool", + [as_node(data)], + { + "strides": strides, + "dilations": dilations, + "pads_begin": pads_begin, + "pads_end": pads_end, + "kernel": kernel_shape, + "rounding_type": rounding_type.upper(), + "auto_pad": auto_pad.upper(), + "index_element_type": index_element_type, + "axis": axis, + }, + ) diff --git a/ngraph/python/src/ngraph/opset_utils.py b/runtime/bindings/python/src/compatibility/ngraph/opset_utils.py similarity index 100% rename from ngraph/python/src/ngraph/opset_utils.py rename to runtime/bindings/python/src/compatibility/ngraph/opset_utils.py diff --git a/ngraph/python/src/ngraph/utils/__init__.py b/runtime/bindings/python/src/compatibility/ngraph/utils/__init__.py similarity index 100% rename from ngraph/python/src/ngraph/utils/__init__.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/__init__.py diff --git a/ngraph/python/src/ngraph/utils/broadcasting.py b/runtime/bindings/python/src/compatibility/ngraph/utils/broadcasting.py similarity index 100% rename from ngraph/python/src/ngraph/utils/broadcasting.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/broadcasting.py diff --git a/ngraph/python/src/ngraph/utils/decorators.py b/runtime/bindings/python/src/compatibility/ngraph/utils/decorators.py similarity index 100% rename from ngraph/python/src/ngraph/utils/decorators.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/decorators.py diff --git a/ngraph/python/src/ngraph/utils/input_validation.py b/runtime/bindings/python/src/compatibility/ngraph/utils/input_validation.py similarity index 100% rename from ngraph/python/src/ngraph/utils/input_validation.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/input_validation.py diff --git a/ngraph/python/src/ngraph/utils/node_factory.py b/runtime/bindings/python/src/compatibility/ngraph/utils/node_factory.py similarity index 100% rename from ngraph/python/src/ngraph/utils/node_factory.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/node_factory.py diff --git a/ngraph/python/src/ngraph/utils/reduction.py b/runtime/bindings/python/src/compatibility/ngraph/utils/reduction.py similarity index 100% rename from ngraph/python/src/ngraph/utils/reduction.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/reduction.py diff --git a/ngraph/python/src/ngraph/utils/tensor_iterator_types.py b/runtime/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py similarity index 100% rename from ngraph/python/src/ngraph/utils/tensor_iterator_types.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/tensor_iterator_types.py diff --git a/ngraph/python/src/ngraph/utils/types.py b/runtime/bindings/python/src/compatibility/ngraph/utils/types.py similarity index 100% rename from ngraph/python/src/ngraph/utils/types.py rename to runtime/bindings/python/src/compatibility/ngraph/utils/types.py
OpenVINO_SOURCE_DIR) find_package(InferenceEngineDeveloperPackage QUIET) - find_package(ngraph REQUIRED) + find_package(OpenVINO REQUIRED) endif() -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/pybind11 EXCLUDE_FROM_ALL) - # PYTHON_VERSION_MAJOR and PYTHON_VERSION_MINOR are defined inside pybind11 set(PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}) message(STATUS "Python version=${PYTHON_VERSION}") -set(LIBRARY_OUTPUT_DIRECTORY_BIN ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}) if(OpenVINO_SOURCE_DIR) if(WIN32) set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/${PYTHON_VERSION}/) @@ -53,36 +48,24 @@ endif() # create target -file(GLOB_RECURSE SOURCES src/pyngraph/*.cpp) +file(GLOB_RECURSE SOURCES *.cpp) pybind11_add_module(_${PROJECT_NAME} MODULE ${SOURCES}) -target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src") +target_include_directories(_${PROJECT_NAME} PRIVATE "../") -target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::ngraph ngraph::frontend_manager) +target_link_libraries(_${PROJECT_NAME} PRIVATE openvino::core openvino::frontend::manager) -if (TARGET ngraph::onnx_ngraph_frontend) - add_dependencies(_${PROJECT_NAME} ngraph::onnx_ngraph_frontend) +if(TARGET onnx_ngraph_frontend) + add_dependencies(_${PROJECT_NAME} onnx_ngraph_frontend) endif() -if(NGRAPH_UNIT_TEST_ENABLE) - add_subdirectory(tests/mock/mock_py_ngraph_frontend) - add_dependencies(_${PROJECT_NAME} mock_py_ngraph_frontend) - set_target_properties(mock_py_ngraph_frontend PROPERTIES - LIBRARY_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} - ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} - COMPILE_PDB_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN} - PDB_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_DIRECTORY_BIN}) - - add_subdirectory(tests/mock/pyngraph_fe_mock_api) - add_dependencies(_${PROJECT_NAME} pybind_mock_frontend) -endif() # perform copy if(OpenVINO_SOURCE_DIR) add_custom_command(TARGET _${PROJECT_NAME} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/src/ngraph ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/ngraph + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/../ngraph ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/ngraph ) endif() @@ -99,12 +82,12 @@ if(OpenVINO_SOURCE_DIR OR InferenceEngineDeveloperPackage_FOUND) DESTINATION python/${PYTHON_VERSION} COMPONENT pyngraph_${PYTHON_VERSION}) - install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/src/ngraph + install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../ngraph DESTINATION python/${PYTHON_VERSION} COMPONENT pyngraph_${PYTHON_VERSION} USE_SOURCE_PERMISSIONS) - install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/tests + install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../../../tests DESTINATION tests/${PROJECT_NAME} COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/ngraph/python/src/pyngraph/axis_set.cpp b/runtime/bindings/python/src/compatibility/pyngraph/axis_set.cpp similarity index 100% rename from ngraph/python/src/pyngraph/axis_set.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/axis_set.cpp diff --git a/ngraph/python/src/pyngraph/axis_set.hpp b/runtime/bindings/python/src/compatibility/pyngraph/axis_set.hpp similarity index 100% rename from ngraph/python/src/pyngraph/axis_set.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/axis_set.hpp diff --git a/ngraph/python/src/pyngraph/axis_vector.cpp b/runtime/bindings/python/src/compatibility/pyngraph/axis_vector.cpp similarity index 100% rename from ngraph/python/src/pyngraph/axis_vector.cpp
rename to runtime/bindings/python/src/compatibility/pyngraph/axis_vector.cpp diff --git a/ngraph/python/src/pyngraph/axis_vector.hpp b/runtime/bindings/python/src/compatibility/pyngraph/axis_vector.hpp similarity index 100% rename from ngraph/python/src/pyngraph/axis_vector.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/axis_vector.hpp diff --git a/ngraph/python/src/pyngraph/coordinate.cpp b/runtime/bindings/python/src/compatibility/pyngraph/coordinate.cpp similarity index 100% rename from ngraph/python/src/pyngraph/coordinate.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/coordinate.cpp diff --git a/ngraph/python/src/pyngraph/coordinate.hpp b/runtime/bindings/python/src/compatibility/pyngraph/coordinate.hpp similarity index 100% rename from ngraph/python/src/pyngraph/coordinate.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/coordinate.hpp diff --git a/ngraph/python/src/pyngraph/coordinate_diff.cpp b/runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp similarity index 100% rename from ngraph/python/src/pyngraph/coordinate_diff.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp diff --git a/ngraph/python/src/pyngraph/coordinate_diff.hpp b/runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp similarity index 100% rename from ngraph/python/src/pyngraph/coordinate_diff.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.hpp diff --git a/ngraph/python/src/pyngraph/dict_attribute_visitor.cpp b/runtime/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp similarity index 100% rename from ngraph/python/src/pyngraph/dict_attribute_visitor.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.cpp diff --git a/ngraph/python/src/pyngraph/dict_attribute_visitor.hpp b/runtime/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp similarity index 100% rename from ngraph/python/src/pyngraph/dict_attribute_visitor.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/dict_attribute_visitor.hpp diff --git a/ngraph/python/src/pyngraph/dimension.cpp b/runtime/bindings/python/src/compatibility/pyngraph/dimension.cpp similarity index 100% rename from ngraph/python/src/pyngraph/dimension.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/dimension.cpp diff --git a/ngraph/python/src/pyngraph/dimension.hpp b/runtime/bindings/python/src/compatibility/pyngraph/dimension.hpp similarity index 100% rename from ngraph/python/src/pyngraph/dimension.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/dimension.hpp diff --git a/ngraph/python/src/pyngraph/frontend/frontend.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.cpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/frontend.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.cpp diff --git a/ngraph/python/src/pyngraph/frontend/frontend.hpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.hpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/frontend.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.hpp diff --git a/ngraph/python/src/pyngraph/frontend/frontend_manager.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.cpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/frontend_manager.cpp rename to 
runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.cpp diff --git a/ngraph/python/src/pyngraph/frontend/frontend_manager.hpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.hpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/frontend_manager.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.hpp diff --git a/ngraph/python/src/pyngraph/frontend/inputmodel.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.cpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/inputmodel.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.cpp diff --git a/ngraph/python/src/pyngraph/frontend/inputmodel.hpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.hpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/inputmodel.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.hpp diff --git a/ngraph/python/src/pyngraph/frontend/place.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/place.cpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/place.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/frontend/place.cpp diff --git a/ngraph/python/src/pyngraph/frontend/place.hpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/place.hpp similarity index 100% rename from ngraph/python/src/pyngraph/frontend/place.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/frontend/place.hpp diff --git a/ngraph/python/src/pyngraph/function.cpp b/runtime/bindings/python/src/compatibility/pyngraph/function.cpp similarity index 100% rename from ngraph/python/src/pyngraph/function.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/function.cpp diff --git a/ngraph/python/src/pyngraph/function.hpp b/runtime/bindings/python/src/compatibility/pyngraph/function.hpp similarity index 100% rename from ngraph/python/src/pyngraph/function.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/function.hpp diff --git a/ngraph/python/src/pyngraph/node.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node.cpp similarity index 100% rename from ngraph/python/src/pyngraph/node.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/node.cpp diff --git a/ngraph/python/src/pyngraph/node.hpp b/runtime/bindings/python/src/compatibility/pyngraph/node.hpp similarity index 100% rename from ngraph/python/src/pyngraph/node.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/node.hpp diff --git a/ngraph/python/src/pyngraph/node_factory.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp similarity index 100% rename from ngraph/python/src/pyngraph/node_factory.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp diff --git a/ngraph/python/src/pyngraph/node_factory.hpp b/runtime/bindings/python/src/compatibility/pyngraph/node_factory.hpp similarity index 100% rename from ngraph/python/src/pyngraph/node_factory.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/node_factory.hpp diff --git a/ngraph/python/src/pyngraph/node_input.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node_input.cpp similarity index 100% rename from ngraph/python/src/pyngraph/node_input.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/node_input.cpp diff --git a/ngraph/python/src/pyngraph/node_input.hpp 
b/runtime/bindings/python/src/compatibility/pyngraph/node_input.hpp similarity index 100% rename from ngraph/python/src/pyngraph/node_input.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/node_input.hpp diff --git a/ngraph/python/src/pyngraph/node_output.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node_output.cpp similarity index 100% rename from ngraph/python/src/pyngraph/node_output.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/node_output.cpp diff --git a/ngraph/python/src/pyngraph/node_output.hpp b/runtime/bindings/python/src/compatibility/pyngraph/node_output.hpp similarity index 100% rename from ngraph/python/src/pyngraph/node_output.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/node_output.hpp diff --git a/ngraph/python/src/pyngraph/ops/constant.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/constant.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/constant.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/constant.cpp diff --git a/ngraph/python/src/pyngraph/ops/constant.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/constant.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/constant.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/constant.hpp diff --git a/ngraph/python/src/pyngraph/ops/parameter.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/parameter.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp diff --git a/ngraph/python/src/pyngraph/ops/parameter.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/parameter.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.hpp diff --git a/ngraph/python/src/pyngraph/ops/result.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/result.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/result.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/result.cpp diff --git a/ngraph/python/src/pyngraph/ops/result.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/result.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/result.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/result.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/arithmetic_reduction.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/arithmetic_reduction.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/arithmetic_reduction.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/arithmetic_reduction.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/binary_elementwise_arithmetic.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/binary_elementwise_arithmetic.cpp rename to 
runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/binary_elementwise_arithmetic.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/binary_elementwise_arithmetic.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/binary_elementwise_comparison.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/binary_elementwise_comparison.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/binary_elementwise_comparison.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/binary_elementwise_comparison.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/binary_elementwise_logical.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/binary_elementwise_logical.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/binary_elementwise_logical.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/binary_elementwise_logical.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/index_reduction.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/index_reduction.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/index_reduction.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/index_reduction.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/op_annotations.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/op_annotations.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/op_annotations.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/op_annotations.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp similarity index 100% rename from 
ngraph/python/src/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/regmodule_pyngraph_op_util.hpp diff --git a/ngraph/python/src/pyngraph/ops/util/unary_elementwise_arithmetic.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/unary_elementwise_arithmetic.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp diff --git a/ngraph/python/src/pyngraph/ops/util/unary_elementwise_arithmetic.hpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp similarity index 100% rename from ngraph/python/src/pyngraph/ops/util/unary_elementwise_arithmetic.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.hpp diff --git a/ngraph/python/src/pyngraph/partial_shape.cpp b/runtime/bindings/python/src/compatibility/pyngraph/partial_shape.cpp similarity index 100% rename from ngraph/python/src/pyngraph/partial_shape.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/partial_shape.cpp diff --git a/ngraph/python/src/pyngraph/partial_shape.hpp b/runtime/bindings/python/src/compatibility/pyngraph/partial_shape.hpp similarity index 100% rename from ngraph/python/src/pyngraph/partial_shape.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/partial_shape.hpp diff --git a/ngraph/python/src/pyngraph/passes/manager.cpp b/runtime/bindings/python/src/compatibility/pyngraph/passes/manager.cpp similarity index 100% rename from ngraph/python/src/pyngraph/passes/manager.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/passes/manager.cpp diff --git a/ngraph/python/src/pyngraph/passes/manager.hpp b/runtime/bindings/python/src/compatibility/pyngraph/passes/manager.hpp similarity index 100% rename from ngraph/python/src/pyngraph/passes/manager.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/passes/manager.hpp diff --git a/ngraph/python/src/pyngraph/passes/regmodule_pyngraph_passes.cpp b/runtime/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp similarity index 100% rename from ngraph/python/src/pyngraph/passes/regmodule_pyngraph_passes.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.cpp diff --git a/ngraph/python/src/pyngraph/passes/regmodule_pyngraph_passes.hpp b/runtime/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp similarity index 100% rename from ngraph/python/src/pyngraph/passes/regmodule_pyngraph_passes.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/passes/regmodule_pyngraph_passes.hpp diff --git a/ngraph/python/src/pyngraph/pyngraph.cpp b/runtime/bindings/python/src/compatibility/pyngraph/pyngraph.cpp similarity index 100% rename from ngraph/python/src/pyngraph/pyngraph.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/pyngraph.cpp diff --git a/ngraph/python/src/pyngraph/rt_map.cpp 
b/runtime/bindings/python/src/compatibility/pyngraph/rt_map.cpp similarity index 100% rename from ngraph/python/src/pyngraph/rt_map.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/rt_map.cpp diff --git a/ngraph/python/src/pyngraph/rt_map.hpp b/runtime/bindings/python/src/compatibility/pyngraph/rt_map.hpp similarity index 100% rename from ngraph/python/src/pyngraph/rt_map.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/rt_map.hpp diff --git a/ngraph/python/src/pyngraph/shape.cpp b/runtime/bindings/python/src/compatibility/pyngraph/shape.cpp similarity index 100% rename from ngraph/python/src/pyngraph/shape.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/shape.cpp diff --git a/ngraph/python/src/pyngraph/shape.hpp b/runtime/bindings/python/src/compatibility/pyngraph/shape.hpp similarity index 100% rename from ngraph/python/src/pyngraph/shape.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/shape.hpp diff --git a/ngraph/python/src/pyngraph/strides.cpp b/runtime/bindings/python/src/compatibility/pyngraph/strides.cpp similarity index 100% rename from ngraph/python/src/pyngraph/strides.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/strides.cpp diff --git a/ngraph/python/src/pyngraph/strides.hpp b/runtime/bindings/python/src/compatibility/pyngraph/strides.hpp similarity index 100% rename from ngraph/python/src/pyngraph/strides.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/strides.hpp diff --git a/ngraph/python/src/pyngraph/types/element_type.cpp b/runtime/bindings/python/src/compatibility/pyngraph/types/element_type.cpp similarity index 100% rename from ngraph/python/src/pyngraph/types/element_type.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/types/element_type.cpp diff --git a/ngraph/python/src/pyngraph/types/element_type.hpp b/runtime/bindings/python/src/compatibility/pyngraph/types/element_type.hpp similarity index 100% rename from ngraph/python/src/pyngraph/types/element_type.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/types/element_type.hpp diff --git a/ngraph/python/src/pyngraph/types/regmodule_pyngraph_types.cpp b/runtime/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp similarity index 100% rename from ngraph/python/src/pyngraph/types/regmodule_pyngraph_types.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.cpp diff --git a/ngraph/python/src/pyngraph/types/regmodule_pyngraph_types.hpp b/runtime/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp similarity index 100% rename from ngraph/python/src/pyngraph/types/regmodule_pyngraph_types.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/types/regmodule_pyngraph_types.hpp diff --git a/ngraph/python/src/pyngraph/util.cpp b/runtime/bindings/python/src/compatibility/pyngraph/util.cpp similarity index 100% rename from ngraph/python/src/pyngraph/util.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/util.cpp diff --git a/ngraph/python/src/pyngraph/util.hpp b/runtime/bindings/python/src/compatibility/pyngraph/util.hpp similarity index 100% rename from ngraph/python/src/pyngraph/util.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/util.hpp diff --git a/ngraph/python/src/pyngraph/util.py b/runtime/bindings/python/src/compatibility/pyngraph/util.py similarity index 100% rename from ngraph/python/src/pyngraph/util.py rename to 
runtime/bindings/python/src/compatibility/pyngraph/util.py diff --git a/ngraph/python/src/pyngraph/variant.cpp b/runtime/bindings/python/src/compatibility/pyngraph/variant.cpp similarity index 100% rename from ngraph/python/src/pyngraph/variant.cpp rename to runtime/bindings/python/src/compatibility/pyngraph/variant.cpp diff --git a/ngraph/python/src/pyngraph/variant.hpp b/runtime/bindings/python/src/compatibility/pyngraph/variant.hpp similarity index 100% rename from ngraph/python/src/pyngraph/variant.hpp rename to runtime/bindings/python/src/compatibility/pyngraph/variant.hpp diff --git a/ngraph/python/tests/__init__.py b/runtime/bindings/python/tests/__init__.py similarity index 100% rename from ngraph/python/tests/__init__.py rename to runtime/bindings/python/tests/__init__.py diff --git a/ngraph/python/tests/conftest.py b/runtime/bindings/python/tests/conftest.py similarity index 100% rename from ngraph/python/tests/conftest.py rename to runtime/bindings/python/tests/conftest.py diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt b/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt similarity index 90% rename from ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt rename to runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt index b218da4751f..fac5294a572 100644 --- a/ngraph/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt +++ b/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/CMakeLists.txt @@ -15,7 +15,7 @@ add_library(${TARGET_FE_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS}) target_include_directories(${TARGET_FE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(${TARGET_FE_NAME} PRIVATE ngraph::frontend_manager::static) +target_link_libraries(${TARGET_FE_NAME} PRIVATE frontend_manager::static) add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp b/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp similarity index 100% rename from ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp rename to runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp b/runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp similarity index 100% rename from ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp rename to runtime/bindings/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp diff --git a/ngraph/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt b/runtime/bindings/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt similarity index 94% rename from ngraph/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt rename to runtime/bindings/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt index f464e3e316f..08755970fb6 100644 --- a/ngraph/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt +++ b/runtime/bindings/python/tests/mock/pyngraph_fe_mock_api/CMakeLists.txt @@ -11,7 +11,7 @@ source_group("src" FILES ${PYBIND_FE_SRC}) pybind11_add_module(${PYBIND_FE_NAME} MODULE ${PYBIND_FE_SRC}) -target_link_libraries(${PYBIND_FE_NAME} PRIVATE ${TARGET_FE_NAME} ngraph::frontend_manager::static) +target_link_libraries(${PYBIND_FE_NAME} PRIVATE ${TARGET_FE_NAME} frontend_manager::static) add_clang_format_target(${PYBIND_FE_NAME}_clang FOR_TARGETS ${PYBIND_FE_NAME}) diff 
--git a/ngraph/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp b/runtime/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp similarity index 100% rename from ngraph/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp rename to runtime/bindings/python/tests/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp diff --git a/ngraph/python/tests/runtime.py b/runtime/bindings/python/tests/runtime.py similarity index 100% rename from ngraph/python/tests/runtime.py rename to runtime/bindings/python/tests/runtime.py diff --git a/ngraph/python/tests/test_frontend/test_frontend_onnx.py b/runtime/bindings/python/tests/test_frontend/test_frontend_onnx.py similarity index 100% rename from ngraph/python/tests/test_frontend/test_frontend_onnx.py rename to runtime/bindings/python/tests/test_frontend/test_frontend_onnx.py diff --git a/ngraph/python/tests/test_frontend/test_frontend_onnx_editor.py b/runtime/bindings/python/tests/test_frontend/test_frontend_onnx_editor.py similarity index 93% rename from ngraph/python/tests/test_frontend/test_frontend_onnx_editor.py rename to runtime/bindings/python/tests/test_frontend/test_frontend_onnx_editor.py index 3542a20ef19..e241945dd0c 100644 --- a/ngraph/python/tests/test_frontend/test_frontend_onnx_editor.py +++ b/runtime/bindings/python/tests/test_frontend/test_frontend_onnx_editor.py @@ -595,6 +595,56 @@ def test_get_place_by_tensor_name(): place3 = model.get_place_by_tensor_name(tensorName="in1") assert place3 - with pytest.raises(Exception) as e: - model.get_place_by_tensor_name(tensorName="0:add_out") - assert "The tensor with name: 0:add_out does not exist in the graph" in str(e) + assert not model.get_place_by_tensor_name(tensorName="0:add_out") + + +def test_get_place_by_operation_name(): + skip_if_onnx_frontend_is_disabled() + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) + assert fe + + model = fe.load("input_model.onnx") + assert model + + place1 = model.get_place_by_operation_name(operationName="split1") + assert place1 + + place2 = model.get_place_by_operation_name(operationName="not_existed") + assert not place2 + + +def test_get_output_port(): + skip_if_onnx_frontend_is_disabled() + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) + assert fe + model = fe.load("input_model.onnx") + assert model + + split_op = model.get_place_by_operation_name(operationName="split1") + place1 = split_op.get_output_port(outputPortIndex=0) + place2 = split_op.get_output_port(outputName="out2") + + assert place1.get_target_tensor().get_names()[0] == "out1" + assert place2.get_target_tensor().get_names()[0] == "out2" + + assert not split_op.get_output_port() + assert not split_op.get_output_port(outputPortIndex=3) + assert not split_op.get_output_port(outputName="not_existed") + + +def test_get_input_port(): + skip_if_onnx_frontend_is_disabled() + fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME) + assert fe + model = fe.load("input_model.onnx") + assert model + + split_op = model.get_place_by_operation_name(operationName="split1") + place1 = split_op.get_input_port(inputPortIndex=0) + assert place1.get_source_tensor().get_names()[0] == "add_out" + + place2 = split_op.get_input_port() + assert place1.is_equal(place2) + + assert not split_op.get_input_port(inputPortIndex=1) + assert not split_op.get_input_port(inputName="not_existed")
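The editor tests above also document a behavioural change in the frontend Python API: place lookups for names that do not exist now return a null (falsy) place instead of raising. A hedged sketch of the calling pattern, reusing the test fixtures (input_model.onnx and the split1 op are generated by the test suite, not shipped files):

from ngraph.frontend import FrontEndManager

fem = FrontEndManager()
fe = fem.load_by_framework(framework="onnx")
model = fe.load("input_model.onnx")

# Lookup misses are falsy now rather than raising an exception.
assert not model.get_place_by_tensor_name(tensorName="0:add_out")

split_op = model.get_place_by_operation_name(operationName="split1")
port = split_op.get_output_port(outputPortIndex=0)
assert port.get_target_tensor().get_names()[0] == "out1"

diff --git a/ngraph/python/tests/test_frontend/test_frontendmanager.py b/runtime/bindings/python/tests/test_frontend/test_frontendmanager.py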
similarity index 100% rename from ngraph/python/tests/test_frontend/test_frontendmanager.py rename to runtime/bindings/python/tests/test_frontend/test_frontendmanager.py diff --git a/ngraph/python/tests/test_ngraph/__init__.py b/runtime/bindings/python/tests/test_ngraph/__init__.py similarity index 100% rename from ngraph/python/tests/test_ngraph/__init__.py rename to runtime/bindings/python/tests/test_ngraph/__init__.py diff --git a/ngraph/python/tests/test_ngraph/test_adaptive_pool.py b/runtime/bindings/python/tests/test_ngraph/test_adaptive_pool.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_adaptive_pool.py rename to runtime/bindings/python/tests/test_ngraph/test_adaptive_pool.py diff --git a/ngraph/python/tests/test_ngraph/test_basic.py b/runtime/bindings/python/tests/test_ngraph/test_basic.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_basic.py rename to runtime/bindings/python/tests/test_ngraph/test_basic.py diff --git a/ngraph/python/tests/test_ngraph/test_convolution.py b/runtime/bindings/python/tests/test_ngraph/test_convolution.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_convolution.py rename to runtime/bindings/python/tests/test_ngraph/test_convolution.py diff --git a/ngraph/python/tests/test_ngraph/test_core.py b/runtime/bindings/python/tests/test_ngraph/test_core.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_core.py rename to runtime/bindings/python/tests/test_ngraph/test_core.py diff --git a/ngraph/python/tests/test_ngraph/test_create_op.py b/runtime/bindings/python/tests/test_ngraph/test_create_op.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_create_op.py rename to runtime/bindings/python/tests/test_ngraph/test_create_op.py diff --git a/ngraph/python/tests/test_ngraph/test_ctc_loss.py b/runtime/bindings/python/tests/test_ngraph/test_ctc_loss.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_ctc_loss.py rename to runtime/bindings/python/tests/test_ngraph/test_ctc_loss.py diff --git a/ngraph/python/tests/test_ngraph/test_data_movement.py b/runtime/bindings/python/tests/test_ngraph/test_data_movement.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_data_movement.py rename to runtime/bindings/python/tests/test_ngraph/test_data_movement.py diff --git a/ngraph/python/tests/test_ngraph/test_dft.py b/runtime/bindings/python/tests/test_ngraph/test_dft.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_dft.py rename to runtime/bindings/python/tests/test_ngraph/test_dft.py diff --git a/ngraph/python/tests/test_ngraph/test_dyn_attributes.py b/runtime/bindings/python/tests/test_ngraph/test_dyn_attributes.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_dyn_attributes.py rename to runtime/bindings/python/tests/test_ngraph/test_dyn_attributes.py diff --git a/ngraph/python/tests/test_ngraph/test_einsum.py b/runtime/bindings/python/tests/test_ngraph/test_einsum.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_einsum.py rename to runtime/bindings/python/tests/test_ngraph/test_einsum.py diff --git a/ngraph/python/tests/test_ngraph/test_gather.py b/runtime/bindings/python/tests/test_ngraph/test_gather.py similarity index 100% rename from ngraph/python/tests/test_ngraph/test_gather.py rename to runtime/bindings/python/tests/test_ngraph/test_gather.py diff --git a/ngraph/python/tests/test_ngraph/test_idft.py 
b/runtime/bindings/python/tests/test_ngraph/test_idft.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_idft.py
rename to runtime/bindings/python/tests/test_ngraph/test_idft.py
diff --git a/ngraph/python/tests/test_ngraph/test_input_validation.py b/runtime/bindings/python/tests/test_ngraph/test_input_validation.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_input_validation.py
rename to runtime/bindings/python/tests/test_ngraph/test_input_validation.py
diff --git a/ngraph/python/tests/test_ngraph/test_log_softmax.py b/runtime/bindings/python/tests/test_ngraph/test_log_softmax.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_log_softmax.py
rename to runtime/bindings/python/tests/test_ngraph/test_log_softmax.py
diff --git a/ngraph/python/tests/test_ngraph/test_manager.py b/runtime/bindings/python/tests/test_ngraph/test_manager.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_manager.py
rename to runtime/bindings/python/tests/test_ngraph/test_manager.py
diff --git a/ngraph/python/tests/test_ngraph/test_node_factory.py b/runtime/bindings/python/tests/test_ngraph/test_node_factory.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_node_factory.py
rename to runtime/bindings/python/tests/test_ngraph/test_node_factory.py
diff --git a/ngraph/python/tests/test_ngraph/test_normalization.py b/runtime/bindings/python/tests/test_ngraph/test_normalization.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_normalization.py
rename to runtime/bindings/python/tests/test_ngraph/test_normalization.py
diff --git a/ngraph/python/tests/test_ngraph/test_ops.py b/runtime/bindings/python/tests/test_ngraph/test_ops.py
similarity index 95%
rename from ngraph/python/tests/test_ngraph/test_ops.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops.py
index bbe22553cd9..89993329d34 100644
--- a/ngraph/python/tests/test_ngraph/test_ops.py
+++ b/runtime/bindings/python/tests/test_ngraph/test_ops.py
@@ -552,10 +552,24 @@ def test_max_pool():
 
     window_shape = [3]
     strides = [1] * len(window_shape)
+    dilations = [1] * len(window_shape)
     pads_begin = [0] * len(window_shape)
     pads_end = [0] * len(window_shape)
+    rounding_type = "floor"
+    auto_pad = "explicit"
+    idx_elem_type = "i32"
 
-    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
+    model = ng.max_pool(
+        A,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        window_shape,
+        rounding_type,
+        auto_pad,
+        idx_elem_type,
+    )
     function = Function([model], parameter_list, "test")
 
     runtime = get_runtime()
@@ -570,7 +584,17 @@ def test_max_pool():
     pads_begin = [0] * len(window_shape)
     pads_end = [0] * len(window_shape)
 
-    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
+    model = ng.max_pool(
+        A,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        window_shape,
+        rounding_type,
+        auto_pad,
+        idx_elem_type,
+    )
     function = Function([model], parameter_list, "test")
 
     size = 4
@@ -590,10 +614,21 @@ def test_max_pool():
 
     window_shape = [3, 3]
     strides = [1, 1]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
 
-    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
+    model = ng.max_pool(
+        A,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        window_shape,
+        rounding_type,
+        auto_pad,
+        idx_elem_type,
+    )
     function = Function([model], parameter_list, "test")
 
     computation = runtime.computation(function, *parameter_list)
@@ -604,10 +639,21 @@ def test_max_pool():
     # test 2d with strides
     strides = [2, 2]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
 
-    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
+    model = ng.max_pool(
+        A,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        window_shape,
+        rounding_type,
+        auto_pad,
+        idx_elem_type,
+    )
     function = Function([model], parameter_list, "test")
 
     computation = runtime.computation(function, *parameter_list)
     result = computation(input_arr)[0]
diff --git a/ngraph/python/tests/test_ngraph/test_ops_binary.py b/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_ops_binary.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops_binary.py
diff --git a/ngraph/python/tests/test_ngraph/test_ops_fused.py b/runtime/bindings/python/tests/test_ngraph/test_ops_fused.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_ops_fused.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops_fused.py
diff --git a/ngraph/python/tests/test_ngraph/test_ops_matmul.py b/runtime/bindings/python/tests/test_ngraph/test_ops_matmul.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_ops_matmul.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops_matmul.py
diff --git a/ngraph/python/tests/test_ngraph/test_ops_multioutput.py b/runtime/bindings/python/tests/test_ngraph/test_ops_multioutput.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_ops_multioutput.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops_multioutput.py
diff --git a/ngraph/python/tests/test_ngraph/test_ops_reshape.py b/runtime/bindings/python/tests/test_ngraph/test_ops_reshape.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_ops_reshape.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops_reshape.py
diff --git a/ngraph/python/tests/test_ngraph/test_ops_scatter.py b/runtime/bindings/python/tests/test_ngraph/test_ops_scatter.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_ops_scatter.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops_scatter.py
diff --git a/ngraph/python/tests/test_ngraph/test_ops_unary.py b/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_ops_unary.py
rename to runtime/bindings/python/tests/test_ngraph/test_ops_unary.py
diff --git a/ngraph/python/tests/test_ngraph/test_pooling.py b/runtime/bindings/python/tests/test_ngraph/test_pooling.py
similarity index 62%
rename from ngraph/python/tests/test_ngraph/test_pooling.py
rename to runtime/bindings/python/tests/test_ngraph/test_pooling.py
index afee2ede43f..77da435ff5e 100644
--- a/ngraph/python/tests/test_ngraph/test_pooling.py
+++ b/runtime/bindings/python/tests/test_ngraph/test_pooling.py
@@ -85,17 +85,35 @@ def test_max_pool_basic():
     # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
     data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
     strides = [1, 1]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
     kernel_shape = [2, 2]
+    rounding_type = "floor"
+    auto_pad = None
+    index_et = "i32"
 
     data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
-    avgpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
-    comp = rt.computation(avgpool_node, data_node)
+    maxpool_node = ng.max_pool(
+        data_node,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        kernel_shape,
+        rounding_type,
+        auto_pad,
+        index_et,
+    )
+    comp = rt.computation(maxpool_node, data_node)
     result = comp(data)
 
-    expected = np.array([[[[5.5, 6.5, 7.5], [9.5, 10.5, 11.5], [13.5, 14.5, 15.5]]]], dtype=np.float32)
-    assert np.allclose(result, expected)
+    expected = np.array(
+        [[[[5.5, 6.5, 7.5], [9.5, 10.5, 11.5], [13.5, 14.5, 15.5]]]], dtype=np.float32
+    )
+    expected_idx = np.array([[[[5, 6, 7], [9, 10, 11], [13, 14, 15]]]], dtype=np.int32)
+    assert np.allclose(result[0], expected)
+    assert np.allclose(result[1], expected_idx)
 
 
 def test_max_pool_strides():
@@ -107,20 +125,36 @@
     # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
     data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
     strides = [2, 1]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
     kernel_shape = [2, 2]
+    rounding_type = "floor"
+    auto_pad = None
+    index_et = "i32"
 
     data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
-    avgpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
-    comp = rt.computation(avgpool_node, data_node)
+    maxpool_node = ng.max_pool(
+        data_node,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        kernel_shape,
+        rounding_type,
+        auto_pad,
+        index_et,
+    )
+    comp = rt.computation(maxpool_node, data_node)
     result = comp(data)
 
     expected = np.array([[[[5.5, 6.5, 7.5], [13.5, 14.5, 15.5]]]], dtype=np.float32)
-    assert np.allclose(result, expected)
+    expected_idx = np.array([[[[5, 6, 7], [13, 14, 15]]]], dtype=np.int32)
+    assert np.allclose(result[0], expected)
+    assert np.allclose(result[1], expected_idx)
 
 
-def test_max_pool_kernel_shape1d():
+def test_max_pool_kernel_shape1x1():
     rt = get_runtime()
 
     # array([[[[ 0.5, 1.5, 2.5, 3.5],
@@ -129,19 +163,34 @@
     # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
     data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
     strides = [1, 1]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
     kernel_shape = [1, 1]
+    rounding_type = "floor"
+    auto_pad = None
+    index_et = "i32"
 
     data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
-    avgpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
-    comp = rt.computation(avgpool_node, data_node)
+    maxpool_node = ng.max_pool(
+        data_node,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        kernel_shape,
+        rounding_type,
+        auto_pad,
+        index_et,
+    )
+    comp = rt.computation(maxpool_node, data_node)
     result = comp(data)
 
-    assert np.allclose(result, data)
+    assert np.allclose(result[0], data)
+    assert np.allclose(result[1], np.arange(0, 16, dtype=np.int32).reshape((1, 1, 4, 4)))
 
 
-def test_max_pool_kernel_shape3d():
+def test_max_pool_kernel_shape3x3():
     rt = get_runtime()
 
     # array([[[[ 0.5, 1.5, 2.5, 3.5],
@@ -150,17 +199,31 @@
     # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
     data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
     strides = [1, 1]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
     kernel_shape = [3, 3]
+    rounding_type = "floor"
+    auto_pad = None
+    index_et = "i32"
 
     data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
-    avgpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
-    comp = rt.computation(avgpool_node, data_node)
+    maxpool_node = ng.max_pool(
+        data_node,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        kernel_shape,
+        rounding_type,
+        auto_pad,
+        index_et,
+    )
+    comp = rt.computation(maxpool_node, data_node)
     result = comp(data)
 
     expected = np.array([[[[10.5, 11.5], [14.5, 15.5]]]], dtype=np.float32)
-    assert np.allclose(result, expected)
+    assert np.allclose(result[0], expected)
 
 
 def test_max_pool_non_zero_pads():
@@ -172,6 +235,7 @@
     # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
     data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
     strides = [1, 1]
+    dilations = [1, 1]
     pads_begin = [1, 1]
     pads_end = [1, 1]
     # 0 0 , 0 , 0 , 0, 0
@@ -181,10 +245,23 @@
     # 0 [12.5, 13.5, 14.5, 15.5], 0
     # 0 0 , 0 , 0 , 0, 0
     kernel_shape = [2, 2]
+    rounding_type = "floor"
+    auto_pad = None
+    index_et = "i32"
 
     data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
-    avgpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
-    comp = rt.computation(avgpool_node, data_node)
+    maxpool_node = ng.max_pool(
+        data_node,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        kernel_shape,
+        rounding_type,
+        auto_pad,
+        index_et,
+    )
+    comp = rt.computation(maxpool_node, data_node)
     result = comp(data)
 
     expected = np.array(
@@ -201,7 +278,22 @@
         ],
         dtype=np.float32,
     )
-    assert np.allclose(result, expected)
+    expected_idx = np.array(
+        [
+            [
+                [
+                    [0, 1, 2, 3, 3],
+                    [4, 5, 6, 7, 7],
+                    [8, 9, 10, 11, 11],
+                    [12, 13, 14, 15, 15],
+                    [12, 13, 14, 15, 15],
+                ]
+            ]
+        ],
+        dtype=np.int32,
+    )
+    assert np.allclose(result[0], expected)
+    assert np.allclose(result[1], expected_idx)
 
 
 def test_max_pool_same_upper_auto_pads():
@@ -213,6 +305,7 @@
     # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
     data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
     strides = [1, 1]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
     # [ 0.5, 1.5, 2.5, 3.5], 0,
@@ -222,10 +315,22 @@
     # 0 , 0 , 0 , 0, 0
     kernel_shape = [2, 2]
     auto_pad = "same_upper"
+    rounding_type = "floor"
+    index_et = "i32"
 
     data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
-    avgpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape, auto_pad=auto_pad)
-    comp = rt.computation(avgpool_node, data_node)
+    maxpool_node = ng.max_pool(
+        data_node,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        kernel_shape,
+        rounding_type,
+        auto_pad,
+        index_et,
+    )
+    comp = rt.computation(maxpool_node, data_node)
     result = comp(data)
 
     expected = np.array(
@@ -241,7 +346,21 @@
         ],
         dtype=np.float32,
    )
-    assert np.allclose(result, expected)
+    expected_idx = np.array(
+        [
+            [
+                [
+                    [5, 6, 7, 7],
+                    [9, 10, 11, 11],
+                    [13, 14, 15, 15],
+                    [13, 14, 15, 15],
+                ]
+            ]
+        ],
+        dtype=np.int32,
+    )
+    assert np.allclose(result[0], expected)
+    assert np.allclose(result[1], expected_idx)
 
 
 def test_max_pool_same_lower_auto_pads():
@@ -253,6 +372,7 @@
     # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
     data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
     strides = [1, 1]
+    dilations = [1, 1]
     pads_begin = [0, 0]
     pads_end = [0, 0]
     # 0 0 , 0 , 0 , 0,
@@ -262,14 +382,49 @@
     # 0 [12.5, 13.5, 14.5, 15.5],
     kernel_shape = [2, 2]
     auto_pad = "same_lower"
+    rounding_type = "floor"
+    index_et = "i32"
 
     data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
-    avgpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape, auto_pad=auto_pad)
-    comp = rt.computation(avgpool_node, data_node)
+    maxpool_node = ng.max_pool(
+        data_node,
+        strides,
+        dilations,
+        pads_begin,
+        pads_end,
+        kernel_shape,
+        rounding_type,
+        auto_pad,
+        index_et,
+    )
+    comp = rt.computation(maxpool_node, data_node)
     result = comp(data)
 
     expected = np.array(
-        [[[[0.5, 1.5, 2.5, 3.5], [4.5, 5.5, 6.5, 7.5], [8.5, 9.5, 10.5, 11.5], [12.5, 13.5, 14.5, 15.5]]]],
+        [
+            [
+                [
+                    [0.5, 1.5, 2.5, 3.5],
+                    [4.5, 5.5, 6.5, 7.5],
+                    [8.5, 9.5, 10.5, 11.5],
+                    [12.5, 13.5, 14.5, 15.5],
+                ]
+            ]
+        ],
         dtype=np.float32,
     )
-    assert np.allclose(result, expected)
+    expected_idx = np.array(
+        [
+            [
+                [
+                    [0, 1, 2, 3],
+                    [4, 5, 6, 7],
+                    [8, 9, 10, 11],
+                    [12, 13, 14, 15],
+                ]
+            ]
+        ],
+        dtype=np.int32,
+    )
+    assert np.allclose(result[0], expected)
+    assert np.allclose(result[1], expected_idx)
diff --git a/ngraph/python/tests/test_ngraph/test_proposal.py b/runtime/bindings/python/tests/test_ngraph/test_proposal.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_proposal.py
rename to runtime/bindings/python/tests/test_ngraph/test_proposal.py
diff --git a/ngraph/python/tests/test_ngraph/test_reduction.py b/runtime/bindings/python/tests/test_ngraph/test_reduction.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_reduction.py
rename to runtime/bindings/python/tests/test_ngraph/test_reduction.py
diff --git a/ngraph/python/tests/test_ngraph/test_roll.py b/runtime/bindings/python/tests/test_ngraph/test_roll.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_roll.py
rename to runtime/bindings/python/tests/test_ngraph/test_roll.py
diff --git a/ngraph/python/tests/test_ngraph/test_sequence_processing.py b/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_sequence_processing.py
rename to runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py
diff --git a/ngraph/python/tests/test_ngraph/test_swish.py b/runtime/bindings/python/tests/test_ngraph/test_swish.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_swish.py
rename to runtime/bindings/python/tests/test_ngraph/test_swish.py
diff --git a/ngraph/python/tests/test_ngraph/test_utils.py b/runtime/bindings/python/tests/test_ngraph/test_utils.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/test_utils.py
rename to runtime/bindings/python/tests/test_ngraph/test_utils.py
diff --git a/ngraph/python/tests/test_ngraph/util.py b/runtime/bindings/python/tests/test_ngraph/util.py
similarity index 100%
rename from ngraph/python/tests/test_ngraph/util.py
rename to runtime/bindings/python/tests/test_ngraph/util.py
diff --git a/ngraph/python/tests/test_onnx/__init__.py b/runtime/bindings/python/tests/test_onnx/__init__.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/__init__.py
rename to runtime/bindings/python/tests/test_onnx/__init__.py
diff --git a/ngraph/python/tests/test_onnx/model_zoo_preprocess.sh b/runtime/bindings/python/tests/test_onnx/model_zoo_preprocess.sh
similarity index 100%
rename from ngraph/python/tests/test_onnx/model_zoo_preprocess.sh
rename to runtime/bindings/python/tests/test_onnx/model_zoo_preprocess.sh
diff --git a/ngraph/python/tests/test_onnx/models/add_abc.onnx b/runtime/bindings/python/tests/test_onnx/models/add_abc.onnx
similarity index 100%
rename from ngraph/python/tests/test_onnx/models/add_abc.onnx
rename to runtime/bindings/python/tests/test_onnx/models/add_abc.onnx
diff --git a/ngraph/python/tests/test_onnx/models/data/tensor.data b/runtime/bindings/python/tests/test_onnx/models/data/tensor.data
similarity index 100%
rename from ngraph/python/tests/test_onnx/models/data/tensor.data
rename to runtime/bindings/python/tests/test_onnx/models/data/tensor.data
diff --git a/ngraph/python/tests/test_onnx/models/external_data.onnx b/runtime/bindings/python/tests/test_onnx/models/external_data.onnx
similarity index 100%
rename from ngraph/python/tests/test_onnx/models/external_data.onnx
rename to runtime/bindings/python/tests/test_onnx/models/external_data.onnx
diff --git a/ngraph/python/tests/test_onnx/test_backend.py b/runtime/bindings/python/tests/test_onnx/test_backend.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_backend.py
rename to runtime/bindings/python/tests/test_onnx/test_backend.py
diff --git a/ngraph/python/tests/test_onnx/test_onnx_external_data.py b/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_onnx_external_data.py
rename to runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py
diff --git a/ngraph/python/tests/test_onnx/test_onnx_import.py b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_onnx_import.py
rename to runtime/bindings/python/tests/test_onnx/test_onnx_import.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_batchnorm.py b/runtime/bindings/python/tests/test_onnx/test_ops_batchnorm.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_batchnorm.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_batchnorm.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_binary.py b/runtime/bindings/python/tests/test_onnx/test_ops_binary.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_binary.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_binary.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_convpool.py b/runtime/bindings/python/tests/test_onnx/test_ops_convpool.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_convpool.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_convpool.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_logical.py b/runtime/bindings/python/tests/test_onnx/test_ops_logical.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_logical.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_logical.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_matmul.py b/runtime/bindings/python/tests/test_onnx/test_ops_matmul.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_matmul.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_matmul.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_nonlinear.py b/runtime/bindings/python/tests/test_onnx/test_ops_nonlinear.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_nonlinear.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_nonlinear.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_reduction.py b/runtime/bindings/python/tests/test_onnx/test_ops_reduction.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_reduction.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_reduction.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_reshape.py b/runtime/bindings/python/tests/test_onnx/test_ops_reshape.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_reshape.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_reshape.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_unary.py b/runtime/bindings/python/tests/test_onnx/test_ops_unary.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_unary.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_unary.py
diff --git a/ngraph/python/tests/test_onnx/test_ops_variadic.py b/runtime/bindings/python/tests/test_onnx/test_ops_variadic.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_ops_variadic.py
rename to runtime/bindings/python/tests/test_onnx/test_ops_variadic.py
diff --git a/ngraph/python/tests/test_onnx/test_zoo_models.py b/runtime/bindings/python/tests/test_onnx/test_zoo_models.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/test_zoo_models.py
rename to runtime/bindings/python/tests/test_onnx/test_zoo_models.py
diff --git a/ngraph/python/tests/test_onnx/utils/__init__.py b/runtime/bindings/python/tests/test_onnx/utils/__init__.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/utils/__init__.py
rename to runtime/bindings/python/tests/test_onnx/utils/__init__.py
diff --git a/ngraph/python/tests/test_onnx/utils/model_importer.py b/runtime/bindings/python/tests/test_onnx/utils/model_importer.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/utils/model_importer.py
rename to runtime/bindings/python/tests/test_onnx/utils/model_importer.py
diff --git a/ngraph/python/tests/test_onnx/utils/onnx_backend.py b/runtime/bindings/python/tests/test_onnx/utils/onnx_backend.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/utils/onnx_backend.py
rename to runtime/bindings/python/tests/test_onnx/utils/onnx_backend.py
diff --git a/ngraph/python/tests/test_onnx/utils/onnx_helpers.py b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py
similarity index 100%
rename from ngraph/python/tests/test_onnx/utils/onnx_helpers.py
rename to runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py
diff --git a/runtime/bindings/python/thirdparty/pybind11 b/runtime/bindings/python/thirdparty/pybind11
new file mode 160000
index 00000000000..d71ba0cb736
--- /dev/null
+++ b/runtime/bindings/python/thirdparty/pybind11
@@ -0,0 +1 @@
+Subproject commit d71ba0cb73616c493d35699a8a9283aa64ef0f6b
diff --git a/ngraph/python/tox.ini b/runtime/bindings/python/tox.ini
similarity index 98%
rename from ngraph/python/tox.ini
rename to runtime/bindings/python/tox.ini
index d19da1a3005..7f0acd7e96d 100644
--- a/ngraph/python/tox.ini
+++ b/runtime/bindings/python/tox.ini
@@ -10,7 +10,7 @@ deps =
 setenv =
     NGRAPH_BACKEND = {env:NGRAPH_BACKEND:"CPU"}
     PYTHONPATH = {env:PYTHONPATH}
-    ngraph_DIR = {env:ngraph_DIR}
+    OpenVINO_DIR = {env:OpenVINO_DIR}
 passenv =
     http_proxy
     https_proxy
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index ac094ce648b..4a56a98229a 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -10,14 +10,14 @@ ie_shellcheck_process(DIRECTORY "${OpenVINO_SOURCE_DIR}"
                       SKIP "${OpenVINO_SOURCE_DIR}/bin"
                            "${OpenVINO_SOURCE_DIR}/build"
                            "${OpenVINO_SOURCE_DIR}/thirdparty"
-                           "${OpenVINO_SOURCE_DIR}/ngraph/python/pybind11"
+                           "${OpenVINO_SOURCE_DIR}/runtime/bindings/python/thirdparty/pybind11"
                            "${IE_MAIN_SOURCE_DIR}/thirdparty"
                            "${TEMP}"
                            # TODO fix and enable back:
                            "${OpenVINO_SOURCE_DIR}/inference-engine/scripts/dependencies.sh"
                            "${OpenVINO_SOURCE_DIR}/scripts/install_dependencies/install_NEO_OCL_driver.sh"
                            "${OpenVINO_SOURCE_DIR}/scripts/install_dependencies/install_openvino_dependencies.sh"
-                           "${OpenVINO_SOURCE_DIR}/ngraph/python/tests/test_onnx/model_zoo_preprocess.sh"
+                           "${OpenVINO_SOURCE_DIR}/runtime/bindings/python/tests/test_onnx/model_zoo_preprocess.sh"
                       )
 
 #
diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat
index c7920d3fa33..e0579fde533 100644
--- a/scripts/setupvars/setupvars.bat
+++ b/scripts/setupvars/setupvars.bat
@@ -46,6 +46,7 @@ set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\tools\downloader;
 
 :: Inference Engine
 set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share"
+set "OpenVINO_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share"
 set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\hddl"
 set "OPENMP_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\omp\lib"
 set "GNA_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\gna\lib"
diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh
index 199ef0edd93..e44d8bbecc0 100755
--- a/scripts/setupvars/setupvars.sh
+++ b/scripts/setupvars/setupvars.sh
@@ -31,6 +31,7 @@ done
 
 if [ -e "$INSTALLDIR/deployment_tools/inference_engine" ]; then
     export InferenceEngine_DIR=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/share
+    export OpenVINO_DIR=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/share
     system_type=$(ls "$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/")
     IE_PLUGINS_PATH=$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/$system_type
 
diff --git a/tests/fuzz/CMakeLists.txt b/tests/fuzz/CMakeLists.txt
index 18cbc60a136..a5f46c5a07e 100644
--- a/tests/fuzz/CMakeLists.txt
+++ b/tests/fuzz/CMakeLists.txt
@@ -13,14 +13,12 @@ endif()
 
 set(OpenVINO_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../")
 
-if (NOT TARGET IE::inference_engine)
-    find_package(IEDevScripts REQUIRED
-                 PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package"
-                 NO_CMAKE_FIND_ROOT_PATH
-                 NO_DEFAULT_PATH)
-endif()
+find_package(IEDevScripts REQUIRED
+             PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package"
+             NO_CMAKE_FIND_ROOT_PATH
+             NO_DEFAULT_PATH)
 
-find_package(InferenceEngine REQUIRED)
+find_package(OpenVINO REQUIRED)
 
 if (NOT ENABLE_FUZZING)
     message(STATUS
diff --git a/tests/fuzz/src/CMakeLists.txt b/tests/fuzz/src/CMakeLists.txt
index 7ae1097443f..59056ea3674 100644
--- a/tests/fuzz/src/CMakeLists.txt
+++ b/tests/fuzz/src/CMakeLists.txt
@@ -16,8 +16,8 @@ foreach(test_source ${tests})
     get_filename_component(test_name ${test_source} NAME_WE)
     add_fuzzer(${test_name} ${test_source})
 
-    target_link_libraries(${test_name} PRIVATE IE::inference_engine cnpy zlib ${NGRAPH_LIBRARIES}
-        ngraph::frontend_manager)
+    target_link_libraries(${test_name} PRIVATE
+        openvino::runtime openvino::frontend::manager cnpy zlib)
 
     add_dependencies(fuzz ${test_name})
 
diff --git a/tests/lib/src/CMakeLists.txt b/tests/lib/src/CMakeLists.txt
index 8a2c8ec7bb0..bd0f187a46c 100644
--- a/tests/lib/src/CMakeLists.txt
+++ b/tests/lib/src/CMakeLists.txt
@@ -9,7 +9,7 @@ add_library(${TARGET_NAME} STATIC ${SRC})
 
 target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
 
-# Search OpenVINO Inference Engine installed
-find_package(InferenceEngine REQUIRED)
+# Search OpenVINO Runtime installed
+find_package(OpenVINO REQUIRED COMPONENTS Runtime)
 
-target_link_libraries(${TARGET_NAME} PUBLIC ${InferenceEngine_LIBRARIES})
+target_link_libraries(${TARGET_NAME} PUBLIC openvino::runtime)
diff --git a/thirdparty/itt_collector/sea_itt_lib/CMakeLists.txt b/thirdparty/itt_collector/sea_itt_lib/CMakeLists.txt
index 3618e8ae360..694f9341bd2 100644
--- a/thirdparty/itt_collector/sea_itt_lib/CMakeLists.txt
+++ b/thirdparty/itt_collector/sea_itt_lib/CMakeLists.txt
@@ -18,6 +18,7 @@ set(TARGET_NAME sea_itt_lib)
 
 set(CMAKE_DEBUG_POSTFIX "")
 set(CMAKE_RELEASE_POSTFIX "")
+set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE OFF)
 
 file(GLOB_RECURSE SOURCES "*.cpp" "*.h")
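
Note for reviewers (illustration only, not part of the patch): every ng.max_pool call site updated above now spells out the op's full positional argument list (data, strides, dilations, pads_begin, pads_end, kernel shape, rounding type, auto_pad, index element type), and the node now produces two outputs instead of one: the pooled maxima and the indices of the selected elements, which is why the assertions check result[0] and result[1]. A minimal sketch of the updated call, assuming the relocated bindings still import as `ngraph` and that the suite's `get_runtime` helper is importable (the import path below is assumed):

    import numpy as np
    import ngraph as ng
    from tests.runtime import get_runtime  # helper used by the tests; path assumed

    # Same 1x1x4x4 ramp input that test_max_pool_basic uses above.
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)

    # Positional order matches the call sites in test_pooling.py.
    maxpool_node = ng.max_pool(
        data_node,
        [1, 1],   # strides
        [1, 1],   # dilations
        [0, 0],   # pads_begin
        [0, 0],   # pads_end
        [2, 2],   # kernel_shape
        "floor",  # rounding_type
        None,     # auto_pad
        "i32",    # index element type
    )

    comp = get_runtime().computation(maxpool_node, data_node)
    result = comp(data)
    values, indices = result[0], result[1]  # two outputs: maxima and their flat indices
    assert np.allclose(values, [[[[5.5, 6.5, 7.5], [9.5, 10.5, 11.5], [13.5, 14.5, 15.5]]]])
    assert np.allclose(indices, [[[[5, 6, 7], [9, 10, 11], [13, 14, 15]]]])

The same nine-argument order applies at every updated call site; only the kernel size, strides, padding, and auto_pad mode vary between tests.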