Compare commits

198 Commits:

e662b1a330, 0aa5a8f704, 54f6f11186, ea482d8391, a93f320a48, 26e9c69440,
4727efdb3c, b7415f5c3b, 0262662050, 576b99fee9, 4e790d7b46, b0394cc3e4,
18cb7c94c1, 064364eb5e, 5ded6fb699, eabf199c3a, 0e0d166746, a6351294e7,
cac7e2e1c4, 13e674b1f8, a55d1c21ee, 91a4f73971, 84a3aab115, 4ddeecc031,
9c10e33fc7, c32b9a0cd5, c32eef361b, 8d54bdd4d5, 64395f0d5e, 9562161f76,
cb59f057a0, 28948502a9, 34748ae3b5, 06eb4afd41, 967d74ade6, 5ae4e2bb2d,
22f6a3bcc0, e842453865, 2abbec386f, afb2ebcdd4, 83e45c5ff3, bdb6a44942,
17cd26077a, 247eb8a9b9, 68b8748c9f, 852efa2269, 303fb7a121, 7f1c6c8ce1,
55530b47c0, 69a6097a30, 1f759456d6, b05a7f2ed6, f4709ffe8b, bb1e353e58,
99c7bbc25e, 33cfcb26fb, 39c84e03f7, f59126dde0, 209d506341, a710adf81a,
fa1c41994f, caae459f54, 7ef5cbff30, 85956dfa4d, 2d98cbed74, 5d47cedcc9,
9ab5a8f5d9, ad84dc6205, bd3e4347dd, 0adf0e27ee, cb7cab1886, fd48b0bbdc,
691630b68c, 205feb9421, 5ef750d5b3, 80fddfe1c2, 7eb59527a0, 21fdda5609,
9983f74dc7, ef0b8161c9, 9e2dacbc53, d299be4202, 99fe2e9bdc, 6668ec39d7,
1e5dced9d4, 7d73bae243, d8d4fb9c94, 11cde296b7, 44f8dac403, 41b4fd1057,
0f89782489, d894716fad, f6fd84d2e1, 648b2ad308, ea5c1b04e5, f3d88cbf99,
e824e482b1, e4d0021e2c, e74cb4084d, e843e357cd, ecc502733d, d1de793552,
ebaf6a2fcb, 88b006bce9, 4aae068125, 41c37c8af9, f40f0fa58b, 20dc436b6f,
b2b7a57a4c, 4481bfa17e, 366a5467d1, 4be1dddb21, 3fd9b8c3b7, 66528622a8,
4fb2cebf28, c18a24c05b, 95f0005793, 9ac239de75, ad5c0808a6, 66c6e125cf,
53bfc41a74, 9b72c33039, c0e9e1b1a1, 720e283ff1, 0e87a28791, 6d17bbb7e9,
cebbfe65ac, c4c6567182, 1a9ce16dd6, 4e8d5f3798, 7351859ec2, 405c5ea03a,
183253e834, cfea37b139, 34f00bd173, 17326abb72, 8601042bea, 39958e0dc1,
6fc9840e32, b4452d5630, 4c69552656, 6d8b3405ca, 4c2096ad9c, 0c67b90f47,
83f51e0d00, 8bb2a2a789, c9cfd6755c, c0060aefa7, 8a97d3c0e1, c5fd3300a2,
a7f6f5292e, 804df84f7d, 1e49a594f7, d5ac1c2e5c, afb2ae6b7a, c5623b71cf,
152b11e77f, 5adf3b5ca8, a2ccbdf86e, 1440b9950f, d88d4d22e8, ea79006a0a,
a4ff3318ea, 44e7a003e7, fa4112593d, 45e378f189, 9320cbaa8c, 718b194ad6,
8241540609, 10d87b7332, 386d773b33, a5312f70db, 8f113ef24e, c651bc5f87,
12aab024d1, 3978511c5c, 0de0efd751, 53e2997909, 7779fea76f, c785551b57,
8c95c90e45, bf829eead4, 1141e90435, 15b62d77cc, e6347544e2, fcf261a048,
bba9f3094b, aa13ab63f5, 8f978d2c60, a349ba7295, 73442bbc82, 76c237da8b,
aebea2337e, 29c672d6d8, 1f790df33c, 5625424b91, c7d0df39b5, 85b57ea2bf
```diff
@@ -141,7 +141,6 @@ jobs:
       -DANDROID_STL=c++_shared
       -DANDROID_PLATFORM=$(ANDROID_SDK_VERSION)
       -DENABLE_TESTS=ON
       -DENABLE_INTEL_GPU=ON
       -DCMAKE_CXX_LINKER_LAUNCHER=ccache
       -DCMAKE_C_LINKER_LAUNCHER=ccache
       -DCMAKE_CXX_COMPILER_LAUNCHER=ccache
```
```diff
@@ -32,13 +32,13 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/openvino_contrib
-    ref: master
+    ref: releases/2023/0

   - repository: testdata
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/testdata
-    ref: master
+    ref: releases/2023/0

 variables:
   - group: github
```
```diff
@@ -105,7 +105,7 @@ jobs:
   steps:
   - task: UsePythonVersion@0
     inputs:
      versionSpec: '$(OV_PYTHON_VERSION)' # Setting only major & minor version will download latest release from GH repo example 3.10 will be 3.10.10.
      addToPath: true
      disableDownloadFromRegistry: false
      architecture: 'x64'
```
```diff
@@ -245,6 +245,7 @@ jobs:
       -DCMAKE_CXX_COMPILER=clang++
       -DCMAKE_C_COMPILER=clang
       -DENABLE_SYSTEM_SNAPPY=ON
       -DENABLE_SYSTEM_TBB=ON
       -DCPACK_GENERATOR=$(CMAKE_CPACK_GENERATOR)
       -DBUILD_nvidia_plugin=OFF
       -S $(REPO_DIR)
```
```diff
@@ -305,7 +306,7 @@ jobs:

   # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
   - script: |
-      export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
+      export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.2116/linux/x64:$(LD_LIBRARY_PATH)
       python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph $(PYTHON_STATIC_ARGS) \
         --junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
         --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py \
```
```diff
@@ -315,7 +316,7 @@ jobs:
   # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
   - script: |
       # For python imports to import pybind_mock_frontend
-      export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
+      export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.2116/linux/x64:$(LD_LIBRARY_PATH)
       export PYTHONPATH=$(INSTALL_TEST_DIR):$(INSTALL_DIR)/python/python3.8:$PYTHONPATH
       python3 -m pytest -sv $(INSTALL_TEST_DIR)/pyopenvino $(PYTHON_STATIC_ARGS) \
         --junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
```
```diff
@@ -325,7 +326,7 @@ jobs:
     displayName: 'Python API 2.0 Tests'

   - script: |
-      export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
+      export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.2116/linux/x64:$(LD_LIBRARY_PATH)
       python3 -m pytest -s $(INSTALL_TEST_DIR)/mo/unit_tests --junitxml=$(INSTALL_TEST_DIR)/TEST-ModelOptimizer.xml
     displayName: 'Model Optimizer UT'
```
```diff
@@ -366,7 +367,7 @@ jobs:
     displayName: 'Build cpp samples - gcc'

   - script: $(SAMPLES_INSTALL_DIR)/cpp/build_samples.sh -b $(BUILD_DIR)/cpp_samples_clang
     env:
       CC: clang
       CXX: clang++
     displayName: 'Build cpp samples - clang'
```
```diff
@@ -108,17 +108,17 @@ jobs:

  - checkout: self
    clean: 'true'
    submodules: 'true'
    path: openvino

  - script: |
      set -e
      sudo -E $(OPENVINO_REPO_DIR)/install_build_dependencies.sh
      python3 -m pip install --upgrade pip
      python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/requirements.txt
      python3 -m pip install -r $(OPENVINO_REPO_DIR)/src/bindings/python/wheel/requirements-dev.txt
-      # install dependencies needed to build CPU plugin for ARM
-      sudo -E apt --assume-yes install scons crossbuild-essential-arm64
+      # generic dependencies
+      sudo -E apt --assume-yes install cmake ccache
      # Speed up build
      sudo -E apt -y --no-install-recommends install unzip
      wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
```
```diff
@@ -126,25 +126,60 @@ jobs:
      sudo cp -v ninja /usr/local/bin/
    displayName: 'Install dependencies'

-  - task: CMake@1
-    inputs:
-      cmakeArgs: >
-        -G "Ninja Multi-Config"
-        -DCMAKE_VERBOSE_MAKEFILE=ON
-        -DCMAKE_COMPILE_WARNING_AS_ERROR=ON
-        -DOpenCV_DIR=$(INSTALL_OPENCV)/cmake
-        -DENABLE_PYTHON=OFF
-        -DENABLE_TESTS=ON
-        -DENABLE_DATA=OFF
-        -DCMAKE_TOOLCHAIN_FILE=$(OPENVINO_REPO_DIR)/cmake/arm64.toolchain.cmake
-        -DCMAKE_VERBOSE_MAKEFILE=ON
-        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache
-        -DCMAKE_C_COMPILER_LAUNCHER=ccache
-        -DARM_COMPUTE_SCONS_JOBS=$(NUM_PROC)
-        -DCMAKE_INSTALL_PREFIX=$(INSTALL_OPENVINO)
-        -S $(OPENVINO_REPO_DIR)
-    displayName: 'CMake OpenVINO ARM plugin'
+  - script: |
+      git submodule update --init -- $(OPENVINO_REPO_DIR)/src/plugins
+      git submodule update --init -- $(OPENVINO_REPO_DIR)/thirdparty/gtest
+    displayName: 'Init submodules for non Conan dependencies'
+
+  - script: |
+      python3 -m pip install conan
+      # generate build profile
+      conan profile detect
+      # generate host profile for linux_arm64
+      echo "include(default)" > $(BUILD_OPENVINO)/linux_arm64
+      echo "[buildenv]" >> $(BUILD_OPENVINO)/linux_arm64
+      echo "CC=aarch64-linux-gnu-gcc" >> $(BUILD_OPENVINO)/linux_arm64
+      echo "CXX=aarch64-linux-gnu-g++" >> $(BUILD_OPENVINO)/linux_arm64
+      # install OpenVINO dependencies
+      export CMAKE_CXX_COMPILER_LAUNCHER=ccache
+      export CMAKE_C_COMPILER_LAUNCHER=ccache
+      conan install $(OPENVINO_REPO_DIR)/conanfile.txt \
+        -pr:h $(BUILD_OPENVINO)/linux_arm64 \
+        -s:h arch=armv8 \
+        -of $(BUILD_OPENVINO) \
+        -b missing
+    env:
+      CCACHE_DIR: $(OPENVINO_CCACHE_DIR)
+      CCACHE_TEMPDIR: $(TMP_DIR)/ccache
+      CCACHE_BASEDIR: $(Pipeline.Workspace)
+      CCACHE_MAXSIZE: 50G
+    displayName: 'Install conan and dependencies'
+
+  - script: |
+      source $(BUILD_OPENVINO)/conanbuild.sh
+      cmake \
+        -G Ninja \
+        -DCMAKE_VERBOSE_MAKEFILE=ON \
+        -DBUILD_SHARED_LIBS=ON \
+        -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \
+        -DENABLE_CPPLINT=OFF \
+        -DENABLE_PYTHON=OFF \
+        -DENABLE_TESTS=ON \
+        -DENABLE_DATA=OFF \
+        -DENABLE_SYSTEM_TBB=ON \
+        -DENABLE_SYSTEM_PROTOBUF=ON \
+        -DENABLE_SYSTEM_SNAPPY=ON \
+        -DENABLE_SYSTEM_PUGIXML=ON \
+        -DCMAKE_TOOLCHAIN_FILE=$(BUILD_OPENVINO)/conan_toolchain.cmake \
+        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
+        -DCMAKE_C_COMPILER_LAUNCHER=ccache \
+        -DARM_COMPUTE_SCONS_JOBS=$(NUM_PROC) \
+        -DCMAKE_INSTALL_PREFIX=$(INSTALL_OPENVINO) \
+        -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \
+        -S $(OPENVINO_REPO_DIR) \
+        -B $(BUILD_OPENVINO)
+      source $(BUILD_OPENVINO)/deactivate_conanbuild.sh
+    displayName: 'CMake configure'

   - script: cmake --build $(BUILD_OPENVINO) --parallel --config $(BUILD_TYPE)
     env:
```
```diff
@@ -152,13 +187,13 @@ jobs:
       CCACHE_TEMPDIR: $(TMP_DIR)/ccache
       CCACHE_BASEDIR: $(Pipeline.Workspace)
       CCACHE_MAXSIZE: 50G
-    displayName: 'Build OpenVINO ARM plugin'
+    displayName: 'Build OpenVINO Runtime'

   - script: cmake --build $(BUILD_OPENVINO) --parallel --config $(BUILD_TYPE) --target install
-    displayName: 'Install OpenVINO ARM plugin'
+    displayName: 'Install OpenVINO Runtime'

   - task: PublishBuildArtifacts@1
     inputs:
       PathtoPublish: $(Build.ArtifactStagingDirectory)
       ArtifactName: 'openvino_aarch64_linux'
-    displayName: 'Publish OpenVINO AArch64 linux package'
+    displayName: 'Publish OpenVINO Runtime for ARM'
```
```diff
@@ -35,6 +35,7 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/testdata
+    ref: releases/2023/0

 variables:
   - group: github
```
```diff
@@ -4,7 +4,7 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/openvino_contrib
-    ref: master
+    ref: releases/2023/0

 variables:
   - group: github
```
```diff
@@ -42,11 +42,13 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/openvino_contrib
+    ref: releases/2023/0

   - repository: testdata
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/testdata
+    ref: releases/2023/0

 jobs:
 - job: CUDAPlugin_Lin
```
```diff
@@ -34,7 +34,7 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/testdata
-    ref: master
+    ref: releases/2023/0

 jobs:
 - job: Lin_Debian
```
```diff
@@ -4,7 +4,7 @@
 #    type: github
 #    endpoint: openvinotoolkit
 #    name: openvinotoolkit/testdata
-#    ref: master
+#    ref: releases/2023/0

 jobs:
 - job: Lin_lohika
```
```diff
@@ -35,13 +35,13 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/openvino_contrib
-    ref: master
+    ref: releases/2023/0

   - repository: testdata
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/testdata
-    ref: master
+    ref: releases/2023/0

 variables:
   - group: github
```
```diff
@@ -32,13 +32,13 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/openvino_contrib
-    ref: master
+    ref: releases/2023/0

   - repository: testdata
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/testdata
-    ref: master
+    ref: releases/2023/0

 jobs:
 - job: Win
```
```diff
@@ -35,6 +35,7 @@ resources:
     type: github
     endpoint: openvinotoolkit
     name: openvinotoolkit/testdata
+    ref: releases/2023/0

 variables:
   - group: github
```
```diff
@@ -116,7 +117,7 @@ jobs:
       -G Ninja ^
       -DENABLE_CPPLINT=OFF ^
       -DENABLE_GAPI_PREPROCESSING=OFF ^
       -DENABLE_FASTER_BUILD=ON ^
       -DENABLE_PLUGINS_XML=ON ^
       -DCMAKE_COMPILE_WARNING_AS_ERROR=ON ^
       -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) ^
       -DENABLE_PROFILING_ITT=ON ^
```
```diff
@@ -153,7 +154,6 @@ jobs:
       -DVERBOSE_BUILD=ON ^
       -DENABLE_CPPLINT=OFF ^
       -DENABLE_GAPI_PREPROCESSING=OFF ^
       -DENABLE_FASTER_BUILD=ON ^
       -DENABLE_PROFILING_ITT=OFF ^
       -DSELECTIVE_BUILD=ON ^
       -DCMAKE_COMPILE_WARNING_AS_ERROR=ON ^
```
```diff
@@ -1,4 +1,4 @@
-FROM ubuntu:22.04
+FROM ubuntu:23.04

 LABEL version=2021.03.30.1

@@ -38,6 +38,7 @@ RUN apt-get update && apt-get -y --no-install-recommends install \
     python3 \
     python3-pip \
     python3-dev \
+    pybind11-dev \
     python3-virtualenv \
     cython3 \
     tox && \

@@ -71,5 +72,5 @@ RUN ninja install
 WORKDIR /openvino/src/bindings/python
 ENV OpenVINO_DIR=/openvino/dist/runtime/cmake
 ENV LD_LIBRARY_PATH=/openvino/dist/runtime/lib/intel64:/openvino/dist/runtime/3rdparty/tbb/lib
-ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/python_api/python3.10:${PYTHONPATH}
+ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/python_api/python3.11:${PYTHONPATH}
 CMD tox
```
**.gitignore** (vendored, 1 change)
```diff
@@ -26,6 +26,7 @@ temp/
 .repo/
 CMakeLists.txt.user
 docs/IE_PLUGIN_DG/html/
+CMakeUserPresets.json

 *.project
 *.cproject
```
```diff
@@ -40,8 +40,6 @@ endif()

 # resolving dependencies for the project
 message (STATUS "CMAKE_VERSION ......................... " ${CMAKE_VERSION})
-message (STATUS "CMAKE_BINARY_DIR ...................... " ${CMAKE_BINARY_DIR})
-message (STATUS "CMAKE_SOURCE_DIR ...................... " ${CMAKE_SOURCE_DIR})
 message (STATUS "OpenVINO_SOURCE_DIR ................... " ${OpenVINO_SOURCE_DIR})
 message (STATUS "OpenVINO_BINARY_DIR ................... " ${OpenVINO_BINARY_DIR})
 message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})

@@ -66,7 +64,7 @@ endif()
 if(CMAKE_TOOLCHAIN_FILE)
     message (STATUS "CMAKE_TOOLCHAIN_FILE .................. " ${CMAKE_TOOLCHAIN_FILE})
 endif()
-if(OV_GLIBC_VERSION)
+if(NOT OV_GLIBC_VERSION VERSION_EQUAL 0.0)
     message (STATUS "GLIBC_VERSION ......................... " ${OV_GLIBC_VERSION})
 endif()
```
**README.md** (43 changes)
```diff
@@ -2,14 +2,14 @@

 <img src="docs/img/openvino-logo-purple-black.png" width="400px">

 [](https://github.com/openvinotoolkit/openvino/releases/tag/2022.3.0)
 [](LICENSE)
 [](https://badge.fury.io/py/openvino)
-[](https://anaconda.org/conda-forge/openvino/badges/version.svg)
+[](https://anaconda.org/conda-forge/openvino)
 [](https://formulae.brew.sh/formula/openvino)

 [](https://pepy.tech/project/openvino)
+[](https://anaconda.org/conda-forge/openvino/files)
+[](https://formulae.brew.sh/formula/openvino)

 </div>

 ## Contents:
```
```diff
@@ -70,24 +70,24 @@ The OpenVINO™ Runtime can infer models on different hardware devices. This sec
 <tbody>
   <tr>
     <td rowspan=2>CPU</td>
-    <td> <a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_CPU.html#doxid-openvino-docs-o-v-u-g-supported-plugins-c-p-u">Intel CPU</a></tb>
+    <td> <a href="https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_supported_plugins_CPU.html#doxid-openvino-docs-o-v-u-g-supported-plugins-c-p-u">Intel CPU</a></tb>
     <td><b><i><a href="./src/plugins/intel_cpu">openvino_intel_cpu_plugin</a></i></b></td>
     <td>Intel Xeon with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel Core Processors with Intel AVX2, Intel Atom Processors with Intel® Streaming SIMD Extensions (Intel® SSE)</td>
   </tr>
   <tr>
-    <td> <a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_ARM_CPU.html">ARM CPU</a></tb>
+    <td> <a href="https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_supported_plugins_CPU.html">ARM CPU</a></tb>
     <td><b><i><a href="https://github.com/openvinotoolkit/openvino_contrib/tree/master/modules/arm_plugin">openvino_arm_cpu_plugin</a></i></b></td>
     <td>Raspberry Pi™ 4 Model B, Apple® Mac mini with M1 chip, NVIDIA® Jetson Nano™, Android™ devices
   </tr>
   <tr>
     <td>GPU</td>
-    <td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_GPU.html#doxid-openvino-docs-o-v-u-g-supported-plugins-g-p-u">Intel GPU</a></td>
+    <td><a href="https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_supported_plugins_GPU.html#doxid-openvino-docs-o-v-u-g-supported-plugins-g-p-u">Intel GPU</a></td>
     <td><b><i><a href="./src/plugins/intel_gpu">openvino_intel_gpu_plugin</a></i></b></td>
     <td>Intel Processor Graphics, including Intel HD Graphics and Intel Iris Graphics</td>
   </tr>
   <tr>
     <td>GNA</td>
-    <td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_GNA.html#doxid-openvino-docs-o-v-u-g-supported-plugins-g-n-a">Intel GNA</a></td>
+    <td><a href="https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_supported_plugins_GNA.html#doxid-openvino-docs-o-v-u-g-supported-plugins-g-n-a">Intel GNA</a></td>
     <td><b><i><a href="./src/plugins/intel_gna">openvino_intel_gna_plugin</a></i></b></td>
     <td>Intel Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel Pentium Silver J5005 Processor, Intel Pentium Silver N5000 Processor, Intel Celeron J4005 Processor, Intel Celeron J4105 Processor, Intel Celeron Processor N4100, Intel Celeron Processor N4000, Intel Core i3-8121U Processor, Intel Core i7-1065G7 Processor, Intel Core i7-1060G7 Processor, Intel Core i5-1035G4 Processor, Intel Core i5-1035G7 Processor, Intel Core i5-1035G1 Processor, Intel Core i5-1030G7 Processor, Intel Core i5-1030G4 Processor, Intel Core i3-1005G1 Processor, Intel Core i3-1000G1 Processor, Intel Core i3-1000G4 Processor</td>
   </tr>
```
```diff
@@ -105,22 +105,22 @@ OpenVINO™ Toolkit also contains several plugins which simplify loading models
 </thead>
 <tbody>
   <tr>
-    <td><a href="https://docs.openvino.ai/nightly/openvino_docs_IE_DG_supported_plugins_AUTO.html#doxid-openvino-docs-i-e-d-g-supported-plugins-a-u-t-o">Auto</a></td>
+    <td><a href="https://docs.openvino.ai/2023.0/openvino_docs_IE_DG_supported_plugins_AUTO.html#doxid-openvino-docs-i-e-d-g-supported-plugins-a-u-t-o">Auto</a></td>
     <td><b><i><a href="./src/plugins/auto">openvino_auto_plugin</a></i></b></td>
     <td>Auto plugin enables selecting Intel device for inference automatically</td>
   </tr>
   <tr>
-    <td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_Automatic_Batching.html">Auto Batch</a></td>
+    <td><a href="https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_Automatic_Batching.html">Auto Batch</a></td>
     <td><b><i><a href="./src/plugins/auto_batch">openvino_auto_batch_plugin</a></i></b></td>
     <td>Auto batch plugin performs on-the-fly automatic batching (i.e. grouping inference requests together) to improve device utilization, with no programming effort from the user</td>
   </tr>
   <tr>
-    <td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_Hetero_execution.html#doxid-openvino-docs-o-v-u-g-hetero-execution">Hetero</a></td>
+    <td><a href="https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_Hetero_execution.html#doxid-openvino-docs-o-v-u-g-hetero-execution">Hetero</a></td>
     <td><b><i><a href="./src/plugins/hetero">openvino_hetero_plugin</a></i></b></td>
     <td>Heterogeneous execution enables automatic inference splitting between several devices</td>
   </tr>
   <tr>
-    <td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_Running_on_multiple_devices.html#doxid-openvino-docs-o-v-u-g-running-on-multiple-devices">Multi</a></td>
+    <td><a href="https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_Running_on_multiple_devices.html#doxid-openvino-docs-o-v-u-g-running-on-multiple-devices">Multi</a></td>
     <td><b><i><a href="./src/plugins/auto">openvino_auto_plugin</a></i></b></td>
     <td>Multi plugin enables simultaneous inference of the same model on several devices in parallel</td>
   </tr>
```
```diff
@@ -157,10 +157,10 @@ The list of OpenVINO tutorials:
 ## System requirements

 The system requirements vary depending on platform and are available on dedicated pages:
-- [Linux](https://docs.openvino.ai/nightly/openvino_docs_install_guides_installing_openvino_linux_header.html)
-- [Windows](https://docs.openvino.ai/nightly/openvino_docs_install_guides_installing_openvino_windows_header.html)
-- [macOS](https://docs.openvino.ai/nightly/openvino_docs_install_guides_installing_openvino_macos_header.html)
-- [Raspbian](https://docs.openvino.ai/nightly/openvino_docs_install_guides_installing_openvino_raspbian.html)
+- [Linux](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_linux_header.html)
+- [Windows](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_windows_header.html)
+- [macOS](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_macos_header.html)
+- [Raspbian](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_installing_openvino_raspbian.html)

 ## How to build
```
```diff
@@ -189,7 +189,6 @@ Report questions, issues and suggestions, using:
 * [Neural Network Compression Framework (NNCF)](https://github.com/openvinotoolkit/nncf) - a suite of advanced algorithms for model inference optimization including quantization, filter pruning, binarization and sparsity
 * [OpenVINO™ Training Extensions (OTE)](https://github.com/openvinotoolkit/training_extensions) - convenient environment to train Deep Learning models and convert them using OpenVINO for optimized inference.
 * [OpenVINO™ Model Server (OVMS)](https://github.com/openvinotoolkit/model_server) - a scalable, high-performance solution for serving deep learning models optimized for Intel architectures
-* [DL Workbench](https://docs.openvino.ai/nightly/workbench_docs_Workbench_DG_Introduction.html) - an alternative, web-based version of OpenVINO designed to facilitate optimization and compression of pre-trained deep learning models.
 * [Computer Vision Annotation Tool (CVAT)](https://github.com/opencv/cvat) - an online, interactive video and image annotation tool for computer vision purposes.
 * [Dataset Management Framework (Datumaro)](https://github.com/openvinotoolkit/datumaro) - a framework and CLI tool to build, transform, and analyze datasets.
```
```diff
@@ -197,7 +196,7 @@ Report questions, issues and suggestions, using:
 \* Other names and brands may be claimed as the property of others.

 [Open Model Zoo]:https://github.com/openvinotoolkit/open_model_zoo
-[OpenVINO™ Runtime]:https://docs.openvino.ai/nightly/openvino_docs_OV_UG_OV_Runtime_User_Guide.html
-[Model Optimizer]:https://docs.openvino.ai/nightly/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html
-[Post-Training Optimization Tool]:https://docs.openvino.ai/nightly/pot_introduction.html
+[OpenVINO™ Runtime]:https://docs.openvino.ai/2023.0/openvino_docs_OV_UG_OV_Runtime_User_Guide.html
+[Model Optimizer]:https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html
+[Post-Training Optimization Tool]:https://docs.openvino.ai/2023.0/pot_introduction.html
 [Samples]:https://github.com/openvinotoolkit/openvino/tree/master/samples
```
```diff
@@ -98,10 +98,10 @@ function(ov_download_tbb)
         # TODO: add target_path to be platform specific as well, to avoid following if
         # build oneTBB 2021.2.1 with Visual Studio 2019 (MSVC 14.21)
         RESOLVE_DEPENDENCY(TBB
-                ARCHIVE_WIN "oneapi-tbb-2021.2.1-win.zip"
+                ARCHIVE_WIN "oneapi-tbb-2021.2.2-win.zip"
                 TARGET_PATH "${TEMP}/tbb"
                 ENVIRONMENT "TBBROOT"
-                SHA256 "d81591673bd7d3d9454054642f8ef799e1fdddc7b4cee810a95e6130eb7323d4"
+                SHA256 "103b19a8af288c6a7d83ed3f0d2239c4afd0dd189fc12aad1d34b3c9e78df94b"
                 USE_NEW_LOCATION TRUE)
     elseif(ANDROID AND X86_64)
         RESOLVE_DEPENDENCY(TBB
```
```diff
@@ -327,8 +327,8 @@ if(ENABLE_INTEL_GNA)
             GNA_LIB_DIR
             libGNA_INCLUDE_DIRS
             libGNA_LIBRARIES_BASE_PATH)
-    set(GNA_VERSION "03.05.00.1906")
-    set(GNA_HASH "4a5be86d9c026b0e10afac2a57fc7c99d762b30e3d506abb3a3380fbcfe2726e")
+    set(GNA_VERSION "03.05.00.2116")
+    set(GNA_HASH "960350567702bda17276ac4c060d7524fb7ce7ced785004bd861c81ff2bfe2c5")

     set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include)
     if(WIN32)
```
```diff
@@ -111,8 +111,8 @@ else()
     set(BIN_FOLDER "bin/${ARCH_FOLDER}")
 endif()

-if(CMAKE_GENERATOR MATCHES "^Ninja Multi-Config$")
-    # Ninja-Multi specific, see:
+if(CMAKE_GENERATOR STREQUAL "Ninja Multi-Config")
+    # 'Ninja Multi-Config' specific, see:
     # https://cmake.org/cmake/help/latest/variable/CMAKE_DEFAULT_BUILD_TYPE.html
     set(CMAKE_DEFAULT_BUILD_TYPE "Release" CACHE STRING "CMake default build type")
 elseif(NOT OV_GENERATOR_MULTI_CONFIG)
```
```diff
@@ -240,7 +240,7 @@ if(ENABLE_LTO)
                 LANGUAGES C CXX)

     if(NOT IPO_SUPPORTED)
-        set(ENABLE_LTO "OFF" CACHE STRING "Enable Link Time Optmization" FORCE)
+        set(ENABLE_LTO "OFF" CACHE STRING "Enable Link Time Optimization" FORCE)
         message(WARNING "IPO / LTO is not supported: ${OUTPUT_MESSAGE}")
     endif()
 endif()
```
```diff
@@ -250,8 +250,8 @@ endif()
 macro(ov_install_static_lib target comp)
     if(NOT BUILD_SHARED_LIBS)
         get_target_property(target_type ${target} TYPE)
-        if(${target_type} STREQUAL "STATIC_LIBRARY")
-            set_target_properties(${target} PROPERTIES EXCLUDE_FROM_ALL FALSE)
+        if(target_type STREQUAL "STATIC_LIBRARY")
+            set_target_properties(${target} PROPERTIES EXCLUDE_FROM_ALL OFF)
         endif()
         install(TARGETS ${target} EXPORT OpenVINOTargets
                 ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${comp} ${ARGN})
```
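As context for the tightened comparison above, a minimal sketch of how ov_install_static_lib() is meant to be called; the target name is hypothetical, only the macro and OV_CPACK_COMP_CORE come from the hunk.

```cmake
# Hypothetical call site: in a static build (BUILD_SHARED_LIBS=OFF) the macro
# re-enables an otherwise-excluded static library and installs its archive.
add_library(example_internal_lib STATIC example.cpp)   # hypothetical target
set_target_properties(example_internal_lib PROPERTIES EXCLUDE_FROM_ALL ON)
ov_install_static_lib(example_internal_lib ${OV_CPACK_COMP_CORE})
```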
```diff
@@ -4,23 +4,28 @@

 if(WIN32)
     set(PROGRAMFILES_ENV "ProgramFiles(X86)")
+    # check that PROGRAMFILES_ENV is defined, because in case of cross-compilation for Windows
+    # we don't have such variable
+    if(DEFINED ENV{PROGRAMFILES_ENV})
         file(TO_CMAKE_PATH $ENV{${PROGRAMFILES_ENV}} PROGRAMFILES)

         set(WDK_PATHS "${PROGRAMFILES}/Windows Kits/10/bin/${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}/x64"
                       "${PROGRAMFILES}/Windows Kits/10/bin/x64")

         message(STATUS "Trying to find apivalidator in: ")
         foreach(wdk_path IN LISTS WDK_PATHS)
             message(" * ${wdk_path}")
         endforeach()

         find_host_program(ONECORE_API_VALIDATOR
                           NAMES apivalidator
                           PATHS ${WDK_PATHS}
                           DOC "ApiValidator for OneCore compliance")

         if(ONECORE_API_VALIDATOR)
             message(STATUS "Found apivalidator: ${ONECORE_API_VALIDATOR}")
         endif()
+    endif()
 endif()
```
```diff
@@ -4,8 +4,13 @@

 macro(enable_fuzzing)
     # Enable (libFuzzer)[https://llvm.org/docs/LibFuzzer.html] if supported.
-    set(FUZZING_COMPILER_FLAGS "-fsanitize=fuzzer-no-link -fprofile-instr-generate -fcoverage-mapping")
-    set(FUZZING_LINKER_FLAGS "-fsanitize-coverage=trace-pc-guard -fprofile-instr-generate")
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        # see https://learn.microsoft.com/en-us/cpp/build/reference/fsanitize?view=msvc-160#remarks
+        set(FUZZING_COMPILER_FLAGS "/fsanitize=fuzzer")
+    elseif(OV_COMPILER_IS_CLANG)
+        set(FUZZING_COMPILER_FLAGS "-fsanitize=fuzzer-no-link -fprofile-instr-generate -fcoverage-mapping")
+        set(FUZZING_LINKER_FLAGS "-fsanitize-coverage=trace-pc-guard -fprofile-instr-generate")
+    endif()

     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FUZZING_COMPILER_FLAGS}")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FUZZING_COMPILER_FLAGS}")

@@ -20,6 +25,10 @@ function(add_fuzzer FUZZER_EXE_NAME FUZZER_SOURCES)
     add_executable(${FUZZER_EXE_NAME} ${FUZZER_SOURCES})
     target_link_libraries(${FUZZER_EXE_NAME} PRIVATE fuzz-testhelper)
     if(ENABLE_FUZZING)
-        set_target_properties(${FUZZER_EXE_NAME} PROPERTIES LINK_FLAGS "-fsanitize=fuzzer")
+        if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+            # no extra flags are required
+        elseif(OV_COMPILER_IS_CLANG)
+            set_target_properties(${FUZZER_EXE_NAME} PROPERTIES LINK_FLAGS "-fsanitize=fuzzer")
+        endif()
     endif()
 endfunction(add_fuzzer)
```
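A minimal sketch of how the two refactored helpers could be consumed from a fuzz target's CMake list, assuming the module above is already included; the target and source names are hypothetical.

```cmake
# enable_fuzzing() now selects /fsanitize=fuzzer on MSVC and
# -fsanitize=fuzzer-no-link on clang; add_fuzzer() adds the extra
# -fsanitize=fuzzer link flag only for clang, since MSVC needs none.
if(ENABLE_FUZZING)
    enable_fuzzing()
endif()
add_fuzzer(parse_ir_fuzzer parse_ir_fuzzer.cpp)   # hypothetical fuzz target
```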
```diff
@@ -12,23 +12,17 @@ include(CheckCXXCompilerFlag)
 # Defines ie_c_cxx_deprecated varaible which contains C / C++ compiler flags
 #
 macro(ov_disable_deprecated_warnings)
-    if(WIN32)
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(ie_c_cxx_deprecated "/Qdiag-disable:1478,1786")
-        elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-            set(ie_c_cxx_deprecated "/wd4996")
-        elseif(OV_COMPILER_IS_CLANG)
-            set(ie_c_cxx_deprecated "-Wno-deprecated-declarations")
-        endif()
-    else()
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(ie_c_cxx_deprecated "-diag-disable=1478,1786")
-        elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
-            set(ie_c_cxx_deprecated "-Wno-deprecated-declarations")
-        endif()
-    endif()
-
-    if(NOT ie_c_cxx_deprecated)
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        set(ie_c_cxx_deprecated "/wd4996")
+    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+        if(WIN32)
+            set(ie_c_cxx_deprecated "/Qdiag-disable:1478,1786")
+        else()
+            set(ie_c_cxx_deprecated "-diag-disable=1478,1786")
+        endif()
+    elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
+        set(ie_c_cxx_deprecated "-Wno-deprecated-declarations")
+    else()
         message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
     endif()
```
```diff
@@ -49,24 +43,18 @@ endmacro()
 # Defines ie_c_cxx_deprecated_no_errors varaible which contains C / C++ compiler flags
 #
 macro(ov_deprecated_no_errors)
-    if(WIN32)
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(ie_c_cxx_deprecated_no_errors "/Qdiag-warning:1478,1786")
-        elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-            # show 4996 only for /w4
-            set(ie_c_cxx_deprecated_no_errors "/wd4996")
-        elseif(OV_COMPILER_IS_CLANG)
-            set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations")
-        endif()
-    else()
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(ie_c_cxx_deprecated_no_errors "-diag-warning=1478,1786")
-        elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
-            set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations")
-        endif()
-    endif()
-
-    if(NOT ie_c_cxx_deprecated_no_errors)
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        # show 4996 only for /w4
+        set(ie_c_cxx_deprecated_no_errors "/wd4996")
+    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+        if(WIN32)
+            set(ie_c_cxx_deprecated_no_errors "/Qdiag-warning:1478,1786")
+        else()
+            set(ie_c_cxx_deprecated_no_errors "-diag-warning=1478,1786")
+        endif()
+    elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
+        set(ie_c_cxx_deprecated_no_errors "-Wno-error=deprecated-declarations")
+    else()
         message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
     endif()
```
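The two macros above only compute a flag string; a hedged sketch of the expected usage pattern (the variable name comes from the hunk, the call site is hypothetical):

```cmake
# Compute the per-compiler "silence deprecation warnings" flags, then apply
# them to the current directory's C and C++ flags.
ov_disable_deprecated_warnings()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ie_c_cxx_deprecated}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ie_c_cxx_deprecated}")
```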
```diff
@@ -101,23 +89,21 @@ endmacro()
 # Provides SSE4.2 compilation flags depending on an OS and a compiler
 #
 macro(ie_sse42_optimization_flags flags)
-    if(WIN32)
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-            # No such option for MSVC 2019
-        elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(${flags} /QxSSE4.2)
-        else()
-            message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
-        endif()
-    else()
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(${flags} -xSSE4.2)
-        else()
-            set(${flags} -msse4.2)
-            if(EMSCRIPTEN)
-                list(APPEND ${flags} -msimd128)
-            endif()
-        endif()
-    endif()
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        # No such option for MSVC 2019
+    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+        if(WIN32)
+            set(${flags} /QxSSE4.2)
+        else()
+            set(${flags} -xSSE4.2)
+        endif()
+    elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
+        set(${flags} -msse4.2)
+        if(EMSCRIPTEN)
+            list(APPEND ${flags} -msimd128)
+        endif()
+    else()
+        message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
+    endif()
 endmacro()
```
```diff
@@ -127,20 +113,18 @@ endmacro()
 # Provides AVX2 compilation flags depending on an OS and a compiler
 #
 macro(ie_avx2_optimization_flags flags)
-    if(WIN32)
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(${flags} /QxCORE-AVX2)
-        elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-            set(${flags} /arch:AVX2)
-        else()
-            message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
-        endif()
-    else()
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(${flags} -xCORE-AVX2)
-        else()
-            set(${flags} -mavx2 -mfma)
-        endif()
-    endif()
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        set(${flags} /arch:AVX2)
+    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+        if(WIN32)
+            set(${flags} /QxCORE-AVX2)
+        else()
+            set(${flags} -xCORE-AVX2)
+        endif()
+    elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
+        set(${flags} -mavx2 -mfma)
+    else()
+        message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
+    endif()
 endmacro()
```
```diff
@@ -151,24 +135,18 @@ endmacro()
 # depending on an OS and a compiler
 #
 macro(ie_avx512_optimization_flags flags)
-    if(WIN32)
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(${flags} /QxCOMMON-AVX512)
-        elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-            set(${flags} /arch:AVX512)
-        else()
-            message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
-        endif()
-    else()
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-            set(${flags} -xCOMMON-AVX512)
-        endif()
-        if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-            set(${flags} -mavx512f -mfma)
-        endif()
-        if(CMAKE_CXX_COMPILER_ID MATCHES "^(Clang|AppleClang)$")
-            set(${flags} -mavx512f -mfma)
-        endif()
-    endif()
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        set(${flags} /arch:AVX512)
+    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+        if(WIN32)
+            set(${flags} /QxCOMMON-AVX512)
+        else()
+            set(${flags} -xCOMMON-AVX512)
+        endif()
+    elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
+        set(${flags} -mavx512f -mfma)
+    else()
+        message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
+    endif()
 endmacro()
```
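A short, hypothetical usage sketch for the refactored SIMD-flag macros: the flags are resolved into a variable and scoped to the specific sources that contain vectorized kernels.

```cmake
ie_avx2_optimization_flags(avx2_flags)   # e.g. -mavx2 -mfma on GCC/clang, /arch:AVX2 on MSVC
set_source_files_properties(gemm_kernel_avx2.cpp   # hypothetical source file
                            PROPERTIES COMPILE_OPTIONS "${avx2_flags}")
```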
```diff
@@ -265,8 +243,10 @@ endfunction()
 function(ov_force_include target scope header_file)
     if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
         target_compile_options(${target} ${scope} /FI"${header_file}")
-    else()
+    elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
         target_compile_options(${target} ${scope} -include "${header_file}")
+    else()
+        message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
     endif()
 endfunction()
```
```diff
@@ -318,11 +298,11 @@ set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
 if(CMAKE_CL_64)
     # Default char Type Is unsigned
     # ie_add_compiler_flags(/J)
-else()
+elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
     ie_add_compiler_flags(-fsigned-char)
 endif()

-if(WIN32)
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
     #
     # Common options / warnings enabled
     #

@@ -335,16 +315,14 @@ if(WIN32)
     # This option helps ensure the fewest possible hard-to-find code defects. Similar to -Wall on GNU / Clang
     ie_add_compiler_flags(/W3)

-    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-        # Increase Number of Sections in .Obj file
-        ie_add_compiler_flags(/bigobj)
-        # Build with multiple processes
-        ie_add_compiler_flags(/MP)
+    # Increase Number of Sections in .Obj file
+    ie_add_compiler_flags(/bigobj)
+    # Build with multiple processes
+    ie_add_compiler_flags(/MP)

-        if(AARCH64 AND NOT MSVC_VERSION LESS 1930)
-            # otherwise, _ARM64_EXTENDED_INTRINSICS is defined, which defines 'mvn' macro
-            ie_add_compiler_flags(/D_ARM64_DISTINCT_NEON_TYPES)
-        endif()
+    if(AARCH64 AND NOT MSVC_VERSION LESS 1930)
+        # otherwise, _ARM64_EXTENDED_INTRINSICS is defined, which defines 'mvn' macro
+        ie_add_compiler_flags(/D_ARM64_DISTINCT_NEON_TYPES)
+    endif()

     # Handle Large Addresses
```
```diff
@@ -361,42 +339,62 @@ if(WIN32)
     set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /WX")
     set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /WX")
     set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /WX")
-    if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-        ie_add_compiler_flags(/Qdiag-warning:47,1740,1786)
-    endif()
-
-    #
-    # Disable noisy warnings
-    #
-
-    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-        # C4251 needs to have dll-interface to be used by clients of class
-        ie_add_compiler_flags(/wd4251)
-        # C4275 non dll-interface class used as base for dll-interface class
-        ie_add_compiler_flags(/wd4275)
-    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
-        # 161: unrecognized pragma
-        # 177: variable was declared but never referenced
-        # 556: not matched type of assigned function pointer
-        # 1744: field of class type without a DLL interface used in a class with a DLL interface
-        # 1879: unimplemented pragma ignored
-        # 2586: decorated name length exceeded, name was truncated
-        # 2651: attribute does not apply to any entity
-        # 3180: unrecognized OpenMP pragma
-        # 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
-        # 15335: was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override
-        ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,1879,2586,2651,3180,11075,15335)
-    endif()
+    #
+    # Disable noisy warnings
+    #
+
+    # C4251 needs to have dll-interface to be used by clients of class
+    ie_add_compiler_flags(/wd4251)
+    # C4275 non dll-interface class used as base for dll-interface class
+    ie_add_compiler_flags(/wd4275)
+
+    #
+    # Debug information flags, by default CMake adds /Zi option
+    # but provides no way to specify CMAKE_COMPILE_PDB_NAME on root level
+    # In order to avoid issues with ninja we are replacing default flag instead of having two of them
+    # and observing warning D9025 about flag override
+    #
+
+    string(REPLACE "/Zi" "/Z7" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
+    string(REPLACE "/Zi" "/Z7" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}")
+    string(REPLACE "/Zi" "/Z7" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}")
+    string(REPLACE "/Zi" "/Z7" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND WIN32)
+    #
+    # Warnings as errors
+    #
+
+    if(CMAKE_COMPILE_WARNING_AS_ERROR AND CMAKE_VERSION VERSION_LESS 3.24)
+        ie_add_compiler_flags(/Qdiag-warning:47,1740,1786)
+    endif()
+
+    #
+    # Disable noisy warnings
+    #
+
+    # 161: unrecognized pragma
+    ie_add_compiler_flags(/Qdiag-disable:161)
+    # 177: variable was declared but never referenced
+    ie_add_compiler_flags(/Qdiag-disable:177)
+    # 556: not matched type of assigned function pointer
+    ie_add_compiler_flags(/Qdiag-disable:556)
+    # 1744: field of class type without a DLL interface used in a class with a DLL interface
+    ie_add_compiler_flags(/Qdiag-disable:1744)
+    # 1879: unimplemented pragma ignored
+    ie_add_compiler_flags(/Qdiag-disable:1879)
+    # 2586: decorated name length exceeded, name was truncated
+    ie_add_compiler_flags(/Qdiag-disable:2586)
+    # 2651: attribute does not apply to any entity
+    ie_add_compiler_flags(/Qdiag-disable:2651)
+    # 3180: unrecognized OpenMP pragma
+    ie_add_compiler_flags(/Qdiag-disable:3180)
+    # 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
+    ie_add_compiler_flags(/Qdiag-disable:11075)
+    # 15335: was not vectorized: vectorization possible but seems inefficient.
+    # Use vector always directive or /Qvec-threshold0 to override
+    ie_add_compiler_flags(/Qdiag-disable:15335)
 else()
     #
     # Common enabled warnings
```
```diff
@@ -5,7 +5,9 @@
 include(CheckCXXCompilerFlag)

 if (ENABLE_SANITIZER)
-    if (WIN32)
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
         # the flag is available since MSVC 2019 16.9
         # see https://learn.microsoft.com/en-us/cpp/build/reference/fsanitize?view=msvc-160
         check_cxx_compiler_flag("/fsanitize=address" SANITIZE_ADDRESS_SUPPORTED)
         if (SANITIZE_ADDRESS_SUPPORTED)
             set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} /fsanitize=address")

@@ -14,21 +16,23 @@ if (ENABLE_SANITIZER)
                     "Please, check requirements:\n"
                     "https://github.com/openvinotoolkit/openvino/wiki/AddressSanitizer-and-LeakSanitizer")
         endif()
-    else()
+    elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
         set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=address")
         check_cxx_compiler_flag("-fsanitize-recover=address" SANITIZE_RECOVER_ADDRESS_SUPPORTED)
         if (SANITIZE_RECOVER_ADDRESS_SUPPORTED)
             set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=address")
         endif()
         set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fsanitize=address")
+    else()
+        message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
     endif()
 endif()

-if (ENABLE_UB_SANITIZER)
-    if (WIN32)
-        message(FATAL_ERROR "UndefinedBehavior sanitizer is not supported in Windows")
+if(ENABLE_UB_SANITIZER)
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        message(FATAL_ERROR "UndefinedBehavior sanitizer is not supported in Windows with MSVC compiler. Please, use clang-cl or mingw")
     endif()

     # TODO: Remove -fno-sanitize=null as thirdparty/ocl/clhpp_headers UBSAN compatibility resolved:
     # https://github.com/KhronosGroup/OpenCL-CLHPP/issues/17
     # Mute -fsanitize=function Indirect call of a function through a function pointer of the wrong type.

@@ -48,43 +52,50 @@ if (ENABLE_UB_SANITIZER)
         set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fno-sanitize=function")
     endif()

-    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-        # TODO: Remove -Wno-maybe-uninitialized after CVS-61143 fix
+    if(CMAKE_COMPILER_IS_GNUCXX)
+        # TODO: Remove -Wno-maybe-uninitialized after CVS-61143 is fixed
         set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -Wno-maybe-uninitialized")
     endif()
     check_cxx_compiler_flag("-fsanitize-recover=undefined" SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
-    if (SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
+    if(SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
         set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=undefined")
     endif()

     set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fsanitize=undefined")
 endif()

-if (ENABLE_THREAD_SANITIZER)
-    set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=thread")
-    set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fsanitize=thread")
+if(ENABLE_THREAD_SANITIZER)
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        message(FATAL_ERROR "Thread sanitizer is not supported in Windows with MSVC compiler. Please, use clang-cl or mingw")
+    elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
+        set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=thread")
+        set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fsanitize=thread")
+    else()
+        message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
+    endif()
 endif()

 # common sanitizer options
-if (DEFINED SANITIZER_COMPILER_FLAGS)
+if(DEFINED SANITIZER_COMPILER_FLAGS)
     # ensure symbols are present
-    if (NOT WIN32)
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} /Oy-")
+    elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
         set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -g -fno-omit-frame-pointer")
-        if(NOT OV_COMPILER_IS_CLANG)
+        if(CMAKE_COMPILER_IS_GNUCXX)
             # GPU plugin tests compilation is slow with -fvar-tracking-assignments on GCC.
             # Clang has no var-tracking-assignments.
             set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fno-var-tracking-assignments")
         endif()
         # prevent unloading libraries at runtime, so sanitizer can resolve their symbols
-        if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
+        if(NOT OV_COMPILER_IS_APPLECLANG)
             set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -Wl,-z,nodelete")
             if(OV_COMPILER_IS_CLANG AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0)
                 set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
             endif()
         endif()
     else()
-        set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} /Oy-")
+        message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
     endif()

     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
```
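To summarize the branches above, a sketch of the flags a gcc/clang address-sanitizer build ends up with (MSVC instead gets /fsanitize=address plus /Oy-); the actual propagation goes through SANITIZER_COMPILER_FLAGS and SANITIZER_LINKER_FLAGS as shown in the hunk.

```cmake
# Approximate net effect of configuring with -DENABLE_SANITIZER=ON on gcc/clang:
set(CMAKE_C_FLAGS   "${CMAKE_C_FLAGS}   -fsanitize=address -fsanitize-recover=address -g -fno-omit-frame-pointer")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fsanitize-recover=address -g -fno-omit-frame-pointer")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address -Wl,-z,nodelete")
```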
```diff
@@ -2,61 +2,68 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-if(UNIX)
-    set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -Wformat -Wformat-security")
+if(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG OR
+        (UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel"))
+    set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -Wformat -Wformat-security")

     if (NOT ENABLE_SANITIZER)
+        if(EMSCRIPTEN)
+            # emcc does not support fortification, see:
+            # https://stackoverflow.com/questions/58854858/undefined-symbol-stack-chk-guard-in-libopenh264-so-when-building-ffmpeg-wit
+        else()
             # ASan does not support fortification https://github.com/google/sanitizers/issues/247
-        set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -D_FORTIFY_SOURCE=2")
+            set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -D_FORTIFY_SOURCE=2")
+        endif()
     endif()
     if(NOT APPLE)
         set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -pie")
     endif()

-    if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-        set(IE_LINKER_FLAGS "${IE_LINKER_FLAGS} -z noexecstack -z relro -z now")
-        set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fno-strict-overflow -fno-delete-null-pointer-checks -fwrapv")
+    if(CMAKE_COMPILER_IS_GNUCXX)
+        set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -fno-strict-overflow -fno-delete-null-pointer-checks -fwrapv")
         if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
-            set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
+            set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -fstack-protector-all")
         else()
-            set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-strong")
+            set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -fstack-protector-strong")
         endif()
         if (NOT ENABLE_SANITIZER)
-            set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -s")
+            # Remove all symbol table and relocation information from the executable
+            set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -s")
         endif()
+        if(NOT MINGW)
+            set(OV_LINKER_FLAGS "${OV_LINKER_FLAGS} -z noexecstack -z relro -z now")
+        endif()
+    elseif(OV_COMPILER_IS_CLANG)
+        if(EMSCRIPTEN)
+            # emcc does not support fortification
+            # https://stackoverflow.com/questions/58854858/undefined-symbol-stack-chk-guard-in-libopenh264-so-when-building-ffmpeg-wit
+        else()
-            set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
+            set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -fstack-protector-all")
+        endif()
     elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
         if (NOT ENABLE_SANITIZER)
-            set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -Wl,--strip-all")
+            set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -Wl,--strip-all")
         endif()
-        set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-strong")
-        set(IE_LINKER_FLAGS "${IE_LINKER_FLAGS} -z noexecstack -z relro -z now")
-    endif()
-else()
-    if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-        set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} /sdl")
-    endif()
-    set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} /guard:cf")
-    if(ENABLE_INTEGRITYCHECK)
-        set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /INTEGRITYCHECK")
-    endif()
-    if(ENABLE_QSPECTRE)
-        ie_add_compiler_flags(/Qspectre)
+        set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} -fstack-protector-strong")
+        set(OV_LINKER_FLAGS "${OV_LINKER_FLAGS} -z noexecstack -z relro -z now")
     endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+    set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} /sdl /guard:cf")
+
+    if(ENABLE_QSPECTRE)
+        set(OV_C_CXX_FLAGS "${OV_C_CXX_FLAGS} /Qspectre")
+    endif()
+
+    if(ENABLE_INTEGRITYCHECK)
+        set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /INTEGRITYCHECK")
+    endif()
 endif()

-set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${IE_C_CXX_FLAGS}")
-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${IE_C_CXX_FLAGS}")
-set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} ${IE_LINKER_FLAGS}")
-set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} ${IE_LINKER_FLAGS}")
-set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} ${IE_LINKER_FLAGS}")
+set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${OV_C_CXX_FLAGS}")
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${OV_C_CXX_FLAGS}")
+set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} ${OV_LINKER_FLAGS}")
+set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} ${OV_LINKER_FLAGS}")
+set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} ${OV_LINKER_FLAGS}")
+
+unset(OV_C_CXX_FLAGS)
+unset(OV_LINKER_FLAGS)
```
**cmake/developer_package/cpplint/cpplint.py** (vendored, 8 changes)
```diff
@@ -641,7 +641,7 @@ _repository = None
 # Files to exclude from linting. This is set by the --exclude flag.
 _excludes = None

-# Whether to supress PrintInfo messages
+# Whether to suppress PrintInfo messages
 _quiet = False

 # The allowed line length of files.

@@ -752,7 +752,7 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error):
                 'Unknown NOLINT error category: %s' % category)


-def ProcessGlobalSuppresions(lines):
+def ProcessGlobalSuppressions(lines):
   """Updates the list of global error suppressions.

   Parses any lint directives in the file that have global effect.

@@ -780,7 +780,7 @@ def IsErrorSuppressedByNolint(category, linenum):
   """Returns true if the specified error category is suppressed on this line.

   Consults the global error_suppressions map populated by
-  ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
+  ParseNolintSuppressions/ProcessGlobalSuppressions/ResetNolintSuppressions.

   Args:
     category: str, the category of the error.

@@ -6203,7 +6203,7 @@ def ProcessFileData(filename, file_extension, lines, error,
   ResetNolintSuppressions()

   CheckForCopyright(filename, lines, error)
-  ProcessGlobalSuppresions(lines)
+  ProcessGlobalSuppressions(lines)
   RemoveMultiLineComments(filename, lines, error)
   clean_lines = CleansedLines(lines)
```
```diff
@@ -74,7 +74,12 @@ ie_option (VERBOSE_BUILD "shows extra information about build" OFF)

 ie_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF)

-ie_dependent_option (ENABLE_FUZZING "instrument build for fuzzing" OFF "OV_COMPILER_IS_CLANG;NOT WIN32" OFF)
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC" AND MSVC_VERSION GREATER_EQUAL 1930)
+    # Visual Studio 2022: 1930-1939 = VS 17.0 (v143 toolset)
+    set(_msvc_version_2022 ON)
+endif()
+
+ie_dependent_option (ENABLE_FUZZING "instrument build for fuzzing" OFF "OV_COMPILER_IS_CLANG OR _msvc_version_2022" OFF)

 #
 # Check features
```
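For readers unfamiliar with ie_dependent_option: it appears to behave like CMake's stock cmake_dependent_option, so the new condition can be read through this equivalent sketch.

```cmake
include(CMakeDependentOption)
# ENABLE_FUZZING stays selectable (default OFF) when the compiler is clang
# OR when the _msvc_version_2022 flag computed above is ON; otherwise it is
# forced to the trailing OFF value.
cmake_dependent_option(ENABLE_FUZZING "instrument build for fuzzing" OFF
                       "OV_COMPILER_IS_CLANG OR _msvc_version_2022" OFF)
```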
```diff
@@ -171,7 +171,7 @@ macro(ov_add_frontend)
     endforeach()

     # Disable all warnings for generated code
-    set_source_files_properties(${PROTO_SRCS} ${PROTO_HDRS} PROPERTIES COMPILE_OPTIONS -w GENERATED TRUE)
+    set_source_files_properties(${PROTO_SRCS} ${PROTO_HDRS} PROPERTIES COMPILE_OPTIONS -w GENERATED ON)

     # Create library
     add_library(${TARGET_NAME} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}

@@ -204,8 +204,7 @@ macro(ov_add_frontend)
     ov_add_vs_version_file(NAME ${TARGET_NAME}
                            FILEDESCRIPTION ${OV_FRONTEND_FILEDESCRIPTION})

-    target_link_libraries(${TARGET_NAME} PUBLIC openvino::runtime)
-    target_link_libraries(${TARGET_NAME} PRIVATE ${OV_FRONTEND_LINK_LIBRARIES})
+    target_link_libraries(${TARGET_NAME} PRIVATE ${OV_FRONTEND_LINK_LIBRARIES} PUBLIC openvino::runtime)
     ov_add_library_version(${TARGET_NAME})

     # WA for TF frontends which always require protobuf (not protobuf-lite)

@@ -216,23 +215,34 @@ macro(ov_add_frontend)

     if(proto_files)
         if(OV_FRONTEND_PROTOBUF_LITE)
-            if(NOT protobuf_lite_installed)
-                ov_install_static_lib(${Protobuf_LITE_LIBRARIES} ${OV_CPACK_COMP_CORE})
-                set(protobuf_lite_installed ON CACHE INTERNAL "" FORCE)
-            endif()
-            link_system_libraries(${TARGET_NAME} PRIVATE ${Protobuf_LITE_LIBRARIES})
+            set(protobuf_target_name libprotobuf-lite)
+            set(protobuf_install_name "protobuf_lite_installed")
         else()
-            if(NOT protobuf_installed)
-                ov_install_static_lib(${Protobuf_LIBRARIES} ${OV_CPACK_COMP_CORE})
-                set(protobuf_installed ON CACHE INTERNAL "" FORCE)
-            endif()
-            link_system_libraries(${TARGET_NAME} PRIVATE ${Protobuf_LIBRARIES})
+            set(protobuf_target_name libprotobuf)
+            set(protobuf_install_name "protobuf_installed")
         endif()
+        if(ENABLE_SYSTEM_PROTOBUF)
+            # use imported target name with namespace
+            set(protobuf_target_name "protobuf::${protobuf_target_name}")
+        endif()

-        # prptobuf generated code emits -Wsuggest-override error
+        link_system_libraries(${TARGET_NAME} PRIVATE ${protobuf_target_name})
+
+        # protobuf generated code emits -Wsuggest-override error
         if(SUGGEST_OVERRIDE_SUPPORTED)
             target_compile_options(${TARGET_NAME} PRIVATE -Wno-suggest-override)
         endif()
+
+        # install protobuf if it is not installed yet
+        if(NOT ${protobuf_install_name})
+            if(ENABLE_SYSTEM_PROTOBUF)
+                # we have to add find_package(Protobuf) to the OpenVINOConfig.cmake for static build
+                # no needs to install protobuf
+            else()
+                ov_install_static_lib(${protobuf_target_name} ${OV_CPACK_COMP_CORE})
+                set("${protobuf_install_name}" ON CACHE INTERNAL "" FORCE)
+            endif()
+        endif()
     endif()

     if(flatbuffers_schema_files)
```
@@ -2,41 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
#

include(target_flags)

# TODO: remove this function: we must not have conditions for particular OS names or versions

# cmake needs to look at /etc files only when we build for Linux on Linux
if(CMAKE_HOST_LINUX AND LINUX)
    function(get_linux_name res_var)
        if(EXISTS "/etc/lsb-release")
            # linux version detection using cat /etc/lsb-release
            file(READ "/etc/lsb-release" release_data)
            set(name_regex "DISTRIB_ID=([^ \n]*)\n")
            set(version_regex "DISTRIB_RELEASE=([0-9]+(\\.[0-9]+)?)")
        else()
            execute_process(COMMAND find -L /etc/ -maxdepth 1 -type f -name *-release -exec cat {} \;
                            OUTPUT_VARIABLE release_data
                            RESULT_VARIABLE result)
            string(REPLACE "Red Hat" "CentOS" release_data "${release_data}")
            set(name_regex "NAME=\"([^ \"\n]*).*\"\n")
            set(version_regex "VERSION=\"([0-9]+(\\.[0-9]+)?)[^\n]*\"")
        endif()

        string(REGEX MATCH ${name_regex} name ${release_data})
        set(os_name ${CMAKE_MATCH_1})

        string(REGEX MATCH ${version_regex} version ${release_data})
        set(os_name "${os_name} ${CMAKE_MATCH_1}")

        if(os_name)
            set(${res_var} ${os_name} PARENT_SCOPE)
        else ()
            set(${res_var} NOTFOUND PARENT_SCOPE)
        endif ()
    endfunction()
else()
    function(get_linux_name res_var)
        set(${res_var} NOTFOUND PARENT_SCOPE)
    endfunction()
endif ()
function(get_linux_name res_var)
    set(${res_var} NOTFOUND PARENT_SCOPE)
endfunction()

@@ -99,6 +99,10 @@ function(ov_native_compile_external_project)
        list(APPEND ARG_CMAKE_ARGS "-DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER}")
    endif()

    if(DEFINED CMAKE_MAKE_PROGRAM)
        list(APPEND ARG_CMAKE_ARGS "-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}")
    endif()

    ExternalProject_Add(${ARG_TARGET_NAME}
        # Directory Options
        SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}"

@@ -25,7 +25,7 @@ macro(ov_common_libraries_cpack_set_dirs)
    set(OV_CPACK_IE_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/inferenceengine${OpenVINO_VERSION})
    set(OV_CPACK_NGRAPH_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/ngraph${OpenVINO_VERSION})
    set(OV_CPACK_OPENVINO_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/openvino${OpenVINO_VERSION})
    set(OV_CPACK_DOCDIR ${CMAKE_INSTALL_DATADIR}/doc/openvino-${OpenVINO_VERSION})
    set(OV_CPACK_LICENSESDIR licenses)

    ov_get_pyversion(pyversion)
    if(pyversion)

@@ -31,6 +31,7 @@ macro(ov_debian_cpack_set_dirs)
    set(OV_CPACK_NGRAPH_CMAKEDIR ${OV_CPACK_RUNTIMEDIR}/cmake/ngraph${OpenVINO_VERSION})
    set(OV_CPACK_OPENVINO_CMAKEDIR ${OV_CPACK_RUNTIMEDIR}/cmake/openvino${OpenVINO_VERSION})
    set(OV_CPACK_DOCDIR ${CMAKE_INSTALL_DATADIR}/doc/openvino-${OpenVINO_VERSION})
    set(OV_CPACK_LICENSESDIR ${OV_CPACK_DOCDIR}/licenses)
    set(OV_CPACK_PYTHONDIR lib/python3/dist-packages)

    # non-native stuff

@@ -29,6 +29,7 @@ macro(ov_cpack_set_dirs)
    set(OV_CPACK_NGRAPH_CMAKEDIR runtime/cmake)
    set(OV_CPACK_OPENVINO_CMAKEDIR runtime/cmake)
    set(OV_CPACK_DOCDIR docs)
    set(OV_CPACK_LICENSESDIR ${OV_CPACK_DOCDIR}/licenses)
    set(OV_CPACK_SAMPLESDIR samples)
    set(OV_CPACK_WHEELSDIR tools)
    set(OV_CPACK_TOOLSDIR tools)
@@ -99,10 +100,10 @@ endif()
# if <FILE> is a symlink, we resolve it, but install file with a name of symlink
#
function(ov_install_with_name file component)
    if((APPLE AND file MATCHES "^[^\.]+\.[0-9]+${CMAKE_SHARED_LIBRARY_SUFFIX}$") OR
       (file MATCHES "^.*\.${CMAKE_SHARED_LIBRARY_SUFFIX}\.[0-9]+$"))
    get_filename_component(actual_name "${file}" NAME)
    if((APPLE AND actual_name MATCHES "^[^\.]+\.[0-9]+${CMAKE_SHARED_LIBRARY_SUFFIX}$") OR
       (actual_name MATCHES "^.*\.${CMAKE_SHARED_LIBRARY_SUFFIX}\.[0-9]+$"))
        if(IS_SYMLINK "${file}")
            get_filename_component(actual_name "${file}" NAME)
            get_filename_component(file "${file}" REALPATH)
            set(install_rename RENAME "${actual_name}")
        endif()
@@ -162,7 +163,7 @@ elseif(CPACK_GENERATOR STREQUAL "RPM")
    include(packaging/rpm/rpm)
elseif(CPACK_GENERATOR STREQUAL "NSIS")
    include(packaging/nsis)
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW)$")
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN)$")
    include(packaging/common-libraries)
endif()


@@ -22,6 +22,11 @@ macro(ov_rpm_cpack_set_dirs)
    set(OV_CPACK_NGRAPH_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/ngraph${OpenVINO_VERSION})
    set(OV_CPACK_OPENVINO_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/openvino${OpenVINO_VERSION})
    set(OV_CPACK_DOCDIR ${CMAKE_INSTALL_DATADIR}/doc/openvino-${OpenVINO_VERSION})
    set(OV_CPACK_LICENSESDIR ${OV_CPACK_DOCDIR}/licenses)

    # TODO:
    # 1. define python installation directories for RPM packages
    # 2. make sure only a single version of python API can be installed at the same time (define conflicts section)
    # set(OV_CPACK_PYTHONDIR lib/python3/dist-packages)

    ov_get_pyversion(pyversion)

@@ -17,20 +17,44 @@ if(WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
endif()

if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
    set(arch_flag X86_64)
    set(host_arch_flag X86_64)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*")
    set(arch_flag X86)
    set(host_arch_flag X86)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^(arm64.*|aarch64.*|AARCH64.*|ARM64.*)")
    set(arch_flag AARCH64)
    set(host_arch_flag AARCH64)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
    set(arch_flag ARM)
    set(host_arch_flag ARM)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^riscv64$")
    set(arch_flag RISCV64)
    set(host_arch_flag RISCV64)
endif()

set(HOST_${arch_flag} ON)
set(HOST_${host_arch_flag} ON)

macro(_ie_process_msvc_generator_platform arch_flag)
macro(_ov_detect_arch_by_processor_type)
    if(CMAKE_OSX_ARCHITECTURES AND APPLE)
        if(CMAKE_OSX_ARCHITECTURES STREQUAL "arm64")
            set(AARCH64 ON)
        elseif(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64")
            set(X86_64 ON)
        elseif(CMAKE_OSX_ARCHITECTURES MATCHES ".*x86_64.*" AND CMAKE_OSX_ARCHITECTURES MATCHES ".*arm64.*")
            set(UNIVERSAL2 ON)
        else()
            message(FATAL_ERROR "Unsupported value: CMAKE_OSX_ARCHITECTURES = ${CMAKE_OSX_ARCHITECTURES}")
        endif()
    elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
        set(X86_64 ON)
    elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*|wasm")
        set(X86 ON)
    elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64.*|aarch64.*|AARCH64.*|ARM64.*|armv8)")
        set(AARCH64 ON)
    elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
        set(ARM ON)
    elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^riscv64$")
        set(RISCV64 ON)
    endif()
endmacro()

macro(_ov_process_msvc_generator_platform)
    # if cmake -A <ARM|ARM64|x64|Win32> is passed
    if(CMAKE_GENERATOR_PLATFORM STREQUAL "ARM64")
        set(AARCH64 ON)
@@ -41,45 +65,30 @@ macro(_ie_process_msvc_generator_platform arch_flag)
    elseif(CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")
        set(X86 ON)
    else()
        set(${arch_flag} ON)
        _ov_detect_arch_by_processor_type()
    endif()
endmacro()

# TODO: why OpenCV is found by cmake
if(MSVC64 OR MINGW64)
    _ie_process_msvc_generator_platform(${arch_flag})
    _ov_process_msvc_generator_platform()
elseif(MINGW OR (MSVC AND NOT CMAKE_CROSSCOMPILING))
    _ie_process_msvc_generator_platform(${arch_flag})
elseif(CMAKE_OSX_ARCHITECTURES AND APPLE)
    if(CMAKE_OSX_ARCHITECTURES STREQUAL "arm64")
        set(AARCH64 ON)
    elseif(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64")
        set(X86_64 ON)
    elseif(CMAKE_OSX_ARCHITECTURES MATCHES ".*x86_64.*" AND CMAKE_OSX_ARCHITECTURES MATCHES ".*arm64.*")
        set(UNIVERSAL2 ON)
    else()
        message(FATAL_ERROR "Unsupported value: CMAKE_OSX_ARCHITECTURES = ${CMAKE_OSX_ARCHITECTURES}")
    endif()
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
    set(X86_64 ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*|amd64.*|AMD64.*")
    set(X86 ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64.*|aarch64.*|AARCH64.*|ARM64.*)")
    set(AARCH64 ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)")
    set(ARM ON)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^riscv64$")
    set(RISCV64 ON)
    _ov_process_msvc_generator_platform()
else()
    _ov_detect_arch_by_processor_type()
endif()

if(CMAKE_SYSTEM_NAME STREQUAL "Emscripten")
    set(EMSCRIPTEN ON)
endif()

if(UNIX AND NOT (APPLE OR ANDROID OR EMSCRIPTEN))
if(UNIX AND NOT (APPLE OR ANDROID OR EMSCRIPTEN OR CYGWIN))
    set(LINUX ON)
endif()

if(NOT DEFINED CMAKE_HOST_LINUX AND CMAKE_HOST_SYSTEM_NAME STREQUAL "Linux")
if(CMAKE_VERSION VERSION_LESS 3.25 AND CMAKE_HOST_SYSTEM_NAME STREQUAL "Linux")
    # the variable is available since 3.25
    # https://cmake.org/cmake/help/latest/variable/CMAKE_HOST_LINUX.html
    set(CMAKE_HOST_LINUX ON)
endif()


@@ -40,6 +40,7 @@ function(ieTargetLinkWholeArchive targetName)
                "-Wl,-noall_load"
            )
        else()
            # non-Apple Clang and GCC / MinGW
            list(APPEND libs
                "-Wl,--whole-archive"
                ${staticLib}

@@ -22,7 +22,7 @@ else()
    set(ENABLE_INTEL_GPU_DEFAULT OFF)
endif()

ie_dependent_option (ENABLE_INTEL_GPU "GPU OpenCL-based plugin for OpenVINO Runtime" ${ENABLE_INTEL_GPU_DEFAULT} "X86_64 OR AARCH64;NOT APPLE;NOT MINGW;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF)
ie_dependent_option (ENABLE_INTEL_GPU "GPU OpenCL-based plugin for OpenVINO Runtime" ${ENABLE_INTEL_GPU_DEFAULT} "X86_64 OR AARCH64;NOT APPLE;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF)

if (ANDROID OR (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0))
    # oneDNN doesn't support old compilers and android builds for now, so we'll
@@ -34,6 +34,10 @@ endif()

ie_dependent_option (ENABLE_ONEDNN_FOR_GPU "Enable oneDNN with GPU support" ${ENABLE_ONEDNN_FOR_GPU_DEFAULT} "ENABLE_INTEL_GPU" OFF)

ie_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF)
ie_dependent_option (ENABLE_GPU_DEBUG_CAPS "enable GPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_CPU" OFF)
ie_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS;ENABLE_INTEL_GPU" OFF)

ie_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library though INTEL_VTUNE_DIR variable." OFF)

ie_option_enum(ENABLE_PROFILING_FILTER "Enable or disable ITT counter groups.\
@@ -81,41 +85,45 @@ ie_dependent_option (ENABLE_TBBBIND_2_5 "Enable TBBBind_2_5 static usage in Open
ie_dependent_option (ENABLE_INTEL_GNA "GNA support for OpenVINO Runtime" ON
        "NOT APPLE;NOT ANDROID;X86_64;CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 5.4" OFF)

ie_option (ENABLE_INTEL_GNA_DEBUG "GNA debug build" OFF)

ie_dependent_option (ENABLE_INTEL_GNA_DEBUG "GNA debug build" OFF "ENABLE_INTEL_GNA" OFF)
ie_dependent_option (ENABLE_V7_SERIALIZE "enables serialization to IR v7" OFF "ENABLE_INTEL_GNA" OFF)
ie_dependent_option (ENABLE_IR_V7_READER "Enables IR v7 reader" ${BUILD_SHARED_LIBS} "ENABLE_TESTS;ENABLE_INTEL_GNA" OFF)

ie_option (ENABLE_GAPI_PREPROCESSING "Enables G-API preprocessing" ON)
ie_dependent_option (ENABLE_GAPI_PREPROCESSING "Enables G-API preprocessing" ON "NOT MINGW64" OFF)

ie_option (ENABLE_MULTI "Enables MULTI Device Plugin" ON)
ie_option (ENABLE_AUTO "Enables AUTO Device Plugin" ON)

ie_option (ENABLE_AUTO_BATCH "Enables Auto-Batching Plugin" ON)

ie_option (ENABLE_HETERO "Enables Hetero Device Plugin" ON)

ie_option (ENABLE_TEMPLATE "Enable template plugin" ON)

ie_dependent_option (ENABLE_PLUGINS_XML "Generate plugins.xml configuration file or not" OFF "NOT BUILD_SHARED_LIBS" OFF)
ie_dependent_option (ENABLE_PLUGINS_XML "Generate plugins.xml configuration file or not" OFF "BUILD_SHARED_LIBS" OFF)

ie_dependent_option (GAPI_TEST_PERF "if GAPI unit tests should examine performance" OFF "ENABLE_TESTS;ENABLE_GAPI_PREPROCESSING" OFF)

ie_dependent_option (ENABLE_DATA "fetch models from testdata repo" ON "ENABLE_FUNCTIONAL_TESTS;NOT ANDROID" OFF)

ie_dependent_option (ENABLE_BEH_TESTS "tests oriented to check OpenVINO Runtime API correctness" ON "ENABLE_TESTS" OFF)

ie_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS" OFF)

ie_option (ENABLE_SAMPLES "console samples are part of OpenVINO Runtime package" ON)

ie_option (ENABLE_OPENCV "enables custom OpenCV download" OFF)

ie_option (ENABLE_V7_SERIALIZE "enables serialization to IR v7" OFF)

set(OPENVINO_EXTRA_MODULES "" CACHE STRING "Extra paths for extra modules to include into OpenVINO build")

ie_dependent_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the OpenVINO Runtime binaries" ON "THREADING MATCHES TBB;LINUX" OFF)

find_host_package(PythonInterp 3 QUIET)
ie_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ${PYTHONINTERP_FOUND})
ie_option(ENABLE_OV_PADDLE_FRONTEND "Enable PaddlePaddle FrontEnd" ON)
ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON)
ie_option(ENABLE_OV_PYTORCH_FRONTEND "Enable PyTorch FrontEnd" ON)
ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON)
ie_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON)
ie_option(ENABLE_OV_TF_LITE_FRONTEND "Enable TensorFlow Lite FrontEnd" ON)
ie_dependent_option(ENABLE_SNAPPY_COMPRESSION "Enables compression support for TF FE" ON
    "ENABLE_OV_TF_FRONTEND" ON)

if(CMAKE_HOST_LINUX AND LINUX)
    # Debian packages are enabled on Ubuntu systems
    # so, system TBB / pugixml / OpenCL can be tried for usage
@@ -131,40 +139,37 @@ else()
    set(ENABLE_SYSTEM_TBB_DEFAULT ${ENABLE_SYSTEM_LIBS_DEFAULT})
endif()

if(BUILD_SHARED_LIBS)
    set(ENABLE_SYSTEM_PUGIXML_DEFAULT ${ENABLE_SYSTEM_LIBS_DEFAULT})
else()
    # for static libraries case libpugixml.a must be compiled with -fPIC
    # but we still need an ability to compile with system PugiXML and BUILD_SHARED_LIBS
    # for Conan case where everything is compiled statically
    set(ENABLE_SYSTEM_PUGIXML_DEFAULT OFF)
endif()

# users wants to use his own TBB version, specific either via env vars or cmake options
if(DEFINED ENV{TBBROOT} OR DEFINED ENV{TBB_DIR} OR DEFINED TBB_DIR OR DEFINED TBBROOT)
    set(ENABLE_SYSTEM_TBB_DEFAULT OFF)
endif()

# for static libraries case libpugixml.a must be compiled with -fPIC
ie_dependent_option (ENABLE_SYSTEM_PUGIXML "use the system copy of pugixml" ${ENABLE_SYSTEM_LIBS_DEFAULT} "BUILD_SHARED_LIBS" OFF)

ie_dependent_option (ENABLE_SYSTEM_TBB "use the system version of TBB" ${ENABLE_SYSTEM_TBB_DEFAULT} "THREADING MATCHES TBB" OFF)

ie_dependent_option (ENABLE_SYSTEM_OPENCL "Use the system version of OpenCL" ${ENABLE_SYSTEM_LIBS_DEFAULT} "BUILD_SHARED_LIBS;ENABLE_INTEL_GPU" OFF)

ie_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF)

ie_dependent_option (ENABLE_GPU_DEBUG_CAPS "enable GPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS" OFF)

ie_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS" OFF)

find_host_package(PythonInterp 3 QUIET)
ie_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ${PYTHONINTERP_FOUND})
ie_option(ENABLE_OV_PADDLE_FRONTEND "Enable PaddlePaddle FrontEnd" ON)
ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON)
ie_option(ENABLE_OV_PYTORCH_FRONTEND "Enable PyTorch FrontEnd" ON)
ie_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON)
ie_option(ENABLE_OV_TF_LITE_FRONTEND "Enable TensorFlow Lite FrontEnd" ON)
ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON)

ie_dependent_option(ENABLE_SNAPPY_COMPRESSION "Enables compression support for TF FE" ON
    "ENABLE_OV_TF_FRONTEND" ON)
ie_dependent_option(ENABLE_SYSTEM_PROTOBUF "Enables use of system protobuf" OFF
    "ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_TF_FRONTEND;BUILD_SHARED_LIBS" OFF)
ie_dependent_option (ENABLE_SYSTEM_TBB "Enables use of system TBB" ${ENABLE_SYSTEM_TBB_DEFAULT}
    "THREADING MATCHES TBB" OFF)
# TODO: turn it off by default during the work on cross-os distribution, because pugixml is not
# available out of box on all systems (like RHEL, UBI)
ie_option (ENABLE_SYSTEM_PUGIXML "Enables use of system PugiXML" ${ENABLE_SYSTEM_PUGIXML_DEFAULT})
# the option is on by default, because we use only flatc compiler and don't use any libraries
ie_dependent_option(ENABLE_SYSTEM_FLATBUFFERS "Enables use of system flatbuffers" ON
    "ENABLE_OV_TF_LITE_FRONTEND" OFF)
ie_dependent_option(ENABLE_SYSTEM_SNAPPY "Enables use of system version of snappy" OFF "ENABLE_SNAPPY_COMPRESSION;BUILD_SHARED_LIBS" OFF)
ie_dependent_option (ENABLE_SYSTEM_OPENCL "Enables use of system OpenCL" ${ENABLE_SYSTEM_LIBS_DEFAULT}
    "ENABLE_INTEL_GPU" OFF)
# the option is turned off by default, because we compile our own static version of protobuf
# with LTO and -fPIC options, while system one does not have such flags
ie_dependent_option (ENABLE_SYSTEM_PROTOBUF "Enables use of system Protobuf" OFF
    "ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_TF_FRONTEND" OFF)
# the option is turned off by default, because we don't want to have a dependency on libsnappy.so
ie_dependent_option (ENABLE_SYSTEM_SNAPPY "Enables use of system version of Snappy" OFF
    "ENABLE_SNAPPY_COMPRESSION" OFF)

ie_option(ENABLE_OPENVINO_DEBUG "Enable output for OPENVINO_DEBUG statements" OFF)


@@ -10,8 +10,8 @@ macro(ov_cpack_settings)
    set(cpack_components_all ${CPACK_COMPONENTS_ALL})
    unset(CPACK_COMPONENTS_ALL)
    foreach(item IN LISTS cpack_components_all)
        # filter out some components, which are not needed to be wrapped to conda-forge | brew
        if(# python is not a part of conda | brew
        # filter out some components, which are not needed to be wrapped to conda-forge | brew | conan
        if(# python is not a part of conda | brew | conan
           NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO}_python.*" AND
           # python wheels are not needed to be wrapped by conda | brew packages
           NOT item STREQUAL OV_CPACK_COMP_PYTHON_WHEELS AND

@@ -93,7 +93,7 @@ macro(ov_cpack_settings)
        # - 2022.1.0 is the last public release with debian packages from Intel install team
        # - 2022.1.1, 2022.2 do not have debian packages enabled, distributed only as archives
        # - 2022.3 is the first release where Debian updated packages are introduced, others 2022.3.X are LTS
        2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
        2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0 2023.0.1
        )

#

@@ -6,7 +6,7 @@ if(CPACK_GENERATOR STREQUAL "DEB")
    include(cmake/packaging/debian.cmake)
elseif(CPACK_GENERATOR STREQUAL "RPM")
    include(cmake/packaging/rpm.cmake)
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW)$")
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN)$")
    include(cmake/packaging/common-libraries.cmake)
elseif(CPACK_GENERATOR STREQUAL "NSIS")
    include(cmake/packaging/nsis.cmake)

@@ -79,7 +79,7 @@ macro(ov_cpack_settings)
        # - 2022.1.0 is the last public release with rpm packages from Intel install team
        # - 2022.1.1, 2022.2 do not have rpm packages enabled, distributed only as archives
        # - 2022.3 is the first release where RPM updated packages are introduced, others 2022.3.X are LTS
        2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5
        2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0 2023.0.1
        )

find_host_program(rpmlint_PROGRAM NAMES rpmlint DOC "Path to rpmlint")

@@ -142,6 +142,14 @@ if(ENABLE_SYSTEM_PUGIXML)
    endif()
endif()

set(_IE_nlohmann_json_FOUND "@nlohmann_json_FOUND@")
if(_IE_nlohmann_json_FOUND)
    find_dependency(nlohmann_json)
    set_target_properties(nlohmann_json::nlohmann_json PROPERTIES IMPORTED_GLOBAL ON)
    add_library(IE::nlohmann_json ALIAS nlohmann_json::nlohmann_json)
endif()
unset(_IE_nlohmann_json_FOUND)

# inherit OpenCV from main IE project if enabled
if ("@OpenCV_FOUND@")
    load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR)

@@ -85,9 +85,9 @@
#
# `OpenVINO_VERSION_MAJOR`
#  Major version component
#
#
# `OpenVINO_VERSION_MINOR`
#  minor version component
#  Minor version component
#
# `OpenVINO_VERSION_PATCH`
#  Patch version component
@@ -138,7 +138,7 @@ endmacro()

macro(_ov_find_tbb)
    set(THREADING "@THREADING@")
    if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND NOT TBB_FOUND)
    if(THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
        set(enable_pkgconfig_tbb "@tbb_FOUND@")

        # try tbb.pc

@@ -153,10 +153,10 @@ macro(_ov_find_tbb)
        endif()

        pkg_search_module(tbb
                ${pkg_config_quiet_arg}
                ${pkg_config_required_arg}
                IMPORTED_TARGET
                tbb)
                          ${pkg_config_quiet_arg}
                          ${pkg_config_required_arg}
                          IMPORTED_TARGET
                          tbb)
        unset(pkg_config_quiet_arg)
        unset(pkg_config_required_arg)

@@ -223,28 +223,185 @@ macro(_ov_find_tbb)
                PATHS ${_tbb_bind_dir}
                NO_CMAKE_FIND_ROOT_PATH
                NO_DEFAULT_PATH)
            set_target_properties(${TBBBIND_2_5_IMPORTED_TARGETS} PROPERTIES IMPORTED_GLOBAL ON)
            unset(_tbb_bind_dir)
        endif()
        unset(install_tbbbind)
    endif()
endmacro()

macro(_ov_find_pugixml)
    set(_OV_ENABLE_SYSTEM_PUGIXML "@ENABLE_SYSTEM_PUGIXML@")
    if(_OV_ENABLE_SYSTEM_PUGIXML)
        set(_ov_pugixml_pkgconfig_interface "@pugixml_FOUND@")
        set(_ov_pugixml_cmake_interface "@PugiXML_FOUND@")

        if(_ov_pugixml_pkgconfig_interface AND NOT ANDROID)
            _ov_find_dependency(PkgConfig)
        elseif(_ov_pugixml_cmake_interface)
            _ov_find_dependency(PugiXML REQUIRED)
        endif()

        if(PugiXML_FOUND)
            if(TARGET pugixml)
                set(_ov_pugixml_target pugixml)
            elseif(TARGET pugixml::pugixml)
                set(_ov_pugixml_target pugixml::pugixml)
            endif()
            if(OpenVINODeveloperPackage_DIR)
                set_property(TARGET ${_ov_pugixml_target} PROPERTY IMPORTED_GLOBAL ON)
                # align with build tree
                add_library(openvino::pugixml ALIAS ${_ov_pugixml_target})
            endif()
            unset(_ov_pugixml_target)
        elseif(PkgConfig_FOUND)
            if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY)
                set(pkg_config_quiet_arg QUIET)
            endif()
            if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED)
                set(pkg_config_required_arg REQUIRED)
            endif()

            pkg_search_module(pugixml
                              ${pkg_config_quiet_arg}
                              ${pkg_config_required_arg}
                              IMPORTED_TARGET
                              GLOBAL
                              pugixml)

            unset(pkg_config_quiet_arg)
            unset(pkg_config_required_arg)

            if(pugixml_FOUND)
                if(OpenVINODeveloperPackage_DIR)
                    add_library(openvino::pugixml ALIAS PkgConfig::pugixml)
                endif()

                # PATCH: on Ubuntu 18.04 pugixml.pc contains incorrect include directories
                get_target_property(interface_include_dir PkgConfig::pugixml INTERFACE_INCLUDE_DIRECTORIES)
                if(interface_include_dir AND NOT EXISTS "${interface_include_dir}")
                    set_target_properties(PkgConfig::pugixml PROPERTIES
                                          INTERFACE_INCLUDE_DIRECTORIES "")
                endif()
            endif()
        endif()

        # debian 9 case: no cmake, no pkg-config files
        if(NOT TARGET openvino::pugixml)
            find_library(PUGIXML_LIBRARY NAMES pugixml DOC "Path to pugixml library")
            if(PUGIXML_LIBRARY)
                add_library(openvino::pugixml INTERFACE IMPORTED)
                set_target_properties(openvino::pugixml PROPERTIES INTERFACE_LINK_LIBRARIES "${PUGIXML_LIBRARY}")
            else()
                message(FATAL_ERROR "Failed to find system pugixml in OpenVINO Developer Package")
            endif()
        endif()
    endif()
endmacro()

macro(_ov_find_itt)
    set(_ENABLE_PROFILING_ITT "@ENABLE_PROFILING_ITT@")
    # whether 'ittapi' is found via find_package
    set(_ENABLE_SYSTEM_ITTAPI "@ittapi_FOUND@")
    if(_ENABLE_PROFILING_ITT AND _ENABLE_SYSTEM_ITTAPI)
        _ov_find_dependency(ittapi)
    endif()
    unset(_ENABLE_PROFILING_ITT)
    unset(_ENABLE_SYSTEM_ITTAPI)
endmacro()

macro(_ov_find_ade)
    set(_OV_ENABLE_GAPI_PREPROCESSING "@ENABLE_GAPI_PREPROCESSING@")
    # whether 'ade' is found via find_package
    set(_ENABLE_SYSTEM_ADE "@ade_FOUND@")
    if(_OV_ENABLE_GAPI_PREPROCESSING AND _ENABLE_SYSTEM_ADE)
        _ov_find_dependency(ade 0.1.2)
    endif()
    unset(_OV_ENABLE_GAPI_PREPROCESSING)
    unset(_ENABLE_SYSTEM_ADE)
endmacro()

macro(_ov_find_intel_cpu_dependencies)
    set(_OV_ENABLE_CPU_ACL "@DNNL_USE_ACL@")
    if(_OV_ENABLE_CPU_ACL)
        if(_ov_as_external_package)
            set_and_check(ARM_COMPUTE_LIB_DIR "@PACKAGE_ARM_COMPUTE_LIB_DIR@")
            set(_ov_find_acl_options NO_DEFAULT_PATH)
            set(_ov_find_acl_path "${CMAKE_CURRENT_LIST_DIR}")
        else()
            set_and_check(_ov_find_acl_path "@PACKAGE_FIND_ACL_PATH@")
        endif()

        _ov_find_dependency(ACL
                            NO_MODULE
                            PATHS "${_ov_find_acl_path}"
                            ${_ov_find_acl_options})

        unset(ARM_COMPUTE_LIB_DIR)
        unset(_ov_find_acl_path)
        unset(_ov_find_acl_options)
    endif()
    unset(_OV_ENABLE_CPU_ACL)
endmacro()

macro(_ov_find_intel_gpu_dependencies)
    set(_OV_ENABLE_INTEL_GPU "@ENABLE_INTEL_GPU@")
    set(_OV_ENABLE_SYSTEM_OPENCL "@ENABLE_SYSTEM_OPENCL@")
    if(_OV_ENABLE_INTEL_GPU AND _OV_ENABLE_SYSTEM_OPENCL)
        set(_OV_OpenCLICDLoader_FOUND "@OpenCLICDLoader_FOUND@")
        if(_OV_OpenCLICDLoader_FOUND)
            _ov_find_dependency(OpenCLICDLoader)
        else()
            _ov_find_dependency(OpenCL)
        endif()
        unset(_OV_OpenCLICDLoader_FOUND)
    endif()
    unset(_OV_ENABLE_INTEL_GPU)
    unset(_OV_ENABLE_SYSTEM_OPENCL)
endmacro()

macro(_ov_find_intel_gna_dependencies)
    set(_OV_ENABLE_INTEL_GNA "@ENABLE_INTEL_GNA@")
    if(_OV_ENABLE_INTEL_GNA AND NOT libGNA_FOUND)
    if(_OV_ENABLE_INTEL_GNA)
        set_and_check(GNA_PATH "@PACKAGE_GNA_PATH@")
        _ov_find_dependency(libGNA
                            COMPONENTS KERNEL
                            CONFIG
                            PATHS "${CMAKE_CURRENT_LIST_DIR}"
                            NO_CMAKE_FIND_ROOT_PATH
                            NO_DEFAULT_PATH)
        unset(GNA_PATH)
    endif()
    unset(_OV_ENABLE_INTEL_GNA)
endmacro()

macro(_ov_find_protobuf_frontend_dependency)
    set(_OV_ENABLE_SYSTEM_PROTOBUF "@ENABLE_SYSTEM_PROTOBUF@")
    # TODO: remove check for target existence
    if(_OV_ENABLE_SYSTEM_PROTOBUF AND NOT TARGET protobuf::libprotobuf)
        _ov_find_dependency(Protobuf @Protobuf_VERSION@ EXACT)
    endif()
    unset(_OV_ENABLE_SYSTEM_PROTOBUF)
endmacro()

macro(_ov_find_tensorflow_frontend_dependencies)
    set(_OV_ENABLE_SYSTEM_SNAPPY "@ENABLE_SYSTEM_SNAPPY@")
    set(_ov_snappy_lib "@ov_snappy_lib@")
    # TODO: remove check for target existence
    if(_OV_ENABLE_SYSTEM_SNAPPY AND NOT TARGET ${_ov_snappy_lib})
        _ov_find_dependency(Snappy @Snappy_VERSION@ EXACT)
    endif()
    unset(_OV_ENABLE_SYSTEM_SNAPPY)
    unset(_ov_snappy_lib)
    set(PACKAGE_PREFIX_DIR ${_ov_package_prefix_dir})
endmacro()

macro(_ov_find_onnx_frontend_dependencies)
    set(_OV_ENABLE_SYSTEM_ONNX "@ENABLE_SYSTEM_ONNX@")
    if(_OV_ENABLE_SYSTEM_ONNX)
        _ov_find_dependency(ONNX @ONNX_VERSION@ EXACT)
    endif()
    unset(_OV_ENABLE_SYSTEM_ONNX)
endmacro()

function(_ov_target_no_deprecation_error)
    if(NOT MSVC)
        if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
@@ -265,13 +422,41 @@ endfunction()
# OpenVINO config
#

cmake_policy(PUSH)
# we need CMP0057 to allow IN_LIST in if() command
if(POLICY CMP0057)
    cmake_policy(SET CMP0057 NEW)
else()
    message(FATAL_ERROR "OpenVINO requires CMake 3.3 or newer")
endif()

# need to store current PACKAGE_PREFIX_DIR, because it's overwritten by sub-package one
set(_ov_package_prefix_dir "${PACKAGE_PREFIX_DIR}")

set(_OV_ENABLE_OPENVINO_BUILD_SHARED "@BUILD_SHARED_LIBS@")

if(NOT TARGET openvino)
    set(_ov_as_external_package ON)
endif()

if(NOT _OV_ENABLE_OPENVINO_BUILD_SHARED)
    # common openvino dependencies
    _ov_find_tbb()

    _ov_find_itt()
    _ov_find_pugixml()

    # preprocessing dependencies
    _ov_find_ade()

    # frontend dependencies
    _ov_find_protobuf_frontend_dependency()
    _ov_find_tensorflow_frontend_dependencies()
    _ov_find_onnx_frontend_dependencies()

    # plugin dependencies
    _ov_find_intel_cpu_dependencies()
    _ov_find_intel_gpu_dependencies()
    _ov_find_intel_gna_dependencies()
endif()

@@ -279,13 +464,26 @@ _ov_find_dependency(Threads)

unset(_OV_ENABLE_OPENVINO_BUILD_SHARED)

if(NOT TARGET openvino)
    set(_ov_as_external_package ON)
set(_ov_imported_libs openvino::runtime openvino::runtime::c
    openvino::frontend::onnx openvino::frontend::paddle openvino::frontend::tensorflow
    openvino::frontend::pytorch openvino::frontend::tensorflow_lite)

if(_ov_as_external_package)
    include("${CMAKE_CURRENT_LIST_DIR}/OpenVINOTargets.cmake")

    foreach(target IN LISTS _ov_imported_libs)
        if(TARGET ${target})
            get_target_property(imported_configs ${target} IMPORTED_CONFIGURATIONS)
            if(NOT RELWITHDEBINFO IN_LIST imported_configs)
                set_property(TARGET ${target} PROPERTY MAP_IMPORTED_CONFIG_RELWITHDEBINFO RELEASE)
            endif()
            unset(imported_configs)
        endif()
    endforeach()

    # WA for cmake version < 3.16 which does not export
    # IMPORTED_LINK_DEPENDENT_LIBRARIES_** properties if no PUBLIC dependencies for the library
    if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND TBB_FOUND)
    if(THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO")
        foreach(type RELEASE DEBUG RELWITHDEBINFO MINSIZEREL)
            foreach(tbb_target TBB::tbb TBB::tbbmalloc PkgConfig::tbb)
                if(TARGET ${tbb_target})
@@ -326,12 +524,12 @@ endif()
# Apply common functions
#

foreach(target openvino::runtime openvino::runtime::c
        openvino::frontend::onnx openvino::frontend::paddle openvino::frontend::tensorflow)
foreach(target IN LISTS _ov_imported_libs)
    if(TARGET ${target} AND _ov_as_external_package)
        _ov_target_no_deprecation_error(${target})
    endif()
endforeach()
unset(_ov_imported_libs)
unset(_ov_as_external_package)

# restore PACKAGE_PREFIX_DIR
@@ -349,3 +547,7 @@ unset(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_TensorFlowLite_FOUND)
unset(${CMAKE_FIND_PACKAGE_NAME}_PyTorch_FOUND)

cmake_policy(POP)

@@ -56,6 +56,7 @@ find_dependency(OpenVINO
                NO_DEFAULT_PATH)

_ov_find_tbb()
_ov_find_pugixml()

foreach(component @openvino_export_components@)
    # TODO: remove legacy targets from some tests
@@ -65,58 +66,6 @@ foreach(component @openvino_export_components@)
    # endif()
endforeach()

if(ENABLE_SYSTEM_PUGIXML)
    set(_ov_pugixml_pkgconfig_interface "@pugixml_FOUND@")
    set(_ov_pugixml_cmake_interface "@PugiXML_FOUND@")
    if(_ov_pugixml_pkgconfig_interface)
        find_dependency(PkgConfig)
    elseif(_ov_pugixml_cmake_interface)
        find_dependency(PugiXML)
    endif()
    if(PugiXML_FOUND)
        set_property(TARGET pugixml PROPERTY IMPORTED_GLOBAL TRUE)
        add_library(openvino::pugixml ALIAS pugixml)
    elseif(PkgConfig_FOUND)
        if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY)
            set(pkg_config_quiet_arg QUIET)
        endif()
        if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED)
            set(pkg_config_required_arg REQUIRED)
        endif()

        pkg_search_module(pugixml
                          ${pkg_config_quiet_arg}
                          ${pkg_config_required_arg}
                          IMPORTED_TARGET GLOBAL
                          pugixml)

        unset(pkg_config_quiet_arg)
        unset(pkg_config_required_arg)

        if(pugixml_FOUND)
            add_library(openvino::pugixml ALIAS PkgConfig::pugixml)

            # PATCH: on Ubuntu 18.04 pugixml.pc contains incorrect include directories
            get_target_property(interface_include_dir PkgConfig::pugixml INTERFACE_INCLUDE_DIRECTORIES)
            if(interface_include_dir AND NOT EXISTS "${interface_include_dir}")
                set_target_properties(PkgConfig::pugixml PROPERTIES
                                      INTERFACE_INCLUDE_DIRECTORIES "")
            endif()
        endif()
    endif()

    # debian 9 case: no cmake, no pkg-config files
    if(NOT TARGET openvino::pugixml)
        find_library(PUGIXML_LIBRARY NAMES pugixml DOC "Path to pugixml library")
        if(PUGIXML_LIBRARY)
            add_library(openvino::pugixml INTERFACE IMPORTED GLOBAL)
            set_target_properties(openvino::pugixml PROPERTIES INTERFACE_LINK_LIBRARIES "${PUGIXML_LIBRARY}")
        else()
            message(FATAL_ERROR "Failed to find system pugixml in OpenVINO Developer Package")
        endif()
    endif()
endif()

# inherit OpenCV from main OpenVINO project if enabled
if ("@OpenCV_FOUND@")
    load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR)

95  cmake/toolchains/mingw-w64.toolchain.cmake  Normal file
@@ -0,0 +1,95 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

# Prerequisites:
#
# Build platform: Ubuntu
# apt-get install mingw-w64 mingw-w64-tools g++-mingw-w64-x86-64 gcc-mingw-w64-x86-64
#
# Build platform: macOS
# brew install mingw-w64
#

set(CMAKE_SYSTEM_NAME Windows)
set(CMAKE_SYSTEM_PROCESSOR x86_64)

set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc-posix)
set(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++-posix)
set(PKG_CONFIG_EXECUTABLE x86_64-w64-mingw32-pkg-config CACHE PATH "Path to Windows x86_64 pkg-config")

set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)

macro(__cmake_find_root_save_and_reset)
    foreach(v
            CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
            CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
            CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
            CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
            )
        set(__save_${v} ${${v}})
        set(${v} NEVER)
    endforeach()
endmacro()

macro(__cmake_find_root_restore)
    foreach(v
            CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
            CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
            CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
            CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
            )
        set(${v} ${__save_${v}})
        unset(__save_${v})
    endforeach()
endmacro()


# macro to find programs on the host OS
macro(find_host_program)
    __cmake_find_root_save_and_reset()
    if(CMAKE_HOST_WIN32)
        SET(WIN32 1)
        SET(UNIX)
        SET(APPLE)
    elseif(CMAKE_HOST_APPLE)
        SET(APPLE 1)
        SET(UNIX)
        SET(WIN32)
    elseif(CMAKE_HOST_UNIX)
        SET(UNIX 1)
        SET(WIN32)
        SET(APPLE)
    endif()
    find_program(${ARGN})
    SET(WIN32 1)
    SET(APPLE)
    SET(UNIX)
    __cmake_find_root_restore()
endmacro()

# macro to find packages on the host OS
macro(find_host_package)
    __cmake_find_root_save_and_reset()
    if(CMAKE_HOST_WIN32)
        SET(WIN32 1)
        SET(UNIX)
        SET(APPLE)
    elseif(CMAKE_HOST_APPLE)
        SET(APPLE 1)
        SET(WIN32)
        SET(UNIX)
    elseif(CMAKE_HOST_UNIX)
        SET(UNIX 1)
        SET(WIN32)
        SET(APPLE)
    endif()
    find_package(${ARGN})
    SET(WIN32 1)
    SET(APPLE)
    SET(UNIX)
    __cmake_find_root_restore()
endmacro()

@@ -24,7 +24,7 @@ set(CMAKE_LINKER ${RISCV_TOOLCHAIN_ROOT}/bin/riscv64-unknown-linux-gnu-ld)
set(CMAKE_OBJCOPY ${RISCV_TOOLCHAIN_ROOT}/bin/riscv64-unknown-linux-gnu-objcopy)
set(CMAKE_OBJDUMP ${RISCV_TOOLCHAIN_ROOT}/bin/riscv64-unknown-linux-gnu-objdump)
set(CMAKE_READELF ${RISCV_TOOLCHAIN_ROOT}/bin/riscv64-unknown-linux-gnu-readelf)
set(PKG_CONFIG_EXECUTABLE "NOT-FOUND" CACHE PATH "Path to ARM64 pkg-config")
set(PKG_CONFIG_EXECUTABLE "NOT-FOUND" CACHE PATH "Path to RISC-V pkg-config")

# Don't run the linker on compiler check
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)

75  cmake/toolchains/x86_64.linux.toolchain.cmake  Normal file
@@ -0,0 +1,75 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR amd64)

set(CMAKE_C_COMPILER x86_64-linux-gnu-gcc)
set(CMAKE_CXX_COMPILER x86_64-linux-gnu-g++)
set(CMAKE_STRIP x86_64-linux-gnu-strip)
set(PKG_CONFIG_EXECUTABLE "NOT-FOUND" CACHE PATH "Path to amd64 pkg-config")

set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)

macro(__cmake_find_root_save_and_reset)
    foreach(v
            CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
            CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
            CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
            CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
            )
        set(__save_${v} ${${v}})
        set(${v} NEVER)
    endforeach()
endmacro()

macro(__cmake_find_root_restore)
    foreach(v
            CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
            CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
            CMAKE_FIND_ROOT_PATH_MODE_PACKAGE
            CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
            )
        set(${v} ${__save_${v}})
        unset(__save_${v})
    endforeach()
endmacro()


# macro to find programs on the host OS
macro(find_host_program)
    __cmake_find_root_save_and_reset()
    if(CMAKE_HOST_WIN32)
        SET(WIN32 1)
        SET(UNIX)
    elseif(CMAKE_HOST_APPLE)
        SET(APPLE 1)
        SET(UNIX)
    endif()
    find_program(${ARGN})
    SET(WIN32)
    SET(APPLE)
    SET(UNIX 1)
    __cmake_find_root_restore()
endmacro()

# macro to find packages on the host OS
macro(find_host_package)
    __cmake_find_root_save_and_reset()
    if(CMAKE_HOST_WIN32)
        SET(WIN32 1)
        SET(UNIX)
    elseif(CMAKE_HOST_APPLE)
        SET(APPLE 1)
        SET(UNIX)
    endif()
    find_package(${ARGN})
    SET(WIN32)
    SET(APPLE)
    SET(UNIX 1)
    __cmake_find_root_restore()
endmacro()

33  conanfile.txt  Normal file
@@ -0,0 +1,33 @@
[requires]
ade/0.1.2a
onetbb/[>=2021.2.1]
pugixml/[>=1.10]
protobuf/3.21.9
ittapi/[>=3.23.0]
zlib/[>=1.2.8]
opencl-icd-loader/[>=2022.09.30]
# opencl-clhpp-headers/[>=2022.09.30]
opencl-headers/[>=2022.09.30]
xbyak/[>=6.62]
snappy/[>=1.1.7]
gflags/2.2.2
onnx/1.13.1
nlohmann_json/[>=3.1.1]
pybind11/[>=2.10.1]
flatbuffers/[>=22.9.24]

[tool_requires]
cmake/[>=3.15]
patchelf/[>=0.12]
protobuf/3.21.9
flatbuffers/[>=22.9.24]

[options]
protobuf/*:lite=True
onetbb/*:tbbmalloc=True
onetbb/*:tbbproxy=True
flatbuffers/*:header_only=True

[generators]
CMakeDeps
CMakeToolchain
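
Usage sketch (an editor's note, not part of the change itself): with this conanfile.txt at the repository root, a flow along the lines of ``conan install . --build=missing`` followed by a CMake configure against the generated CMakeDeps/CMakeToolchain files would resolve these requirements; the exact invocation depends on the Conan version in use.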

@@ -77,7 +77,7 @@ function(build_docs)
    if(ENABLE_OPENVINO_NOTEBOOKS)
        set(NBDOC_SCRIPT "${DOCS_SOURCE_DIR}/nbdoc/nbdoc.py")
        list(APPEND commands
            COMMAND ${PYTHON_EXECUTABLE} "${NBDOC_SCRIPT}" "${RST_OUTPUT}/notebooks"
            COMMAND ${PYTHON_EXECUTABLE} "${NBDOC_SCRIPT}" "${DOCS_SOURCE_DIR}/notebooks" "${RST_OUTPUT}/notebooks"
        )
    endif()

76  docs/Documentation/datumaro.md  Normal file
@@ -0,0 +1,76 @@
# Datumaro {#datumaro_documentation}

@sphinxdirective

.. meta::
   :description: Start working with Datumaro, which offers functionalities for basic data
                 import/export, validation, correction, filtration and transformations.


Datumaro provides a suite of basic data import/export (IE) capabilities for more than 35 public vision data
formats, together with manipulation functionalities such as validation, correction, filtration, and some
transformations. To achieve web-scale training, it further aims to merge multiple
heterogeneous datasets through comparators and mergers. Datumaro is integrated into Geti™, OpenVINO™
Training Extensions, and CVAT to ease data preparation. Datumaro is open-sourced and
available on `GitHub <https://github.com/openvinotoolkit/datumaro>`__.
Refer to the official `documentation <https://openvinotoolkit.github.io/datumaro/stable/docs/get-started/introduction.html>`__ to learn more,
and see the `Jupyter notebooks <https://github.com/openvinotoolkit/datumaro/tree/develop/notebooks>`__ for hands-on Datumaro practice.

Detailed Workflow
#################

.. image:: ./_static/images/datumaro.png

1. To start working with Datumaro, download public datasets or prepare your own annotated dataset.

   .. note::
      Datumaro provides the `datum download` CLI command for downloading `TensorFlow Datasets <https://www.tensorflow.org/datasets>`__.

2. Import data into Datumaro and improve the dataset quality using `Validator`, `Corrector`, and `Filter`.

3. Compare two datasets and transform the label schemas (category information) before merging them.

4. Merge two datasets into a large-scale dataset.

   .. note::
      There are several choices of merger, i.e., `ExactMerger`, `IntersectMerger`, and `UnionMerger`.

5. Split the unified dataset into subsets, e.g., `train`, `valid`, and `test`, through `Splitter`.

   .. note::
      Data can be split with a given ratio of subsets according to either the number of samples or
      the number of annotations. See `SplitTask` for the task-specific split.

6. Export the cleaned and unified dataset for follow-up workflows such as model training (steps 2 and 6 are sketched in Python after this list).
   Go to :doc:`OpenVINO™ Training Extensions <ote_documentation>`.

If the results are unsatisfactory, add datasets and perform the same steps, starting with dataset annotation.
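
The workflow above maps onto a few Python calls. The sketch below is an illustration only, not part of this page: it assumes the ``datumaro`` package is installed, ``./VOC2012`` is a placeholder path, and the format names and ``filter``/``export`` calls follow the Datumaro documentation linked above.

.. code-block:: py

   import datumaro as dm

   # Step 2: import an annotated dataset (format name per the Datumaro docs)
   dataset = dm.Dataset.import_from("./VOC2012", "voc")

   # Step 2: data quality - keep only items that have at least one annotation
   dataset = dataset.filter("/item[annotation]")

   # Step 6: export the cleaned dataset for follow-up training workflows
   dataset.export("./voc_as_coco", "coco")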

Datumaro Components
###################

* `Datumaro CLIs <https://openvinotoolkit.github.io/datumaro/stable/docs/command-reference/overview.html>`__
* `Datumaro APIs <https://openvinotoolkit.github.io/datumaro/stable/docs/reference/datumaro_module.html>`__
* `Datumaro data format <https://openvinotoolkit.github.io/datumaro/stable/docs/data-formats/datumaro_format.html>`__
* `Supported data formats <https://openvinotoolkit.github.io/datumaro/stable/docs/data-formats/formats/index.html>`__

Tutorials
#########

* `Basic skills <https://openvinotoolkit.github.io/datumaro/stable/docs/level-up/basic_skills/index.html>`__
* `Intermediate skills <https://openvinotoolkit.github.io/datumaro/stable/docs/level-up/intermediate_skills/index.html>`__
* `Advanced skills <https://openvinotoolkit.github.io/datumaro/stable/docs/level-up/advanced_skills/index.html>`__

Python Hands-on Examples
########################

* `Data IE <https://openvinotoolkit.github.io/datumaro/stable/docs/jupyter_notebook_examples/dataset_IO.html>`__
* `Data manipulation <https://openvinotoolkit.github.io/datumaro/stable/docs/jupyter_notebook_examples/manipulate.html>`__
* `Data exploration <https://openvinotoolkit.github.io/datumaro/stable/docs/jupyter_notebook_examples/explore.html>`__
* `Data refinement <https://openvinotoolkit.github.io/datumaro/stable/docs/jupyter_notebook_examples/refine.html>`__
* `Data transformation <https://openvinotoolkit.github.io/datumaro/stable/docs/jupyter_notebook_examples/transform.html>`__
* `Deep learning end-to-end use-cases <https://openvinotoolkit.github.io/datumaro/stable/docs/jupyter_notebook_examples/e2e_example.html>`__


@endsphinxdirective

@@ -1,33 +0,0 @@
# Running and Deploying Inference {#openvino_docs_deployment_guide_introduction}

@sphinxdirective

.. toctree::
   :maxdepth: 1
   :hidden:

   Run and Deploy Locally <openvino_deployment_guide>
   Deploy via Model Serving <ovms_what_is_openvino_model_server>


Once you have a model that meets both OpenVINO™ and your requirements, you can choose how to deploy it with your application.

.. panels::

   :doc:`Deploy via OpenVINO Runtime <openvino_deployment_guide>`
   ^^^^^^^^^^^^^^

   Local deployment uses OpenVINO Runtime that is called from, and linked to, the application directly.
   It utilizes resources available to the system and provides the quickest way of launching inference.
   ---

   :doc:`Deploy via Model Server <ovms_what_is_openvino_model_server>`
   ^^^^^^^^^^^^^^

   Deployment via OpenVINO Model Server allows the application to connect to the inference server set up remotely.
   This way inference can use external resources instead of those available to the application itself.


Apart from the default deployment options, you may also :doc:`deploy your application for the TensorFlow framework with OpenVINO Integration <ovtf_integration>`

@endsphinxdirective
@@ -17,7 +17,7 @@ OpenVINO Runtime offers multiple inference modes to allow optimum hardware utili
The remaining modes assume certain levels of automation in selecting devices for inference. Using them in the deployed solution may potentially increase its performance and portability. The automated modes are:

* :doc:`Automatic Device Selection (AUTO) <openvino_docs_OV_UG_supported_plugins_AUTO>`
* :doc:``Multi-Device Execution (MULTI) <openvino_docs_OV_UG_Running_on_multiple_devices>`
* :doc:`Multi-Device Execution (MULTI) <openvino_docs_OV_UG_Running_on_multiple_devices>`
* :doc:`Heterogeneous Execution (HETERO) <openvino_docs_OV_UG_Hetero_execution>`
* :doc:`Automatic Batching Execution (Auto-batching) <openvino_docs_OV_UG_Automatic_Batching>`
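
As an illustration (an editor's sketch, not part of the original page), switching between these modes from Python is only a matter of the device string passed to ``compile_model``; ``model.xml`` below is a placeholder IR file:

.. code-block:: py

   from openvino.runtime import Core

   core = Core()
   model = core.read_model("model.xml")  # placeholder IR file
   # "AUTO" selects a device automatically; alternatives include
   # "MULTI:CPU,GPU", "HETERO:GPU,CPU" and "BATCH:GPU"
   compiled_model = core.compile_model(model, "AUTO")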

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Explore OpenCV Graph API and other media processing frameworks
                 used for development of computer vision solutions.

.. toctree::
   :maxdepth: 1

@@ -1,6 +1,11 @@
|
||||
# Model Preparation {#openvino_docs_model_processing_introduction}
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Preparing models for OpenVINO Runtime. Learn how to convert and compile models from different frameworks or read them directly.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
@@ -10,22 +15,48 @@
|
||||
omz_tools_downloader
|
||||
|
||||
|
||||
Every deep learning workflow begins with obtaining a model. You can choose to prepare a custom one, use a ready-made solution and adjust it to your needs, or even download and run a pre-trained network from an online database, such as OpenVINO's :doc:`Open Model Zoo <model_zoo>`.
|
||||
Every deep learning workflow begins with obtaining a model. You can choose to prepare a custom one, use a ready-made solution and adjust it to your needs, or even download and run a pre-trained network from an online database, such as `TensorFlow Hub <https://tfhub.dev/>`__, `Hugging Face <https://huggingface.co/>`__, `Torchvision models <https://pytorch.org/hub/>`__.
|
||||
|
||||
:doc:`OpenVINO™ supports several model formats <Supported_Model_Formats>` and allows to convert them to it's own, OpenVINO IR, providing a tool dedicated to this task.
|
||||
:doc:`OpenVINO™ supports several model formats <Supported_Model_Formats>` and allows converting them to it's own, `openvino.runtime.Model <api/ie_python_api/_autosummary/openvino.runtime.Model.html>`__ (`ov.Model <api/ie_python_api/_autosummary/openvino.runtime.Model.html>`__ ), providing a tool dedicated to this task.
|
||||
|
||||
:doc:`Model Optimizer <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>` reads the original model and creates the OpenVINO IR model (.xml and .bin files) so that inference can ultimately be performed without delays due to format conversion. Optionally, Model Optimizer can adjust the model to be more suitable for inference, for example, by :doc:`alternating input shapes <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>`, :doc:`embedding preprocessing <openvino_docs_MO_DG_Additional_Optimization_Use_Cases>` and :doc:`cutting training parts off <openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model>`.
|
||||
There are several options to convert a model from original framework to OpenVINO model format (``ov.Model``).
|
||||
|
||||
The approach to fully convert a model is considered the default choice, as it allows the full extent of OpenVINO features. The OpenVINO IR model format is used by other conversion and preparation tools, such as the Post-Training Optimization Tool, for further optimization of the converted model.
|
||||
The ``read_model()`` method reads a model from a file and produces ``ov.Model``. If the file is in one of the supported original framework file formats, it is converted automatically to OpenVINO Intermediate Representation. If the file is already in the OpenVINO IR format, it is read "as-is", without any conversion involved. ``ov.Model`` can be serialized to IR using the ``ov.serialize()`` method. The serialized IR can be further optimized using :doc:`Neural Network Compression Framework (NNCF) <ptq_introduction>` that applies post-training quantization methods.
|
||||
|
||||
Conversion is not required for ONNX, PaddlePaddle, TensorFlow Lite and TensorFlow models (check :doc:`TensorFlow Frontend Capabilities and Limitations <openvino_docs_MO_DG_TensorFlow_Frontend>`), as OpenVINO provides C++ and Python APIs for importing them to OpenVINO Runtime directly. It provides a convenient way to quickly switch from framework-based code to OpenVINO-based code in your inference application.
|
||||
Convert a model in Python
|
||||
######################################
|
||||
|
||||
Model conversion API, specifically, the ``mo.convert_model()`` method converts a model from original framework to ``ov.Model``. ``mo.convert_model()`` returns ``ov.Model`` object in memory so the ``read_model()`` method is not required. The resulting ``ov.Model`` can be inferred in the same training environment (python script or Jupiter Notebook). ``mo.convert_model()`` provides a convenient way to quickly switch from framework-based code to OpenVINO-based code in your inference application. In addition to model files, ``mo.convert_model()`` can take OpenVINO extension objects constructed directly in Python for easier conversion of operations that are not supported in OpenVINO. The ``mo.convert_model()`` method also has a set of parameters to :doc:`cut the model <openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model>`, :doc:`set input shapes or layout <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>`, :doc:`add preprocessing <openvino_docs_MO_DG_Additional_Optimization_Use_Cases>`, etc.

.. image:: _static/images/model_conversion_diagram.svg
   :alt: model conversion diagram


Convert a model with ``mo`` command-line tool
#############################################

Another option to convert a model is to use the ``mo`` command-line tool. ``mo`` is a cross-platform tool that facilitates the transition between training and deployment environments, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices, in the same measure as the ``mo.convert_model`` method.

``mo`` requires a pre-trained deep learning model in one of the supported formats: TensorFlow, TensorFlow Lite, PaddlePaddle, or ONNX. ``mo`` converts the model to the OpenVINO Intermediate Representation (IR) format, which needs to be read with the ``ov.read_model()`` method. You can then compile and infer the ``ov.Model`` with :doc:`OpenVINO™ Runtime <openvino_docs_OV_UG_OV_Runtime_User_Guide>`.
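
A minimal sketch of that two-step flow (the ``mo`` invocation and file names are illustrative):

.. code-block:: python

   # Step 1 (shell): convert the model to IR beforehand, for example:
   #   mo --input_model model.onnx --output_dir ir/
   # Step 2 (Python): read and compile the produced IR.
   import openvino.runtime as ov

   core = ov.Core()
   model = core.read_model("ir/model.xml")
   compiled_model = core.compile_model(model, "CPU")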

The figure below illustrates the typical workflow for deploying a trained deep learning model:

.. image:: _static/images/BASIC_FLOW_MO_simplified.svg

where IR is a pair of files describing the model:

* ``.xml`` - Describes the network topology.
* ``.bin`` - Contains the weights and biases binary data.


Model files (not Python objects) from ONNX, PaddlePaddle, TensorFlow and TensorFlow Lite (check :doc:`TensorFlow Frontend Capabilities and Limitations <openvino_docs_MO_DG_TensorFlow_Frontend>`) do not require a separate model conversion step, that is, ``mo.convert_model``. OpenVINO provides C++ and Python APIs for importing the models to OpenVINO Runtime directly, by just calling the ``read_model`` method.

The results of both the ``mo`` and ``mo.convert_model()`` conversion methods described above are the same. You can choose either of them, depending on what is most convenient for you. There should be no difference in the results of model conversion if the same set of parameters is used.

This section describes how to obtain and prepare your model for work with OpenVINO to get the best inference results:

* :doc:`See the supported formats and how to use them in your project <Supported_Model_Formats>`.
* :doc:`Convert different model formats to the OpenVINO IR format <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>`.
* `Automate model-related tasks with Model Downloader and additional OMZ Tools <https://docs.openvino.ai/latest/omz_tools_downloader.html>`__.
* :doc:`Convert different model formats to the ov.Model format <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>`.

To begin with, you may want to :doc:`browse a database of models for use in your projects <model_zoo>`.

@endsphinxdirective

@@ -2,21 +2,25 @@

@sphinxdirective

.. meta::
   :description: OpenVINO™ is an ecosystem of utilities that have advanced capabilities, which help develop deep learning solutions.


.. toctree::
   :maxdepth: 1
   :hidden:

   ote_documentation
   ovtf_integration
   datumaro_documentation
   ovsa_get_started
   openvino_inference_engine_tools_compile_tool_README
   openvino_docs_tuning_utilities


OpenVINO™ is not just one tool. It is an expansive ecosystem of utilities, providing a comprehensive workflow for deep learning solution development. Learn more about each of them to reach the full potential of OpenVINO™ Toolkit.

Neural Network Compression Framework (NNCF)
###########################################

**Neural Network Compression Framework (NNCF)**

A suite of advanced algorithms for Neural Network inference optimization with minimal accuracy drop. NNCF applies quantization, filter pruning, binarization and sparsity algorithms to PyTorch and TensorFlow models during training.

@@ -27,8 +31,7 @@ More resources:
* `PyPI <https://pypi.org/project/nncf/>`__


OpenVINO™ Training Extensions
#############################

**OpenVINO™ Training Extensions**

A convenient environment to train Deep Learning models and convert them using the OpenVINO™ toolkit for optimized inference.

@@ -38,71 +41,60 @@ More resources:
* `GitHub <https://github.com/openvinotoolkit/training_extensions>`__
* `Documentation <https://openvinotoolkit.github.io/training_extensions/stable/guide/get_started/introduction.html>`__

OpenVINO™ Security Add-on
#########################

**OpenVINO™ Security Add-on**

A solution for Model Developers and Independent Software Vendors to use secure packaging and secure model execution.

More resources:

* `Documentation <https://docs.openvino.ai/latest/ovsa_get_started.html>`__
* :doc:`Documentation <ovsa_get_started>`
* `GitHub <https://github.com/openvinotoolkit/security_addon>`__

OpenVINO™ integration with TensorFlow (OVTF)
############################################

A solution empowering TensorFlow developers with OpenVINO's optimization capabilities. With just two lines of code in your application, you can offload inference to OpenVINO, while keeping the TensorFlow API.

More resources:

* `Documentation <https://github.com/openvinotoolkit/openvino_tensorflow>`__
* `PyPI <https://pypi.org/project/openvino-tensorflow/>`__
* `GitHub <https://github.com/openvinotoolkit/openvino_tensorflow>`__

DL Streamer
###########

A streaming media analytics framework, based on the GStreamer multimedia framework, for creating complex media analytics pipelines.

More resources:

* `Documentation on GitHub <https://dlstreamer.github.io/index.html>`__
* `Installation Guide on GitHub <https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Install-Guide>`__

DL Workbench
############

A web-based tool for deploying deep learning models. Built on the core of OpenVINO and equipped with a graphical user interface, DL Workbench is a great way to explore the possibilities of the OpenVINO workflow, and to import, analyze, optimize, and build your pre-trained models. You can do all that by visiting `Intel® Developer Cloud <https://software.intel.com/content/www/us/en/develop/tools/devcloud.html>`__ and launching DL Workbench online.

More resources:

* `Documentation <https://docs.openvino.ai/2022.3/workbench_docs_Workbench_DG_Introduction.html>`__
* `Docker Hub <https://hub.docker.com/r/openvino/workbench>`__
* `PyPI <https://pypi.org/project/openvino-workbench/>`__

Computer Vision Annotation Tool (CVAT)
######################################

An online, interactive video and image annotation tool for computer vision purposes.

More resources:

* `Documentation on GitHub <https://opencv.github.io/cvat/docs/>`__
* `Web application <https://www.cvat.ai/>`__
* `Docker Hub <https://hub.docker.com/r/openvino/cvat_server>`__
* `GitHub <https://github.com/openvinotoolkit/cvat>`__

Dataset Management Framework (Datumaro)
#######################################

**Dataset Management Framework (Datumaro)**

A framework and CLI tool to build, transform, and analyze datasets.

More resources:

* `Documentation on GitHub <https://openvinotoolkit.github.io/datumaro/docs/>`__

* :doc:`Overview <datumaro_documentation>`
* `PyPI <https://pypi.org/project/datumaro/>`__
* `GitHub <https://github.com/openvinotoolkit/datumaro>`__
* `Documentation <https://openvinotoolkit.github.io/datumaro/stable/docs/get-started/introduction.html>`__

**Compile Tool**


Compile tool is now deprecated. If you need to compile a model for inference on a specific device, use the following script:

.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/export_compiled_model.py
         :language: python
         :fragment: [export_compiled_model]

   .. tab-item:: C++
      :sync: cpp

      .. doxygensnippet:: docs/snippets/export_compiled_model.cpp
         :language: cpp
         :fragment: [export_compiled_model]


To learn which device supports the import / export functionality, see the :doc:`feature support matrix <openvino_docs_OV_UG_Working_with_devices>`.

For more details on preprocessing steps, refer to :doc:`Optimize Preprocessing <openvino_docs_OV_UG_Preprocessing_Overview>`. To compile the model with advanced preprocessing capabilities, refer to :doc:`Use Case - Integrate and Save Preprocessing Steps Into OpenVINO IR <openvino_docs_OV_UG_Preprocess_Usecase_save>`, which shows how to have all the preprocessing in the compiled blob.
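
For orientation, a hedged Python sketch of the export/import round trip that replaces the deprecated compile tool (device and file names are illustrative):

.. code-block:: python

   import openvino.runtime as ov

   core = ov.Core()
   compiled_model = core.compile_model("model.xml", "CPU")

   # Export the compiled blob so later runs can skip compilation.
   with open("model.blob", "wb") as f:
       f.write(compiled_model.export_model())

   # Import the blob on a device that supports import/export.
   with open("model.blob", "rb") as f:
       restored_model = core.import_model(f.read(), "CPU")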

**DL Workbench**

A web-based tool for deploying deep learning models. Built on the core of OpenVINO and equipped with a graphical user interface, DL Workbench is a great way to explore the possibilities of the OpenVINO workflow, and to import, analyze, optimize, and build your pre-trained models. You can do all that by visiting `Intel® Developer Cloud <https://software.intel.com/content/www/us/en/develop/tools/devcloud.html>`__ and launching DL Workbench online.

**OpenVINO™ integration with TensorFlow (OVTF)**

OpenVINO™ Integration with TensorFlow will no longer be supported as of OpenVINO release 2023.0. As part of the 2023.0 release, OpenVINO will feature a significantly enhanced TensorFlow user experience within native OpenVINO, without needing offline model conversions. :doc:`Learn more <openvino_docs_MO_DG_TensorFlow_Frontend>`.

@endsphinxdirective

@@ -1,55 +0,0 @@
# OpenVINO™ integration with TensorFlow {#ovtf_integration}

@sphinxdirective

**OpenVINO™ integration with TensorFlow** is a solution for TensorFlow developers who want to get started with OpenVINO™ in their inferencing applications. By adding just two lines of code you can take advantage of OpenVINO™ toolkit optimizations with TensorFlow inference applications across a range of Intel® computation devices.

This is all you need:

.. code-block:: python

   import openvino_tensorflow
   openvino_tensorflow.set_backend('<backend_name>')


**OpenVINO™ integration with TensorFlow** accelerates inference across many AI models on a variety of Intel® technologies, such as:

* Intel® CPUs
* Intel® integrated GPUs

.. note::
   For maximum performance, efficiency, tooling customization, and hardware control, we recommend that developers adopt native OpenVINO™ solutions.

To find out more about the product itself, as well as learn how to use it in your project, check its dedicated `GitHub repository <https://github.com/openvinotoolkit/openvino_tensorflow/tree/master/docs>`__.

To see what you can do with **OpenVINO™ integration with TensorFlow**, explore the demos located in the `examples folder <https://github.com/openvinotoolkit/openvino_tensorflow/tree/master/examples>`__ in our GitHub repository.

Sample tutorials are also hosted on `Intel® DevCloud <https://www.intel.com/content/www/us/en/developer/tools/devcloud/edge/build/ovtfoverview.html>`__. The demo applications are implemented using Jupyter Notebooks. You can interactively execute them on Intel® DevCloud nodes and compare the results of **OpenVINO™ integration with TensorFlow**, native TensorFlow, and OpenVINO™.

License
#######

**OpenVINO™ integration with TensorFlow** is licensed under `Apache License Version 2.0 <https://github.com/openvinotoolkit/openvino_tensorflow/blob/master/LICENSE>`__.
By contributing to the project, you agree to the license and copyright terms therein
and release your contribution under these terms.

Support
#######

Submit your questions, feature requests and bug reports via `GitHub issues <https://github.com/openvinotoolkit/openvino_tensorflow/issues>`__.

How to Contribute
#################

We welcome community contributions to **OpenVINO™ integration with TensorFlow**. If you have an idea for improvement:

* Share your proposal via `GitHub issues <https://github.com/openvinotoolkit/openvino_tensorflow/issues>`__.
* Submit a `pull request <https://github.com/openvinotoolkit/openvino_tensorflow/pulls>`__.

We will review your contribution as soon as possible. If any additional fixes or modifications are necessary, we will guide you and provide feedback. Before you make your contribution, make sure you can build **OpenVINO™ integration with TensorFlow** and run all the examples with your fix/patch. If you want to introduce a large feature, create test cases for your feature. Upon our verification of your pull request, we will merge it to the repository, provided that it has met the above-mentioned requirements and proved acceptable.

\* Other names and brands may be claimed as the property of others.

@endsphinxdirective

@@ -1,6 +1,12 @@
# OpenVINO™ Training Extensions {#ote_documentation}

@sphinxdirective

.. meta::
   :description: OpenVINO™ Training Extensions include advanced algorithms used
                 to create, train and convert deep learning models with OpenVINO
                 Toolkit for optimized inference.


OpenVINO™ Training Extensions provide a suite of advanced algorithms to train
Deep Learning models and convert them using the `OpenVINO™

@@ -19,21 +25,22 @@ Detailed Workflow

.. note::
   Prepare a separate dataset or split the dataset you have for more accurate quality evaluation.

3. Having received successful evaluation results, you have an opportunity to deploy your model or continue optimizing it, using NNCF and POT. For more information about these frameworks, go to :doc:`Optimization Guide <openvino_docs_model_optimization_guide>`.
3. Having received successful evaluation results, you have an opportunity to deploy your model or continue optimizing it, using NNCF. For more information about these frameworks, go to :doc:`Optimization Guide <openvino_docs_model_optimization_guide>`.

If the results are unsatisfactory, add datasets and perform the same steps, starting with dataset annotation.

OpenVINO Training Extensions Components
#######################################

- `OpenVINO Training Extensions SDK <https://github.com/openvinotoolkit/training_extensions/tree/master/ote_sdk>`__
- `OpenVINO Training Extensions CLI <https://github.com/openvinotoolkit/training_extensions/tree/master/ote_cli>`__
- `OpenVINO Training Extensions Algorithms <https://github.com/openvinotoolkit/training_extensions/tree/master/external>`__
* `OpenVINO Training Extensions API <https://github.com/openvinotoolkit/training_extensions/tree/develop/otx/api>`__
* `OpenVINO Training Extensions CLI <https://github.com/openvinotoolkit/training_extensions/tree/develop/otx/cli>`__
* `OpenVINO Training Extensions Algorithms <https://github.com/openvinotoolkit/training_extensions/tree/develop/otx/algorithms>`__

Tutorials
#########

`Object Detection <https://github.com/openvinotoolkit/training_extensions/blob/master/ote_cli/notebooks/train.ipynb>`__
* `Base tutorial <https://openvinotoolkit.github.io/training_extensions/stable/guide/tutorials/base/index.html>`__
* `Advanced tutorial <https://openvinotoolkit.github.io/training_extensions/stable/guide/tutorials/advanced/index.html>`__

@endsphinxdirective

@@ -3,22 +3,45 @@

@sphinxdirective

.. meta::
   :description: OpenVINO toolkit workflow usually involves preparation,
                 optimization, and compression of models, running inference and
                 deploying deep learning applications.

.. toctree::
   :maxdepth: 1
   :hidden:

   Model Preparation <openvino_docs_model_processing_introduction>
   Model Optimization and Compression <openvino_docs_model_optimization_guide>
   Running and Deploying Inference <openvino_docs_deployment_guide_introduction>
   Running Inference <openvino_docs_OV_UG_OV_Runtime_User_Guide>
   Deployment on a Local System <openvino_deployment_guide>
   Deployment on a Model Server <ovms_what_is_openvino_model_server>


| :doc:`Model Preparation <openvino_docs_model_processing_introduction>`
| With the Model Downloader and Model Optimizer guides, you will learn to download pre-trained models and convert them for use with OpenVINO™. You can use your own models or choose some from a broad selection provided in the Open Model Zoo.
| With the model conversion API guide, you will learn to convert pre-trained models for use with OpenVINO™. You can use your own models or choose some from a broad selection in online databases, such as `TensorFlow Hub <https://tfhub.dev/>`__, `Hugging Face <https://huggingface.co/>`__, and `Torchvision models <https://pytorch.org/hub/>`__.

| :doc:`Model Optimization and Compression <openvino_docs_model_optimization_guide>`
| In this section you will find out how to optimize a model to achieve better inference performance. It describes multiple optimization methods for both the training and post-training stages.

| :doc:`Deployment <openvino_docs_deployment_guide_introduction>`
| This section explains the process of deploying your own inference application using either OpenVINO Runtime or OpenVINO Model Server. It describes how to run inference, which is the most basic form of deployment and the quickest way of launching inference.
| :doc:`Running Inference <openvino_docs_OV_UG_OV_Runtime_User_Guide>`
| This section describes how to run inference, which is the most basic form of deployment and the quickest way of launching inference.


Once you have a model that meets both OpenVINO™ and your requirements, you can choose how to deploy it with your application.

| :doc:`Option 1. Deployment via OpenVINO Runtime <openvino_deployment_guide>`
| Local deployment uses OpenVINO Runtime that is called from, and linked to, the application directly.
| It utilizes resources available to the system and provides the quickest way of launching inference.
| Deployment on a local system requires performing the steps from the running inference section.

| :doc:`Option 2. Deployment via Model Server <ovms_what_is_openvino_model_server>`
| Deployment via OpenVINO Model Server allows the application to connect to the inference server set up remotely.
| This way inference can use external resources instead of those available to the application itself.
| Deployment on a model server can be done quickly and without performing any additional steps described in the running inference section.

@endsphinxdirective

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Learn the details of custom kernel support for the GPU device to
                 enable operations not supported by OpenVINO.


To enable operations not supported by OpenVINO™ out of the box, you may need an extension for the OpenVINO operation set and a custom kernel for the device you will target. This article describes custom kernel support for the GPU device.

The GPU codepath abstracts many details about OpenCL. You need to provide the kernel code in OpenCL C and an XML configuration file that connects the kernel and its parameters to the parameters of the operation.
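
As a rough illustration, registering such a configuration file from Python might look as follows (the file name is hypothetical, and ``CONFIG_FILE`` is assumed to be the GPU plugin's configuration key):

.. code-block:: python

   import openvino.runtime as ov

   core = ov.Core()
   # Point the GPU plugin at the XML file describing the custom kernels.
   core.set_property("GPU", {"CONFIG_FILE": "custom_kernels.xml"})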

@@ -13,18 +18,20 @@ There are two options for using the custom operation configuration file:

.. tab-set::

   .. tab-item:: C++

      .. doxygensnippet:: docs/snippets/gpu/custom_kernels_api.cpp
         :language: cpp
         :fragment: [part0]

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/gpu/custom_kernels_api.py
         :language: python
         :fragment: [part0]

   .. tab-item:: C++
      :sync: cpp

      .. doxygensnippet:: docs/snippets/gpu/custom_kernels_api.cpp
         :language: cpp
         :fragment: [part0]


All OpenVINO samples, except the trivial ``hello_classification``, and most Open Model Zoo demos
feature a dedicated command-line option ``-c`` to load custom kernels. For example, to load custom operations for the classification sample, run the command below:

@@ -235,7 +242,8 @@ Example Configuration File

The following code sample provides an example configuration file in XML
format. For information on the configuration file structure, see the `Configuration File Format <#config-file-format>`__.

.. code-block:: cpp
.. code-block:: xml
   :force:

   <CustomLayer name="ReLU" type="SimpleGPU" version="1">
      <Kernel entry="example_relu_kernel">

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Explore OpenVINO™ Extensibility API, which allows adding
                 support for models with custom operations and their further implementation
                 in applications.

.. toctree::
   :maxdepth: 1
   :hidden:

@@ -9,7 +14,6 @@

   openvino_docs_Extensibility_UG_add_openvino_ops
   openvino_docs_Extensibility_UG_Frontend_Extensions
   openvino_docs_Extensibility_UG_GPU
   openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer

.. toctree::
   :maxdepth: 1

@@ -18,14 +22,20 @@

   openvino_docs_transformations
   OpenVINO Plugin Developer Guide <openvino_docs_ie_plugin_dg_overview>

The Intel® Distribution of OpenVINO™ toolkit supports neural network models trained with various frameworks, including
TensorFlow, PyTorch, ONNX, TensorFlow Lite, PaddlePaddle, Apache MXNet, Caffe, and Kaldi. The list of supported operations is different for
each of the supported frameworks. To see the operations supported by your framework, refer to :doc:`Supported Framework Operations <openvino_docs_MO_DG_prepare_model_Supported_Frameworks_Layers>`.

.. toctree::
   :maxdepth: 1
   :hidden:

   openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer

The Intel® Distribution of OpenVINO™ toolkit supports neural-network models trained with various frameworks, including
TensorFlow, PyTorch, ONNX, TensorFlow Lite, and PaddlePaddle (OpenVINO support for Apache MXNet, Caffe, and Kaldi is currently
being deprecated and will be removed entirely in the future). The list of supported operations is different for each of the supported frameworks.
To see the operations supported by your framework, refer to :doc:`Supported Framework Operations <openvino_resources_supported_operations_frontend>`.

Custom operations, which are not included in the list, are not recognized by OpenVINO out-of-the-box. The need for a custom operation may arise in two cases:

1. A new or rarely used regular framework operation is not supported in OpenVINO yet.

2. A new user operation that was created for some specific model topology by the author of the model, using framework extension capabilities.

Importing models with such operations requires additional steps. This guide illustrates the workflow for running inference on models featuring custom operations, allowing you to plug in your own implementation for them. OpenVINO Extensibility API enables adding support for those custom operations and using one implementation for Model Optimizer and OpenVINO Runtime.

@@ -54,9 +64,9 @@ Mapping of custom operation is implemented differently, depending on model forma

1. If a model is represented in the ONNX (including models exported from PyTorch in ONNX), TensorFlow Lite, PaddlePaddle or TensorFlow formats, then one of the classes from :doc:`Frontend Extension API <openvino_docs_Extensibility_UG_Frontend_Extensions>` should be used. It consists of several classes available in C++ which can be used with the ``--extensions`` option in Model Optimizer or when a model is imported directly to OpenVINO runtime using the ``read_model`` method. A Python API is also available for runtime model import.

2. If a model is represented in the Caffe, Kaldi or MXNet formats, then :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>` should be used. This approach is available for model conversion in Model Optimizer only.
2. If a model is represented in the Caffe, Kaldi or MXNet formats (as legacy frontends), then :doc:`[Legacy] Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>` should be used. This approach is available for model conversion in Model Optimizer only.

The existence of two approaches side by side is explained by the two different types of frontends used for model conversion in OpenVINO: new frontends (ONNX, PaddlePaddle, TensorFlow Lite and TensorFlow) and legacy frontends (Caffe, Kaldi and Apache MXNet). Model Optimizer can use both front-ends, in contrast to the direct import of a model with the ``read_model`` method, which can use new frontends only. Follow one of the appropriate guides referenced above to implement mappings depending on framework frontend.
The existence of two approaches side by side is explained by the two different types of frontends used for model conversion in OpenVINO: new frontends (ONNX, PaddlePaddle, TensorFlow Lite, and TensorFlow) and legacy frontends (Caffe, Kaldi, and Apache MXNet). Model Optimizer can use both frontends, in contrast to the direct import of a model with the ``read_model`` method, which can use new frontends only. Follow one of the appropriate guides referenced above to implement mappings depending on framework frontend.

If you are implementing extensions for new ONNX, PaddlePaddle, TensorFlow Lite or TensorFlow frontends and plan to use the ``--extensions`` option in Model Optimizer for model conversion, then the extensions should be:

@@ -85,6 +95,13 @@ Extensions can be loaded from a code with the ``:ref:`ov::Core::add_extension <

.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [add_extension]

   .. tab-item:: C++
      :sync: cpp

@@ -92,18 +109,18 @@ Extensions can be loaded from a code with the ``:ref:`ov::Core::add_extension <
         :language: cpp
         :fragment: [add_extension]

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [add_extension]


The ``Identity`` is a custom operation class defined in :doc:`Custom Operation Guide <openvino_docs_Extensibility_UG_add_openvino_ops>`. This is sufficient to enable reading OpenVINO IR which uses the ``Identity`` extension operation emitted by Model Optimizer. In order to load the original model directly to the runtime, add a mapping extension:

.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [add_frontend_extension]

   .. tab-item:: C++
      :sync: cpp

@@ -111,16 +128,11 @@ The ``Identity`` is a custom operation class defined in :doc:`Custom Operation G
         :language: cpp
         :fragment: [add_frontend_extension]

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [add_frontend_extension]

When the Python API is used, there is no way to implement a custom OpenVINO operation. Even if a custom OpenVINO operation is implemented in C++ and loaded into the runtime through a shared library, there is still no way to add a frontend mapping extension that refers to this custom operation. In this case, use the C++ shared library approach to implement both the operation semantics and the framework mapping.

Python can still be used to map and decompose operations when only operations from the standard OpenVINO operation set are used.

.. _create_a_library_with_extensions:

Create a Library with Extensions
++++++++++++++++++++++++++++++++

@@ -165,13 +177,6 @@ This CMake script finds OpenVINO, using the ``find_package`` CMake command.

.. tab-set::

   .. tab-item:: C++
      :sync: cpp

      .. doxygensnippet:: docs/snippets/ov_extensions.cpp
         :language: cpp
         :fragment: [add_extension_lib]

   .. tab-item:: Python
      :sync: py

@@ -179,6 +184,13 @@ This CMake script finds OpenVINO, using the ``find_package`` CMake command.
         :language: python
         :fragment: [add_extension_lib]

   .. tab-item:: C++
      :sync: cpp

      .. doxygensnippet:: docs/snippets/ov_extensions.cpp
         :language: cpp
         :fragment: [add_extension_lib]

See Also
########

@@ -187,4 +199,4 @@ See Also

* :doc:`Using OpenVINO Runtime Samples <openvino_docs_OV_UG_Samples_Overview>`
* :doc:`Hello Shape Infer SSD sample <openvino_inference_engine_samples_hello_reshape_ssd_README>`

@endsphinxdirective

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Explore OpenVINO™ Extension API which enables registering
                 custom operations to support models with operations
                 not supported by OpenVINO.

OpenVINO™ Extension API allows you to register custom operations to support models with operations which OpenVINO™ does not support out-of-the-box. This capability requires writing code in C++, so if you are using Python to develop your application, you need to build a separate shared library implemented in C++ first and load it in Python using the ``add_extension`` API. Please refer to :ref:`Create a library with extensions <create_a_library_with_extensions>` for more details on library creation and usage. The remaining part of this document describes how to implement an operation class.
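
A minimal sketch of the Python side, assuming the shared library has already been built (its name and the model file are hypothetical):

.. code-block:: python

   import openvino.runtime as ov

   core = ov.Core()
   # Load the C++ library that implements the custom operation class.
   core.add_extension("libcustom_extensions.so")
   model = core.read_model("model_with_custom_op.xml")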

Operation Class

@@ -2,6 +2,12 @@

@sphinxdirective

.. meta::
   :description: Learn how to use frontend extension classes to facilitate the mapping
                 of custom operations from the framework model representation to the OpenVINO
                 representation.


The goal of this chapter is to explain how to use Frontend extension classes to facilitate
mapping of custom operations from framework model representation to OpenVINO representation.
Refer to :doc:`Introduction to OpenVINO Extension <openvino_docs_Extensibility_UG_Intro>` to

@@ -19,6 +25,11 @@ guide.

operation that is a placeholder for your real custom operation. You can review the complete code,
which is fully compilable, to see how it works.


.. note::
   You can find more examples of extensions in the `openvino_contrib repository <https://github.com/openvinotoolkit/openvino_contrib/tree/master/modules/custom_operations>`_.


Single Operation Mapping with OpExtension
#########################################

@@ -83,6 +94,13 @@ In this case, you can directly say that 'MyRelu' -> ``Relu`` mapping should be u

.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [py_frontend_extension_MyRelu]

   .. tab-item:: C++
      :sync: cpp

@@ -90,13 +108,6 @@ In this case, you can directly say that 'MyRelu' -> ``Relu`` mapping should be u
         :language: cpp
         :fragment: [frontend_extension_MyRelu]

   .. tab-item:: Python
      :sync: python

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [py_frontend_extension_MyRelu]


In the resulting converted OpenVINO model, the “MyRelu” operation will be replaced by the standard operation
``Relu`` from the latest available OpenVINO operation set. Notice that when a standard operation is used,

@@ -108,10 +119,18 @@ as it was demonstrated with ``TemplateExtension::Identity``.

Attribute Mapping
++++++++++++++++++

As described above, ``OpExtension`` is useful when attributes can be mapped one by one or initialized by a constant.
Attributes in OpenVINO operators are identified by their names, so for frameworks that also have named attributes (like TensorFlow, PaddlePaddle, ONNX),
you can specify a name-to-name mapping. For frameworks where an OpenVINO operator's attributes can be mapped to one of the framework
operator inputs (like PyTorch), there is a name-to-input-index mapping.


Named attributes mapping
^^^^^^^^^^^^^^^^^^^^^^^^

If the set of attributes in framework representation and OpenVINO representation completely match by their names and types,
nothing should be specified in OpExtension constructor parameters. The attributes are discovered and mapped
automatically based on ``visit_attributes`` method that should be defined for any OpenVINO operation.
no attribute mapping has to be specified in OpExtension constructor parameters. The attributes are discovered and mapped automatically
based on the ``visit_attributes`` method that should be defined for any OpenVINO operation.

Imagine you have a CustomOperation class implementation that has two attributes, with names ``attr1`` and ``attr2``.

@@ -119,14 +138,15 @@ Imagine you have CustomOperation class implementation that has two attributes wi
   :language: cpp
   :fragment: [frontend_extension_CustomOperation]

And the original model in the framework representation also has operation named “CustomOperation” with the same
And the original model in the framework representation also has an operation named ``CustomOperation`` with the same
``attr1`` and ``attr2`` attributes. Then with the following code:

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_CustomOperation_as_is]

Both ``attr1`` and ``attr2`` are copied from the framework representation to the OpenVINO representation automatically.

If, for some reason, the names of the attributes are different but the values can still be copied “as-is”, you can pass an attribute
names mapping in the ``OpExtension`` constructor:

@@ -144,48 +164,123 @@ to achieve that do the following:
   :language: cpp
   :fragment: [frontend_extension_CustomOperation_rename_set]


So the conclusion is that each attribute of the target OpenVINO operation should be initialized in one of three ways:

1. Automatically, by attribute name matching
2. By mapping to a differently named framework attribute
3. By setting a constant value

This is achieved by specifying maps as arguments for the `OpExtension` constructor.
This is achieved by specifying maps as arguments for the ``OpExtension`` constructor.


Attribute mapping with named inputs and outputs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Mappings in the previous examples assume that inputs and outputs of an operator in the framework model representation come
in a particular order, so you can directly map framework operation input ``0`` to OpenVINO operation input ``0``, and so on.
That's not always the case: for frameworks like PaddlePaddle, operation inputs and outputs are identified by their names
and may be defined in any order. So to map them to OpenVINO operation inputs and outputs, you have to specify that order yourself.
This can be done by creating two vectors of strings, one for inputs and one for outputs, where the framework operation
input name at position ``i`` maps to the OpenVINO operation input at position ``i`` (and similarly for outputs).


Let's see the following example. Like previously, we'd like to map ``CustomOperation`` in the original model
to OpenVINO ``CustomOperation`` as is (so their names and attribute names match). This time, the framework operation
inputs and outputs are not strictly ordered and can be identified by their names ``A``, ``B``, ``C`` for inputs
and ``X``, ``Y`` for outputs. Those inputs and outputs can be mapped to the OpenVINO operation, such that inputs
``A``, ``B``, ``C`` map to the OpenVINO ``CustomOperation`` first, second and third inputs, and the ``X`` and ``Y``
outputs map to the OpenVINO ``CustomOperation`` first and second outputs, respectively.

Given that, such a custom operation can be registered as follows:

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_CustomOperation_as_is_paddle]

The second example shows how to map the operation with named inputs and outputs when the names of the attributes are different:

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_CustomOperation_rename_paddle]


The last one shows how to map the operation with named inputs and outputs when (in order to correctly map the framework
operation to the OpenVINO operation) one of the attributes has to be set to a predefined value:

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_CustomOperation_rename_set_paddle]

Mapping attributes from operation inputs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For models (like PyTorch models) where operations have attributes on the input list, you can specify a name-to-input-index mapping.
For example, imagine you have created a custom OpenVINO operation that implements a variant of the ELU activation function
with two attributes, ``alpha`` and ``beta``:

.. math::

   CustomElu=\left\lbrace
   \begin{array}{ll}
   beta * x & \textrm{if x > 0} \newline
   alpha * (exp(x) - 1) & \textrm{otherwise}
   \end{array}
   \right.

Below is a snippet of the ``CustomElu`` class showing how to define its attributes:

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_framework_map_CustomElu]

Let's see an example of how you can map ``CustomElu`` to PyTorch `aten::elu <https://pytorch.org/docs/stable/generated/torch.nn.functional.elu.html>`_
(note that if ``beta`` is equal to ``1``, ``CustomElu`` works the same as ``aten::elu``).
``aten::elu`` has the ``alpha`` attribute second on the input list, but it doesn't have ``beta``.
So in order to map it to ``CustomElu``, you can use the following:

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_framework_map_CustomElu_mapping]

This will map ``alpha`` to the second input and map the ``beta`` attribute to the constant value ``1.0f``.

An extension created this way can be used, e.g., in a dynamic library; please refer to :ref:`Create a library with extensions <create_a_library_with_extensions>`.

Mapping custom operations to frontends with OPENVINO_FRAMEWORK_MAP macro
---------------------------------------------------------------------------
########################################################################

.. note::

   The solution below works only for the ONNX and TensorFlow frontends.
``OPENVINO_FRAMEWORK_MAP`` is a macro that should be used inside an OpenVINO operation's class definition and that lets you specify
the mapping between this operation and a frontend operation.

``OPENVINO_FRAMEWORK_MAP`` is a macro that should be used inside an OpenVINO operation's class definition
and that lets you specify the mapping between this operation and a frontend operation.
Let's consider the following example. Imagine you have an ONNX model with a ``CustomOp`` operation (and this operation has a ``mode`` attribute),
a TensorFlow model with a ``CustomOpV3`` operation (this operation has an ``axis`` attribute), and a PaddlePaddle model with a ``CustomOp`` (with a ``mode`` attribute)
that has an input named "X" and an output named "Out", and all of them can be implemented with a single OpenVINO operation ``CustomOp``, as follows:

Let's consider the following example. Imagine you have an ONNX model with a ``CustomOp`` operation
(and this operation has a ``mode`` attribute) and a TensorFlow model with a ``CustomOpV3`` operation
(this operation has an ``axis`` attribute), and both of them can be implemented with a single OpenVINO
operation ``CustomOp``, as follows:

.. doxygensnippet:: ov_extensions.cpp
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_framework_map_macro_headers]

.. doxygensnippet:: ov_extensions.cpp
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_framework_map_macro_CustomOp]

Let's take a closer look at the parameters this macro takes (note that there are two flavors - the second one is to map
PaddlePaddle operations, where input and output names have to be specified).

Let's take a closer look at the parameters this macro takes:

.. code-block::cpp
.. code-block:: cpp

   OPENVINO_FRAMEWORK_MAP(framework, name, attributes_map, attributes_values)
   OPENVINO_FRAMEWORK_MAP(framework, input_names, output_names, name, attributes_map, attributes_values)

- ``framework`` - framework name.
- ``name`` - the framework operation name. It's optional if the OpenVINO custom operation name
  (that is the name that is passed as the first parameter to the `OPENVINO_OP` macro) is the
  same as the framework operation name and both ``attributes_map`` and ``attributes_values`` are not provided.
  (that is the name that is passed as the first parameter to the ``OPENVINO_OP`` macro) is the same
  as the framework operation name and both ``attributes_map`` and ``attributes_values`` are not provided.
- ``input_names`` - vector of strings that specify the names of inputs (needed to map PaddlePaddle to OpenVINO operations),
- ``output_names`` - vector of strings that specify the names of outputs (needed to map PaddlePaddle to OpenVINO operations),
- ``attributes_map`` - used to provide a mapping between an OpenVINO operation attribute and a
  framework operation attribute. Contains key-value pairs, where the key is an OpenVINO operation
  attribute name and the value is its corresponding framework operation attribute name.

@@ -196,14 +291,21 @@ Let's take a closer look at the parameters this macro takes:
  operation attribute name and the value is this attribute value. This parameter cannot be provided
  if ``attributes_map`` contains all of the OpenVINO operation attributes or if ``attributes_map`` is not provided.

In the example above, ``OPENVINO_FRAMEWORK_MAP`` is used twice.
In the example above, ``OPENVINO_FRAMEWORK_MAP`` is used three times.
First, OpenVINO ``CustomOp`` is mapped to the ONNX ``CustomOp`` operation, the ``m_mode`` attribute is mapped to the ``mode``
attribute, while the ``m_axis`` attribute gets the default value ``-1``. Secondly, OpenVINO ``CustomOp`` is mapped
to the Tensorflow ``CustomOpV3`` operation, the ``m_axis`` attribute is mapped to the ``axis`` attribute, while the ``m_mode``
attribute gets the default value ``linear``.
attribute, while the ``m_axis`` attribute gets the default value ``-1``. Secondly, OpenVINO ``CustomOp`` is mapped
to the TensorFlow ``CustomOpV3`` operation, the ``m_axis`` attribute is mapped to the ``axis`` attribute, while the ``m_mode``
attribute gets the default value ``"linear"``. Thirdly, OpenVINO ``CustomOp`` is mapped to the PaddlePaddle ``CustomOp`` operation,
the ``m_mode`` attribute is mapped to the ``mode`` attribute, while the ``m_axis`` attribute gets the default value ``-1``.
This mapping also specifies the input name "X" and output name "Out".

The last step is to register this custom operation as follows:
@snippet ov_extensions.cpp frontend_extension_framework_map_macro_add_extension

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_framework_map_macro_add_extension]

Mapping to Multiple Operations with ConversionExtension
#######################################################

@@ -222,7 +324,7 @@ operations constructing dependency graph of any complexity.

operation classes. Follow the chapter :ref:`Build a Model in OpenVINO Runtime <ov_ug_build_model>` to
learn how to use OpenVINO operation classes to build a fragment of a model for replacement.

The next example illustrates using ``ConversionExtension`` for conversion of “ThresholdedRelu”
The example below illustrates using ``ConversionExtension`` for conversion of “ThresholdedRelu”
from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, Convert(Greater(x, alpha), type=float))``.

.. note::

@@ -233,6 +335,13 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C

.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [py_frontend_extension_ThresholdedReLU_header]

   .. tab-item:: C++
      :sync: cpp

@@ -240,14 +349,14 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C
         :language: cpp
         :fragment: [frontend_extension_ThresholdedReLU_header]

   .. tab-item:: Python
      :sync: python
.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [py_frontend_extension_ThresholdedReLU_header]

.. tab-set::
         :fragment: [py_frontend_extension_ThresholdedReLU]

   .. tab-item:: C++
      :sync: cpp

@@ -256,24 +365,47 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C
         :language: cpp
         :fragment: [frontend_extension_ThresholdedReLU]

   .. tab-item:: Python
      :sync: python

      .. doxygensnippet:: docs/snippets/ov_extensions.py
         :language: python
         :fragment: [py_frontend_extension_ThresholdedReLU]

The next example shows how to use ``ConversionExtension`` to convert PyTorch
`aten::hardtanh <https://pytorch.org/docs/stable/generated/torch.nn.functional.hardtanh.html>`_,
demonstrating how to use the ``get_values_from_const_input`` function to fetch an attribute value from an input:


To access an original framework operation attribute value and connect to inputs,
the ``node`` object of type ``NodeContext`` is used. It has two main methods:
.. doxygensnippet:: docs/snippets/ov_extensions.py
   :language: python
   :fragment: [py_frontend_extension_aten_hardtanh]


To access an original framework operation attribute value and connect to inputs, the ``node`` object of type ``NodeContext`` is used. It has three main methods:

* ``NodeContext::get_input`` to get an input with a given index,

* ``NodeContext::get_attribute`` to get an attribute value with a given name.
* ``NodeContext::get_attribute`` to get an attribute value with a given name,

* ``NodeContext::get_values_from_const_input`` to get an attribute with a given input index.

The conversion function should return a vector of node outputs that are mapped to
corresponding outputs of the original framework operation in the same order.
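
To make this concrete, a hedged Python sketch of a whole conversion function for the “ThresholdedRelu” example above (opset choice and typing details are assumptions, not the exact snippet content):

.. code-block:: python

   import openvino.runtime.opset8 as ops
   from openvino.frontend import ConversionExtension
   from openvino.runtime import Core

   def convert_thresholded_relu(node):
       x = node.get_input(0)
       input_type = x.get_element_type()
       alpha = ops.constant([node.get_attribute("alpha")], input_type)
       # ThresholdedRelu(x, alpha) -> Multiply(x, Convert(Greater(x, alpha), type=float))
       mask = ops.convert(ops.greater(x, alpha), input_type.get_type_name())
       return ops.multiply(x, mask).outputs()

   core = Core()
   core.add_extension(ConversionExtension("ThresholdedRelu", convert_thresholded_relu))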

Some frameworks require output names of the operation to be provided during conversion.
For PaddlePaddle operations, it is generally necessary to provide names for all outputs using the ``NamedOutputs`` container.
Usually, those names can be found in the source code of the individual operation in the PaddlePaddle codebase.
The next example shows such a conversion for the ``top_k_v2`` operation.

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_paddle_TopK]

For the TensorFlow framework, if an operation has more than one output, it is recommended to assign names to
those outputs using the ``NamedOutputVector`` structure, which allows both indexed and named output access.
For a description of TensorFlow operations, including the names of their outputs, refer to the
`tf.raw_ops <https://www.tensorflow.org/api_docs/python/tf/raw_ops/>`__ documentation page.
The next example shows such a conversion for the ``TopKV2`` operation.

.. doxygensnippet:: docs/snippets/ov_extensions.cpp
   :language: cpp
   :fragment: [frontend_extension_tf_TopK]

@endsphinxdirective

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Get to know how Graph Rewrite handles running multiple matcher passes on
                 ov::Model in a single graph traversal.


``:ref:`ov::pass::GraphRewrite <doxid-classov_1_1pass_1_1_graph_rewrite>``` serves for running multiple matcher passes on ``:ref:`ov::Model <doxid-classov_1_1_model>``` in a single graph traversal.
Example:
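
(The original example is not shown in this excerpt; below is a hedged Python sketch of the same idea, where ``MyMatcherPass1``/``MyMatcherPass2`` are hypothetical ``MatcherPass`` subclasses and ``model`` is an existing ``ov.Model``.)

.. code-block:: python

   from openvino.runtime.passes import Manager, GraphRewrite

   manager = Manager()
   # Register a GraphRewrite anchor and attach several matcher passes to it;
   # all of them run in a single traversal of the model graph.
   anchor = manager.register_pass(GraphRewrite())
   anchor.add_matcher(MyMatcherPass1())
   anchor.add_matcher(MyMatcherPass2())
   manager.run_passes(model)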

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Learn how to create a pattern, implement a callback, register
                 the pattern and Matcher to execute MatcherPass transformation
                 on a model.

``:ref:`ov::pass::MatcherPass <doxid-classov_1_1pass_1_1_matcher_pass>``` is used for pattern-based transformations.
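
Before the template, a hedged Python sketch of a matcher pass that only reports matched ``Relu`` nodes (the pattern string and API are per ``openvino.runtime.passes``; treat the details as assumptions):

.. code-block:: python

   from openvino.runtime.passes import Matcher, MatcherPass, WrapType

   class ReluReporter(MatcherPass):
       def __init__(self):
           super().__init__()
           relu_pattern = WrapType("opset8.Relu")

           def callback(matcher: Matcher) -> bool:
               # Called for every subgraph that matches the pattern.
               print("Matched:", matcher.get_match_root().get_friendly_name())
               return False  # the graph was not modified

           self.register_matcher(Matcher(relu_pattern, "ReluReporter"), callback)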

Template for MatcherPass transformation class

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Learn how to use Model Pass transformation class to take entire
                 ov::Model as input and process it.


``:ref:`ov::pass::ModelPass <doxid-classov_1_1pass_1_1_model_pass>``` is used for transformations that take the entire ``:ref:`ov::Model <doxid-classov_1_1_model>``` as an input and process it.
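
For a quick impression, a hedged Python sketch of a model pass that visits every operation (API per ``openvino.runtime.passes``; treat the details as assumptions):

.. code-block:: python

   from openvino.runtime.passes import ModelPass

   class OpPrinter(ModelPass):
       def run_on_model(self, model):
           # Receives the whole ov.Model and may transform it in place.
           for op in model.get_ordered_ops():
               print(op.get_type_name(), op.get_friendly_name())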

Template for ModelPass transformation class

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Learn how to apply additional model optimizations or transform
                 unsupported subgraphs and operations, using OpenVINO™ Transformations API.


.. toctree::
   :maxdepth: 1
   :hidden:

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7c8ab4f15874d235968471bcf876c89c795d601e69891208107b8b72aa58eb1
size 70014

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d5ccf51fe1babb93d96d042494695a6a6e055d1f8ebf7eef5083d54d8987a23
size 58789

@@ -1,40 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#! [complex:transformation]

from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
from openvino.tools.mo.graph.graph import Graph


class Complex(FrontReplacementSubgraph):
    enabled = True

    def pattern(self):
        return dict(
            nodes=[
                ('strided_slice_real', dict(op='StridedSlice')),
                ('strided_slice_imag', dict(op='StridedSlice')),
                ('complex', dict(op='Complex')),
            ],
            edges=[
                ('strided_slice_real', 'complex', {'in': 0}),
                ('strided_slice_imag', 'complex', {'in': 1}),
            ])

    @staticmethod
    def replace_sub_graph(graph: Graph, match: dict):
        strided_slice_real = match['strided_slice_real']
        strided_slice_imag = match['strided_slice_imag']
        complex_node = match['complex']

        # make sure that both strided slice operations get the same data as input
        assert strided_slice_real.in_port(0).get_source() == strided_slice_imag.in_port(0).get_source()

        # identify the output port of the operation producing data for strided slice nodes
        input_node_output_port = strided_slice_real.in_port(0).get_source()
        input_node_output_port.disconnect()

        # change the connection so now all consumers of "complex_node" get data from the input node of the strided slice nodes
        complex_node.out_port(0).get_connection().set_source(input_node_output_port)
#! [complex:transformation]

@@ -1,27 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#! [complex_abs:transformation]
import numpy as np

from openvino.tools.mo.ops.elementwise import Pow
from openvino.tools.mo.ops.ReduceOps import ReduceSum
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.graph.graph import Graph, Node
from openvino.tools.mo.ops.const import Const


class ComplexAbs(FrontReplacementOp):
    op = "ComplexAbs"
    enabled = True

    def replace_op(self, graph: Graph, node: Node):
        pow_2 = Const(graph, {'value': np.float32(2.0)}).create_node()
        reduce_axis = Const(graph, {'value': np.int32(-1)}).create_node()
        pow_0_5 = Const(graph, {'value': np.float32(0.5)}).create_node()

        sq = Pow(graph, dict(name=node.in_node(0).name + '/sq', power=2.0)).create_node([node.in_node(0), pow_2])
        sum = ReduceSum(graph, dict(name=sq.name + '/sum')).create_node([sq, reduce_axis])
        sqrt = Pow(graph, dict(name=sum.name + '/sqrt', power=0.5)).create_node([sum, pow_0_5])
        return [sqrt.id]
#! [complex_abs:transformation]

@@ -1,33 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# ! [fft_ext:extractor]
from ...ops.FFT import FFT
from openvino.tools.mo.front.extractor import FrontExtractorOp


class FFT2DFrontExtractor(FrontExtractorOp):
    op = 'FFT2D'
    enabled = True

    @classmethod
    def extract(cls, node):
        attrs = {
            'inverse': 0
        }
        FFT.update_node_stat(node, attrs)
        return cls.enabled


class IFFT2DFrontExtractor(FrontExtractorOp):
    op = 'IFFT2D'
    enabled = True

    @classmethod
    def extract(cls, node):
        attrs = {
            'inverse': 1
        }
        FFT.update_node_stat(node, attrs)
        return cls.enabled
# ! [fft_ext:extractor]
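The two extractors above differ only in the op name and the value of the 'inverse' attribute, so the duplication could be factored out with a small class factory. A sketch of that design choice (not part of the original sources; make_fft_extractor is a hypothetical helper, and whether Model Optimizer's extractor discovery picks up dynamically created classes is an assumption to verify):

def make_fft_extractor(op_name, inverse):
    # Build an extractor class for the given framework op; the two classes
    # above differ only in 'op' and the 'inverse' attribute value.
    class _FFTExtractor(FrontExtractorOp):
        op = op_name
        enabled = True

        @classmethod
        def extract(cls, node):
            FFT.update_node_stat(node, {'inverse': inverse})
            return cls.enabled

    return _FFTExtractor


FFT2DFrontExtractor = make_fft_extractor('FFT2D', inverse=0)
IFFT2DFrontExtractor = make_fft_extractor('IFFT2D', inverse=1)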
@@ -1,27 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#! [fft:operation]
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op


class FFT(Op):
    op = 'FFT'
    enabled = False

    def __init__(self, graph: Graph, attrs: dict):
        super().__init__(graph, {
            'type': self.op,
            'op': self.op,
            'version': 'custom_opset',
            'inverse': None,
            'in_ports_count': 1,
            'out_ports_count': 1,
            'infer': copy_shape_infer
        }, attrs)

    def backend_attrs(self):
        return ['inverse']
#! [fft:operation]
@@ -1,106 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#! [mri_demo:demo]
import numpy as np
import cv2 as cv
import argparse
import time
from openvino.inference_engine import IECore


def kspace_to_image(kspace):
    assert(len(kspace.shape) == 3 and kspace.shape[-1] == 2)
    fft = cv.idft(kspace, flags=cv.DFT_SCALE)
    img = cv.magnitude(fft[:,:,0], fft[:,:,1])
    return cv.normalize(img, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MRI reconstruction demo for network from https://github.com/rmsouza01/Hybrid-CS-Model-MRI (https://arxiv.org/abs/1810.12473)')
    parser.add_argument('-i', '--input', dest='input', help='Path to input .npy file with MRI scan data.')
    parser.add_argument('-p', '--pattern', dest='pattern', help='Path to sampling mask in .npy format.')
    parser.add_argument('-m', '--model', dest='model', help='Path to .xml file of OpenVINO IR.')
    parser.add_argument('-l', '--cpu_extension', dest='cpu_extension', help='Path to extensions library with FFT implementation.')
    parser.add_argument('-d', '--device', dest='device', default='CPU',
                        help='Optional. Specify the target device to infer on; CPU, '
                             'GPU, GNA is acceptable. For non-CPU targets, '
                             'HETERO plugin is used with CPU fallbacks to FFT implementation. '
                             'Default value is CPU')
    args = parser.parse_args()

    xml_path = args.model
    assert(xml_path.endswith('.xml'))
    bin_path = xml_path[:xml_path.rfind('.xml')] + '.bin'

    ie = IECore()
    ie.add_extension(args.cpu_extension, "CPU")

    net = ie.read_network(xml_path, bin_path)

    device = 'CPU' if args.device == 'CPU' else ('HETERO:' + args.device + ',CPU')
    exec_net = ie.load_network(net, device)

    # Hybrid-CS-Model-MRI/Data/stats_fs_unet_norm_20.npy
    stats = np.array([2.20295299e-01, 1.11048916e+03, 4.16997984e+00, 4.71741395e+00], dtype=np.float32)
    # Hybrid-CS-Model-MRI/Data/sampling_mask_20perc.npy
    var_sampling_mask = np.load(args.pattern)  # TODO: can we generate it in runtime?
    print('Sampling ratio:', 1.0 - var_sampling_mask.sum() / var_sampling_mask.size)

    data = np.load(args.input)
    num_slices, height, width = data.shape[0], data.shape[1], data.shape[2]
    pred = np.zeros((num_slices, height, width), dtype=np.uint8)
    data /= np.sqrt(height * width)

    print('Compute...')
    start = time.time()
    for slice_id, kspace in enumerate(data):
        kspace = kspace.copy()

        # Apply sampling
        kspace[var_sampling_mask] = 0
        kspace = (kspace - stats[0]) / stats[1]

        # Forward through network
        input = np.expand_dims(kspace.transpose(2, 0, 1), axis=0)
        outputs = exec_net.infer(inputs={'input_1': input})
        output = next(iter(outputs.values()))
        output = output.reshape(height, width)

        # Save predictions
        pred[slice_id] = cv.normalize(output, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)

    print('Elapsed time: %.1f seconds' % (time.time() - start))

    WIN_NAME = 'MRI reconstruction with OpenVINO'

    slice_id = 0
    def callback(pos):
        global slice_id
        slice_id = pos

        kspace = data[slice_id]
        img = kspace_to_image(kspace)

        kspace[var_sampling_mask] = 0
        masked = kspace_to_image(kspace)

        rec = pred[slice_id]

        # Add a header
        border_size = 20
        render = cv.hconcat((img, masked, rec))
        render = cv.copyMakeBorder(render, border_size, 0, 0, 0, cv.BORDER_CONSTANT, value=255)
        cv.putText(render, 'Original', (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
        cv.putText(render, 'Sampled (PSNR %.1f)' % cv.PSNR(img, masked), (width, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
        cv.putText(render, 'Reconstructed (PSNR %.1f)' % cv.PSNR(img, rec), (width*2, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)

        cv.imshow(WIN_NAME, render)
        cv.waitKey(1)

    cv.namedWindow(WIN_NAME, cv.WINDOW_NORMAL)
    print(num_slices)
    cv.createTrackbar('Slice', WIN_NAME, num_slices // 2, num_slices - 1, callback)
    callback(num_slices // 2)  # Trigger initial visualization
    cv.waitKey()
#! [mri_demo:demo]
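For readers more comfortable with NumPy than with OpenCV's packed two-channel complex format, here is a sketch of what kspace_to_image computes (an assumption-level equivalence: cv.idft with DFT_SCALE corresponds to a scaled inverse 2-D DFT, which np.fft.ifft2 applies by default):

import numpy as np

def kspace_to_image_np(kspace):
    # kspace: (H, W, 2) array packing real/imaginary parts, as in the demo.
    complex_kspace = kspace[..., 0] + 1j * kspace[..., 1]
    img = np.abs(np.fft.ifft2(complex_kspace))
    # Min-max normalize to 0..255, mirroring cv.normalize(..., NORM_MINMAX).
    span = max(img.max() - img.min(), 1e-12)
    return ((img - img.min()) / span * 255.0).astype(np.uint8)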
@@ -2,6 +2,9 @@

@sphinxdirective

.. meta::
   :description: Use the base ov::IAsyncInferRequest class to implement a custom asynchronous inference request in OpenVINO.

Asynchronous Inference Request runs an inference pipeline asynchronously in one or several task executors depending on a device pipeline structure.
OpenVINO Runtime Plugin API provides the base ov::IAsyncInferRequest class:

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn how to build a plugin using CMake and OpenVINO Developer Package.


OpenVINO build infrastructure provides the OpenVINO Developer Package for plugin development.

OpenVINO Developer Package
@@ -9,7 +13,7 @@ OpenVINO Developer Package

To automatically generate the OpenVINO Developer Package, run the ``cmake`` tool during an OpenVINO build:

.. code-block:: bash
.. code-block:: sh

   $ mkdir openvino-release-build
   $ cd openvino-release-build
@@ -48,7 +52,7 @@ Build Plugin using OpenVINO Developer Package

To build a plugin source tree using the OpenVINO Developer Package, run the commands below:

.. code-block:: bash
.. code-block:: sh

   $ mkdir template-plugin-release-build
   $ cd template-plugin-release-build
@@ -72,7 +76,7 @@ To build a plugin and its tests, run the following CMake scripts:

The default values of the ``ENABLE_TESTS``, ``ENABLE_FUNCTIONAL_TESTS`` options are shared via the OpenVINO Developer Package and they are the same as for the main OpenVINO build tree. You can override them during plugin build using the command below:

.. code-block:: bash
.. code-block:: sh

   $ cmake -DENABLE_FUNCTIONAL_TESTS=OFF -DOpenVINODeveloperPackage_DIR=../openvino-release-build ../template-plugin

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Use the ov::CompiledModel class as the base class for a compiled
                 model and to create an arbitrary number of ov::InferRequest objects.


ov::CompiledModel class functionality:

* Compile an ov::Model instance to a backend specific graph representation

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Use the ov::ISyncInferRequest interface as the base class to implement a synchronous inference request in OpenVINO.


``InferRequest`` class functionality:

* Allocate input and output tensors needed for a backend-dependent network inference.

@@ -2,6 +2,12 @@

@sphinxdirective

.. meta::
   :description: Develop and implement independent inference solutions for
                 different devices with the components of plugin architecture
                 of OpenVINO.


.. toctree::
   :maxdepth: 1
   :caption: Converting and Preparing Models
@@ -87,7 +93,7 @@ Detailed Guides

API References
##############

* `OpenVINO Plugin API <https://docs.openvino.ai/nightly/groupov_dev_api.html>`__
* `OpenVINO Transformation API <https://docs.openvino.ai/2022.3/groupie_transformation_api.html>`__
* `OpenVINO Plugin API <https://docs.openvino.ai/2023.0/groupov_dev_api.html>`__
* `OpenVINO Transformation API <https://docs.openvino.ai/2023.0/groupie_transformation_api.html>`__

@endsphinxdirective

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Explore OpenVINO Plugin API, which includes functions and
                 helper classes that simplify the development of new plugins.


An OpenVINO Plugin usually represents a wrapper around a backend. Backends can be:

* OpenCL-like backend (e.g. clDNN library) for GPU devices.

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Use the openvino::funcSharedTests library, which includes
                 a predefined set of functional tests and utilities to verify a plugin.


OpenVINO tests infrastructure provides a predefined set of functional tests and utilities. They are used to verify a plugin using the OpenVINO public API.
All the tests are written in the `Google Test C++ framework <https://github.com/google/googletest>`__.

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Use the ov::Property class to define access rights and
                 specific properties of an OpenVINO plugin.


A plugin can provide its own device-specific properties.

Property Class

@@ -3,6 +3,11 @@

@sphinxdirective

.. meta::
   :description: Learn about the support for quantized models with different
                 precisions and the FakeQuantize operation used to express
                 quantization rules.

One of the features of OpenVINO is the support of quantized models with different precisions: INT8, INT4, etc.
However, it is up to the plugin to define what exact precisions are supported by the particular HW.
All quantized models which can be expressed in IR have a unified representation by means of the *FakeQuantize* operation.
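To make the FakeQuantize representation concrete, here is a NumPy sketch of its arithmetic following the operation specification (illustrative only; broadcasting of the range inputs is simplified to scalars):

import numpy as np

def fake_quantize(x, in_low, in_high, out_low, out_high, levels=256):
    # Saturate to the input range, then snap to one of 'levels' steps and
    # rescale the quantized value into the output range.
    x = np.clip(x, in_low, in_high)
    q = np.round((x - in_low) / (in_high - in_low) * (levels - 1))
    return q / (levels - 1) * (out_high - out_low) + out_low

x = np.linspace(-1.5, 1.5, 7, dtype=np.float32)
print(fake_quantize(x, in_low=-1.0, in_high=1.0, out_low=-1.0, out_high=1.0))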
@@ -53,8 +58,8 @@ Thus we can define:

Quantization specifics and restrictions
#######################################

In general, OpenVINO can represent and execute quantized models from different sources. However, the Post-training Optimization Tool (POT)
is considered the default way to get optimized models. Since the POT supports HW-aware quantization, it means that specific rules can be implemented in it for
In general, OpenVINO can represent and execute quantized models from different sources. However, the Neural Network Compression Framework (NNCF)
is considered the default way to get optimized models. Since the NNCF supports HW-aware quantization, it means that specific rules can be implemented in it for
the particular HW. However, it is reasonable to have compatibility with general-purpose HW such as CPU and GPU and support their quantization schemes.
Below we define these rules as follows:

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Use the ov::RemoteContext class as the base class for a plugin-specific remote context.


ov::RemoteContext class functionality:

* Represents device-specific inference context.

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Use the ov::IRemoteTensor interface as a base class for device-specific remote tensors.


ov::RemoteTensor class functionality:

* Provides an interface to work with device-specific memory.

@@ -2,6 +2,11 @@

@sphinxdirective

.. meta::
   :description: Learn more about plugin development and specific features in
                 OpenVINO: precision transformations and support for quantized
                 models with different precisions.

.. toctree::
   :maxdepth: 1
   :hidden:

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about extra API references required for the development of
                 plugins in OpenVINO.

.. toctree::
   :maxdepth: 1
   :hidden:
@@ -9,9 +13,9 @@
   ../groupov_dev_api
   ../groupie_transformation_api

@endsphinxdirective

The guides below provide extra API references needed for OpenVINO plugin development:

* [OpenVINO Plugin API](@ref ov_dev_api)
* [OpenVINO Transformation API](@ref ie_transformation_api)
* `OpenVINO Plugin API <https://docs.openvino.ai/2023.0/groupov_dev_api.html>`__
* `OpenVINO Transformation API <https://docs.openvino.ai/2023.0/groupie_transformation_api.html>`__

@endsphinxdirective

@@ -2,6 +2,9 @@

@sphinxdirective

.. meta::
   :description: Learn about AvgPoolPrecisionPreserved attribute used only during AvgPool operation.

:ref:`ngraph::AvgPoolPrecisionPreservedAttribute <doxid-classngraph_1_1_avg_pool_precision_preserved_attribute>` class represents the ``AvgPoolPrecisionPreserved`` attribute.

A utility attribute, used only during the ``AvgPool`` operation, for the precision preserved property definition.

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about IntervalsAlignment attribute, which describes a subgraph with the same quantization intervals alignment.


:ref:`ngraph::IntervalsAlignmentAttribute <doxid-classngraph_1_1_intervals_alignment_attribute>` class represents the ``IntervalsAlignment`` attribute.

The attribute defines a subgraph with the same quantization intervals alignment. ``FakeQuantize`` operations are included. The attribute is used by quantization operations.

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about PrecisionPreserved attribute, which describes a precision preserved operation.


:ref:`ngraph::PrecisionPreservedAttribute <doxid-classngraph_1_1_precision_preserved_attribute>` class represents the ``PrecisionPreserved`` attribute.

The attribute defines a precision preserved operation. If the attribute is absent, then an operation is not precision preserved.

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about Precisions attribute, which describes the precision required for an input/output port or an operation.


:ref:`ngraph::PrecisionsAttribute <doxid-classngraph_1_1_precisions_attribute>` class represents the ``Precisions`` attribute.

The attribute defines the precision which is required for an input/output port or an operation.

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about QuantizationAlignment attribute, which describes a subgraph with the same quantization alignment.


:ref:`ngraph::QuantizationAlignmentAttribute <doxid-classngraph_1_1_quantization_alignment_attribute>` class represents the ``QuantizationAlignment`` attribute.

The attribute defines a subgraph with the same quantization alignment. ``FakeQuantize`` operations are not included. The attribute is used by quantization operations.

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about QuantizationGranularity attribute, which describes quantization granularity of operation inputs.


ngraph::QuantizationAttribute class represents the ``QuantizationGranularity`` attribute.

The attribute defines quantization granularity of operation inputs.

@@ -2,6 +2,9 @@

@sphinxdirective

.. meta::
   :description: Learn about low precision transformations used to infer a quantized model in low precision with the maximum performance on Intel CPU, GPU, and ARM platforms.

.. toctree::
   :maxdepth: 1
   :caption: Low Precision Transformations
@@ -308,13 +311,13 @@ This step is optional. It modifies the nGraph function to a device-specific oper
Result model overview
#####################

Let's explore quantized `TensorFlow implementation of ResNet-50 <https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf>`__ model. Use `Model Downloader <https://docs.openvino.ai/2022.3/omz_tools_downloader.html>`__ tool to download the ``fp16`` model from `OpenVINO™ Toolkit - Open Model Zoo repository <https://github.com/openvinotoolkit/open_model_zoo>`__:
Let's explore quantized `TensorFlow implementation of ResNet-50 <https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf>`__ model. Use :doc:`Model Downloader <omz_tools_downloader>` tool to download the ``fp16`` model from `OpenVINO™ Toolkit - Open Model Zoo repository <https://github.com/openvinotoolkit/open_model_zoo>`__:

.. code-block:: sh

   omz_downloader --name resnet-50-tf --precisions FP16-INT8

After that you should quantize the model by the `Model Quantizer <https://docs.openvino.ai/2022.3/omz_tools_downloader.html>`__ tool.
After that you should quantize the model by the :doc:`Model Quantizer <omz_tools_downloader>` tool.

.. code-block:: sh

@@ -337,7 +340,7 @@ Results analysis

Result model depends on different factors:

* The original model quantization possibility and quantization quality. For some models, some operations are not possible to be quantized by the POT and NNCF tools. In this case ``FakeQuantize`` operations are absent before these operations and they will be inferred in original precision.
* The original model quantization possibility and quantization quality. For some models, some operations are not possible to be quantized by the NNCF tool. In this case ``FakeQuantize`` operations are absent before these operations and they will be inferred in original precision.
* LPT customization and plugin supported operations. If a plugin doesn't support INT8 inference for some operation then the corresponding LPT transformation should be disabled and the operation will be inferred in original precision.

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Check the lists of attributes created or used by model transformations.


.. toctree::
   :maxdepth: 1
   :caption: Attributes

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about optional Prerequisites transformations that
                 prepare a model before applying other low precision transformations.

Prerequisites transformations are optional. The transformations prepare a model before running other low precision transformations. The transformations do not operate with dequantization operations or update precisions. Prerequisites transformations include:

* :doc:`PullReshapeThroughDequantization <openvino_docs_OV_UG_lpt_PullReshapeThroughDequantization>`

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn about markup transformations, which are used to create
                 attributes for input and output ports and operations during runtime.

This step defines the optimal ``FakeQuantize`` decomposition precisions for the best inference performance via operations markup with runtime attribute instances. Attributes are created for input and output ports and operations. Transformations do not change the operation output port precisions. A model markup low precision logic is decomposed and implemented into the following common markup transformations. The order of transformations is important:

1. :doc:`MarkupBias <openvino_docs_OV_UG_lpt_MarkupBias>`

@@ -2,6 +2,12 @@

@sphinxdirective

.. meta::
   :description: Learn about main transformations, which are mostly low
                 precision transformations that handle decomposition and
                 dequantization operations.


Main transformations are the majority of low precision transformations. Transformations operate with dequantization operations. Main transformations include:

* :doc:`AddTransformation <openvino_docs_OV_UG_lpt_AddTransformation>`

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Check the list of transformations used to clean up the
                 resulting model to avoid unhandled dequantization operations.

* :doc:`EliminateFakeQuantizeTransformation <openvino_docs_OV_UG_lpt_EliminateFakeQuantizeTransformation>`
* :doc:`FoldConvertTransformation <openvino_docs_OV_UG_lpt_FoldConvertTransformation>`
* :doc:`FoldFakeQuantizeTransformation <openvino_docs_OV_UG_lpt_FoldFakeQuantizeTransformation>`

@@ -2,8 +2,12 @@

@sphinxdirective

.. meta::
   :description: Learn about legal information and policies related to the use
                 of Intel® Distribution of OpenVINO™ toolkit.

Performance varies by use, configuration and other factors. Learn more at [www.intel.com/PerformanceIndex](https://www.intel.com/PerformanceIndex).

Performance varies by use, configuration and other factors. Learn more at `www.intel.com/PerformanceIndex <https://www.intel.com/PerformanceIndex>`__.

Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure.

@@ -1,4 +1,4 @@
# Model Optimizer Usage {#openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide}
# Convert a Model {#openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide}

@sphinxdirective

@@ -8,90 +8,144 @@
   :maxdepth: 1
   :hidden:

   openvino_docs_model_inputs_outputs
   openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model
   openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model
   openvino_docs_MO_DG_Additional_Optimization_Use_Cases
   openvino_docs_MO_DG_FP16_Compression
   openvino_docs_MO_DG_Python_API
   openvino_docs_MO_DG_prepare_model_Model_Optimizer_FAQ

.. meta::
   :description: Model conversion (MO) furthers the transition between training and
                 deployment environments; it adjusts deep learning models for
                 optimal execution on target devices.


Model Optimizer is a cross-platform command-line tool that facilitates the transition between training and deployment environments, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices.
To convert a model to the OpenVINO model format (``ov.Model``), you can use the following command:

To use it, you need a pre-trained deep learning model in one of the supported formats: TensorFlow, PyTorch, PaddlePaddle, TensorFlow Lite, MXNet, Caffe, Kaldi, or ONNX. Model Optimizer converts the model to the OpenVINO Intermediate Representation format (IR), which you can infer later with :doc:`OpenVINO™ Runtime <openvino_docs_OV_UG_OV_Runtime_User_Guide>`.
.. tab-set::

Note that Model Optimizer does not infer models.
   .. tab-item:: Python
      :sync: py

The figure below illustrates the typical workflow for deploying a trained deep learning model:
      .. code-block:: py
         :force:

.. image:: _static/images/BASIC_FLOW_MO_simplified.svg
         from openvino.tools.mo import convert_model
         ov_model = convert_model(INPUT_MODEL)

where IR is a pair of files describing the model:
   .. tab-item:: CLI
      :sync: cli

* ``.xml`` - Describes the network topology.
      .. code-block:: sh

* ``.bin`` - Contains the weights and biases binary data.

The OpenVINO IR can be additionally optimized for inference by :doc:`Post-training optimization <pot_introduction>` that applies post-training quantization methods.

How to Run Model Optimizer
##########################

To convert a model to IR, you can run Model Optimizer by using the following command:

.. code-block:: sh

   mo --input_model INPUT_MODEL
         mo --input_model INPUT_MODEL

If the out-of-the-box conversion (only the ``--input_model`` parameter is specified) is not successful, use the parameters mentioned below to override input shapes and cut the model:
If the out-of-the-box conversion (only the ``input_model`` parameter is specified) is not successful, use the parameters mentioned below to override input shapes and cut the model:

- Model Optimizer provides two parameters to override original input shapes for model conversion: ``--input`` and ``--input_shape``.
  For more information about these parameters, refer to the :doc:`Setting Input Shapes <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>` guide.
- model conversion API provides two parameters to override original input shapes for model conversion: ``input`` and ``input_shape``.
  For more information about these parameters, refer to the :doc:`Setting Input Shapes <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>` guide.

- To cut off unwanted parts of a model (such as unsupported operations and training sub-graphs),
  use the ``--input`` and ``--output`` parameters to define new inputs and outputs of the converted model.
  For a more detailed description, refer to the :doc:`Cutting Off Parts of a Model <openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model>` guide.
  use the ``input`` and ``output`` parameters to define new inputs and outputs of the converted model.
  For a more detailed description, refer to the :doc:`Cutting Off Parts of a Model <openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model>` guide.

You can also insert additional input pre-processing sub-graphs into the converted model by using
the ``--mean_values``, ``--scale_values``, ``--layout``, and other parameters described
the ``mean_values``, ``scale_values``, ``layout``, and other parameters described
in the :doc:`Embedding Preprocessing Computation <openvino_docs_MO_DG_Additional_Optimization_Use_Cases>` article.

The ``--compress_to_fp16`` compression parameter in Model Optimizer allows generating IR with constants (for example, weights for convolutions and matrix multiplications) compressed to ``FP16`` data type. For more details, refer to the :doc:`Compression of a Model to FP16 <openvino_docs_MO_DG_FP16_Compression>` guide.
The ``compress_to_fp16`` compression parameter in ``mo`` command-line tool allows generating IR with constants (for example, weights for convolutions and matrix multiplications) compressed to ``FP16`` data type. For more details, refer to the :doc:`Compression of a Model to FP16 <openvino_docs_MO_DG_FP16_Compression>` guide.

To get the full list of conversion parameters available in Model Optimizer, run the following command:
To get the full list of conversion parameters, run the following command:

.. code-block:: sh
.. tab-set::

   mo --help
   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model(help=True)

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --help

Examples of CLI Commands
########################
Examples of model conversion parameters
#######################################

Below is a list of separate examples for different frameworks and Model Optimizer parameters:
Below is a list of separate examples for different frameworks and model conversion parameters:

1. Launch Model Optimizer for a TensorFlow MobileNet model in the binary protobuf format:
1. Launch model conversion for a TensorFlow MobileNet model in the binary protobuf format:

   .. code-block:: sh
   .. tab-set::

      mo --input_model MobileNet.pb
      .. tab-item:: Python
         :sync: py

         .. code-block:: py
            :force:

            from openvino.tools.mo import convert_model
            ov_model = convert_model("MobileNet.pb")

      .. tab-item:: CLI
         :sync: cli

         .. code-block:: sh

            mo --input_model MobileNet.pb


   Launch Model Optimizer for a TensorFlow BERT model in the SavedModel format with three inputs. Specify input shapes explicitly where the batch size and the sequence length equal 2 and 30 respectively:
   Launch model conversion for a TensorFlow BERT model in the SavedModel format with three inputs. Specify input shapes explicitly where the batch size and the sequence length equal 2 and 30 respectively:

   .. code-block:: sh
   .. tab-set::

      mo --saved_model_dir BERT --input mask,word_ids,type_ids --input_shape [2,30],[2,30],[2,30]
      .. tab-item:: Python
         :sync: py

   For more information, refer to the :doc:`Converting a TensorFlow Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_TensorFlow>` guide.
         .. code-block:: py
            :force:

2. Launch Model Optimizer for an ONNX OCR model and specify new output explicitly:
            from openvino.tools.mo import convert_model
            ov_model = convert_model("BERT", input_shape=[[2,30],[2,30],[2,30]])

   .. code-block:: sh
      .. tab-item:: CLI
         :sync: cli

      mo --input_model ocr.onnx --output probabilities
         .. code-block:: sh

            mo --saved_model_dir BERT --input_shape [2,30],[2,30],[2,30]


   For more information, refer to the :doc:`Converting a TensorFlow Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_TensorFlow>` guide.

2. Launch model conversion for an ONNX OCR model and specify new output explicitly:

   .. tab-set::

      .. tab-item:: Python
         :sync: py

         .. code-block:: py
            :force:

            from openvino.tools.mo import convert_model
            ov_model = convert_model("ocr.onnx", output="probabilities")

      .. tab-item:: CLI
         :sync: cli

         .. code-block:: sh

            mo --input_model ocr.onnx --output probabilities


   For more information, refer to the :doc:`Converting an ONNX Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_ONNX>` guide.
@@ -100,43 +154,31 @@ Below is a list of separate examples for different frameworks and Model Optimize

   PyTorch models must be exported to the ONNX format before conversion into IR. More information can be found in :doc:`Converting a PyTorch Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_PyTorch>`.

3. Launch Model Optimizer for a PaddlePaddle UNet model and apply mean-scale normalization to the input:
3. Launch model conversion for a PaddlePaddle UNet model and apply mean-scale normalization to the input:

   .. code-block:: sh
   .. tab-set::

      mo --input_model unet.pdmodel --mean_values [123,117,104] --scale 255
      .. tab-item:: Python
         :sync: py

         .. code-block:: py
            :force:

            from openvino.tools.mo import convert_model
            ov_model = convert_model("unet.pdmodel", mean_values=[123,117,104], scale=255)

      .. tab-item:: CLI
         :sync: cli

         .. code-block:: sh

            mo --input_model unet.pdmodel --mean_values [123,117,104] --scale 255


   For more information, refer to the :doc:`Converting a PaddlePaddle Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Paddle>` guide.

4. Launch Model Optimizer for an Apache MXNet SSD Inception V3 model and specify first-channel layout for the input:

   .. code-block:: sh

      mo --input_model ssd_inception_v3-0000.params --layout NCHW


   For more information, refer to the :doc:`Converting an Apache MXNet Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_MxNet>` guide.

5. Launch Model Optimizer for a Caffe AlexNet model with input channels in the RGB format which needs to be reversed:

   .. code-block:: sh

      mo --input_model alexnet.caffemodel --reverse_input_channels


   For more information, refer to the :doc:`Converting a Caffe Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Caffe>` guide.

6. Launch Model Optimizer for a Kaldi LibriSpeech nnet2 model:

   .. code-block:: sh

      mo --input_model librispeech_nnet2.mdl --input_shape [1,140]


   For more information, refer to the :doc:`Converting a Kaldi Model <openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Kaldi>` guide.

- To get conversion recipes for specific TensorFlow, ONNX, PyTorch, Apache MXNet, and Kaldi models, refer to the :doc:`Model Conversion Tutorials <openvino_docs_MO_DG_prepare_model_convert_model_tutorials>`.
- To get conversion recipes for specific TensorFlow, ONNX, and PyTorch models, refer to the :doc:`Model Conversion Tutorials <openvino_docs_MO_DG_prepare_model_convert_model_tutorials>`.
- For more information about IR, see :doc:`Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™ <openvino_docs_MO_DG_IR_and_opsets>`.
- For more information about support of neural network models trained with various frameworks, see :doc:`OpenVINO Extensibility Mechanism <openvino_docs_Extensibility_UG_Intro>`

@endsphinxdirective

@@ -2,6 +2,10 @@

@sphinxdirective

.. meta::
   :description: Learn the essentials of representing deep learning models in OpenVINO
                 IR format and the use of supported operation sets.

.. toctree::
   :maxdepth: 1
   :hidden:
@@ -9,7 +13,7 @@
   openvino_docs_ops_opset
   openvino_docs_operations_specifications
   openvino_docs_ops_broadcast_rules


This article provides essential information on the format used for representation of deep learning models in OpenVINO toolkit and supported operation sets.

@@ -2,149 +2,243 @@

@sphinxdirective

Input data for inference can be different from the training dataset and requires
additional preprocessing before inference. To accelerate the whole pipeline including
preprocessing and inference, Model Optimizer provides special parameters such as ``--mean_values``,
Input data for inference can be different from the training dataset and requires
additional preprocessing before inference. To accelerate the whole pipeline including
preprocessing and inference, model conversion API provides special parameters such as ``mean_values``,
``scale_values``, ``reverse_input_channels``, and ``layout``.

``--scale_values``, ``--reverse_input_channels``, and ``--layout``. Based on these
parameters, Model Optimizer generates OpenVINO IR with additionally inserted sub-graphs
to perform the defined preprocessing. This preprocessing block can perform mean-scale
normalization of input data, reverting data along channel dimension, and changing
the data layout. See the following sections for details on the parameters, or the
:doc:`Overview of Preprocessing API <openvino_docs_OV_UG_Preprocessing_Overview>`
Based on these parameters, model conversion API generates OpenVINO IR with additionally inserted sub-graphs
to perform the defined preprocessing. This preprocessing block can perform mean-scale
normalization of input data, reverting data along channel dimension, and changing
the data layout. See the following sections for details on the parameters, or the
:doc:`Overview of Preprocessing API <openvino_docs_OV_UG_Preprocessing_Overview>`
for the same functionality in OpenVINO Runtime.

Specifying Layout
#################

You may need to set input layouts, as it is required by some preprocessing, for
You may need to set input layouts, as it is required by some preprocessing, for
example, setting a batch, applying mean or scales, and reversing input channels (BGR<->RGB).

Layout defines the meaning of dimensions in shape and can be specified for both
inputs and outputs. Some preprocessing requires setting input layouts, for example,
Layout defines the meaning of dimensions in shape and can be specified for both
inputs and outputs. Some preprocessing requires setting input layouts, for example,
setting a batch, applying mean or scales, and reversing input channels (BGR<->RGB).

For the layout syntax, check the :doc:`Layout API overview <openvino_docs_OV_UG_Layout_Overview>`.
To specify the layout, you can use the ``--layout`` option followed by the layout value.
To specify the layout, you can use the ``layout`` option followed by the layout value.

For example, the following command specifies the ``NHWC`` layout for a TensorFlow
For example, the following command specifies the ``NHWC`` layout for a TensorFlow
``nasnet_large`` model that was exported to the ONNX format:

.. code-block:: sh

   mo --input_model tf_nasnet_large.onnx --layout nhwc
.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model("tf_nasnet_large.onnx", layout="nhwc")

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --input_model tf_nasnet_large.onnx --layout nhwc


Additionally, if a model has more than one input or needs both input and output
Additionally, if a model has more than one input or needs both input and output
layouts specified, you need to provide the name of each input or output to apply the layout.

For example, the following command specifies the layout for an ONNX ``Yolo v3 Tiny``
model with its first input ``input_1`` in ``NCHW`` layout and second input ``image_shape``
For example, the following command specifies the layout for an ONNX ``Yolo v3 Tiny``
model with its first input ``input_1`` in ``NCHW`` layout and second input ``image_shape``
having two dimensions: batch and size of the image expressed as the ``N?`` layout:

.. code-block:: sh
.. tab-set::

   mo --input_model yolov3-tiny.onnx --layout input_1(nchw),image_shape(n?)
   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model("yolov3-tiny.onnx", layout={"input_1": "nchw", "image_shape": "n?"})

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --input_model yolov3-tiny.onnx --layout input_1(nchw),image_shape(n?)

Changing Model Layout
#####################

Changing the model layout may be necessary if it differs from the one presented by input data.
Use either ``--layout`` or ``--source_layout`` with ``--target_layout`` to change the layout.
Changing the model layout may be necessary if it differs from the one presented by input data.
Use either ``layout`` or ``source_layout`` with ``target_layout`` to change the layout.

For example, for the same ``nasnet_large`` model mentioned previously, you can use
For example, for the same ``nasnet_large`` model mentioned previously, you can use
the following commands to provide data in the ``NCHW`` layout:

.. code-block:: sh

   mo --input_model tf_nasnet_large.onnx --source_layout nhwc --target_layout nchw
   mo --input_model tf_nasnet_large.onnx --layout "nhwc->nchw"
.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model("tf_nasnet_large.onnx", source_layout="nhwc", target_layout="nchw")

         ov_model = convert_model("tf_nasnet_large.onnx", layout="nhwc->nchw")

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --input_model tf_nasnet_large.onnx --source_layout nhwc --target_layout nchw

         mo --input_model tf_nasnet_large.onnx --layout "nhwc->nchw"


Again, if a model has more than one input or needs both input and output layouts
Again, if a model has more than one input or needs both input and output layouts
specified, you need to provide the name of each input or output to apply the layout.

For example, to provide data in the ``NHWC`` layout for the `Yolo v3 Tiny` model
For example, to provide data in the ``NHWC`` layout for the `Yolo v3 Tiny` model
mentioned earlier, use the following commands:

.. code-block:: sh
.. tab-set::

   mo --input_model yolov3-tiny.onnx --source_layout "input_1(nchw),image_shape(n?)" --target_layout "input_1(nhwc)"
   mo --input_model yolov3-tiny.onnx --layout "input_1(nchw->nhwc),image_shape(n?)"
   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model("yolov3-tiny.onnx", source_layout={"input_1": "nchw", "image_shape": "n?"}, target_layout={"input_1": "nhwc"})

         ov_model = convert_model("yolov3-tiny.onnx", layout={"input_1": "nchw->nhwc", "image_shape": "n?"})

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --input_model yolov3-tiny.onnx --source_layout "input_1(nchw),image_shape(n?)" --target_layout "input_1(nhwc)"

         mo --input_model yolov3-tiny.onnx --layout "input_1(nchw->nhwc),image_shape(n?)"

Specifying Mean and Scale Values
################################

Neural network models are usually trained with the normalized input data. This
means that the input data values are converted to be in a specific range, for example,
``[0, 1]`` or ``[-1, 1]``. Sometimes, the mean values (mean images) are subtracted
Neural network models are usually trained with the normalized input data. This
means that the input data values are converted to be in a specific range, for example,
``[0, 1]`` or ``[-1, 1]``. Sometimes, the mean values (mean images) are subtracted
from the input data values as part of the preprocessing.

There are two cases of how the input data preprocessing is implemented.

* The input preprocessing operations are a part of a model.

  In this case, the application does not perform a separate preprocessing step:
  everything is embedded into the model itself. Model Optimizer will generate the
  OpenVINO IR format with required preprocessing operations, and no ``mean`` and
  In this case, the application does not perform a separate preprocessing step:
  everything is embedded into the model itself. ``convert_model()`` will generate the
  ov.Model with required preprocessing operations, and no ``mean`` and
  ``scale`` parameters are required.
* The input preprocessing operations are not a part of a model and the preprocessing
* The input preprocessing operations are not a part of a model and the preprocessing
  is performed within the application which feeds the model with input data.

  In this case, information about mean/scale values should be provided to Model
  Optimizer to embed it to the generated OpenVINO IR format.
  In this case, information about mean/scale values should be provided to ``convert_model()``
  to embed it to the generated ``ov.Model``.

Model Optimizer provides command-line parameters to specify the values: ``--mean_values``,
``--scale_values``, ``--scale``. Using these parameters, Model Optimizer embeds the
corresponding preprocessing block for mean-value normalization of the input data
and optimizes this block so that the preprocessing takes negligible time for inference.
Model conversion API represented by ``convert_model()`` provides command-line parameters
to specify the values: ``mean_values``, ``scale_values``, ``scale``. Using these parameters,
model conversion API embeds the corresponding preprocessing block for mean-value
normalization of the input data and optimizes this block so that the preprocessing
takes negligible time for inference.

For example, the following command runs Model Optimizer for the PaddlePaddle UNet
For example, the following command runs model conversion for the PaddlePaddle UNet
model and applies mean-scale normalization to the input data:

.. code-block:: sh
.. tab-set::

   mo --input_model unet.pdmodel --mean_values [123,117,104] --scale 255
   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model("unet.pdmodel", mean_values=[123,117,104], scale=255)

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --input_model unet.pdmodel --mean_values [123,117,104] --scale 255

Reversing Input Channels
########################

Sometimes, input images for your application can be of the RGB (or BGR) format
and the model is trained on images of the BGR (or RGB) format, which is in the
opposite order of color channels. In this case, it is important to preprocess the
Sometimes, input images for your application can be of the RGB (or BGR) format
and the model is trained on images of the BGR (or RGB) format, which is in the
opposite order of color channels. In this case, it is important to preprocess the
input images by reverting the color channels before inference.

To embed this preprocessing step into OpenVINO IR, Model Optimizer provides the
``--reverse_input_channels`` command-line parameter to shuffle the color channels.
To embed this preprocessing step into ``ov.Model``, model conversion API provides the
``reverse_input_channels`` command-line parameter to shuffle the color channels.

The ``--reverse_input_channels`` parameter can be used to preprocess the model
The ``reverse_input_channels`` parameter can be used to preprocess the model
input in the following cases:

* Only one dimension in the input shape has a size equal to ``3``.
* One dimension has an undefined size and is marked as ``C`` channel using ``layout`` parameters.

Using the ``--reverse_input_channels`` parameter, Model Optimizer embeds the corresponding
preprocessing block for reverting the input data along channel dimension and optimizes
Using the ``reverse_input_channels`` parameter, model conversion API embeds the corresponding
preprocessing block for reverting the input data along channel dimension and optimizes
this block so that the preprocessing takes only negligible time for inference.

For example, the following command launches Model Optimizer for the TensorFlow AlexNet
For example, the following command launches model conversion for the TensorFlow AlexNet
model and embeds the ``reverse_input_channels`` preprocessing block into OpenVINO IR:

.. code-block:: sh

   mo --input_model alexnet.pb --reverse_input_channels
.. tab-set::

   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model("alexnet.pb", reverse_input_channels=True)

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --input_model alexnet.pb --reverse_input_channels


.. note::

   If both mean and scale values are specified, the mean is subtracted first and
   then the scale is applied regardless of the order of options in the command-line.
   Input values are *divided* by the scale value(s). If the ``--reverse_input_channels``
   option is also used, ``reverse_input_channels`` will be applied first, then ``mean``
   and after that ``scale``. The data flow in the model looks as follows:
   If both mean and scale values are specified, the mean is subtracted first and
   then the scale is applied regardless of the order of options in the command-line.
   Input values are *divided* by the scale value(s). If the ``reverse_input_channels``
   option is also used, ``reverse_input_channels`` will be applied first, then ``mean``
   and after that ``scale``. The data flow in the model looks as follows:
   ``Parameter -> ReverseInputChannels -> Mean apply -> Scale apply -> the original body of the model``.
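The ordering stated in the note can be written out directly. A NumPy sketch of the resulting data flow (illustrative only; the mean, scale, and input values are made up):

import numpy as np

x_bgr = np.random.rand(1, 3, 224, 224).astype(np.float32)               # Parameter
mean = np.array([123.0, 117.0, 104.0], np.float32).reshape(1, 3, 1, 1)
scale = np.float32(255.0)

x = x_bgr[:, ::-1, :, :]   # ReverseInputChannels (BGR <-> RGB) is applied first
x = x - mean               # then the mean is subtracted
x = x / scale              # then inputs are divided by the scale value(s)
# 'x' is what the original body of the model receives.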
Additional Resources
@@ -153,4 +247,3 @@ Additional Resources

* :doc:`Overview of Preprocessing API <openvino_docs_OV_UG_Preprocessing_Overview>`

@endsphinxdirective

@@ -2,16 +2,30 @@

@sphinxdirective

Model Optimizer can convert all floating-point weights to the ``FP16`` data type.
Optionally, all relevant floating-point weights can be compressed to ``FP16`` data type during model conversion.
It results in creating a "compressed ``FP16`` model", which occupies about half of
the original space in the file system. The compression may introduce a drop in accuracy.
the original space in the file system. The compression may introduce a minor drop in accuracy,
but it is negligible for most models.

To compress the model, use the `--compress_to_fp16` or `--compress_to_fp16=True` option:
To compress the model, use the ``compress_to_fp16=True`` option:

.. code-block:: sh
.. tab-set::

   mo --input_model INPUT_MODEL --compress_to_fp16
   .. tab-item:: Python
      :sync: py

      .. code-block:: py
         :force:

         from openvino.tools.mo import convert_model
         ov_model = convert_model(INPUT_MODEL, compress_to_fp16=True)

   .. tab-item:: CLI
      :sync: cli

      .. code-block:: sh

         mo --input_model INPUT_MODEL --compress_to_fp16=True


For details on how plugins handle compressed ``FP16`` models, see
@@ -26,9 +40,9 @@ For details on how plugins handle compressed ``FP16`` models, see

.. note::

   Some large models (larger than a few GB) when compressed to ``FP16`` may consume an enormous amount of RAM on the loading
   phase of the inference. In case you are facing such problems, please try to convert them without compression:
   `mo --input_model INPUT_MODEL --compress_to_fp16=False`
   Some large models (larger than a few GB) when compressed to ``FP16`` may consume an overly large amount of RAM on the loading
   phase of the inference. If that is the case for your model, try to convert it without compression:
   ``convert_model(INPUT_MODEL, compress_to_fp16=False)`` or ``convert_model(INPUT_MODEL)``

@endsphinxdirective

Some files were not shown because too many files have changed in this diff.