Compare commits
79 Commits
dev-cpu/20
...
releases/2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a6d0a3c71b | ||
|
|
d8fc645466 | ||
|
|
7eb0dfd08c | ||
|
|
39aba80957 | ||
|
|
c62251c89a | ||
|
|
e1865fd8e0 | ||
|
|
fe4cfc1b43 | ||
|
|
f45fb8f7c8 | ||
|
|
7a6df77198 | ||
|
|
d1b48740cd | ||
|
|
9287ae5d93 | ||
|
|
b2200941ba | ||
|
|
9996a58fc6 | ||
|
|
4192d8879d | ||
|
|
cdb9bec721 | ||
|
|
baf4b23d9a | ||
|
|
43fa3183dc | ||
|
|
63ca94179e | ||
|
|
8723d1cc7e | ||
|
|
cbfb8a1678 | ||
|
|
1ed828982e | ||
|
|
c670e4cc2b | ||
|
|
e124d4f5df | ||
|
|
09462af266 | ||
|
|
0b08b9a14c | ||
|
|
a98059daea | ||
|
|
27b5722944 | ||
|
|
c1fc602c7c | ||
|
|
e65fc4c849 | ||
|
|
994b06b744 | ||
|
|
6cf81ad6a3 | ||
|
|
a7f1710edf | ||
|
|
e20e828a1f | ||
|
|
5835cac31c | ||
|
|
b4b5f3333e | ||
|
|
a423a2b802 | ||
|
|
8890e2906a | ||
|
|
e4fcfa74c2 | ||
|
|
6474d2c94e | ||
|
|
bf11b965e6 | ||
|
|
af5b31c413 | ||
|
|
1d3fab80a8 | ||
|
|
5891a79249 | ||
|
|
c790aa85cb | ||
|
|
f756d55dc6 | ||
|
|
81ffb7a3bc | ||
|
|
205e6ba573 | ||
|
|
b8d23e04f1 | ||
|
|
a43369c152 | ||
|
|
0b4b627e02 | ||
|
|
76c82ae844 | ||
|
|
939c420435 | ||
|
|
7d7af2a9bf | ||
|
|
829c8c98c5 | ||
|
|
5f19d22323 | ||
|
|
cb635050fb | ||
|
|
68863478d3 | ||
|
|
8dacbf789d | ||
|
|
8f9c368aae | ||
|
|
5f755d5e4a | ||
|
|
22a8e75bb7 | ||
|
|
d44cad85ed | ||
|
|
0047db7377 | ||
|
|
4b677dd5b3 | ||
|
|
390ca9f45f | ||
|
|
5f4f27cd73 | ||
|
|
617160492f | ||
|
|
8308b1e122 | ||
|
|
07322aa5aa | ||
|
|
d64c5d8c7c | ||
|
|
c31129c7cd | ||
|
|
db05e54483 | ||
|
|
c80e70a917 | ||
|
|
4d6b43d76f | ||
|
|
cdd4f56ba1 | ||
|
|
3c75a4fd16 | ||
|
|
6354ac6b5d | ||
|
|
b51bc06077 | ||
|
|
93320f4fd6 |
@@ -13,7 +13,7 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
jobs:
|
||||
- job: android_arm64
|
||||
@@ -110,7 +110,6 @@ jobs:
|
||||
-DANDROID_ABI=$(ANDROID_ABI_CONFIG)
|
||||
-DANDROID_STL=c++_shared
|
||||
-DANDROID_PLATFORM=$(ANDROID_SDK_VERSION)
|
||||
-DENABLE_OPENCV=OFF
|
||||
-DENABLE_TESTS=ON
|
||||
-DENABLE_SAMPLES=ON
|
||||
-DENABLE_INTEL_MYRIAD=OFF
|
||||
|
||||
@@ -13,13 +13,13 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
- repository: testdata
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
jobs:
|
||||
- job: Lin
|
||||
@@ -111,8 +111,7 @@ jobs:
|
||||
set -e
|
||||
$(REPO_DIR)/install_build_dependencies.sh
|
||||
# Move jdk into contrib
|
||||
# 'clang' compiler is to check that samples can be built using it
|
||||
sudo apt --assume-yes install openjdk-11-jdk clang
|
||||
sudo apt --assume-yes install openjdk-11-jdk
|
||||
# For opencv-python: python3-setuptools and pip upgrade
|
||||
python3 -m pip install --upgrade pip
|
||||
python3 -m pip install -r $(REPO_DIR)/src/bindings/python/src/compatibility/openvino/requirements.txt
|
||||
@@ -158,6 +157,7 @@ jobs:
|
||||
-DENABLE_FASTER_BUILD=ON
|
||||
-DENABLE_STRICT_DEPENDENCIES=OFF
|
||||
-DENABLE_REQUIREMENTS_INSTALL=OFF
|
||||
-DENABLE_OPENCV=ON
|
||||
-DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache
|
||||
@@ -226,14 +226,6 @@ jobs:
|
||||
displayName: 'Build cpp samples'
|
||||
continueOnError: false
|
||||
|
||||
- script: |
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
$(INSTALL_DIR)/samples/cpp/build_samples.sh -i $(INSTALL_DIR)
|
||||
workingDirectory: $(BUILD_SAMPLES_DIR)
|
||||
displayName: 'Build cpp samples - clang'
|
||||
continueOnError: false
|
||||
|
||||
- script: $(INSTALL_DIR)/samples/c/build_samples.sh -i $(INSTALL_DIR)
|
||||
workingDirectory: $(BUILD_SAMPLES_DIR)
|
||||
displayName: 'Build c samples'
|
||||
@@ -263,6 +255,7 @@ jobs:
|
||||
export MO_ROOT=$(INSTALL_DIR)/tools/mo
|
||||
. $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/tests/mo/unit_tests --junitxml=TEST-ModelOptimizer.xml
|
||||
displayName: 'Model Optimizer UT'
|
||||
condition: false
|
||||
continueOnError: false
|
||||
|
||||
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
|
||||
@@ -299,10 +292,6 @@ jobs:
|
||||
displayName: 'VPU UT'
|
||||
continueOnError: false
|
||||
|
||||
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/XLinkTests --gtest_output=xml:TEST-XLinkTests.xml
|
||||
displayName: 'XLink Tests'
|
||||
continueOnError: false
|
||||
|
||||
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieMultiPluginUnitTests --gtest_output=xml:TEST-ieMultiPluginUnitTests.xml
|
||||
displayName: 'MULTI UT'
|
||||
continueOnError: false
|
||||
|
||||
@@ -13,7 +13,7 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
jobs:
|
||||
- job: linux_arm64
|
||||
@@ -127,7 +127,6 @@ jobs:
|
||||
-GNinja
|
||||
-DVERBOSE_BUILD=ON
|
||||
-DOpenCV_DIR=$(INSTALL_OPENCV)/cmake
|
||||
-DENABLE_OPENCV=OFF
|
||||
-DPYTHON_INCLUDE_DIRS=$(INSTALL_PYTHON)/include/python3.8
|
||||
-DPYTHON_LIBRARY=$(INSTALL_PYTHON)/lib/libpython3.8.so
|
||||
-DENABLE_PYTHON=ON
|
||||
|
||||
@@ -4,7 +4,7 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
jobs:
|
||||
- job: Lin
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
# type: github
|
||||
# endpoint: openvinotoolkit
|
||||
# name: openvinotoolkit/testdata
|
||||
# ref: master
|
||||
|
||||
jobs:
|
||||
- job: Lin_lohika
|
||||
|
||||
@@ -95,7 +95,6 @@ jobs:
|
||||
-DPYTHON_EXECUTABLE=/usr/bin/python3.8
|
||||
-DENABLE_INTEL_MYRIAD_COMMON=OFF
|
||||
-DENABLE_INTEL_GNA=OFF
|
||||
-DENABLE_OPENCV=OFF
|
||||
-DENABLE_CPPLINT=OFF
|
||||
-DENABLE_TESTS=OFF
|
||||
-DENABLE_INTEL_CPU=ON
|
||||
|
||||
@@ -13,13 +13,13 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
- repository: testdata
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
jobs:
|
||||
- job: Mac
|
||||
@@ -145,7 +145,6 @@ jobs:
|
||||
set -e
|
||||
mkdir -p $(INSTALL_DIR)/opencv/
|
||||
cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake
|
||||
cp -R $(REPO_DIR)/temp/opencv_4.5.2_osx/opencv/* $(INSTALL_DIR)/opencv/
|
||||
workingDirectory: $(BUILD_DIR)
|
||||
displayName: 'Install tests'
|
||||
|
||||
|
||||
@@ -13,13 +13,13 @@ resources:
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/openvino_contrib
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
- repository: testdata
|
||||
type: github
|
||||
endpoint: openvinotoolkit
|
||||
name: openvinotoolkit/testdata
|
||||
ref: master
|
||||
ref: releases/2022/1
|
||||
|
||||
jobs:
|
||||
- job: Win
|
||||
@@ -135,7 +135,7 @@ jobs:
|
||||
|
||||
- script: |
|
||||
set PATH=$(WORK_DIR)\ninja-win;%PATH%
|
||||
call "$(MSVS_VARS_PATH)" && $(CMAKE_CMD) -G "Ninja Multi-Config" -DENABLE_WHEEL=ON -DENABLE_ONEDNN_FOR_GPU=$(CMAKE_BUILD_SHARED_LIBS) -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_REQUIREMENTS_INSTALL=OFF -DENABLE_FASTER_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE="C:\hostedtoolcache\windows\Python\3.7.6\x64\python.exe" -DPYTHON_INCLUDE_DIR="C:\hostedtoolcache\windows\Python\3.7.6\x64\include" -DPYTHON_LIBRARY="C:\hostedtoolcache\windows\Python\3.7.6\x64\libs\python37.lib" -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
|
||||
call "$(MSVS_VARS_PATH)" && $(CMAKE_CMD) -G "Ninja Multi-Config" -DENABLE_WHEEL=ON -DENABLE_ONEDNN_FOR_GPU=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_GAPI_PREPROCESSING=$(CMAKE_BUILD_SHARED_LIBS) -DBUILD_SHARED_LIBS=$(CMAKE_BUILD_SHARED_LIBS) -DENABLE_REQUIREMENTS_INSTALL=OFF -DENABLE_FASTER_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE="C:\hostedtoolcache\windows\Python\3.7.6\x64\python.exe" -DPYTHON_INCLUDE_DIR="C:\hostedtoolcache\windows\Python\3.7.6\x64\include" -DPYTHON_LIBRARY="C:\hostedtoolcache\windows\Python\3.7.6\x64\libs\python37.lib" -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR)
|
||||
workingDirectory: $(BUILD_DIR)
|
||||
displayName: 'CMake'
|
||||
|
||||
@@ -169,6 +169,13 @@ jobs:
|
||||
workingDirectory: $(BUILD_SAMPLES_TESTS_DIR)
|
||||
displayName: 'Install Samples Tests'
|
||||
|
||||
- script: $(CMAKE_CMD) -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake
|
||||
workingDirectory: $(BUILD_DIR)
|
||||
displayName: 'Install tests'
|
||||
|
||||
- script: dir $(INSTALL_DIR) /s
|
||||
displayName: 'List install files'
|
||||
|
||||
- script: $(INSTALL_DIR)\samples\cpp\build_samples_msvc.bat -i $(INSTALL_DIR)
|
||||
workingDirectory: $(BUILD_SAMPLES_DIR)
|
||||
displayName: 'Build cpp samples'
|
||||
@@ -193,15 +200,9 @@ jobs:
|
||||
python -m pytest $(INSTALL_DIR)\tests\smoke_tests\ --env_conf $(INSTALL_DIR)\tests\smoke_tests\env_config.yml -s --junitxml=TEST-SamplesSmokeTests.xml
|
||||
workingDirectory: $(INSTALL_DIR)
|
||||
displayName: 'Samples Smoke Tests'
|
||||
condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON')
|
||||
continueOnError: false
|
||||
|
||||
- script: $(CMAKE_CMD) -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake && xcopy $(REPO_DIR)\temp\opencv_4.5.2\opencv\* $(INSTALL_DIR)\opencv\ /e /h /y
|
||||
workingDirectory: $(BUILD_DIR)
|
||||
displayName: 'Install tests'
|
||||
|
||||
- script: dir $(INSTALL_DIR) /s
|
||||
displayName: 'List install files'
|
||||
|
||||
- script: rd /Q /S $(BUILD_DIR)
|
||||
displayName: 'Clean build dir'
|
||||
continueOnError: false
|
||||
@@ -241,10 +242,6 @@ jobs:
|
||||
displayName: 'VPU UT'
|
||||
continueOnError: false
|
||||
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\XLinkTests --gtest_output=xml:TEST-XLinkTests.xml
|
||||
displayName: 'XLink Tests'
|
||||
continueOnError: false
|
||||
|
||||
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
|
||||
displayName: 'ONNX Importer UT'
|
||||
continueOnError: false
|
||||
|
||||
@@ -59,7 +59,6 @@ RUN cmake .. \
|
||||
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
|
||||
-DENABLE_INTEL_MYRIAD_COMMON=OFF \
|
||||
-DENABLE_INTEL_GNA=OFF \
|
||||
-DENABLE_OPENCV=OFF \
|
||||
-DENABLE_CPPLINT=OFF \
|
||||
-DENABLE_NCC_STYLE=OFF \
|
||||
-DENABLE_TESTS=OFF \
|
||||
|
||||
213
.ci/openvino-onnx/Jenkinsfile
vendored
Normal file
213
.ci/openvino-onnx/Jenkinsfile
vendored
Normal file
@@ -0,0 +1,213 @@
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
DOCKER_CONTAINER_NAME= "openvino-onnx-ci-container"
|
||||
DOCKER_IMAGE_TAG = "openvino-onnx-ci-image"
|
||||
ONNX_MODEL_ZOO_SHA = "d58213534f2a4d1c4b19ba62b3bb5f544353256e"
|
||||
|
||||
BACKEND_CONFIGURATIONS = [
|
||||
[ name: "Release", build_type: "Release" ],
|
||||
[ name: "Debug", build_type: "Debug" ],
|
||||
]
|
||||
|
||||
// workaround for aborting previous builds on PR update
|
||||
@NonCPS
|
||||
def stopPreviousRunningBuilds() {
|
||||
def jobname = env.JOB_NAME
|
||||
if (jobname.startsWith("onnx-ci/openvino onnx ci/openvino/PR")){
|
||||
def buildnum = env.BUILD_NUMBER.toInteger()
|
||||
def job = Jenkins.instance.getItemByFullName(jobname)
|
||||
def job_newest = job.builds.first()
|
||||
for (build in job.builds.reverse()[0..<-1]) {
|
||||
if (build.isBuilding()){
|
||||
echo "Stop task = ${build} because newest #${job_newest} is on the way"
|
||||
build.doStop();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
def getGitPrInfo(String project, String workdir) {
|
||||
def gitPrInfo = [
|
||||
prAuthorEmail : "",
|
||||
commitAuthorEmail : "",
|
||||
commitHash : "",
|
||||
commitSubject : ""
|
||||
]
|
||||
try {
|
||||
dir ("${workdir}/${project}") {
|
||||
gitPrInfo.prAuthorEmail = sh (script: 'git log -1 --pretty="format:%ae" ', returnStdout: true).trim()
|
||||
gitPrInfo.commitAuthorEmail = sh (script: 'git log -1 --pretty="format:%ce" ', returnStdout: true).trim()
|
||||
gitPrInfo.commitSubject = sh (script: 'git log -1 --pretty="format:%H" ', returnStdout: true).trim()
|
||||
gitPrInfo.commitHash = sh (script: 'git log -1 --pretty="format:%s" ', returnStdout: true).trim()
|
||||
}
|
||||
}
|
||||
catch(e) {
|
||||
echo "Failed to retrieve ${project} git repository information!"
|
||||
echo "ERROR: ${e}"
|
||||
}
|
||||
return gitPrInfo
|
||||
}
|
||||
|
||||
def notifyByEmail(def gitPrInfo) {
|
||||
stage('Notify') {
|
||||
String notifyPeople = "${gitPrInfo.prAuthorEmail}, ${gitPrInfo.commitAuthorEmail}"
|
||||
emailext (
|
||||
subject: "OpenVino CI: PR ${CHANGE_ID} ${currentBuild.result}!",
|
||||
body: """
|
||||
Status: ${currentBuild.result}
|
||||
Pull Request Title: ${CHANGE_TITLE}
|
||||
Pull Request: ${CHANGE_URL}
|
||||
Branch: ${CHANGE_BRANCH}
|
||||
Commit Hash: ${gitPrInfo.commitSubject}
|
||||
Commit Subject: ${gitPrInfo.commitHash}
|
||||
Jenkins Build: ${RUN_DISPLAY_URL}
|
||||
""",
|
||||
to: "${notifyPeople}"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
def gitSubmoduleUpdate(String repository_name, String workdir) {
|
||||
dir ("${workdir}/${repository_name}") {
|
||||
sh label: "Init ${repository_name} submodules",
|
||||
script:
|
||||
"""
|
||||
git submodule init && git submodule update \
|
||||
--init \
|
||||
--no-fetch \
|
||||
--recursive
|
||||
"""
|
||||
}
|
||||
}
|
||||
|
||||
def prepare_repository(String workdir) {
|
||||
dir("${workdir}") {
|
||||
println "Preparing repository in directory: ${workdir}"
|
||||
checkout scm
|
||||
gitSubmoduleUpdate(PROJECT_NAME, workdir)
|
||||
}
|
||||
}
|
||||
|
||||
def updateModels() {
|
||||
sh """
|
||||
./src/bindings/python/tests/test_onnx/model_zoo_preprocess.sh -d ${HOME}/ONNX_CI/models_data -o -s ${ONNX_MODEL_ZOO_SHA}
|
||||
"""
|
||||
}
|
||||
|
||||
def get_docker_container_name(Map configuration){
|
||||
println "RUN get_docker_container_name for ${configuration.name}"
|
||||
String docker_container_name = "${DOCKER_CONTAINER_NAME}_${BUILD_NUMBER}_${env.CHANGE_ID}_${configuration.name}"
|
||||
return docker_container_name
|
||||
}
|
||||
|
||||
def buildDockerImage(Map configuration, String workdir) {
|
||||
String docker_image_tag = "${DOCKER_IMAGE_TAG}_${BUILD_NUMBER}_${env.CHANGE_ID}_${configuration.name}".toLowerCase()
|
||||
println "docker_image_tag: ${docker_image_tag}"
|
||||
updateModels()
|
||||
sh """
|
||||
docker build --tag=${docker_image_tag} \
|
||||
--build-arg BUILD_TYPE=${configuration.build_type} \
|
||||
--file=.ci/openvino-onnx/Dockerfile \
|
||||
--build-arg http_proxy=${HTTP_PROXY} \
|
||||
--build-arg https_proxy=${HTTPS_PROXY} .
|
||||
"""
|
||||
}
|
||||
|
||||
def runTests(Map configuration, String workdir) {
|
||||
println "Run tests for ${configuration.name}"
|
||||
String docker_image_tag = "${DOCKER_IMAGE_TAG}_${BUILD_NUMBER}_${env.CHANGE_ID}_${configuration.name}".toLowerCase()
|
||||
|
||||
String docker_container_name = get_docker_container_name(configuration)
|
||||
|
||||
// Run only basic unit tests in Debug configuration
|
||||
if (configuration.build_type == "Debug") {
|
||||
sh """
|
||||
docker run --name ${docker_container_name} ${docker_image_tag}
|
||||
"""
|
||||
}
|
||||
|
||||
// Run unit-tests AND large model tests by default
|
||||
else {
|
||||
sh """
|
||||
docker run --name ${docker_container_name} \
|
||||
--volume ${HOME}/ONNX_CI/models_data/model_zoo/onnx_model_zoo_${ONNX_MODEL_ZOO_SHA}:/root/.onnx/model_zoo/onnx_model_zoo \
|
||||
--volume ${HOME}/ONNX_CI/data/model_zoo/MSFT:/root/.onnx/model_zoo/MSFT \
|
||||
${docker_image_tag} /bin/bash -c "tox && tox -e zoo_models"
|
||||
"""
|
||||
}
|
||||
}
|
||||
|
||||
def getConfigurationsMap() {
|
||||
def configurationsMap = [:]
|
||||
for (backend in BACKEND_CONFIGURATIONS) {
|
||||
def configuration = backend.clone()
|
||||
configurationsMap[configuration.name] = {
|
||||
stage(configuration.name) { CONFIGURATION_WORKFLOW(configuration) }
|
||||
}
|
||||
}
|
||||
return configurationsMap
|
||||
}
|
||||
|
||||
CONFIGURATION_WORKFLOW = { configuration ->
|
||||
node("OpenVINO") {
|
||||
String workdir = "${HOME}/workspace/${BUILD_NUMBER}_${env.CHANGE_ID}_${configuration.name}"
|
||||
try {
|
||||
PROJECT_NAME = "openvino"
|
||||
stage("Clone repository") {
|
||||
prepare_repository(workdir)
|
||||
}
|
||||
stage("Prepare Docker environment") {
|
||||
dir("${workdir}") {
|
||||
buildDockerImage(configuration, workdir)
|
||||
}
|
||||
}
|
||||
stage("Run tests") {
|
||||
timeout(time: 60, unit: 'MINUTES') {
|
||||
runTests(configuration, workdir)
|
||||
}
|
||||
}
|
||||
}
|
||||
catch(e) {
|
||||
// Set result to ABORTED if exception contains exit code of a process interrupted by SIGTERM
|
||||
if ("$e".contains("143")) {
|
||||
currentBuild.result = "ABORTED"
|
||||
} else {
|
||||
currentBuild.result = "FAILURE"
|
||||
}
|
||||
def gitPrInfo = getGitPrInfo(PROJECT_NAME, workdir)
|
||||
notifyByEmail(gitPrInfo)
|
||||
}
|
||||
finally {
|
||||
stage("Cleanup") {
|
||||
String docker_container_name = get_docker_container_name(configuration)
|
||||
sh """
|
||||
docker rm -f ${docker_container_name}
|
||||
rm -rf ${workdir}
|
||||
"""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pipeline {
|
||||
agent none
|
||||
options {
|
||||
skipDefaultCheckout true
|
||||
timeout(activity: true, time: 120, unit: 'MINUTES')
|
||||
}
|
||||
stages {
|
||||
stage('Parallel CI') {
|
||||
steps {
|
||||
stopPreviousRunningBuilds()
|
||||
script {
|
||||
parallelStagesMap = getConfigurationsMap()
|
||||
parallel parallelStagesMap
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
65
.ci/openvino-onnx/watchdog/Jenkinsfile
vendored
Normal file
65
.ci/openvino-onnx/watchdog/Jenkinsfile
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright (C) 2018-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
timeout(30)
|
||||
{
|
||||
node(LABEL) {
|
||||
|
||||
BUILD_WORKSPACE = "$WORKSPACE/$BUILD_NUMBER"
|
||||
WATCHDOG_ROOT = "$BUILD_WORKSPACE/.ci/openvino-onnx/watchdog"
|
||||
VENV_PATH = "${BUILD_WORKSPACE}/.wdvenv"
|
||||
|
||||
try {
|
||||
stage("Clone repository") {
|
||||
dir ("$BUILD_WORKSPACE") {
|
||||
checkout([$class: 'GitSCM', branches: [[name: "*/$BRANCH"]],
|
||||
doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'CloneOption', timeout: 30]], submoduleCfg: [],
|
||||
userRemoteConfigs: [[credentialsId: "${GITHUB_KEY}", url: "${OPEN_VINO_URL}"]]])
|
||||
}
|
||||
}
|
||||
stage("Prepare environment") {
|
||||
sh """#!/bin/bash
|
||||
if [ ! -d ${VENV_PATH} ]; then
|
||||
python3 -m venv ${VENV_PATH}
|
||||
source ${VENV_PATH}/bin/activate
|
||||
pip install -r ${WATCHDOG_ROOT}/requirements.txt
|
||||
fi
|
||||
"""
|
||||
}
|
||||
stage("Run script") {
|
||||
withCredentials([
|
||||
usernamePassword(credentialsId: '7157091e-bc04-42f0-99fd-dc4da2922a55',
|
||||
usernameVariable: 'username',
|
||||
passwordVariable: 'password')])
|
||||
{
|
||||
dir ("$BUILD_WORKSPACE") {
|
||||
sh """#!/bin/bash
|
||||
source ${VENV_PATH}/bin/activate
|
||||
export PYTHONHTTPSVERIFY=0
|
||||
python ${WATCHDOG_ROOT}/src/main.py \
|
||||
--msteams-url=${MSTEAMS_URL_FILE} \
|
||||
--github-credentials '${username}' '${password}' \
|
||||
--github-org=${GITHUB_ORG} \
|
||||
--github-project=${GITHUB_PROJECT} \
|
||||
--jenkins-token=${JENKINS_TOKEN_FILE} \
|
||||
--jenkins-server=${JENKINS_SERVER} \
|
||||
--jenkins-user=${JENKINS_USER} \
|
||||
--ci-job=${CI_JOB_NAME} \
|
||||
--watchdog-job=${WATCHDOG_JOB_NAME}
|
||||
"""
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
echo "$e"
|
||||
currentBuild.result = "FAILURE"
|
||||
} finally {
|
||||
stage("Cleanup") {
|
||||
sh """
|
||||
cd $BUILD_WORKSPACE
|
||||
rm -rf ..?* .[!.]* *
|
||||
"""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
6
.ci/openvino-onnx/watchdog/requirements.txt
Normal file
6
.ci/openvino-onnx/watchdog/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
python-jenkins==1.7.0
|
||||
retrying==1.3.3
|
||||
pygithub==1.51
|
||||
timeout-decorator==0.4.1
|
||||
requests==2.23.0
|
||||
wheel
|
||||
108
.ci/openvino-onnx/watchdog/src/git_wrapper.py
Normal file
108
.ci/openvino-onnx/watchdog/src/git_wrapper.py
Normal file
@@ -0,0 +1,108 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import logging
|
||||
import timeout_decorator
|
||||
from datetime import datetime
|
||||
from retrying import retry
|
||||
from github import Github, GithubException
|
||||
|
||||
# Logging
|
||||
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s')
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(logging.INFO)
|
||||
|
||||
_RETRY_LIMIT = 3
|
||||
_RETRY_COOLDOWN_MS = 2000
|
||||
_REQUEST_TIMEOUT_S = 10
|
||||
|
||||
|
||||
class GitWrapper:
|
||||
"""Class wrapping PyGithub API.
|
||||
|
||||
The purpose of this class is to wrap methods from PyGithub API used in Watchdog, for less error-prone and
|
||||
more convenient use. Docs for used API, including wrapped methods can be found at:
|
||||
https://pygithub.readthedocs.io/en/latest/introduction.html
|
||||
|
||||
:param github_credentials: Credentials used for GitHub
|
||||
:param repository: GitHub repository name
|
||||
:param project: GitHub project name
|
||||
:type github_credentials: String
|
||||
:type repository: String
|
||||
:type project: String
|
||||
"""
|
||||
|
||||
def __init__(self, github_credentials, repository, project):
|
||||
self.git = Github(*github_credentials)
|
||||
self.repository = repository
|
||||
self.project = project
|
||||
self.github_credentials = github_credentials
|
||||
|
||||
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
|
||||
def get_git_time(self):
|
||||
"""Retrieve time from GitHub.
|
||||
|
||||
Used to reliably determine time during Watchdog run.
|
||||
|
||||
:return: Datetime object describing current time
|
||||
:rtype: datetime
|
||||
"""
|
||||
try:
|
||||
datetime_object = self._get_git_time()
|
||||
except ValueError as e:
|
||||
raise GitWrapperError(str(e))
|
||||
except GithubException as e:
|
||||
message = 'GitHub Exception during API status retrieval. Exception: {}'.format(str(e))
|
||||
raise GitWrapperError(message)
|
||||
except timeout_decorator.TimeoutError:
|
||||
message = 'GitHub Exception during API status retrieval. Timeout during API request.'
|
||||
raise GitWrapperError(message)
|
||||
return datetime_object
|
||||
|
||||
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
|
||||
def get_pull_requests(self):
|
||||
"""Retrieve paginated list of pull requests from GitHub.
|
||||
|
||||
:return: Paginated list of Pull Requests in GitHub repo
|
||||
:rtype: github.PaginatedList.PaginatedList of github.PullRequest.PullRequest
|
||||
"""
|
||||
try:
|
||||
prs = self._get_pull_requests()
|
||||
except GithubException as e:
|
||||
message = 'GitHub Exception during API status retrieval. Exception: {}'.format(str(e))
|
||||
raise GitWrapperError(message)
|
||||
return prs
|
||||
|
||||
@timeout_decorator.timeout(_REQUEST_TIMEOUT_S)
|
||||
def _get_git_time(self):
|
||||
"""Private method retrieving time from GitHub.
|
||||
|
||||
:return: Datetime object describing current time
|
||||
:rtype: datetime
|
||||
"""
|
||||
datetime_string = self.git.get_api_status().raw_headers.get('date', '')
|
||||
datetime_format = '%a, %d %b %Y %H:%M:%S %Z'
|
||||
datetime_object = datetime.strptime(datetime_string, datetime_format)
|
||||
return datetime_object
|
||||
|
||||
@timeout_decorator.timeout(_REQUEST_TIMEOUT_S)
|
||||
def _get_pull_requests(self):
|
||||
"""Private method retrieving pull requests from GitHub.
|
||||
|
||||
:return: Paginated list of Pull Requests in GitHub repo
|
||||
:rtype: github.PaginatedList.PaginatedList of github.PullRequest.PullRequest
|
||||
"""
|
||||
return self.git.get_organization(self.repository).get_repo(self.project).get_pulls()
|
||||
|
||||
|
||||
class GitWrapperError(Exception):
|
||||
"""Base class for exceptions raised in GitWrapper.
|
||||
|
||||
:param message Explanation of the error
|
||||
"""
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
log.exception(message)
|
||||
91
.ci/openvino-onnx/watchdog/src/jenkins_wrapper.py
Normal file
91
.ci/openvino-onnx/watchdog/src/jenkins_wrapper.py
Normal file
@@ -0,0 +1,91 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import requests
|
||||
import jenkins
|
||||
import logging
|
||||
from retrying import retry
|
||||
|
||||
# Logging
|
||||
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s')
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(logging.INFO)
|
||||
|
||||
_RETRY_LIMIT = 3
|
||||
_RETRY_COOLDOWN_MS = 5000
|
||||
|
||||
|
||||
class JenkinsWrapper:
|
||||
"""Class wrapping Python-Jenkins API.
|
||||
|
||||
The purpose of this class is to wrap methods from Python-Jenkins API used in Watchdog, for less error-prone and
|
||||
more convenient use. Docs for used API, including wrapped methods can be found at:
|
||||
https://python-jenkins.readthedocs.io/en/latest/
|
||||
|
||||
:param jenkins_token: Token used for Jenkins
|
||||
:param jenkins_user: Username used to connect to Jenkins
|
||||
:param jenkins_server: Jenkins server address
|
||||
:type jenkins_token: String
|
||||
:type jenkins_user: String
|
||||
:type jenkins_server: String
|
||||
"""
|
||||
|
||||
def __init__(self, jenkins_token, jenkins_user, jenkins_server):
|
||||
self.jenkins_server = jenkins_server
|
||||
self.jenkins = jenkins.Jenkins(jenkins_server, username=jenkins_user,
|
||||
password=jenkins_token)
|
||||
|
||||
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
|
||||
def get_build_console_output(self, job_name, build_number):
|
||||
return self.jenkins.get_build_console_output(job_name, build_number)
|
||||
|
||||
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
|
||||
def get_job_info(self, job_name):
|
||||
return self.jenkins.get_job_info(job_name)
|
||||
|
||||
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
|
||||
def get_build_info(self, job_name, build_number):
|
||||
return self.jenkins.get_build_info(job_name, build_number)
|
||||
|
||||
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
|
||||
def get_queue_item(self, queue_id):
|
||||
"""Attempt to retrieve Jenkins job queue item.
|
||||
|
||||
Exception communicating queue doesn't exist is expected,
|
||||
in that case method returns empty dict.
|
||||
|
||||
:param queue_id: Jenkins job queue ID number
|
||||
:type queue_id: int
|
||||
:return: Dictionary representing Jenkins job queue item
|
||||
:rtype: dict
|
||||
"""
|
||||
try:
|
||||
return self.jenkins.get_queue_item(queue_id)
|
||||
except Exception as e:
|
||||
# Exception 'queue does not exist' is expected behaviour when job is running
|
||||
if 'queue' in str(e) and 'does not exist' in str(e):
|
||||
return {}
|
||||
else:
|
||||
raise
|
||||
|
||||
@retry(stop_max_attempt_number=_RETRY_LIMIT, wait_fixed=_RETRY_COOLDOWN_MS)
|
||||
def get_idle_ci_hosts(self):
|
||||
"""Query Jenkins for idle servers.
|
||||
|
||||
Send GET request to Jenkins server, querying for idle servers labeled
|
||||
for OpenVino-ONNX CI job.
|
||||
|
||||
:return: Number of idle hosts delegated to OpenVino-ONNX CI
|
||||
:rtype: int
|
||||
"""
|
||||
jenkins_request_url = self.jenkins_server + 'label/ci&&onnx/api/json?pretty=true'
|
||||
try:
|
||||
log.info('Sending request to Jenkins: %s', jenkins_request_url)
|
||||
r = requests.Request(method='GET', url=jenkins_request_url, verify=False)
|
||||
response = self.jenkins.jenkins_request(r).json()
|
||||
return int(response['totalExecutors']) - int(response['busyExecutors'])
|
||||
except Exception as e:
|
||||
log.exception('Failed to send request to Jenkins!\nException message: %s', str(e))
|
||||
raise
|
||||
89
.ci/openvino-onnx/watchdog/src/main.py
Normal file
89
.ci/openvino-onnx/watchdog/src/main.py
Normal file
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from watchdog import Watchdog
|
||||
|
||||
DEFAULT_MSTEAMS_URL_FILE = '/home/lab_nerval/tokens/msteams_url'
|
||||
DEFAULT_GITHUB_ORGANIZATION = 'openvinotoolkit'
|
||||
DEFAULT_GITHUB_PROJECT = 'openvino'
|
||||
DEFAULT_JENKINS_TOKEN_FILE = '/home/lab_nerval/tokens/crackerjack'
|
||||
DEFAULT_JENKINS_SERVER = 'https://crackerjack.intel.com/'
|
||||
DEFAULT_JENKINS_USER = 'lab_nerval'
|
||||
DEFAULT_CI_JOB_NAME = 'onnx/OpenVino_CI'
|
||||
DEFAULT_WATCHDOG_JOB_NAME = 'onnx/ci_watchdog'
|
||||
|
||||
|
||||
def main(args):
|
||||
"""
|
||||
Read args passed to script, load tokens and run watchdog.
|
||||
|
||||
Keyword arguments:
|
||||
:param args: arguments parsed by argparse ArgumentParser
|
||||
|
||||
:return: returns status code 0 on successful completion
|
||||
|
||||
"""
|
||||
jenkins_server = args.jenkins_server.strip()
|
||||
jenkins_user = args.jenkins_user.strip()
|
||||
jenkins_token = open(args.jenkins_token).read().replace('\n', '').strip()
|
||||
msteams_url = open(args.msteams_url).read().replace('\n', '').strip()
|
||||
github_credentials = args.github_credentials
|
||||
github_org = args.github_org
|
||||
github_project = args.github_project
|
||||
ci_job = args.ci_job.strip()
|
||||
watchdog_job = args.watchdog_job.strip()
|
||||
quiet = args.quiet
|
||||
|
||||
wd = Watchdog(jenkins_token=jenkins_token,
|
||||
jenkins_server=jenkins_server,
|
||||
jenkins_user=jenkins_user,
|
||||
github_credentials=github_credentials,
|
||||
git_org=github_org,
|
||||
git_project=github_project,
|
||||
msteams_url=msteams_url,
|
||||
ci_job_name=ci_job,
|
||||
watchdog_job_name=watchdog_job)
|
||||
wd.run(quiet=quiet)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument('--msteams-url', help='Path to MS Teams channel url to communicate messages.',
|
||||
default=DEFAULT_MSTEAMS_URL_FILE, action='store', required=False)
|
||||
|
||||
parser.add_argument('--github-credentials', help='GitHub user credentials to access repo.',
|
||||
nargs="+", required=True)
|
||||
|
||||
parser.add_argument('--github-org', help='Name of organization on GitHub.',
|
||||
default=DEFAULT_GITHUB_ORGANIZATION, action='store', required=False)
|
||||
|
||||
parser.add_argument('--github-project', help='Name of project on GitHub.',
|
||||
default=DEFAULT_GITHUB_PROJECT, action='store', required=False)
|
||||
|
||||
parser.add_argument('--jenkins-token', help='Path to Jenkins user token to access build info.',
|
||||
default=DEFAULT_JENKINS_TOKEN_FILE, action='store', required=False)
|
||||
|
||||
parser.add_argument('--jenkins-server', help='Jenkins server address.',
|
||||
default=DEFAULT_JENKINS_SERVER, action='store', required=False)
|
||||
|
||||
parser.add_argument('--jenkins-user', help='Jenkins user used to log in.',
|
||||
default=DEFAULT_JENKINS_USER, action='store', required=False)
|
||||
|
||||
parser.add_argument('--ci-job', help='Jenkins CI job name.',
|
||||
default=DEFAULT_CI_JOB_NAME, action='store', required=False)
|
||||
|
||||
parser.add_argument('--watchdog-job', help='Jenkins CI Watchdog job name.',
|
||||
default=DEFAULT_WATCHDOG_JOB_NAME, action='store', required=False)
|
||||
|
||||
parser.add_argument('--quiet', help="Quiet mode - doesn\'t send message to communicator.",
|
||||
action='store_true', required=False)
|
||||
|
||||
args = parser.parse_args()
|
||||
sys.exit(main(args))
|
||||
128
.ci/openvino-onnx/watchdog/src/ms_teams_communicator.py
Normal file
128
.ci/openvino-onnx/watchdog/src/ms_teams_communicator.py
Normal file
@@ -0,0 +1,128 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
class MSTeamsCommunicator:
|
||||
"""Class communicating with MSTeams using Incoming Webhook.
|
||||
|
||||
The purpose of this class is to use MSTeams API to send message.
|
||||
Docs for used API, including wrapped methods can be found at:
|
||||
https://docs.microsoft.com/en-us/outlook/actionable-messages/send-via-connectors
|
||||
"""
|
||||
|
||||
def __init__(self, _ci_alerts_channel_url):
|
||||
self._ci_alerts_channel_url = _ci_alerts_channel_url
|
||||
self._queued_messages = {
|
||||
self._ci_alerts_channel_url: [],
|
||||
}
|
||||
|
||||
@property
|
||||
def messages(self):
|
||||
"""
|
||||
Get list of queued messages.
|
||||
|
||||
:return: List of queued messages
|
||||
:return type: List[String]
|
||||
"""
|
||||
return self._queued_messages.values()
|
||||
|
||||
def queue_message(self, message):
|
||||
"""
|
||||
Queue message to be sent later.
|
||||
|
||||
:param message: Message content
|
||||
:type message: String
|
||||
"""
|
||||
self._queued_messages[self._ci_alerts_channel_url].append(message)
|
||||
|
||||
def _parse_text(self, watchdog_log, message):
|
||||
"""
|
||||
Parse text to display as alert.
|
||||
|
||||
:param watchdog_log: Watchdog log content
|
||||
:param message: Unparsed message content
|
||||
:type watchdog_log: String
|
||||
:type message: String
|
||||
"""
|
||||
message_split = message.split('\n')
|
||||
log_url = None
|
||||
if len(message_split) == 3:
|
||||
log_url = message_split[-1]
|
||||
title = message_split[0]
|
||||
text = message_split[1]
|
||||
header = watchdog_log.split(' - ')
|
||||
header_formatted = '{} - [Watchdog Log]({})'.format(header[0], header[1])
|
||||
return title, log_url, '{}\n\n{}'.format(header_formatted, text)
|
||||
|
||||
def _json_request_content(self, title, log_url, text_formatted):
|
||||
"""
|
||||
Create final json request to send message to MS Teams channel.
|
||||
|
||||
:param title: Title of alert
|
||||
:param log_url: URL to PR
|
||||
:param text_formatted: General content of alert - finally formatted
|
||||
:type title: String
|
||||
:type title: String
|
||||
:type title: String
|
||||
"""
|
||||
data = {
|
||||
'@context': 'https://schema.org/extensions',
|
||||
'@type': 'MessageCard',
|
||||
'themeColor': '0072C6',
|
||||
'title': title,
|
||||
'text': text_formatted,
|
||||
'potentialAction':
|
||||
[
|
||||
{
|
||||
'@type': 'OpenUri',
|
||||
'name': 'Open PR',
|
||||
'targets':
|
||||
[
|
||||
{
|
||||
'os': 'default',
|
||||
'uri': log_url,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
return data
|
||||
|
||||
def _send_to_channel(self, watchdog_log, message_queue, channel_url):
|
||||
"""
|
||||
Send MSTeams message to specified channel.
|
||||
|
||||
:param watchdog_log: Watchdog log content
|
||||
:param message_queue: Queued messages to send
|
||||
:param channel_url: Channel url
|
||||
:type watchdog_log: String
|
||||
:type message_queue: String
|
||||
:type channel_url: String
|
||||
|
||||
"""
|
||||
for message in message_queue:
|
||||
title, log_url, text_formatted = self._parse_text(watchdog_log, message)
|
||||
data = self._json_request_content(title, log_url, text_formatted)
|
||||
|
||||
try:
|
||||
requests.post(url=channel_url, json=data)
|
||||
except Exception as ex:
|
||||
raise Exception('!!CRITICAL!! MSTeamsCommunicator: Could not send message '
|
||||
'due to {}'.format(ex))
|
||||
|
||||
def send_message(self, watchdog_log, quiet=False):
|
||||
"""
|
||||
Send queued messages as single communication.
|
||||
|
||||
:param watchdog_log: Watchdog log content
|
||||
:param quiet: Flag for disabling sending report through MS Teams
|
||||
:type watchdog_log: String
|
||||
:type quiet: Boolean
|
||||
"""
|
||||
for channel, message_queue in self._queued_messages.items():
|
||||
if not quiet and message_queue:
|
||||
self._send_to_channel(watchdog_log, message_queue, channel)
|
||||
505
.ci/openvino-onnx/watchdog/src/watchdog.py
Normal file
505
.ci/openvino-onnx/watchdog/src/watchdog.py
Normal file
@@ -0,0 +1,505 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import datetime
|
||||
import time
|
||||
import re
|
||||
import logging
|
||||
import requests
|
||||
from ms_teams_communicator import MSTeamsCommunicator
|
||||
from jenkins_wrapper import JenkinsWrapper
|
||||
from jenkins import NotFoundException
|
||||
from git_wrapper import GitWrapper, GitWrapperError
|
||||
import os
|
||||
import json
|
||||
|
||||
# Logging
|
||||
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s')
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(logging.INFO)
|
||||
|
||||
# Watchdog static constant variables
|
||||
_SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
_BUILD_DURATION_THRESHOLD = datetime.timedelta(minutes=60)
|
||||
_CI_START_THRESHOLD = datetime.timedelta(minutes=30)
|
||||
_AWAITING_JENKINS_THRESHOLD = datetime.timedelta(minutes=5)
|
||||
_WATCHDOG_DIR = os.path.expanduser('~')
|
||||
_PR_REPORTS_CONFIG_KEY = 'pr_reports'
|
||||
_CI_BUILD_FAIL_MESSAGE = 'ERROR: py3: commands failed'
|
||||
_CI_BUILD_SUCCESS_MESSAGE = 'py3: commands succeeded'
|
||||
_GITHUB_CI_CHECK_NAME = 'OpenVINO-ONNX'
|
||||
|
||||
INTERNAL_ERROR_MESSAGE_HEADER = '!!! --- !!! INTERNAL WATCHDOG ERROR !!! --- !!!'
|
||||
ERROR_MESSAGE_HEADER = '!!! OpenVino-ONNX CI Error !!!'
|
||||
WARNING_MESSAGE_HEADER = 'OpenVino-ONNX CI WARNING'
|
||||
INFO_MESSAGE_HEADER = 'OpenVino-ONNX CI INFO'
|
||||
|
||||
|
||||
class Watchdog:
|
||||
"""Class describing OpenVino-ONNX-CI Watchdog.
|
||||
|
||||
Watchdog connects to GitHub and retrieves the list of current pull requests (PRs) in
|
||||
OpenVino repository. Then it connects to specified Jenkins server to
|
||||
check CI jobs associated with every PR. Watchdog verifies time durations for Jenkins
|
||||
initial response, job queue and execution against time treshold constants. Every fail
|
||||
is logged and reported through MS Teams communicators.
|
||||
|
||||
:param jenkins_token: Token used for Jenkins
|
||||
:param jenkins_server: Jenkins server address
|
||||
:param jenkins_user: Username used to connect to Jenkins
|
||||
:param github_credentials: Credentials used to connect to GitHub
|
||||
:param msteams_url: URL used to connect to MS Teams channel
|
||||
:param ci_job_name: OpenVino-ONNX CI job name used in Jenkins
|
||||
:param watchdog_job_name: Watchdog job name used in Jenkins
|
||||
:type jenkins_token: String
|
||||
:type jenkins_server: String
|
||||
:type jenkins_user: String
|
||||
:type github_credentials: String
|
||||
:type msteams_url: String
|
||||
:type ci_job_name: String
|
||||
:type watchdog_job_name: String
|
||||
|
||||
.. note::
|
||||
Watchdog and OpenVino-ONNX CI job must be placed on the same Jenkins server.
|
||||
"""
|
||||
|
||||
def __init__(self, jenkins_token, jenkins_server, jenkins_user, github_credentials, git_org,
|
||||
git_project, msteams_url, ci_job_name, watchdog_job_name):
|
||||
self._config_path = os.path.join(_WATCHDOG_DIR, '{}/.{}_ci_watchdog.json'.format(_WATCHDOG_DIR, git_project))
|
||||
# Jenkins Wrapper object for CI job
|
||||
self._jenkins = JenkinsWrapper(jenkins_token,
|
||||
jenkins_user=jenkins_user,
|
||||
jenkins_server=jenkins_server)
|
||||
# Load GitHub token and log in, retrieve pull requests
|
||||
self._git = GitWrapper(github_credentials, repository=git_org, project=git_project)
|
||||
# Create MS Teams api object
|
||||
self._msteams_hook = MSTeamsCommunicator(msteams_url)
|
||||
self._ci_job_name = ci_job_name.lower()
|
||||
self._watchdog_job_name = watchdog_job_name
|
||||
# Read config file
|
||||
self._config = self._read_config_file()
|
||||
# Time at Watchdog initiation
|
||||
self._now_time = datetime.datetime.now()
|
||||
self._current_prs = {}
|
||||
self._ms_teams_enabled = True
|
||||
|
||||
def run(self, quiet=False):
|
||||
"""Run main watchdog logic.
|
||||
|
||||
Retrieve list of pull requests and pass it to the method responsible for checking them.
|
||||
|
||||
:param quiet: Flag for disabling sending report through communicator
|
||||
:type quiet: Boolean
|
||||
"""
|
||||
try:
|
||||
pull_requests = self._git.get_pull_requests()
|
||||
except GitWrapperError:
|
||||
message = 'Failed to retrieve Pull Requests!'
|
||||
log.exception(message)
|
||||
self._queue_message(message, message_severity='internal')
|
||||
# Check all pull requests
|
||||
for pr in pull_requests:
|
||||
try:
|
||||
self._check_pr(pr)
|
||||
except Exception as e:
|
||||
log.exception(str(e))
|
||||
self._queue_message(str(e), message_severity='internal', pr=pr)
|
||||
self._update_config()
|
||||
self._send_message(quiet=quiet)
|
||||
|
||||
def _read_config_file(self):
|
||||
"""Read Watchdog config file stored on the system.
|
||||
|
||||
The file stores every fail already reported along with timestamp. This
|
||||
mechanism is used to prevent Watchdog from reporting same failure
|
||||
multiple times. In case there's no config under the expected path,
|
||||
appropriate data structure is created and returned.
|
||||
|
||||
:return: Returns dict of dicts with reported fails with their timestamps
|
||||
:rtype: dict of dicts
|
||||
"""
|
||||
if os.path.isfile(self._config_path):
|
||||
log.info('Reading config file in: {}'.format(self._config_path))
|
||||
file = open(self._config_path, 'r')
|
||||
data = json.load(file)
|
||||
else:
|
||||
log.info('No config file found in: {}'.format(self._config_path))
|
||||
data = {_PR_REPORTS_CONFIG_KEY: {}}
|
||||
return data
|
||||
|
||||
def _check_pr(self, pr):
|
||||
"""Check pull request (if there's no reason to skip).
|
||||
|
||||
Retrieve list of statuses for every PR's last commit and interpret them. Filters out statuses
|
||||
unrelated to OpenVino-ONNX Jenkins CI and passes relevant statuses to method that interprets them.
|
||||
If no commit statuses related to Jenkins are available after time defined by
|
||||
**_AWAITING_JENKINS_THRESHOLD** calls appropriate method to check for builds waiting in queue.
|
||||
|
||||
:param pr: GitHub Pull Requests
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
"""
|
||||
log.info('===============================================')
|
||||
log.info('Checking PR#{}'.format(pr.number))
|
||||
# Get last Jenkins status
|
||||
last_status = self._get_last_status(pr)
|
||||
# Append PR checked in current run for Watchdog config
|
||||
self._current_prs[str(pr.number)] = self._get_pr_timestamps(pr, last_status)
|
||||
if self._should_ignore(pr) or self._updated_since_last_run(pr):
|
||||
log.info('Ignoring PR#{}'.format(pr.number))
|
||||
|
||||
return
|
||||
|
||||
# Calculate time passed since PR update (any commit, merge or comment)
|
||||
pr_time_delta = self._now_time - pr.updated_at
|
||||
if last_status:
|
||||
# Interpret found CI statuses
|
||||
log.info('Last status: {} at {}'.format(last_status.description, last_status.updated_at))
|
||||
self._interpret_status(last_status, pr)
|
||||
elif pr_time_delta > _CI_START_THRESHOLD:
|
||||
# If there's no status after assumed time - check if build is waiting in queue
|
||||
log.info('CI for PR {}: NO JENKINS STATUS YET'.format(pr.number))
|
||||
self._check_missing_status(pr)
|
||||
|
||||
@staticmethod
|
||||
def _get_pr_timestamps(pr, last_status):
|
||||
"""Get dict containing PR timestamp and last status timestamp.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
|
||||
:return: Dictionary with PR and last status update timestamps
|
||||
:rtype: dict
|
||||
"""
|
||||
pr_timestamp = time.mktime(pr.updated_at.timetuple())
|
||||
if last_status:
|
||||
status_timestamp = time.mktime(last_status.updated_at.timetuple())
|
||||
else:
|
||||
status_timestamp = None
|
||||
pr_dict = {'pr_timestamp': pr_timestamp,
|
||||
'status_timestamp': status_timestamp}
|
||||
return pr_dict
|
||||
|
||||
@staticmethod
|
||||
def _get_last_status(pr):
|
||||
"""Get last commit status posted from Jenkins.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
|
||||
:return: Either last PR status posted from Jenkins or None
|
||||
:rtype: github.CommitStatus.CommitStatus
|
||||
"""
|
||||
# Find last commit in PR
|
||||
last_commit = pr.get_commits().reversed[0]
|
||||
# Get statuses and filter them to contain only those related to Jenkins CI
|
||||
# and check if CI in Jenkins started
|
||||
statuses = last_commit.get_statuses()
|
||||
jenk_statuses = [stat for stat in statuses if
|
||||
_GITHUB_CI_CHECK_NAME in stat.context]
|
||||
try:
|
||||
last_status = jenk_statuses[0]
|
||||
except IndexError:
|
||||
last_status = None
|
||||
return last_status
|
||||
|
||||
@staticmethod
|
||||
def _should_ignore(pr):
|
||||
"""Determine if PR should be ignored.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
|
||||
:return: Returns True if PR should be ignored
|
||||
:rtype: Bool
|
||||
"""
|
||||
# Ignore PR if it has WIP label or WIP in title
|
||||
if 'WIP' in pr.title:
|
||||
log.info('PR#{} should be ignored. WIP tag in title.'.format(pr.number))
|
||||
return True
|
||||
|
||||
label_names = [label.name for label in pr.labels]
|
||||
if 'WIP' in label_names:
|
||||
log.info('PR#{} should be ignored. WIP label present.'.format(pr.number))
|
||||
return True
|
||||
|
||||
# Ignore PR if base ref is not master
|
||||
if 'master' not in pr.base.ref:
|
||||
log.info('PR#{} should be ignored. Base ref is not master'.format(pr.number))
|
||||
return True
|
||||
|
||||
# Ignore PR if mergeable state is 'dirty' or 'behind'.
|
||||
# Practically this ignores PR in case of merge conflicts
|
||||
ignored_mergeable_states = ['behind', 'dirty', 'draft']
|
||||
if pr.mergeable_state in ignored_mergeable_states:
|
||||
log.info('PR#{} should be ignored. Mergeable state is {}. '.format(pr.number, pr.mergeable_state))
|
||||
return True
|
||||
|
||||
# If no criteria for ignoring PR are met - return false
|
||||
return False
|
||||
|
||||
def _updated_since_last_run(self, pr):
|
||||
# Ignore if PR was already checked and there was no update in meantime
|
||||
pr_number = str(pr.number)
|
||||
current_pr_timestamps = self._current_prs.get(pr_number)
|
||||
last_pr_timestamps = self._config[_PR_REPORTS_CONFIG_KEY].get(pr_number)
|
||||
if current_pr_timestamps == last_pr_timestamps:
|
||||
log.info('PR#{} - No update since last check'.format(pr.number))
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _check_missing_status(self, pr):
|
||||
"""Verify if missing status is expected.
|
||||
|
||||
This method checks if CI build for last was scheduled and still waits in queue for
|
||||
executor.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
"""
|
||||
pr_time_delta = self._now_time - pr.updated_at
|
||||
try:
|
||||
build_number = self._build_scheduled(pr)
|
||||
if self._build_in_queue(pr, build_number):
|
||||
message = ('PR# {}: build waiting in queue after {} minutes.'
|
||||
.format(pr.number, pr_time_delta.seconds / 60))
|
||||
severity = 'warning'
|
||||
else:
|
||||
message = ('PR# {}: missing status on GitHub after {} minutes.'
|
||||
.format(pr.number, pr_time_delta.seconds / 60))
|
||||
severity = 'error'
|
||||
self._queue_message(message, message_severity=severity, pr=pr)
|
||||
except TypeError:
|
||||
log.info('Committer outside of OpenVino organization')
|
||||
|
||||
def _build_scheduled(self, pr):
|
||||
"""Check if Jenkins build corresponding to PR was scheduled.
|
||||
|
||||
This method takes last Jenkins build for given PR and compares hash from Jenkins console output
|
||||
and sha from PR object to determine if CI build for appropriate commit was scheduled.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
|
||||
:return: Returns build number or -1 if no build found
|
||||
:rtype: int
|
||||
"""
|
||||
pr_number = str(pr.number)
|
||||
project_name_full = self._ci_job_name + '/PR-' + pr_number
|
||||
|
||||
try:
|
||||
# Retrieve console output from last Jenkins build for job corresponding to this PR
|
||||
last_build_number = self._jenkins.get_job_info(project_name_full)['lastBuild']['number']
|
||||
console_output = self._jenkins.get_build_console_output(project_name_full, last_build_number)
|
||||
# Check if CI build was scheduled - commit hash on GH must match hash in last Jenkins build console output
|
||||
# Retrieve hash from Jenkins output
|
||||
match_string = '(?:Obtained .ci/[a-zA-Z/]+Jenkinsfile from ([a-z0-9]{40}))'
|
||||
retrieved_sha = re.search(match_string, console_output).group(1)
|
||||
if retrieved_sha == pr.get_commits().reversed[0].sha:
|
||||
return last_build_number
|
||||
else:
|
||||
return -1
|
||||
except (NotFoundException, AttributeError, requests.exceptions.HTTPError):
|
||||
message = ('PR #{}: Jenkins build corresponding to commit {} not found!'
|
||||
.format(pr_number, pr.get_commits().reversed[0].sha))
|
||||
self._queue_message(message, message_severity='error', pr=pr)
|
||||
return -1
|
||||
|
||||
def _build_in_queue(self, pr, build_number):
|
||||
"""Check if Jenkins build waits in queue.
|
||||
|
||||
This method verifies if CI build is waiting in queue based on console output.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:param build_number: Jenkins build number to retrieve console output from
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
:type build_number: int
|
||||
|
||||
:return: Returns True if CI build is waiting in queue
|
||||
:rtype: Bool
|
||||
"""
|
||||
pr_number = str(pr.number)
|
||||
project_name_full = self._ci_job_name + '/PR-' + pr_number
|
||||
# Retrieve console output
|
||||
try:
|
||||
console_output = self._jenkins.get_build_console_output(project_name_full, build_number)
|
||||
except NotFoundException:
|
||||
return False
|
||||
# Check if build is waiting in queue (and not already running on an executor)
|
||||
if 'Waiting for next available executor on' in console_output \
|
||||
and 'Running on' not in console_output:
|
||||
log.info('CI for PR %s: WAITING IN QUEUE', pr_number)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _interpret_status(self, status, pr):
|
||||
"""
|
||||
Verify GitHub status passed to the method.
|
||||
|
||||
This method verifies last commit status for given PR, calling appropriate methods
|
||||
to further validate the status.
|
||||
|
||||
:param status: GitHub commit status
|
||||
:param pr: Single PR being currently checked
|
||||
:type status: github.CommitStatus.CommitStatus
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
"""
|
||||
try:
|
||||
# Retrieve build number for Jenkins build related to this PR
|
||||
build_number = self._retrieve_build_number(status.target_url)
|
||||
# CI build finished - verify if expected output is present
|
||||
finished_statuses = ['Build finished', 'This commit cannot be built', 'This commit looks good']
|
||||
pending_statuses = ['This commit is being built', 'Testing in progress',
|
||||
'This commit is scheduled to be built']
|
||||
if any(phrase in status.description for phrase in finished_statuses):
|
||||
self._check_finished(pr, build_number)
|
||||
# CI build in progress - verify timeouts for build queue and duration
|
||||
elif any(phrase in status.description for phrase in pending_statuses):
|
||||
self._check_in_progress(pr, build_number)
|
||||
else:
|
||||
message = 'ONNX CI job for PR# {}: unrecognized status: {}'.format(pr.number, status.description)
|
||||
self._queue_message(message, message_severity='error', pr=pr)
|
||||
except Exception:
|
||||
# Log Watchdog internal error in case any status can't be properly verified
|
||||
message = 'Failed to verify status "{}" for PR# {}'.format(status.description, pr.number)
|
||||
log.exception(message)
|
||||
self._queue_message(message, message_severity='internal', pr=pr)
|
||||
|
||||
def _retrieve_build_number(self, url):
|
||||
"""Retrieve Jenkins CI job build number from URL address coming from GitHub commit status.
|
||||
|
||||
:param url: URL address from GitHub commit status
|
||||
:type url: String
|
||||
|
||||
:return: Returns build number
|
||||
:rtype: int
|
||||
"""
|
||||
# Retrieve the build number from url string
|
||||
match_obj = re.search('(?:/PR-[0-9]+/)([0-9]+)', url)
|
||||
try:
|
||||
number = int(match_obj.group(1))
|
||||
return number
|
||||
except Exception:
|
||||
log.exception('Failed to retrieve build number from url link: %s', url)
|
||||
raise
|
||||
|
||||
def _queue_message(self, message, message_severity='info', pr=None):
|
||||
"""Add a message to message queue in communicator object.
|
||||
|
||||
The queued message is constructed based on message string passed as
|
||||
a method argument and message header. Message header is mapped to message severity
|
||||
also passed as an argument.
|
||||
|
||||
:param message: Message content
|
||||
:param message_severity: Message severity level
|
||||
:type message: String
|
||||
:type message_severity: int
|
||||
"""
|
||||
log.info(message)
|
||||
internal = False
|
||||
if 'internal' in message_severity:
|
||||
message_header = INTERNAL_ERROR_MESSAGE_HEADER
|
||||
internal = True
|
||||
elif 'error' in message_severity:
|
||||
message_header = ERROR_MESSAGE_HEADER
|
||||
elif 'warning' in message_severity:
|
||||
message_header = WARNING_MESSAGE_HEADER
|
||||
else:
|
||||
message_header = INFO_MESSAGE_HEADER
|
||||
# If message is related to PR attatch url
|
||||
if pr:
|
||||
message = message + '\n' + pr.html_url
|
||||
|
||||
send = message_header + '\n' + message
|
||||
if self._ms_teams_enabled:
|
||||
self._msteams_hook.queue_message(send)
|
||||
|
||||
def _check_finished(self, pr, build_number):
|
||||
"""Verify if finished build output contains expected string for either fail or success.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:param build_number: Jenkins CI job build number
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
:type build_number: int
|
||||
"""
|
||||
pr_number = str(pr.number)
|
||||
log.info('CI for PR %s: FINISHED', pr_number)
|
||||
# Check if FINISH was valid FAIL / SUCCESS
|
||||
project_name_full = self._ci_job_name + '/PR-' + pr_number
|
||||
build_output = self._jenkins.get_build_console_output(project_name_full, build_number)
|
||||
if _CI_BUILD_FAIL_MESSAGE not in build_output \
|
||||
and _CI_BUILD_SUCCESS_MESSAGE not in build_output:
|
||||
message = ('ONNX CI job for PR #{}: finished but no tests success or fail '
|
||||
'confirmation is present in console output!'.format(pr_number))
|
||||
self._queue_message(message, message_severity='error', pr=pr)
|
||||
|
||||
def _send_message(self, quiet=False):
|
||||
"""Send messages queued in MS Teams objects to designated channel.
|
||||
|
||||
Queued messages are being sent as a single communication.
|
||||
|
||||
:param quiet: Flag for disabling sending report through communicator
|
||||
:type quiet: Boolean
|
||||
"""
|
||||
if any(messages for messages in self._msteams_hook.messages):
|
||||
try:
|
||||
watchdog_build = self._jenkins.get_job_info(self._watchdog_job_name)['lastBuild']
|
||||
watchdog_build_number = watchdog_build['number']
|
||||
watchdog_build_link = watchdog_build['url']
|
||||
except Exception:
|
||||
watchdog_build_number = 'UNKNOWN'
|
||||
watchdog_build_link = self._jenkins.jenkins_server
|
||||
send = self._watchdog_job_name + '- build ' + str(
|
||||
watchdog_build_number) + ' - ' + watchdog_build_link
|
||||
|
||||
if self._ms_teams_enabled:
|
||||
self._msteams_hook.send_message(send, quiet=quiet)
|
||||
else:
|
||||
log.info('Nothing to report.')
|
||||
|
||||
def _check_in_progress(self, pr, build_number):
|
||||
"""Check if CI build succesfully started.
|
||||
|
||||
Checks if build started within designated time threshold, and job is
|
||||
currently running - it didn't cross the time threshold.
|
||||
|
||||
:param pr: Single PR being currently checked
|
||||
:param build_number: Jenkins CI job build number
|
||||
:type pr: github.PullRequest.PullRequest
|
||||
:type build_number: int
|
||||
"""
|
||||
pr_number = str(pr.number)
|
||||
log.info('CI for PR %s: TESTING IN PROGRESS', pr_number)
|
||||
project_name_full = self._ci_job_name + '/PR-' + pr_number
|
||||
build_info = self._jenkins.get_build_info(project_name_full, build_number)
|
||||
build_datetime = datetime.datetime.fromtimestamp(build_info['timestamp'] / 1000.0)
|
||||
build_delta = self._now_time - build_datetime
|
||||
log.info('Build %s: IN PROGRESS, started: %s minutes ago', str(build_number),
|
||||
str(build_delta))
|
||||
# If build still waiting in queue
|
||||
if build_delta > _CI_START_THRESHOLD and self._build_in_queue(pr, build_number):
|
||||
message = ('ONNX CI job build #{}, for PR #{} waiting in queue after {} '
|
||||
'minutes'.format(build_number, pr_number, str(build_delta.seconds / 60)))
|
||||
self._queue_message(message, message_severity='warning', pr=pr)
|
||||
elif build_delta > _BUILD_DURATION_THRESHOLD:
|
||||
# CI job take too long, possibly froze - communicate failure
|
||||
message = ('ONNX CI job build #{}, for PR #{} started, '
|
||||
'but did not finish in designated time of {} '
|
||||
'minutes!'.format(build_number, pr_number,
|
||||
str(_BUILD_DURATION_THRESHOLD.seconds / 60)))
|
||||
self._queue_message(message, message_severity='error', pr=pr)
|
||||
|
||||
def _update_config(self):
|
||||
"""Update Watchdog config file with PRs checked in current Watchdog run, remove old entries.
|
||||
|
||||
:param current_prs: List of PR numbers checked during current Watchdog run
|
||||
:type current_prs: list of ints
|
||||
"""
|
||||
# Cleanup config of old reports
|
||||
log.info('Writing to config file at: {}'.format(self._config_path))
|
||||
new_config = {_PR_REPORTS_CONFIG_KEY: self._current_prs}
|
||||
file = open(self._config_path, 'w+')
|
||||
json.dump(new_config, file)
|
||||
3
.github/dependabot.yml
vendored
3
.github/dependabot.yml
vendored
@@ -7,7 +7,7 @@ updates:
|
||||
directory: "/src/bindings/python"
|
||||
schedule:
|
||||
interval: weekly
|
||||
day: sunday
|
||||
day: monday
|
||||
time: "13:00"
|
||||
open-pull-requests-limit: 0
|
||||
reviewers:
|
||||
@@ -15,3 +15,4 @@ updates:
|
||||
- akuporos
|
||||
labels:
|
||||
- "category: dependencies"
|
||||
|
||||
|
||||
9
.github/github_org_control/config.json
vendored
9
.github/github_org_control/config.json
vendored
@@ -5,10 +5,11 @@
|
||||
"IGNORE_LOGINS": [
|
||||
"openvino-ci",
|
||||
"openvino-pushbot",
|
||||
"lab-nerval",
|
||||
"lab-nerval-onnx-ci",
|
||||
"onnx-watchdog-agent",
|
||||
"workbench-ci-bot",
|
||||
"openvino-pot-ci",
|
||||
"sysicvvpux",
|
||||
"ote-ci-bot"
|
||||
"openvino-pot-ci"
|
||||
],
|
||||
"MAX_MEMBERS_TO_REMOVE": 15,
|
||||
"EMAILS_FILE_PATH": "dev_emails-test.txt",
|
||||
@@ -27,7 +28,7 @@
|
||||
"openvino-ie-gna-maintainers": "category: GNA",
|
||||
"openvino-ie-gpu-maintainers": "category: GPU",
|
||||
"openvino-ie-lpt-maintainers": "category: LP transformations",
|
||||
"openvino-ie-auto-multi-maintainers": "category: MULTI",
|
||||
"openvino-ie-multi-maintainers": "category: MULTI",
|
||||
"openvino-ie-python-api-maintainers": "category: python api",
|
||||
"openvino-ie-template-maintainers": "category: TEMPLATE",
|
||||
"openvino-ie-tests-maintainers": "category: IE Tests",
|
||||
|
||||
2
.github/github_org_control/github_api.py
vendored
2
.github/github_org_control/github_api.py
vendored
@@ -157,7 +157,7 @@ class GithubOrgApi:
|
||||
self.github_users_by_email[email] = org_member
|
||||
if not is_valid_name(org_member.name):
|
||||
self.members_to_fix_name.add(org_member)
|
||||
else:
|
||||
elif not is_user_ignored(org_member):
|
||||
self.members_to_remove.add(org_member)
|
||||
|
||||
print("\nOrg members - no Intel emails:")
|
||||
|
||||
69
.github/workflows/py_checks.yml
vendored
69
.github/workflows/py_checks.yml
vendored
@@ -1,4 +1,4 @@
|
||||
name: Python API Checks
|
||||
name: IE Python Checks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@@ -23,9 +23,8 @@ jobs:
|
||||
with:
|
||||
python-version: '3.6'
|
||||
- name: Install dependencies
|
||||
run: python -m pip install -r src/bindings/python/requirements_test.txt
|
||||
# samples code-style
|
||||
- name: Run flake8 on samples
|
||||
run: python -m pip install -r src/bindings/python/src/compatibility/openvino/requirements_dev.txt
|
||||
- name: Run Flake on samples
|
||||
run: python -m flake8 ./ --config=setup.cfg
|
||||
working-directory: samples/python
|
||||
- name: Create code style diff for samples
|
||||
@@ -39,53 +38,21 @@ jobs:
|
||||
with:
|
||||
name: samples_diff
|
||||
path: samples_diff.diff
|
||||
# IE Python API Flake code-style
|
||||
- name: Run flake8 on IE Python API
|
||||
- name: Run Flake on src
|
||||
run: python -m flake8 ./ --config=setup.cfg
|
||||
working-directory: src/bindings/python/src/compatibility/openvino
|
||||
- name: Create code style diff for IE Python API
|
||||
- name: Create code style diff for Python src
|
||||
if: failure()
|
||||
run: |
|
||||
python -m black -l 160 -S ./
|
||||
git diff > ie_python_diff.diff
|
||||
git diff > src_diff.diff
|
||||
working-directory: src/bindings/python/src/compatibility/openvino
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: failure()
|
||||
with:
|
||||
name: ie_python_diff
|
||||
path: ie_python_diff.diff
|
||||
# nGraph Python API Flake code-style
|
||||
- name: Run flake8 on nGraph Python API
|
||||
run: python -m flake8 ./src/compatibility/ngraph --config=setup.cfg
|
||||
working-directory: src/bindings/python
|
||||
- name: Create code style diff for nGraph Python API
|
||||
if: failure()
|
||||
run: |
|
||||
python -m black -l 160 -S ./
|
||||
git diff > pyngraph_diff.diff
|
||||
working-directory: src/bindings/python/src/compatibility/ngraph
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: failure()
|
||||
with:
|
||||
name: pyngraph_diff
|
||||
path: pyngraph_diff.diff
|
||||
# Python API 2.0 Flake code-style
|
||||
- name: Run flake8 on Python API 2.0
|
||||
run: python -m flake8 ./src/openvino --config=setup.cfg
|
||||
working-directory: src/bindings/python
|
||||
- name: Create code style diff for Python API 2.0
|
||||
if: failure()
|
||||
run: |
|
||||
python -m black -l 160 -S ./
|
||||
git diff > pyopenvino_diff.diff
|
||||
working-directory: src/bindings/python/src/openvino
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: failure()
|
||||
with:
|
||||
name: pyopenvino_diff
|
||||
path: pyopenvino_diff.diff
|
||||
# wheel Flake code-style
|
||||
- name: Run flake8 on wheel
|
||||
name: src_diff
|
||||
path: src_diff.diff
|
||||
- name: Run Flake on wheel
|
||||
run: python -m flake8 ./ --config=../setup.cfg
|
||||
working-directory: src/bindings/python/wheel
|
||||
- name: Create code style diff for wheel
|
||||
@@ -99,24 +66,10 @@ jobs:
|
||||
with:
|
||||
name: wheel_diff
|
||||
path: wheel_diff.diff
|
||||
# Python API 2.0 tests Flake code-style
|
||||
- name: Run flake8 on python tests
|
||||
# ignore lack of docs in tests
|
||||
run: python -m flake8 tests/ --config=setup.cfg
|
||||
working-directory: src/bindings/python
|
||||
# IE Python API mypy check
|
||||
- name: Run mypy on IE Python API
|
||||
|
||||
- name: Run MyPy
|
||||
run: python -m mypy ./ --config-file ./setup.cfg
|
||||
working-directory: src/bindings/python/src/compatibility/openvino
|
||||
# nGraph Python API mypy check
|
||||
- name: Run mypy on nGraph Python API
|
||||
run: python -m mypy ./src/compatibility/ngraph --config-file ./setup.cfg
|
||||
working-directory: src/bindings/python
|
||||
# Python API 2.0 mypy check
|
||||
- name: Run mypy on Python API 2.0
|
||||
run: python -m mypy ./src/openvino --config-file ./setup.cfg
|
||||
working-directory: src/bindings/python
|
||||
|
||||
- name: Run Bandit
|
||||
run: python -m bandit -r ./ -f screen
|
||||
working-directory: src/bindings/python/src/compatibility/openvino
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,5 +1,8 @@
|
||||
# build/artifact dirs
|
||||
_*
|
||||
[Bb]uild*/
|
||||
cmake-build*
|
||||
|
||||
# but ensure we don't skip __init__.py and __main__.py
|
||||
!__init__.py
|
||||
!__main__.py
|
||||
|
||||
6
.gitmodules
vendored
6
.gitmodules
vendored
@@ -1,6 +1,6 @@
|
||||
[submodule "src/plugins/intel_cpu/thirdparty/onednn"]
|
||||
path = src/plugins/intel_cpu/thirdparty/onednn
|
||||
url = https://github.com/luo-cheng2021/oneDNN.git
|
||||
[submodule "src/plugins/intel_cpu/thirdparty/mkl-dnn"]
|
||||
path = src/plugins/intel_cpu/thirdparty/mkl-dnn
|
||||
url = https://github.com/openvinotoolkit/oneDNN.git
|
||||
ignore = dirty
|
||||
[submodule "thirdparty/xbyak"]
|
||||
path = thirdparty/xbyak
|
||||
|
||||
@@ -51,7 +51,6 @@ file(REMOVE "${CMAKE_BINARY_DIR}/InferenceEngineTargets.cmake")
|
||||
file(REMOVE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake")
|
||||
foreach(component IN LISTS openvino_export_components)
|
||||
file(REMOVE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake")
|
||||
file(REMOVE "${CMAKE_BINARY_DIR}/ov_${component}_dev_targets.cmake")
|
||||
unset(${component} CACHE)
|
||||
endforeach()
|
||||
unset(openvino_export_components CACHE)
|
||||
|
||||
16
CODEOWNERS
16
CODEOWNERS
@@ -39,10 +39,8 @@ Jenkinsfile @openvinotoolkit/openvino-admins
|
||||
|
||||
# IE CPU:
|
||||
/src/plugins/intel_cpu/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
|
||||
/src/plugins/intel_cpu/thirdparty/onednn/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
|
||||
|
||||
#IE LPT
|
||||
/src/common/low_precision_transformations/ @openvinotoolkit/openvino-ie-lpt-maintainers
|
||||
/src/common/low_precision_transformations/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
|
||||
/src/plugins/intel_cpu/thirdparty/mkl-dnn/ @openvinotoolkit/openvino-ie-cpu-maintainers @openvinotoolkit/openvino-ie-cpu-developers
|
||||
|
||||
# IE GPU:
|
||||
/src/inference/include/ie/gpu/ @openvinotoolkit/openvino-ie-gpu-maintainers @openvinotoolkit/openvino-ie-gpu-developers
|
||||
@@ -79,8 +77,8 @@ Jenkinsfile @openvinotoolkit/openvino-admins
|
||||
/src/frontends/paddle/ @openvinotoolkit/openvino-ie-paddle-maintainers
|
||||
|
||||
# IE Tests:
|
||||
/src/tests/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ie-test-developers
|
||||
/src/tests_deprecated/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ie-test-developers
|
||||
/src/tests/ @openvinotoolkit/openvino-ie-tests-maintainers
|
||||
/src/tests_deprecated/ @openvinotoolkit/openvino-ie-tests-maintainers
|
||||
/src/tests/functional/inference_engine/ngraph_reader/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
|
||||
/src/tests/functional/inference_engine/transformations/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
|
||||
|
||||
@@ -90,6 +88,6 @@ Jenkinsfile @openvinotoolkit/openvino-admins
|
||||
*.md @openvinotoolkit/openvino-docs-maintainers
|
||||
|
||||
# Control 3d party dependencies
|
||||
**/*requirements*.* @openvinotoolkit/openvino-configuration-mgmt
|
||||
**/setup.py @openvinotoolkit/openvino-configuration-mgmt
|
||||
/scripts/install_dependencies/ @openvinotoolkit/openvino-configuration-mgmt
|
||||
**/*requirements*.* @openvino-configuration-mgmt
|
||||
**/setup.py @openvino-configuration-mgmt
|
||||
/scripts/install_dependencies/ @openvino-configuration-mgmt
|
||||
|
||||
@@ -1,55 +1,68 @@
|
||||
# How to contribute to the OpenVINO repository
|
||||
|
||||
We welcome community contributions to OpenVINO™. Please read the following guide to learn how to find ideas for contribution, practices for good pull requests, checking your changes with our tests and more.
|
||||
We assume that you are an enthusiastic coder who wants to contribute some code. For that purpose, the OpenVINO project has a repository on GitHub, to simplify everybody's life! All bug fixes, new functionality, new tutorials, etc. should be submitted via GitHub's pull request mechanism.
|
||||
|
||||
If you are not familiar with the mechanism - do not worry, it's very simple. Keep reading.
|
||||
|
||||
## Before you start contributing you should
|
||||
|
||||
- Make sure you agree to contribute your code under [OpenVINO™ (Apache 2.0)](https://github.com/openvinotoolkit/openvino/blob/master/LICENSE) license.
|
||||
- Figure out what you’re going to contribute. If you don’t know what you are going to work on, navigate to the [Github "Issues" tab](https://github.com/openvinotoolkit/openvino/issues). Make sure that there isn't someone working on it. In the latter case you might provide support or suggestion in the issue or in the linked pull request.
|
||||
- If you are going to fix a bug, check that it still exists in the latest release. This can be done by building the latest master branch and making sure that the error is still reproducible there. We do not fix bugs that only affect older non-LTS releases like 2020.2, for example (more details about [branching strategy](https://github.com/openvinotoolkit/openvino/wiki/Branches)).
|
||||
- Make sure you agree to contribute your code under [OpenVINO (Apache 2.0)](https://github.com/openvinotoolkit/openvino/blob/master/LICENSE) license.
|
||||
- If you are submitting a new module, you should go into [openvino_contrib](https://github.com/openvinotoolkit/openvino_contrib) repository by default.
|
||||
- If you are going to fix a bug, check that it still exists. This can be done by building the latest [releases/2020/3](https://github.com/openvinotoolkit/openvino/tree/releases/2020/3) branch (LTS release) or the latest master branch, and making sure that the error is still reproducible there. We do not fix bugs that only affect older non-LTS releases like 2020.2, for example (more details about [branching strategy](https://github.com/openvinotoolkit/openvino/wiki/Branches))
|
||||
- Make sure that nobody beat you to fixing or reporting the issue by doing a search on the [Github OpenVINO issues](https://github.com/openvinotoolkit/openvino/issues) page, and making sure that there isn't someone already working on it. In the latter case, you might provide support or suggestions in the issue or in the linked pull request.
|
||||
- If you have a question about the software, then this is **NOT** the right place. You should open up a question at the [OpenVINO forum](https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/bd-p/distribution-openvino-toolkit). In order to post a decent question from the start, feel free to read the official forum guidelines.
|
||||
|
||||
Before you open up anything on the OpenVINO GitHub page, be sure that you are at the right place with your problem.
|
||||
|
||||
## "Fork & Pull Request model" for code contribution
|
||||
|
||||
### [](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING.md#the-instruction-in-brief)The instruction in brief
|
||||
### [](https://github.com/openvinotoolkit/openvino/wiki/Contribute#the-instruction-in-brief)The instruction in brief
|
||||
|
||||
- Register at GitHub. Create your fork of OpenVINO™ repository [https://github.com/openvinotoolkit/openvino](https://github.com/openvinotoolkit/openvino) (see [https://help.github.com/articles/fork-a-repo](https://help.github.com/articles/fork-a-repo) for details).
|
||||
- Register at GitHub. Create your fork of OpenVINO repository [https://github.com/openvinotoolkit/openvino](https://github.com/openvinotoolkit/openvino) (see [https://help.github.com/articles/fork-a-repo](https://help.github.com/articles/fork-a-repo) for details).
|
||||
- Install Git.
|
||||
- Set your user name and email address in a Git configuration according to GitHub account (see [https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) for details).
|
||||
- Choose a task for yourself. It could be a bugfix or some new code.
|
||||
- Choose a base branch for your work. More details about branches and policies are here: [Branches](https://github.com/openvinotoolkit/openvino/wiki/Branches)
|
||||
- Clone your fork to your computer.
|
||||
- Create a new branch (with a meaningful name) from the base branch you chose.
|
||||
- Modify / add the code following our [Coding Style Guide](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLines).
|
||||
- Modify / add the code following our [Coding Style Guide](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLines) and [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation).
|
||||
- If you want to add a new sample, please look at this [Guide for contributing to C++/C/Python IE samples](https://github.com/openvinotoolkit/openvino/wiki/SampleContribute)
|
||||
- If you want to contribute to the documentation and want to add a new guide, follow that instruction [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation)
|
||||
- Run testsuite locally:
|
||||
- execute each test binary from the artifacts directory, e.g. `<source dir>/bin/intel64/Release/ieFuncTests`
|
||||
- If you contribute to the documentation and want to add a new guide:
|
||||
- Create a new markdown file in an appropriate folder.
|
||||
- **REQUIRED:** The document title must contain a document label in a form: `{#openvino_docs_<name>}`. For example: `Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™ {#openvino_docs_MO_DG_IR_and_opsets}`.
|
||||
- Add your file to the documentation structure. Open the documentation structure file [`docs/doxygen/ie_docs.xml`](https://github.com/openvinotoolkit/openvino/blob/master/docs/doxygen/ie_docs.xml) and add your file path to the appropriate section.
|
||||
- When you are done, make sure that your branch is up to date with the latest state of the branch you want to contribute to (e.g. `git fetch upstream && git merge upstream/master`), push your branch to your GitHub fork; then create a pull request from your branch to the base branch (see [https://help.github.com/articles/using-pull-requests](https://help.github.com/articles/using-pull-requests) for details).
|
||||
|
||||
## Making a good pull request
|
||||
|
||||
Following these guidelines will increase the likelihood of your pull request being accepted:
|
||||
|
||||
- One PR – one issue.
|
||||
- Build perfectly on your local system.
|
||||
- Choose the right base branch [Branches](https://github.com/openvinotoolkit/openvino/wiki/Branches).
|
||||
- Follow the [Coding Style Guide](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLines) for your code.
|
||||
- Update documentation using [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation) if needed.
|
||||
- Cover your changes with test.
|
||||
- Add license at the top of new files [C++ example](https://github.com/openvinotoolkit/openvino/blob/master/samples/cpp/classification_sample_async/main.cpp#L1-L2), [Python example](https://github.com/openvinotoolkit/openvino/blob/master/samples/python/hello_classification/hello_classification.py#L3-L4).
|
||||
- Add enough information: a meaningful title, the reason why you made the commit and a link to the issue page if exists.
|
||||
- Remove unrelated to PR changes.
|
||||
- If it is still WIP and you want to check CI test results early then use _Draft_ PR.
|
||||
- Submit your PR and become an OpenVINO™ contributor!
|
||||
|
||||
- Before pushing your PR to the repository, make sure that it builds perfectly fine on your local system.
|
||||
- Add enough information, like a meaningful title, the reason why you made the commit and a link to the issue page if you opened one for this PR.
|
||||
- Scope your PR to one issue. Before submitting, make sure the diff contains no unrelated changes. If you want to cover more than one issue, submit your changes for each as separate pull requests.
|
||||
- If you have added new functionality, you should update/create the relevant documentation, as well as add tests for it to the testsuite.
|
||||
- Try not to include "oops" commits - ones that just fix an error in the previous commit. If you have those, then before submitting [squash](https://github.com/openvinotoolkit/openvino/wiki/Contribute#https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits) those fixes directly into the commits where they belong.
|
||||
- Make sure to choose the right base branch and to follow the [Coding Style Guide](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLines) for your code, or the [Documentation guidelines](https://github.com/openvinotoolkit/openvino/wiki/CodingStyleGuideLinesDocumentation) if you are changing documentation files.
|
||||
- Make sure to add test for new functionality or test that reproduces fixed bug with related test data. Please do not add extra images or videos, if some of existing media files are suitable.
|
||||
|
||||
## Testing and merging pull requests
|
||||
|
||||
Your pull request will be automatically tested by OpenVINO™'s precommit (testing statuses are automatically reported as "green" or "red" circles in precommit steps on the PR's page). If any builders have failed, you need to fix the issue. To rerun the automatic builds, just push changes to your branch on GitHub. No need to close the pull request and open a new one!
|
||||
- Your pull request will be automatically tested by OpenVINO's precommit (testing status are automatically reported as "green" or "red" circles in precommit steps on PR's page). If any builders have failed, you should fix the issue. To rerun the automatic builds just push changes to your branch on GitHub. No need to close pull request and open a new one!
|
||||
- Once all the builders are "green", one of the OpenVINO developers will review your code. The reviewer could ask you to modify your pull request. Please provide a timely response for reviewers (within weeks, not months), otherwise your submission could be postponed or even rejected.
|
||||
|
||||
## PR review good practices
|
||||
|
||||
- Originator is responsible for driving the review of changes and should ping reviewers periodically.
|
||||
- Originator should close comments from the Reviewer when it is resolved. The Reviewer may re-open the comment if he does not agree with the resolution.
|
||||
- Originator should request re-review from the Reviewer when all comments are resolved by pushing the button in the “Reviewers” section.
|
||||
- If it is still WIP and you want to check CI test results early then use _Draft_ PR.
|
||||
- Do **NOT** rewrite history (push -f) once you converted draft PR into regular one, add new commits instead. Looking at diffs makes review easier.
|
||||
- Write meaningful description of commits resulting from review. _"Addressing review comments"_ is **NOT** a good description! Having a quick look at good descriptions can tell you much what is going on in PR without a need to go through all of resolved comments.
|
||||
|
||||
## Merging PR
|
||||
|
||||
As soon as the reviewer is fine with the pull request and precommit shows "green" status, the "Approved" review status is put, which signals OpenVINO™ maintainers that they can merge your pull request.
|
||||
As soon as the reviewer is fine with the pull request and Precommit likes your code and shows "green" status, the "Approved" review status is put, which signals OpenVINO maintainers that they can merge your pull request.
|
||||
|
||||
© Copyright 2018-2022, OpenVINO team
|
||||
206
README.md
206
README.md
@@ -1,202 +1,42 @@
|
||||
<div align="center">
|
||||
|
||||
<img src="docs/img/openvino-logo-purple-black.png" width="400px">
|
||||
|
||||
# OpenVINO™ Toolkit
|
||||
[](https://github.com/openvinotoolkit/openvino/releases/tag/2022.1)
|
||||
[](LICENSE)
|
||||

|
||||

|
||||
[](https://badge.fury.io/py/openvino)
|
||||
[](https://pepy.tech/project/openvino)
|
||||
|
||||
</div>
|
||||
|
||||
## Contents:
|
||||
This toolkit allows developers to deploy pre-trained deep learning models
|
||||
through a high-level OpenVINO™ Runtime C++ and Python APIs integrated with application logic.
|
||||
|
||||
- [What is OpenVINO?](#what-is-openvino-toolkit)
|
||||
- [Components](#components)
|
||||
- [Supported Hardware matrix](#supported-hardware-matrix)
|
||||
- [License](#license)
|
||||
- [Documentation](#documentation)
|
||||
- [Tutorials](#tutorials)
|
||||
- [Products which use OpenVINO](#products-which-use-openvino)
|
||||
- [System requirements](#system-requirements)
|
||||
- [How to build](#how-to-build)
|
||||
- [How to contribute](#how-to-contribute)
|
||||
- [Get a support](#get-a-support)
|
||||
- [See also](#see-also)
|
||||
|
||||
## What is OpenVINO toolkit?
|
||||
|
||||
OpenVINO™ is an open-source toolkit for optimizing and deploying AI inference.
|
||||
- Boost deep learning performance in computer vision, automatic speech recognition, natural language processing and other common tasks
|
||||
- Use models trained with popular frameworks like TensorFlow, PyTorch and more
|
||||
- Reduce resource demands and efficiently deploy on a range of Intel® platforms from edge to cloud
|
||||
|
||||
|
||||
This open-source version includes several components: namely [Model Optimizer], [OpenVINO™ Runtime], [Post-Training Optimization Tool], as well as CPU, GPU, MYRIAD, multi device and heterogeneous plugins to accelerate deep learning inferencing on Intel® CPUs and Intel® Processor Graphics.
|
||||
This open source version includes several components: namely [Model Optimizer], [OpenVINO™ Runtime], [Post-Training Optimization Tool], as well as CPU, GPU, MYRIAD, multi device and heterogeneous plugins to accelerate deep learning inferencing on Intel® CPUs and Intel® Processor Graphics.
|
||||
It supports pre-trained models from the [Open Model Zoo], along with 100+ open
|
||||
source and public models in popular formats such as TensorFlow, ONNX, PaddlePaddle, MXNet, Caffe, Kaldi.
|
||||
|
||||
### Components
|
||||
* [OpenVINO™ Runtime] - is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice.
|
||||
* [core](https://github.com/openvinotoolkit/openvino/tree/master/src/core) - provides the base API for model representation and modification.
|
||||
* [inference](https://github.com/openvinotoolkit/openvino/tree/master/src/inference) - provides an API to infer models on device.
|
||||
* [transformations](https://github.com/openvinotoolkit/openvino/tree/master/src/common/transformations) - contains the set of common transformations which are used in OpenVINO plugins.
|
||||
* [low precision transformations](https://github.com/openvinotoolkit/openvino/tree/master/src/common/low_precision_transformations) - contains the set of transformations which are used in low precision models
|
||||
* [bindings](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings) - contains all available OpenVINO bindings which are maintained by the OpenVINO team.
|
||||
* [c](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings/c) - provides C API for OpenVINO™ Runtime
|
||||
* [python](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings/python) - Python API for OpenVINO™ Runtime
|
||||
* [Plugins](https://github.com/openvinotoolkit/openvino/tree/master/src/plugins) - contains OpenVINO plugins which are maintained in open source by the OpenVINO team. For more information, please take a look at the [list of supported devices](#supported-hardware-matrix).
|
||||
* [Frontends](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends) - contains available OpenVINO frontends which allow to read model from native framework format.
|
||||
* [Model Optimizer] - is a cross-platform command-line tool that facilitates the transition between training and deployment environments, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices.
|
||||
* [Post-Training Optimization Tool] - is designed to accelerate the inference of deep learning models by applying special methods without model retraining or fine-tuning, for example, post-training 8-bit quantization.
|
||||
* [Samples] - applications on C, C++ and Python languages which shows basic use cases of OpenVINO usages.
|
||||
|
||||
## Supported Hardware matrix
|
||||
|
||||
The OpenVINO™ Runtime can infer models on different hardware devices. This section provides the list of supported devices.
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Device</th>
|
||||
<th>Plugin</th>
|
||||
<th>Library</th>
|
||||
<th>ShortDescription</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td rowspan=2>CPU</td>
|
||||
<td> <a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_CPU.html#doxid-openvino-docs-o-v-u-g-supported-plugins-c-p-u">Intel CPU</a></tb>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/intel_cpu">openvino_intel_cpu_plugin</a></i></b></td>
|
||||
<td>Intel Xeon with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel Core Processors with Intel AVX2, Intel Atom Processors with Intel® Streaming SIMD Extensions (Intel® SSE)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td> <a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_ARM_CPU.html">ARM CPU</a></tb>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino_contrib/tree/master/modules/arm_plugin">openvino_arm_cpu_plugin</a></i></b></td>
|
||||
<td>Raspberry Pi™ 4 Model B, Apple® Mac mini with M1 chip, NVIDIA® Jetson Nano™, Android™ devices
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GPU</td>
|
||||
<td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_GPU.html#doxid-openvino-docs-o-v-u-g-supported-plugins-g-p-u">Intel GPU</a></td>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/intel_gpu">openvino_intel_gpu_plugin</a></i></b></td>
|
||||
<td>Intel Processor Graphics, including Intel HD Graphics and Intel Iris Graphics</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GNA</td>
|
||||
<td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_supported_plugins_GNA.html#doxid-openvino-docs-o-v-u-g-supported-plugins-g-n-a">Intel GNA</a></td>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/intel_gna">openvino_intel_gna_plugin</a></i></b></td>
|
||||
<td>Intel Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel Pentium Silver J5005 Processor, Intel Pentium Silver N5000 Processor, Intel Celeron J4005 Processor, Intel Celeron J4105 Processor, Intel Celeron Processor N4100, Intel Celeron Processor N4000, Intel Core i3-8121U Processor, Intel Core i7-1065G7 Processor, Intel Core i7-1060G7 Processor, Intel Core i5-1035G4 Processor, Intel Core i5-1035G7 Processor, Intel Core i5-1035G1 Processor, Intel Core i5-1030G7 Processor, Intel Core i5-1030G4 Processor, Intel Core i3-1005G1 Processor, Intel Core i3-1000G1 Processor, Intel Core i3-1000G4 Processor</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>VPU</td>
|
||||
<td><a href="https://docs.openvino.ai/nightly/openvino_docs_IE_DG_supported_plugins_VPU.html#doxid-openvino-docs-i-e-d-g-supported-plugins-v-p-u">Myriad plugin</a></td>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/intel_myriad">openvino_intel_myriad_plugin</a></i></b></td>
|
||||
<td>Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
Also, OpenVINO™ Toolkit contains several plugins that simplify loading a model on several hardware devices:
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Plugin</th>
|
||||
<th>Library</th>
|
||||
<th>ShortDescription</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><a href="https://docs.openvino.ai/nightly/openvino_docs_IE_DG_supported_plugins_AUTO.html#doxid-openvino-docs-i-e-d-g-supported-plugins-a-u-t-o">Auto</a></td>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/auto">openvino_auto_plugin</a></i></b></td>
|
||||
<td>Auto plugin enables selecting Intel device for inference automatically</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_Automatic_Batching.html">Auto Batch</a></td>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/auto_batch">openvino_auto_batch_plugin</a></i></b></td>
|
||||
<td>Auto batch plugin performs on-the-fly automatic batching (i.e. grouping inference requests together) to improve device utilization, with no programming effort from the user</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_Hetero_execution.html#doxid-openvino-docs-o-v-u-g-hetero-execution">Hetero</a></td>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/hetero">openvino_hetero_plugin</a></i></b></td>
|
||||
<td>Heterogeneous execution enables automatic inference splitting between several devices</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><a href="https://docs.openvino.ai/nightly/openvino_docs_OV_UG_Running_on_multiple_devices.html#doxid-openvino-docs-o-v-u-g-running-on-multiple-devices">Multi</a></td>
|
||||
<td><b><i><a href="https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/auto">openvino_auto_plugin</a></i></b></td>
|
||||
<td>Multi plugin enables simultaneous inference of the same model on several devices in parallel</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
## Repository components
|
||||
* [OpenVINO™ Runtime]
|
||||
* [Model Optimizer]
|
||||
* [Post-Training Optimization Tool]
|
||||
* [Samples]
|
||||
|
||||
## License
|
||||
OpenVINO™ Toolkit is licensed under [Apache License Version 2.0](LICENSE).
|
||||
By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms.
|
||||
|
||||
## Documentation
|
||||
|
||||
### User documentation
|
||||
|
||||
The latest documentation for OpenVINO™ Toolkit is available [here](https://docs.openvino.ai/). This documentation contains detailed information about all OpenVINO components and provides all the important information you could need if you create an application based on the binary OpenVINO distribution or your own OpenVINO version without source code modification.
|
||||
|
||||
### Developer documentation
|
||||
|
||||
[Developer documentation](#todo-add) contains information about architectural decisions which are applied inside the OpenVINO components. This documentation has all necessary information which could be needed in order to contribute to OpenVINO.
|
||||
|
||||
## Tutorials
|
||||
|
||||
The list of OpenVINO tutorials:
|
||||
|
||||
- [Jupyter notebooks](https://github.com/openvinotoolkit/openvino_notebooks)
|
||||
|
||||
## Products which use OpenVINO
|
||||
|
||||
- [OpenCV](https://opencv.org/)
|
||||
- [ONNX Runtime](https://onnxruntime.ai/)
|
||||
- [OpenVINO™ Integration with TensorFlow](https://www.intel.com/content/www/us/en/developer/tools/devcloud/edge/build/ovtfoverview.html)
|
||||
- [TNN](https://github.com/Tencent/TNN/tree/master)
|
||||
|
||||
## System requirements
|
||||
|
||||
The full information about system requirements depends on the platform and is available in the `System requirements` section on the dedicated pages:
|
||||
- [Linux](https://docs.openvino.ai/latest/openvino_docs_install_guides_installing_openvino_linux.html)
|
||||
- [Windows](https://docs.openvino.ai/latest/openvino_docs_install_guides_installing_openvino_windows.html)
|
||||
- [macOS](https://docs.openvino.ai/latest/openvino_docs_install_guides_installing_openvino_macos.html)
|
||||
- [Raspbian](https://docs.openvino.ai/latest/openvino_docs_install_guides_installing_openvino_raspbian.html)
|
||||
|
||||
## How to build
|
||||
|
||||
Please take a look to [OpenVINO Wiki](https://github.com/openvinotoolkit/openvino/wiki#how-to-build) to get more information about OpenVINO build process.
|
||||
|
||||
## How to contribute
|
||||
|
||||
See [CONTRIBUTING](./CONTRIBUTING.md) for details. Thank you!
|
||||
|
||||
## Get a support
|
||||
|
||||
Please report questions, issues and suggestions using:
|
||||
|
||||
* [GitHub* Issues](https://github.com/openvinotoolkit/openvino/issues)
|
||||
* The [`openvino`](https://stackoverflow.com/questions/tagged/openvino) tag on StackOverflow\*
|
||||
* [Forum](https://software.intel.com/en-us/forums/computer-vision)
|
||||
|
||||
## See also
|
||||
|
||||
* [OpenVINO Wiki](https://github.com/openvinotoolkit/openvino/wiki)
|
||||
* [OpenVINO Storage](https://storage.openvinotoolkit.org/)
|
||||
* Additional OpenVINO™ toolkit modules:
|
||||
* [openvino_contrib](https://github.com/openvinotoolkit/openvino_contrib)
|
||||
## Resources
|
||||
* Docs: https://docs.openvino.ai/
|
||||
* Wiki: https://github.com/openvinotoolkit/openvino/wiki
|
||||
* Issue tracking: https://github.com/openvinotoolkit/openvino/issues
|
||||
* Storage: https://storage.openvinotoolkit.org/
|
||||
* Additional OpenVINO™ toolkit modules: https://github.com/openvinotoolkit/openvino_contrib
|
||||
* [Intel® Distribution of OpenVINO™ toolkit Product Page](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html)
|
||||
* [Intel® Distribution of OpenVINO™ toolkit Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
|
||||
* [Neural Network Compression Framework (NNCF)](https://github.com/openvinotoolkit/nncf) - a suite of advanced algorithms for model inference optimization including quantization, filter pruning, binarization and sparsity
|
||||
* [OpenVINO™ Training Extensions (OTE)](https://github.com/openvinotoolkit/training_extensions) - convenient environment to train Deep Learning models and convert them using OpenVINO for optimized inference.
|
||||
* [OpenVINO™ Model Server (OVMS)](https://github.com/openvinotoolkit/model_server) - a scalable, high-performance solution for serving deep learning models optimized for Intel architectures
|
||||
* [DL Workbench](https://docs.openvino.ai/nightly/workbench_docs_Workbench_DG_Introduction.html) - An alternative, web-based version of OpenVINO designed to make production of pretrained deep learning models significantly easier.
|
||||
* [Computer Vision Annotation Tool (CVAT)](https://github.com/openvinotoolkit/cvat) - an online, interactive video and image annotation tool for computer vision purposes.
|
||||
* [Dataset Management Framework (Datumaro)](https://github.com/openvinotoolkit/datumaro) - a framework and CLI tool to build, transform, and analyze datasets.
|
||||
|
||||
## Support
|
||||
Please report questions, issues and suggestions using:
|
||||
|
||||
* The [`openvino`](https://stackoverflow.com/questions/tagged/openvino) tag on StackOverflow\*
|
||||
* [GitHub* Issues](https://github.com/openvinotoolkit/openvino/issues)
|
||||
* [Forum](https://software.intel.com/en-us/forums/computer-vision)
|
||||
|
||||
---
|
||||
\* Other names and brands may be claimed as the property of others.
|
||||
@@ -206,3 +46,5 @@ Please report questions, issues and suggestions using:
|
||||
[Model Optimizer]:https://docs.openvino.ai/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html
|
||||
[Post-Training Optimization Tool]:https://docs.openvino.ai/latest/pot_introduction.html
|
||||
[Samples]:https://github.com/openvinotoolkit/openvino/tree/master/samples
|
||||
[tag on StackOverflow]:https://stackoverflow.com/search?q=%23openvino
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ set(CMAKE_SYSTEM_PROCESSOR armv7l)
|
||||
|
||||
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
|
||||
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)
|
||||
set(PKG_CONFIG_EXECUTABLE arm-linux-gnueabihf-pkg-config CACHE PATH "Path to ARM pkg-config")
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
|
||||
@@ -7,7 +7,6 @@ set(CMAKE_SYSTEM_PROCESSOR aarch64)
|
||||
|
||||
set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc)
|
||||
set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++)
|
||||
set(PKG_CONFIG_EXECUTABLE aarch64-linux-gnu-pkg-config CACHE PATH "Path to ARM64 pkg-config")
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
|
||||
@@ -151,6 +151,9 @@ function(ov_download_tbb)
|
||||
if(EXISTS "${TBBROOT}/lib/cmake/TBB/TBBConfig.cmake")
|
||||
# oneTBB case
|
||||
update_deps_cache(TBB_DIR "${TBBROOT}/lib/cmake/TBB" "Path to TBB cmake folder")
|
||||
elseif(EXISTS "${TBBROOT}/lib/cmake/tbb/TBBConfig.cmake")
|
||||
# oneTBB release package version less than 2021.6.0
|
||||
update_deps_cache(TBB_DIR "${TBBROOT}/lib/cmake/tbb" "Path to TBB cmake folder")
|
||||
elseif(EXISTS "${TBBROOT}/lib64/cmake/TBB/TBBConfig.cmake")
|
||||
# 64-bits oneTBB case
|
||||
update_deps_cache(TBB_DIR "${TBBROOT}/lib64/cmake/TBB" "Path to TBB cmake folder")
|
||||
@@ -314,8 +317,8 @@ if(ENABLE_INTEL_GNA)
|
||||
GNA_LIB_DIR
|
||||
libGNA_INCLUDE_DIRS
|
||||
libGNA_LIBRARIES_BASE_PATH)
|
||||
set(GNA_VERSION "03.00.00.1455.2")
|
||||
set(GNA_HASH "e52785d3f730fefb4e794bb7ab40c8676537ef2f7c69c5b4bb89a5d3cc0bbe60")
|
||||
set(GNA_VERSION "03.00.00.1455.0")
|
||||
set(GNA_HASH "99891696269d8fa10116c96e6b7bda4362736881f0df8df8b56c751ee18e5820")
|
||||
|
||||
set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include)
|
||||
if(WIN32)
|
||||
|
||||
@@ -14,8 +14,8 @@ set(CMAKE_MODULE_PATH "${IEDevScripts_DIR}")
|
||||
function(set_ci_build_number)
|
||||
set(repo_root "${CMAKE_SOURCE_DIR}")
|
||||
include(version)
|
||||
foreach(var CI_BUILD_NUMBER OpenVINO_VERSION OpenVINO_VERSION_BUILD
|
||||
OpenVINO_VERSION_MAJOR OpenVINO_VERSION_MINOR OpenVINO_VERSION_PATCH)
|
||||
foreach(var CI_BUILD_NUMBER IE_VERSION IE_VERSION_BUILD
|
||||
IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH)
|
||||
if(NOT DEFINED ${var})
|
||||
message(FATAL_ERROR "${var} version component is not defined")
|
||||
endif()
|
||||
@@ -186,8 +186,6 @@ endif()
|
||||
# Use solution folders
|
||||
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
|
||||
|
||||
# cmake_dependent_option() supports full Condition Syntax
|
||||
set(CMAKE_POLICY_DEFAULT_CMP0127 NEW)
|
||||
# Enable CMAKE_<LANG>_COMPILER_ID AppleClang
|
||||
set(CMAKE_POLICY_DEFAULT_CMP0025 NEW)
|
||||
|
||||
|
||||
@@ -76,8 +76,8 @@ function(addIeTarget)
|
||||
|
||||
# remove unnecessary directories
|
||||
foreach(excludedDir ${ARG_EXCLUDED_SOURCE_PATHS})
|
||||
list(FILTER includes EXCLUDE REGEX "${excludedDir}.*")
|
||||
list(FILTER sources EXCLUDE REGEX "${excludedDir}.*")
|
||||
list(FILTER includes EXCLUDE REGEX "${excludedDir}*")
|
||||
list(FILTER sources EXCLUDE REGEX "${excludedDir}*")
|
||||
endforeach()
|
||||
|
||||
source_group("include" FILES ${includes})
|
||||
|
||||
@@ -82,11 +82,10 @@ unset(protobuf_installed CACHE)
|
||||
|
||||
#
|
||||
# ov_add_frontend(NAME <IR|ONNX|...>
|
||||
# FILEDESCRIPTION <description> # used on Windows to describe DLL file
|
||||
# [LINKABLE_FRONTEND] # whether we can use FE API directly or via FEM only
|
||||
# [SKIP_INSTALL] # private frontend, not for end users
|
||||
# [PROTOBUF_LITE] # requires only libprotobuf-lite
|
||||
# [SKIP_NCC_STYLE] # use custom NCC rules
|
||||
# FILEDESCRIPTION <description>
|
||||
# [LINKABLE_FRONTEND]
|
||||
# [SKIP_INSTALL]
|
||||
# [PROTOBUF_LITE]
|
||||
# [LINK_LIBRARIES <lib1 lib2 ...>])
|
||||
#
|
||||
macro(ov_add_frontend)
|
||||
@@ -107,17 +106,6 @@ macro(ov_add_frontend)
|
||||
set(FRONTEND_NAMES "${FRONTEND_NAMES}" CACHE INTERNAL "" FORCE)
|
||||
|
||||
file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
|
||||
if (WIN32)
|
||||
# Remove linux specific files
|
||||
file(GLOB_RECURSE LIN_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/os/lin/*.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/os/lin/*.hpp)
|
||||
list(REMOVE_ITEM LIBRARY_SRC "${LIN_FILES}")
|
||||
else()
|
||||
# Remove windows specific files
|
||||
file(GLOB_RECURSE WIN_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/os/win/*.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/src/os/win/*.hpp)
|
||||
list(REMOVE_ITEM LIBRARY_SRC "${WIN_FILES}")
|
||||
endif()
|
||||
file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp)
|
||||
file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp)
|
||||
|
||||
@@ -243,7 +231,7 @@ macro(ov_add_frontend)
|
||||
endif()
|
||||
|
||||
if(OV_FRONTEND_LINKABLE_FRONTEND)
|
||||
# install library development files
|
||||
# install -dev part
|
||||
install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/openvino
|
||||
DESTINATION ${FRONTEND_INSTALL_INCLUDE}/
|
||||
COMPONENT core_dev
|
||||
|
||||
@@ -106,6 +106,7 @@ function(ov_ncc_naming_style)
|
||||
"${NCC_STYLE_SOURCE_DIRECTORY}/*.cpp")
|
||||
|
||||
list(APPEND NCC_STYLE_ADDITIONAL_INCLUDE_DIRECTORIES "${NCC_STYLE_SOURCE_DIRECTORY}")
|
||||
|
||||
# without it sources with same name from different directories will map to same .ncc_style target
|
||||
file(RELATIVE_PATH source_dir_rel ${CMAKE_SOURCE_DIR} ${NCC_STYLE_SOURCE_DIRECTORY})
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# custom OpenVINO values
|
||||
CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN)$'
|
||||
CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN|OPENVINO_OP)$'
|
||||
ClassName: '^([A-Z][\w]+|b?float16|numeric_limits|ngraph_error|stopwatch|unsupported_op)$'
|
||||
StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair)$'
|
||||
FunctionName: '^(operator\W+|[a-z_\d]+)|PrintTo$'
|
||||
|
||||
@@ -11,11 +11,6 @@ macro (ie_option variable description value)
|
||||
list(APPEND IE_OPTIONS ${variable})
|
||||
endmacro()
|
||||
|
||||
# Usage: ov_option(<option_variable> "description" <initial value or boolean expression> [IF <condition>])
|
||||
macro (ov_option variable description value)
|
||||
ie_option(${variable} "${description}" ${value})
|
||||
endmacro()
|
||||
|
||||
macro (ie_dependent_option variable description def_value condition fallback_value)
|
||||
cmake_dependent_option(${variable} "${description}" ${def_value} "${condition}" ${fallback_value})
|
||||
list(APPEND IE_OPTIONS ${variable})
|
||||
|
||||
@@ -69,8 +69,8 @@ macro(ie_cpack)
|
||||
endif()
|
||||
|
||||
foreach(ver IN LISTS MAJOR MINOR PATCH)
|
||||
if(DEFINED OpenVINO_VERSION_${ver})
|
||||
set(CPACK_PACKAGE_VERSION_${ver} ${OpenVINO_VERSION_${ver}})
|
||||
if(DEFINED IE_VERSION_${ver})
|
||||
set(CPACK_PACKAGE_VERSION_${ver} ${IE_VERSION_${ver}})
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
|
||||
@@ -13,8 +13,8 @@ function(ie_plugin_get_file_name target_name library_name)
|
||||
set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
if(NOT TARGET ov_plugins)
|
||||
add_custom_target(ov_plugins)
|
||||
if(NOT TARGET ie_plugins)
|
||||
add_custom_target(ie_plugins)
|
||||
endif()
|
||||
|
||||
#
|
||||
@@ -27,12 +27,11 @@ endif()
|
||||
# [OBJECT_LIBRARIES <object_libs>]
|
||||
# [VERSION_DEFINES_FOR <source>]
|
||||
# [SKIP_INSTALL]
|
||||
# [SKIP_REGISTRATION] Skip creation of <device>.xml
|
||||
# [ADD_CLANG_FORMAT]
|
||||
# )
|
||||
#
|
||||
function(ie_add_plugin)
|
||||
set(options SKIP_INSTALL ADD_CLANG_FORMAT AS_EXTENSION SKIP_REGISTRATION)
|
||||
set(options SKIP_INSTALL ADD_CLANG_FORMAT AS_EXTENSION)
|
||||
set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR PSEUDO_PLUGIN_FOR)
|
||||
set(multiValueArgs DEFAULT_CONFIG SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS)
|
||||
cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
@@ -102,7 +101,7 @@ function(ie_add_plugin)
|
||||
add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
|
||||
endif()
|
||||
|
||||
add_dependencies(ov_plugins ${IE_PLUGIN_NAME})
|
||||
add_dependencies(ie_plugins ${IE_PLUGIN_NAME})
|
||||
if(TARGET openvino_gapi_preproc)
|
||||
if(BUILD_SHARED_LIBS)
|
||||
add_dependencies(${IE_PLUGIN_NAME} openvino_gapi_preproc)
|
||||
@@ -147,27 +146,25 @@ function(ie_add_plugin)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Enable for static build to generate correct plugins.hpp
|
||||
if(NOT IE_PLUGIN_SKIP_REGISTRATION OR NOT BUILD_SHARED_LIBS)
|
||||
# check that plugin with such name is not registered
|
||||
foreach(plugin_entry IN LISTS PLUGIN_FILES)
|
||||
string(REPLACE ":" ";" plugin_entry "${plugin_entry}")
|
||||
list(GET plugin_entry -1 library_name)
|
||||
list(GET plugin_entry 0 plugin_name)
|
||||
if(plugin_name STREQUAL "${IE_PLUGIN_DEVICE_NAME}" AND
|
||||
NOT library_name STREQUAL ${IE_PLUGIN_NAME})
|
||||
message(FATAL_ERROR "${IE_PLUGIN_NAME} and ${library_name} are both registered as ${plugin_name}")
|
||||
endif()
|
||||
endforeach()
|
||||
# check that plugin with such name is not registered
|
||||
|
||||
# append plugin to the list to register
|
||||
foreach(plugin_entry IN LISTS PLUGIN_FILES)
|
||||
string(REPLACE ":" ";" plugin_entry "${plugin_entry}")
|
||||
list(GET plugin_entry -1 library_name)
|
||||
list(GET plugin_entry 0 plugin_name)
|
||||
if(plugin_name STREQUAL "${IE_PLUGIN_DEVICE_NAME}" AND
|
||||
NOT library_name STREQUAL ${IE_PLUGIN_NAME})
|
||||
message(FATAL_ERROR "${IE_PLUGIN_NAME} and ${library_name} are both registered as ${plugin_name}")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
list(APPEND PLUGIN_FILES "${IE_PLUGIN_DEVICE_NAME}:${IE_PLUGIN_NAME}")
|
||||
set(PLUGIN_FILES "${PLUGIN_FILES}" CACHE INTERNAL "" FORCE)
|
||||
set(${IE_PLUGIN_DEVICE_NAME}_CONFIG "${IE_PLUGIN_DEFAULT_CONFIG}" CACHE INTERNAL "" FORCE)
|
||||
set(${IE_PLUGIN_DEVICE_NAME}_PSEUDO_PLUGIN_FOR "${IE_PLUGIN_PSEUDO_PLUGIN_FOR}" CACHE INTERNAL "" FORCE)
|
||||
set(${IE_PLUGIN_DEVICE_NAME}_AS_EXTENSION "${IE_PLUGIN_AS_EXTENSION}" CACHE INTERNAL "" FORCE)
|
||||
endif()
|
||||
# append plugin to the list to register
|
||||
|
||||
list(APPEND PLUGIN_FILES "${IE_PLUGIN_DEVICE_NAME}:${IE_PLUGIN_NAME}")
|
||||
set(PLUGIN_FILES "${PLUGIN_FILES}" CACHE INTERNAL "" FORCE)
|
||||
set(${IE_PLUGIN_DEVICE_NAME}_CONFIG "${IE_PLUGIN_DEFAULT_CONFIG}" CACHE INTERNAL "" FORCE)
|
||||
set(${IE_PLUGIN_DEVICE_NAME}_PSEUDO_PLUGIN_FOR "${IE_PLUGIN_PSEUDO_PLUGIN_FOR}" CACHE INTERNAL "" FORCE)
|
||||
set(${IE_PLUGIN_DEVICE_NAME}_AS_EXTENSION "${IE_PLUGIN_AS_EXTENSION}" CACHE INTERNAL "" FORCE)
|
||||
endfunction()
|
||||
|
||||
function(ov_add_plugin)
|
||||
@@ -175,12 +172,13 @@ function(ov_add_plugin)
|
||||
endfunction()
|
||||
|
||||
#
|
||||
# ie_register_plugins_dynamic(MAIN_TARGET <main target name>)
|
||||
# ie_register_plugins_dynamic(MAIN_TARGET <main target name>
|
||||
# POSSIBLE_PLUGINS <list of plugins which can be build by this repo>)
|
||||
#
|
||||
macro(ie_register_plugins_dynamic)
|
||||
set(options)
|
||||
set(oneValueArgs MAIN_TARGET)
|
||||
set(multiValueArgs)
|
||||
set(multiValueArgs POSSIBLE_PLUGINS)
|
||||
cmake_parse_arguments(IE_REGISTER "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
|
||||
|
||||
if(NOT IE_REGISTER_MAIN_TARGET)
|
||||
@@ -263,15 +261,6 @@ macro(ie_register_plugins)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ov_register_plugins()
|
||||
#
|
||||
macro(ov_register_plugins)
|
||||
if(BUILD_SHARED_LIBS)
|
||||
ie_register_plugins_dynamic(${ARGN})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ie_target_link_plugins(<TARGET_NAME>)
|
||||
#
|
||||
|
||||
@@ -19,29 +19,34 @@ function (commitHash VAR)
|
||||
message(FATAL_ERROR "repo_root is not defined")
|
||||
endif()
|
||||
execute_process(
|
||||
COMMAND git rev-parse HEAD
|
||||
COMMAND git rev-parse --short=11 HEAD
|
||||
WORKING_DIRECTORY ${repo_root}
|
||||
OUTPUT_VARIABLE GIT_COMMIT_HASH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
set (${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
macro(ov_parse_ci_build_number)
|
||||
set(OpenVINO_VERSION_BUILD 000)
|
||||
set(IE_VERSION_BUILD ${OpenVINO_VERSION_BUILD})
|
||||
|
||||
macro(ie_parse_ci_build_number)
|
||||
set(IE_VERSION_BUILD 000)
|
||||
if(CI_BUILD_NUMBER MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)\-([0-9]+)\-.*")
|
||||
set(OpenVINO_VERSION_MAJOR ${CMAKE_MATCH_1})
|
||||
set(OpenVINO_VERSION_MINOR ${CMAKE_MATCH_2})
|
||||
set(OpenVINO_VERSION_PATCH ${CMAKE_MATCH_3})
|
||||
set(OpenVINO_VERSION_BUILD ${CMAKE_MATCH_4})
|
||||
set(IE_VERSION_MAJOR ${CMAKE_MATCH_1})
|
||||
set(IE_VERSION_MINOR ${CMAKE_MATCH_2})
|
||||
set(IE_VERSION_PATCH ${CMAKE_MATCH_3})
|
||||
set(IE_VERSION_BUILD ${CMAKE_MATCH_4})
|
||||
set(the_whole_version_is_defined_by_ci ON)
|
||||
elseif(CI_BUILD_NUMBER MATCHES "^[0-9]+$")
|
||||
set(IE_VERSION_BUILD ${CI_BUILD_NUMBER})
|
||||
# only build number is defined by CI
|
||||
set(the_whole_version_is_defined_by_ci OFF)
|
||||
elseif(CI_BUILD_NUMBER)
|
||||
message(FATAL_ERROR "Failed to parse CI_BUILD_NUMBER which is ${CI_BUILD_NUMBER}")
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED repo_root)
|
||||
message(FATAL_ERROR "repo_root is not defined")
|
||||
endif()
|
||||
|
||||
macro(ov_get_hpp_version)
|
||||
macro(ie_get_hpp_version)
|
||||
if(NOT DEFINED OpenVINO_SOURCE_DIR)
|
||||
return()
|
||||
endif()
|
||||
@@ -61,12 +66,11 @@ macro(ov_parse_ci_build_number)
|
||||
|
||||
foreach(suffix MAJOR MINOR PATCH)
|
||||
set(ie_version_name "IE_VERSION_${suffix}")
|
||||
set(ov_version_name "OpenVINO_VERSION_${suffix}")
|
||||
set(ov_version_name_hpp "OPENVINO_VERSION_${suffix}")
|
||||
set(ov_version_name "OPENVINO_VERSION_${suffix}")
|
||||
|
||||
string(REGEX REPLACE ".+${ie_version_name}[ ]+([0-9]+).*" "\\1"
|
||||
${ie_version_name}_HPP "${IE_VERSION_PARTS}")
|
||||
string(REGEX REPLACE ".+${ov_version_name_hpp}[ ]+([0-9]+).*" "\\1"
|
||||
string(REGEX REPLACE ".+${ov_version_name}[ ]+([0-9]+).*" "\\1"
|
||||
${ov_version_name}_HPP "${OV_VERSION_PARTS}")
|
||||
|
||||
if(NOT ${ie_version_name}_HPP EQUAL ${ov_version_name}_HPP)
|
||||
@@ -75,42 +79,54 @@ macro(ov_parse_ci_build_number)
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
set(ov_hpp_version_is_found ON)
|
||||
set(ie_hpp_version_is_found ON)
|
||||
endmacro()
|
||||
|
||||
# detect OpenVINO version via openvino/core/version.hpp and ie_version.hpp
|
||||
ov_get_hpp_version()
|
||||
# detect OpenVINO version via ie_version.hpp
|
||||
ie_get_hpp_version()
|
||||
|
||||
if(ov_hpp_version_is_found)
|
||||
foreach(var OpenVINO_VERSION_MAJOR OpenVINO_VERSION_MINOR OpenVINO_VERSION_PATCH)
|
||||
if(ie_hpp_version_is_found)
|
||||
foreach(var IE_VERSION_MAJOR IE_VERSION_MINOR IE_VERSION_PATCH)
|
||||
if(DEFINED ${var} AND NOT ${var} EQUAL ${var}_HPP)
|
||||
message(FATAL_ERROR "${var} parsed from CI_BUILD_NUMBER (${${var}}) \
|
||||
and from openvino/core/version.hpp (${${var}_HPP}) are different")
|
||||
and from ie_version.hpp (${${var}_HPP}) are different")
|
||||
else()
|
||||
# CI_BUILD_NUMBER is not defined well, take info from openvino/core/version.hpp as a baseline
|
||||
# CI_BUILD_NUMBER is not defined well, take info from ie_verison.hpp as a baseline
|
||||
set(${var} ${${var}_HPP})
|
||||
endif()
|
||||
endforeach()
|
||||
endif()
|
||||
|
||||
set(OpenVINO_VERSION "${OpenVINO_VERSION_MAJOR}.${OpenVINO_VERSION_MINOR}.${OpenVINO_VERSION_PATCH}")
|
||||
message(STATUS "OpenVINO version is ${OpenVINO_VERSION} (Build ${OpenVINO_VERSION_BUILD})")
|
||||
set(IE_VERSION "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}")
|
||||
message(STATUS "OpenVINO version is ${IE_VERSION} (Build ${IE_VERSION_BUILD})")
|
||||
|
||||
if(NOT the_whole_version_is_defined_by_ci)
|
||||
# create CI_BUILD_NUMBER
|
||||
|
||||
branchName(GIT_BRANCH)
|
||||
commitHash(GIT_COMMIT_HASH)
|
||||
|
||||
if(NOT GIT_BRANCH STREQUAL "master")
|
||||
set(GIT_BRANCH_POSTFIX "-${GIT_BRANCH}")
|
||||
endif()
|
||||
|
||||
set(CI_BUILD_NUMBER "${IE_VERSION}-${IE_VERSION_BUILD}-${GIT_COMMIT_HASH}${GIT_BRANCH_POSTFIX}")
|
||||
|
||||
unset(GIT_BRANCH_POSTFIX)
|
||||
unset(GIT_BRANCH)
|
||||
unset(GIT_COMMIT_HASH)
|
||||
else()
|
||||
unset(the_whole_version_is_defined_by_ci)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
if (DEFINED ENV{CI_BUILD_NUMBER})
|
||||
set(CI_BUILD_NUMBER $ENV{CI_BUILD_NUMBER})
|
||||
else()
|
||||
branchName(GIT_BRANCH)
|
||||
commitHash(GIT_COMMIT_HASH)
|
||||
|
||||
set(custom_build "custom_${GIT_BRANCH}_${GIT_COMMIT_HASH}")
|
||||
set(CI_BUILD_NUMBER "${custom_build}")
|
||||
endif()
|
||||
|
||||
# provides OpenVINO version
|
||||
# provides Inference Engine version
|
||||
# 1. If CI_BUILD_NUMBER is defined, parses this information
|
||||
# 2. Otherwise, parses openvino/core/version.hpp
|
||||
ov_parse_ci_build_number()
|
||||
if (DEFINED ENV{CI_BUILD_NUMBER})
|
||||
set(CI_BUILD_NUMBER $ENV{CI_BUILD_NUMBER})
|
||||
endif()
|
||||
ie_parse_ci_build_number()
|
||||
|
||||
macro (addVersionDefines FILE)
|
||||
set(__version_file ${FILE})
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set(IE_VS_VER_FILEVERSION_QUAD "${OpenVINO_VERSION_MAJOR},${OpenVINO_VERSION_MINOR},${OpenVINO_VERSION_PATCH},${OpenVINO_VERSION_BUILD}")
|
||||
set(IE_VS_VER_PRODUCTVERSION_QUAD "${OpenVINO_VERSION_MAJOR},${OpenVINO_VERSION_MINOR},${OpenVINO_VERSION_PATCH},${OpenVINO_VERSION_BUILD}")
|
||||
set(IE_VS_VER_FILEVERSION_STR "${OpenVINO_VERSION_MAJOR}.${OpenVINO_VERSION_MINOR}.${OpenVINO_VERSION_PATCH}.${OpenVINO_VERSION_BUILD}")
|
||||
set(IE_VS_VER_FILEVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
|
||||
set(IE_VS_VER_PRODUCTVERSION_QUAD "${IE_VERSION_MAJOR},${IE_VERSION_MINOR},${IE_VERSION_PATCH},0")
|
||||
set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0")
|
||||
|
||||
set(IE_VS_VER_COMPANY_NAME_STR "Intel Corporation")
|
||||
set(IE_VS_VER_PRODUCTVERSION_STR "${CI_BUILD_NUMBER}")
|
||||
|
||||
@@ -6,19 +6,15 @@ function(ie_generate_dev_package_config)
|
||||
# dummy check that OpenCV is here
|
||||
find_package(OpenCV QUIET)
|
||||
|
||||
set(all_dev_targets gflags ov_runtime_libraries)
|
||||
foreach(component IN LISTS openvino_export_components)
|
||||
# export all targets with prefix and use them during extra modules build
|
||||
export(TARGETS ${${component}} NAMESPACE IE::
|
||||
APPEND FILE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake")
|
||||
APPEND FILE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake")
|
||||
list(APPEND all_dev_targets ${${component}})
|
||||
endforeach()
|
||||
add_custom_target(ie_dev_targets DEPENDS ${all_dev_targets})
|
||||
|
||||
# if we've found system gflags
|
||||
if(gflags_DIR)
|
||||
set(gflags_BINARY_DIR "${gflags_DIR}")
|
||||
endif()
|
||||
|
||||
configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in"
|
||||
"${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig.cmake"
|
||||
INSTALL_DESTINATION share # not used
|
||||
@@ -34,22 +30,18 @@ function(ov_generate_dev_package_config)
|
||||
# dummy check that OpenCV is here
|
||||
find_package(OpenCV QUIET)
|
||||
|
||||
set(all_dev_targets gflags ov_runtime_libraries)
|
||||
foreach(component IN LISTS openvino_export_components)
|
||||
string(FIND "${component}" "_legacy" index)
|
||||
if(index EQUAL -1)
|
||||
if (index EQUAL -1)
|
||||
# export all targets with prefix and use them during extra modules build
|
||||
export(TARGETS ${${component}} NAMESPACE openvino::
|
||||
APPEND FILE "${CMAKE_BINARY_DIR}/ov_${component}_dev_targets.cmake")
|
||||
APPEND FILE "${CMAKE_BINARY_DIR}/ov_${component}_dev_targets.cmake")
|
||||
list(APPEND all_dev_targets ${${component}})
|
||||
endif()
|
||||
endforeach()
|
||||
add_custom_target(ov_dev_targets DEPENDS ${all_dev_targets})
|
||||
|
||||
# if we've found system gflags
|
||||
if(gflags_DIR)
|
||||
set(gflags_BINARY_DIR "${gflags_DIR}")
|
||||
endif()
|
||||
|
||||
configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINODeveloperPackageConfig.cmake.in"
|
||||
"${CMAKE_BINARY_DIR}/OpenVINODeveloperPackageConfig.cmake"
|
||||
INSTALL_DESTINATION share # not used
|
||||
@@ -67,14 +59,14 @@ endfunction()
|
||||
|
||||
function(register_extra_modules)
|
||||
# post export
|
||||
openvino_developer_export_targets(COMPONENT core_legacy TARGETS inference_engine)
|
||||
openvino_developer_export_targets(COMPONENT core_legacy TARGETS ngraph)
|
||||
openvino_developer_export_targets(COMPONENT core TARGETS inference_engine)
|
||||
openvino_developer_export_targets(COMPONENT core TARGETS ngraph)
|
||||
|
||||
set(InferenceEngineDeveloperPackage_DIR "${CMAKE_CURRENT_BINARY_DIR}/runtime")
|
||||
set(OpenVINODeveloperPackage_DIR "${CMAKE_BINARY_DIR}/runtime")
|
||||
|
||||
function(generate_fake_dev_package NS)
|
||||
if(NS STREQUAL "openvino")
|
||||
if (NS STREQUAL "openvino")
|
||||
set(devconfig_file "${OpenVINODeveloperPackage_DIR}/OpenVINODeveloperPackageConfig.cmake")
|
||||
else()
|
||||
set(devconfig_file "${InferenceEngineDeveloperPackage_DIR}/InferenceEngineDeveloperPackageConfig.cmake")
|
||||
@@ -89,6 +81,10 @@ function(register_extra_modules)
|
||||
file(APPEND "${devconfig_file}" "add_library(${NS}::${target} ALIAS ${target})\n")
|
||||
endif()
|
||||
endforeach()
|
||||
if ("${NS}" STREQUAL "openvino")
|
||||
file(APPEND "${devconfig_file}" "add_library(${NS}::runtime ALIAS openvino)\n")
|
||||
file(APPEND "${devconfig_file}" "add_library(${NS}::runtime::dev ALIAS openvino_dev)\n")
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
generate_fake_dev_package("openvino")
|
||||
@@ -141,7 +137,7 @@ ie_generate_dev_package_config()
|
||||
ov_generate_dev_package_config()
|
||||
|
||||
# extra modules must be registered after inference_engine library
|
||||
# and all other OpenVINO Core libraries are creared
|
||||
# and all other IE common libraries (ov_runtime_libraries) are creared
|
||||
# because 'register_extra_modules' creates fake InferenceEngineDeveloperPackageConfig.cmake
|
||||
# with all imported developer targets
|
||||
register_extra_modules()
|
||||
|
||||
@@ -59,7 +59,7 @@ cmake_dependent_option (ENABLE_WHEEL "Build wheel packages for PyPi" OFF
|
||||
# Inference Engine specific options
|
||||
#
|
||||
|
||||
# "OneDNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ"
|
||||
# "MKL-DNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ"
|
||||
if(X86 OR ARM OR (MSVC AND (ARM OR AARCH64)) )
|
||||
set(THREADING_DEFAULT "SEQ")
|
||||
else()
|
||||
@@ -126,7 +126,7 @@ ie_dependent_option (ENABLE_FUNCTIONAL_TESTS "functional tests" ON "ENABLE_TESTS
|
||||
|
||||
ie_dependent_option (ENABLE_SAMPLES "console samples are part of inference engine package" ON "NOT MINGW" OFF)
|
||||
|
||||
ie_option (ENABLE_OPENCV "enables OpenCV" ON)
|
||||
ie_option (ENABLE_OPENCV "enables OpenCV" OFF)
|
||||
|
||||
ie_option (ENABLE_V7_SERIALIZE "enables serialization to IR v7" OFF)
|
||||
|
||||
@@ -136,16 +136,7 @@ ie_dependent_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are link
|
||||
|
||||
ie_dependent_option (ENABLE_SYSTEM_PUGIXML "use the system copy of pugixml" OFF "BUILD_SHARED_LIBS" OFF)
|
||||
|
||||
get_linux_name(LINUX_OS_NAME)
|
||||
if(LINUX_OS_NAME MATCHES "^Ubuntu [0-9]+\.[0-9]+$" AND NOT DEFINED ENV{TBBROOT})
|
||||
# Debian packages are enabled on Ubuntu systems
|
||||
# so, system TBB can be tried for usage
|
||||
set(ENABLE_SYSTEM_TBB_DEFAULT ON)
|
||||
else()
|
||||
set(ENABLE_SYSTEM_TBB_DEFAULT OFF)
|
||||
endif()
|
||||
|
||||
ie_dependent_option (ENABLE_SYSTEM_TBB "use the system version of TBB" ${ENABLE_SYSTEM_TBB_DEFAULT} "THREADING MATCHES TBB;LINUX" OFF)
|
||||
ie_dependent_option (ENABLE_SYSTEM_TBB "use the system version of TBB" OFF "THREADING MATCHES TBB;LINUX" OFF)
|
||||
|
||||
ie_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF)
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set(PACKAGE_VERSION_MAJOR @OpenVINO_VERSION_MAJOR@)
|
||||
set(PACKAGE_VERSION_MINOR @OpenVINO_VERSION_MINOR@)
|
||||
set(PACKAGE_VERSION_PATCH @OpenVINO_VERSION_PATCH@)
|
||||
set(PACKAGE_VERSION_MAJOR @IE_VERSION_MAJOR@)
|
||||
set(PACKAGE_VERSION_MINOR @IE_VERSION_MINOR@)
|
||||
set(PACKAGE_VERSION_PATCH @IE_VERSION_PATCH@)
|
||||
set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}")
|
||||
|
||||
set(PACKAGE_VERSION_EXACT False)
|
||||
|
||||
@@ -12,20 +12,19 @@ set_and_check(OpenVINO_MAIN_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # KMB
|
||||
|
||||
# Variables to export in plugin's projects
|
||||
|
||||
set(ie_options "@IE_OPTIONS@")
|
||||
list(APPEND ie_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER
|
||||
CMAKE_BUILD_TYPE CMAKE_SKIP_RPATH CMAKE_INSTALL_PREFIX)
|
||||
set(ie_options "@IE_OPTIONS@;CMAKE_BUILD_TYPE;CMAKE_SKIP_RPATH")
|
||||
list(APPEND ie_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER)
|
||||
file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
|
||||
|
||||
message(STATUS "The following CMake options are exported from Inference Engine Developer package")
|
||||
message(" ")
|
||||
message("")
|
||||
foreach(option IN LISTS ie_options)
|
||||
if(NOT DEFINED "${option}")
|
||||
load_cache("${cache_path}" READ_WITH_PREFIX "" ${option})
|
||||
endif()
|
||||
message(" ${option}: ${${option}}")
|
||||
endforeach()
|
||||
message(" ")
|
||||
message("")
|
||||
|
||||
# for samples in 3rd party projects
|
||||
set_and_check(gflags_DIR "@gflags_BINARY_DIR@")
|
||||
@@ -49,6 +48,11 @@ find_dependency(ngraph
|
||||
NO_CMAKE_FIND_ROOT_PATH
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_dependency(OpenVINODeveloperPackage
|
||||
PATHS "${CMAKE_CURRENT_LIST_DIR}"
|
||||
NO_CMAKE_FIND_ROOT_PATH
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
if(TARGET openvino::runtime AND NOT TARGET IE::runtime)
|
||||
add_library(IE::runtime INTERFACE IMPORTED)
|
||||
set_target_properties(IE::runtime PROPERTIES
|
||||
@@ -66,18 +70,6 @@ foreach(component @openvino_export_components@)
|
||||
include("${CMAKE_CURRENT_LIST_DIR}/${component}_dev_targets.cmake")
|
||||
endforeach()
|
||||
|
||||
if(TARGET IE::ov_core_dev AND NOT TARGET openvino::core::dev)
|
||||
add_library(openvino::core::dev INTERFACE IMPORTED)
|
||||
set_target_properties(openvino::core::dev PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES IE::ov_core_dev)
|
||||
endif()
|
||||
|
||||
if(TARGET IE::runtime::dev AND NOT TARGET openvino::runtime::dev)
|
||||
add_library(openvino::runtime::dev INTERFACE IMPORTED)
|
||||
set_target_properties(openvino::runtime::dev PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES IE::runtime::dev)
|
||||
endif()
|
||||
|
||||
if(ENABLE_SYSTEM_PUGIXML)
|
||||
find_dependency(PugiXML)
|
||||
set_property(TARGET pugixml PROPERTY IMPORTED_GLOBAL TRUE)
|
||||
@@ -94,11 +86,13 @@ endif()
|
||||
# Extra Compile Flags
|
||||
#
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
if(NOT MSVC)
|
||||
ie_add_compiler_flags(-Wno-error=unused-variable)
|
||||
ie_add_compiler_flags(-Wno-error=unused-but-set-variable)
|
||||
if(SUGGEST_OVERRIDE_SUPPORTED)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
ie_add_compiler_flags(-Wno-error=unused-but-set-variable)
|
||||
if(SUGGEST_OVERRIDE_SUPPORTED)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set(PACKAGE_VERSION_MAJOR @OpenVINO_VERSION_MAJOR@)
|
||||
set(PACKAGE_VERSION_MINOR @OpenVINO_VERSION_MINOR@)
|
||||
set(PACKAGE_VERSION_PATCH @OpenVINO_VERSION_PATCH@)
|
||||
set(PACKAGE_VERSION_MAJOR @IE_VERSION_MAJOR@)
|
||||
set(PACKAGE_VERSION_MINOR @IE_VERSION_MINOR@)
|
||||
set(PACKAGE_VERSION_PATCH @IE_VERSION_PATCH@)
|
||||
set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}")
|
||||
|
||||
set(PACKAGE_VERSION_EXACT False)
|
||||
|
||||
@@ -150,12 +150,17 @@ if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND NOT TBB_FOUND
|
||||
set(enable_system_tbb "@ENABLE_SYSTEM_TBB@")
|
||||
if(NOT enable_system_tbb)
|
||||
set_and_check(_tbb_dir "@PACKAGE_IE_TBB_DIR@")
|
||||
if(DEFINED ENV{TBBROOT})
|
||||
# see https://stackoverflow.com/questions/28070810/cmake-generate-error-on-windows-as-it-uses-as-escape-seq
|
||||
file(TO_CMAKE_PATH $ENV{TBBROOT} ENV_TBBROOT)
|
||||
endif()
|
||||
set(find_package_tbb_extra_args
|
||||
CONFIG
|
||||
PATHS
|
||||
# oneTBB case exposed via export TBBROOT=<custom TBB root>
|
||||
"$ENV{TBBROOT}/lib64/cmake/TBB"
|
||||
"$ENV{TBBROOT}/lib/cmake/TBB"
|
||||
"${ENV_TBBROOT}/lib64/cmake/TBB"
|
||||
"${ENV_TBBROOT}/lib/cmake/TBB"
|
||||
"${ENV_TBBROOT}/lib/cmake/tbb"
|
||||
# "$ENV{TBB_DIR}"
|
||||
# for custom TBB exposed via cmake -DTBBROOT=<custom TBB root>
|
||||
"${TBBROOT}/cmake"
|
||||
@@ -201,8 +206,7 @@ if(NOT TARGET openvino)
|
||||
set(_ov_as_external_package ON)
|
||||
include("${CMAKE_CURRENT_LIST_DIR}/OpenVINOTargets.cmake")
|
||||
|
||||
# WA for cmake version < 3.16 which does not export
|
||||
# IMPORTED_LINK_DEPENDENT_LIBRARIES_** properties if no PUBLIC dependencies for the library
|
||||
# TODO: WA for cmake version < 3.16
|
||||
if((THREADING STREQUAL "TBB" OR THREADING STREQUAL "TBB_AUTO") AND TBB_FOUND)
|
||||
foreach (type RELEASE DEBUG RELWITHDEBINFO MINSIZEREL)
|
||||
set_property(TARGET openvino::runtime APPEND PROPERTY IMPORTED_LINK_DEPENDENT_LIBRARIES_${type} "TBB::tbb;TBB::tbbmalloc")
|
||||
|
||||
@@ -10,20 +10,19 @@ set_and_check(OpenVINO_SOURCE_DIR "@OpenVINO_SOURCE_DIR@")
|
||||
|
||||
# Variables to export in plugin's projects
|
||||
|
||||
set(ov_options "@IE_OPTIONS@")
|
||||
list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER
|
||||
CMAKE_BUILD_TYPE CMAKE_SKIP_RPATH CMAKE_INSTALL_PREFIX)
|
||||
set(ie_options "@IE_OPTIONS@;CMAKE_BUILD_TYPE;CMAKE_SKIP_RPATH")
|
||||
list(APPEND ie_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER)
|
||||
file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
|
||||
|
||||
message(STATUS "The following CMake options are exported from OpenVINO Developer package")
|
||||
message(" ")
|
||||
foreach(option IN LISTS ov_options)
|
||||
message("")
|
||||
foreach(option IN LISTS ie_options)
|
||||
if(NOT DEFINED "${option}")
|
||||
load_cache("${cache_path}" READ_WITH_PREFIX "" ${option})
|
||||
endif()
|
||||
message(" ${option}: ${${option}}")
|
||||
endforeach()
|
||||
message(" ")
|
||||
message("")
|
||||
|
||||
# for samples in 3rd party projects
|
||||
set_and_check(gflags_DIR "@gflags_BINARY_DIR@")
|
||||
@@ -52,10 +51,10 @@ endforeach()
|
||||
if(ENABLE_SYSTEM_PUGIXML)
|
||||
find_dependency(PugiXML)
|
||||
set_property(TARGET pugixml PROPERTY IMPORTED_GLOBAL TRUE)
|
||||
add_library(openvino::pugixml ALIAS pugixml)
|
||||
add_library(IE::pugixml ALIAS pugixml)
|
||||
endif()
|
||||
|
||||
# inherit OpenCV from main OpenVINO project if enabled
|
||||
# inherit OpenCV from main IE project if enabled
|
||||
if ("@OpenCV_FOUND@")
|
||||
load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR)
|
||||
find_dependency(OpenCV)
|
||||
@@ -65,11 +64,13 @@ endif()
|
||||
# Extra Compile Flags
|
||||
#
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
if(NOT MSVC)
|
||||
ie_add_compiler_flags(-Wno-error=unused-variable)
|
||||
ie_add_compiler_flags(-Wno-error=unused-but-set-variable)
|
||||
if(SUGGEST_OVERRIDE_SUPPORTED)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
ie_add_compiler_flags(-Wno-error=unused-but-set-variable)
|
||||
if(SUGGEST_OVERRIDE_SUPPORTED)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
@@ -37,8 +37,8 @@ The implementation `CompileNetwork` is fully device-specific.
|
||||
|
||||
The function accepts a const shared pointer to `ngraph::Function` object and performs the following steps:
|
||||
|
||||
1. Applies nGraph passes using `TransformNetwork` function, which defines plugin-specific conversion pipeline. To support low precision inference, the pipeline can include Low Precision Transformations. These transformations are usually hardware specific. You can find how to use and configure Low Precisions Transformations in [Low Precision Transformations](@ref openvino_docs_OV_UG_lpt) guide.
|
||||
2. Maps the transformed graph to a backend specific graph representation (for example, to CPU plugin internal graph representation).
|
||||
1. Applies ngraph passes using `TransformNetwork` function, which defines plugin-specific conversion pipeline. To support low precision inference, the pipeline can include Low Precision Transformations. These transformations are usually hardware specific. You can find how to use and configure Low Precisions Transformations in [Low Precision Transformations](@ref openvino_docs_OV_UG_lpt) guide.
|
||||
2. Maps the transformed graph to a backend specific graph representation (for example, to MKLDNN graph for Intel CPU).
|
||||
3. Allocates and fills memory for graph weights, backend specific memory handles and so on.
|
||||
|
||||
@snippet src/template_executable_network.cpp executable_network:map_graph
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
Inference Engine Plugin usually represents a wrapper around a backend. Backends can be:
|
||||
- OpenCL-like backend (e.g. clDNN library) for GPU devices.
|
||||
- oneDNN backend for Intel CPU devices.
|
||||
- MKLDNN backend for Intel CPU devices.
|
||||
- NVIDIA cuDNN for NVIDIA GPUs.
|
||||
|
||||
The responsibility of Inference Engine Plugin:
|
||||
|
||||
@@ -9,10 +9,10 @@
|
||||
<tab type="user" title="Attributes" url="@ref openvino_docs_OV_UG_lpt_attributes">
|
||||
<tab type="user" title="AvgPoolPrecisionPreserved" url="@ref openvino_docs_OV_UG_lpt_AvgPoolPrecisionPreserved"/>
|
||||
<tab type="user" title="IntervalsAlignment" url="@ref openvino_docs_OV_UG_lpt_IntervalsAlignment"/>
|
||||
<tab type="user" title="PerTensorQuantization" url="@ref openvino_docs_OV_UG_lpt_PerTensorQuantization"/>
|
||||
<tab type="user" title="PrecisionPreserved" url="@ref openvino_docs_OV_UG_lpt_PrecisionPreserved"/>
|
||||
<tab type="user" title="Precisions" url="@ref openvino_docs_OV_UG_lpt_Precisions"/>
|
||||
<tab type="user" title="QuantizationAlignment" url="@ref openvino_docs_OV_UG_lpt_QuantizationAlignment"/>
|
||||
<tab type="user" title="QuantizationGranularity" url="@ref openvino_docs_OV_UG_lpt_QuantizationGranularity"/>
|
||||
</tab>
|
||||
<tab type="user" title="Step 1. Prerequisites transformations" url="@ref openvino_docs_OV_UG_lpt_step1_prerequisites">
|
||||
<tab type="user" title="LinOpSequenceFusion" url="@ref openvino_docs_OV_UG_lpt_LinOpSequenceFusion"/>
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
# PerTensorQuantization attribute {#openvino_docs_OV_UG_lpt_PerTensorQuantization}
|
||||
|
||||
ngraph::PerTensorQuantizationAttribute class represents the `PerTensorQuantization` attribute.
|
||||
|
||||
The attribute defines if the operation input port requires per-tensor quantization.
|
||||
|
||||
| Property name | Values |
|
||||
|---------------|----------------------------------------------|
|
||||
| Required | Yes |
|
||||
| Defined | Operation, input ports |
|
||||
| Properties | |
|
||||
@@ -1,11 +0,0 @@
|
||||
# QuantizationGranularity attribute {#openvino_docs_OV_UG_lpt_QuantizationGranularity}
|
||||
|
||||
ngraph::QuantizationAttribute class represents the `QuantizationGranularity` attribute.
|
||||
|
||||
The attribute defines quantization granularity of operation inputs.
|
||||
|
||||
| Property name | Values |
|
||||
|---------------|----------------------------------------------|
|
||||
| Required | No |
|
||||
| Defined | Input ports |
|
||||
| Properties | Quantization granularity |
|
||||
@@ -8,30 +8,29 @@
|
||||
:hidden:
|
||||
|
||||
AvgPoolPrecisionPreserved <openvino_docs_OV_UG_lpt_AvgPoolPrecisionPreserved>
|
||||
IntervalsAlignment <openvino_docs_OV_UG_lpt_IntervalsAlignment>
|
||||
IntervalsAlignment <openvino_docs_OV_UG_lpt_IntervalsAlignment>
|
||||
PerTensorQuantization <openvino_docs_OV_UG_lpt_PerTensorQuantization>
|
||||
PrecisionPreserved <openvino_docs_OV_UG_lpt_PrecisionPreserved>
|
||||
Precisions <openvino_docs_OV_UG_lpt_Precisions>
|
||||
QuantizationAlignment <openvino_docs_OV_UG_lpt_QuantizationAlignment>
|
||||
QuantizationGranularity <openvino_docs_OV_UG_lpt_QuantizationGranularity>
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
## Introduction
|
||||
|
||||
| Name | Target | Required | Mutable |
|
||||
|-------------------------------------------------------------------------------------|--------------------------|----------|---------|
|
||||
| [AvgPoolPrecisionPreserved](@ref openvino_docs_OV_UG_lpt_AvgPoolPrecisionPreserved) | Precision | No | Yes |
|
||||
| [IntervalsAlignment](@ref openvino_docs_OV_UG_lpt_IntervalsAlignment) | Quantization interval | Yes | Yes |
|
||||
| [PrecisionPreserved](@ref openvino_docs_OV_UG_lpt_PrecisionPreserved) | Precision | Yes | Yes |
|
||||
| [Precisions](@ref openvino_docs_OV_UG_lpt_Precisions) | Precision | Yes | Yes |
|
||||
| [QuantizationAlignment](@ref openvino_docs_OV_UG_lpt_QuantizationAlignment) | Quantization granularity | Yes | Yes |
|
||||
| [QuantizationGranularity](@ref openvino_docs_OV_UG_lpt_QuantizationGranularity) | Quantization granularity | Yes | No |
|
||||
| Name | Target | Required | Mutable |
|
||||
|-------------------------------------------------------------------------------------|------------------------|----------|---------|
|
||||
| [AvgPoolPrecisionPreserved](@ref openvino_docs_OV_UG_lpt_AvgPoolPrecisionPreserved) | Precision | No | Yes |
|
||||
| [IntervalsAlignment](@ref openvino_docs_OV_UG_lpt_IntervalsAlignment) | Quantization interval | Yes | Yes |
|
||||
| [PerTensorQuantization](@ref openvino_docs_OV_UG_lpt_PerTensorQuantization) | Precision | Yes | No |
|
||||
| [PrecisionPreserved](@ref openvino_docs_OV_UG_lpt_PrecisionPreserved) | Precision | Yes | Yes |
|
||||
| [Precisions](@ref openvino_docs_OV_UG_lpt_Precisions) | Precision | Yes | Yes |
|
||||
| [QuantizationAlignment](@ref openvino_docs_OV_UG_lpt_QuantizationAlignment) | Quantization alignment | Yes | Yes |
|
||||
|
||||
> `Target` attribute group defines attribute usage during model transformation for the best performance:
|
||||
> - `Precision` - the attribute defines the most optimal output port precision.
|
||||
> - `Quantization interval` - the attribute defines quantization interval.
|
||||
> - `Quantization alignment` - the attribute defines quantization granularity in runtime: per-channel or per-tensor quantization.
|
||||
> - `Quantization granularity` - the attribute is set by plugin to define quantization granularity: per-channel or per-tensor quantization.
|
||||
> - `Quantization alignment` - the attribute defines quantization alignment: per-channel or per-tensor quantization.
|
||||
>
|
||||
> `Required` attribute group defines if attribute usage is required to get an optimal model during transformation:
|
||||
> - `Yes` - the attribute is used by all OpenVINO plugins for low-precision optimization.
|
||||
|
||||
@@ -3,6 +3,10 @@
|
||||
## Introduction to ONNX
|
||||
[ONNX*](https://github.com/onnx/onnx) is a representation format for deep learning models. ONNX allows AI developers easily transfer models between different frameworks that helps to choose the best combination for them. Today, PyTorch\*, Caffe2\*, Apache MXNet\*, Microsoft Cognitive Toolkit\* and other tools are developing ONNX support.
|
||||
|
||||
This page gives instructions on how to convert a model from ONNX format to OpenVINO IR format using Model Optimizer. To use Model Optimizer, install OpenVINO Development Tools by following the [installation instructions here](https://docs.openvino.ai/latest/openvino_docs_install_guides_install_dev_tools.html).
|
||||
|
||||
ONNX models are directly compatible with OpenVINO Runtime and can be loaded in their native `.onnx` format using `net = ie.read_model("model.onnx")`. The benefit of converting ONNX models to the OpenVINO IR format is that it allows them to be easily optimized for target hardware with advanced OpenVINO tools such as [NNCF](../../../optimization_guide/nncf_introduction.md).
|
||||
|
||||
## Convert an ONNX* Model <a name="Convert_From_ONNX"></a>
|
||||
The Model Optimizer process assumes you have an ONNX model that was directly downloaded from a public repository or converted from any framework that supports exporting to the ONNX format.
|
||||
|
||||
@@ -18,4 +22,8 @@ There are no ONNX\* specific parameters, so only framework-agnostic parameters a
|
||||
Refer to [Supported Framework Layers](../Supported_Frameworks_Layers.md) for the list of supported standard layers.
|
||||
|
||||
## See Also
|
||||
[Model Conversion Tutorials](Convert_Model_Tutorials.md)
|
||||
This page provided general instructions for converting ONNX models. See the [Model Conversion Tutorials](Convert_Model_Tutorials.md) page for a set of tutorials that give step-by-step instructions for converting specific ONNX models. Here are some example tutorials:
|
||||
* [Convert ONNX* Faster R-CNN Model](onnx_specific/Convert_Faster_RCNN.md)
|
||||
* [Convert ONNX* GPT-2 Model](onnx_specific/Convert_GPT2.md)
|
||||
* [Convert ONNX* Mask R-CNN Model](onnx_specific/Convert_Mask_RCNN.md)
|
||||
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
# Converting a PaddlePaddle* Model {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Paddle}
|
||||
|
||||
## Convert a PaddlePaddle Model <a name="Convert_From_Paddle"></a>
|
||||
To convert a PaddlePaddle model, use the `mo` script and specify the path to the input model `.pdmodel` file:
|
||||
To convert a PaddlePaddle model, use the `mo` script and specify the path to the input `.pdmodel` model file:
|
||||
|
||||
```sh
|
||||
mo --input_model <INPUT_MODEL>.pdmodel
|
||||
```
|
||||
|
||||
### Example of Converting a PaddlePaddle Model
|
||||
Below is the example command to convert yolo v3 PaddlePaddle network to OpenVINO IR network with Model Optimizer.
|
||||
**For example,** this command converts a yolo v3 PaddlePaddle network to OpenVINO IR network:
|
||||
|
||||
```sh
|
||||
mo --input_model=yolov3.pdmodel --input=image,im_shape,scale_factor --input_shape=[1,3,608,608],[1,2],[1,2] --reverse_input_channels --output=save_infer_model/scale_0.tmp_1,save_infer_model/scale_1.tmp_1
|
||||
@@ -17,9 +14,63 @@ Below is the example command to convert yolo v3 PaddlePaddle network to OpenVINO
|
||||
## Supported PaddlePaddle Layers
|
||||
Refer to [Supported Framework Layers](../Supported_Frameworks_Layers.md) for the list of supported standard layers.
|
||||
|
||||
## Frequently Asked Questions (FAQ)
|
||||
## Officially Supported PaddlePaddle Models
|
||||
The following PaddlePaddle models have been officially validated and confirmed to work (as of OpenVINO 2022.1):
|
||||
|
||||
When Model Optimizer is unable to run to completion due to issues like typographical errors, incorrectly used options, etc., it provides explanatory messages. They describe the potential cause of the problem and give a link to the [Model Optimizer FAQ](../Model_Optimizer_FAQ.md), which provides instructions on how to resolve most issues. The FAQ also includes links to relevant sections in the Model Optimizer Developer Guide to help you understand what went wrong.
|
||||
@sphinxdirective
|
||||
.. list-table::
|
||||
:widths: 20 25 55
|
||||
:header-rows: 1
|
||||
|
||||
* - Model Name
|
||||
- Model Type
|
||||
- Description
|
||||
* - ppocr-det
|
||||
- optical character recognition
|
||||
- Models are exported from `PaddleOCR <https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/>`_. Refer to `READ.md <https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/#pp-ocr-20-series-model-listupdate-on-dec-15>`_.
|
||||
* - ppocr-rec
|
||||
- optical character recognition
|
||||
- Models are exported from `PaddleOCR <https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/>`_. Refer to `READ.md <https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/#pp-ocr-20-series-model-listupdate-on-dec-15>`_.
|
||||
* - ResNet-50
|
||||
- classification
|
||||
- Models are exported from `PaddleClas <https://github.com/PaddlePaddle/PaddleClas/tree/release/2.1/>`_. Refer to `getting_started_en.md <https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/en/tutorials/getting_started_en.md#4-use-the-inference-model-to-predict>`_.
|
||||
* - MobileNet v2
|
||||
- classification
|
||||
- Models are exported from `PaddleClas <https://github.com/PaddlePaddle/PaddleClas/tree/release/2.1/>`_. Refer to `getting_started_en.md <https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/en/tutorials/getting_started_en.md#4-use-the-inference-model-to-predict>`_.
|
||||
* - MobileNet v3
|
||||
- classification
|
||||
- Models are exported from `PaddleClas <https://github.com/PaddlePaddle/PaddleClas/tree/release/2.1/)>`_. Refer to `getting_started_en.md <https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/en/tutorials/getting_started_en.md#4-use-the-inference-model-to-predict>`_.
|
||||
* - BiSeNet v2
|
||||
- semantic segmentation
|
||||
- Models are exported from `PaddleSeg <https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1>`_. Refer to `model_export.md <https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#>`_.
|
||||
* - DeepLab v3 plus
|
||||
- semantic segmentation
|
||||
- Models are exported from `PaddleSeg <https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1>`_. Refer to `model_export.md <https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#>`_.
|
||||
* - Fast-SCNN
|
||||
- semantic segmentation
|
||||
- Models are exported from `PaddleSeg <https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1>`_. Refer to `model_export.md <https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#>`_.
|
||||
* - OCRNET
|
||||
- semantic segmentation
|
||||
- Models are exported from `PaddleSeg <https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1>`_. Refer to `model_export.md <https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#>`_.
|
||||
* - Yolo v3
|
||||
- detection
|
||||
- Models are exported from `PaddleDetection <https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.1>`_. Refer to `EXPORT_MODEL.md <https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md#>`_.
|
||||
* - ppyolo
|
||||
- detection
|
||||
- Models are exported from `PaddleDetection <https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.1>`_. Refer to `EXPORT_MODEL.md <https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md#>`_.
|
||||
* - MobileNetv3-SSD
|
||||
- detection
|
||||
- Models are exported from `PaddleDetection <https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.2>`_. Refer to `EXPORT_MODEL.md <https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.2/deploy/EXPORT_MODEL.md#>`_.
|
||||
* - U-Net
|
||||
- semantic segmentation
|
||||
- Models are exported from `PaddleSeg <https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.3>`_. Refer to `model_export.md <https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.3/docs/model_export.md#>`_.
|
||||
* - BERT
|
||||
- language representation
|
||||
- Models are exported from `PaddleNLP <https://github.com/PaddlePaddle/PaddleNLP/tree/v2.1.1>`_. Refer to `README.md <https://github.com/PaddlePaddle/PaddleNLP/tree/develop/examples/language_model/bert#readme>`_.
|
||||
@endsphinxdirective
|
||||
|
||||
## Frequently Asked Questions (FAQ)
|
||||
When Model Optimizer is unable to run to completion due to typographical errors, incorrectly used options, or other issues, it provides explanatory messages. They describe the potential cause of the problem and give a link to the [Model Optimizer FAQ](../Model_Optimizer_FAQ.md), which provides instructions on how to resolve most issues. The FAQ also includes links to relevant sections in the Model Optimizer Developer Guide to help you understand what went wrong.
|
||||
|
||||
## See Also
|
||||
[Model Conversion Tutorials](Convert_Model_Tutorials.md)
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
# Converting a TensorFlow* Model {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_TensorFlow}
|
||||
This page gives instructions on how to convert a model from TensorFlow format to OpenVINO IR format using Model Optimizer. The instructions are different depending on if your model was created with TensorFlow v1.X or TensorFlow v2.X.
|
||||
|
||||
To use Model Optimizer, install OpenVINO Development Tools by following the [installation instructions here](https://docs.openvino.ai/latest/openvino_docs_install_guides_install_dev_tools.html).
|
||||
|
||||
## Convert TensorFlow 1 Models <a name="Convert_From_TF2X"></a>
|
||||
|
||||
@@ -146,9 +149,13 @@ The Model Optimizer provides explanatory messages if it is unable to run to comp
|
||||
In this document, you learned:
|
||||
|
||||
* Basic information about how the Model Optimizer works with TensorFlow models
|
||||
* Which TensorFlow models are supported
|
||||
* How to freeze a TensorFlow model
|
||||
* Which TensorFlow models formats are supported
|
||||
* How to freeze a TensorFlow 1 model
|
||||
* How to convert a trained TensorFlow model using the Model Optimizer with both framework-agnostic and TensorFlow-specific command-line options
|
||||
|
||||
## See Also
|
||||
[Model Conversion Tutorials](Convert_Model_Tutorials.md)
|
||||
This page provided general instructions for converting TensorFlow models. For step-by-step instructions showing how to convert specific TensorFlow models, see the [Model Conversion Tutorials](Convert_Model_Tutorials.md) page. Here are some example tutorials:
|
||||
* [Convert TensorFlow EfficientDet Models](tf_specific/Convert_EfficientDet_Models.md)
|
||||
* [Convert TensorFlow FaceNet Models](tf_specific/Convert_FaceNet_From_Tensorflow.md)
|
||||
* [Convert TensorFlow Object Detection API Models](tf_specific/Convert_Object_Detection_API_Models.md)
|
||||
|
||||
|
||||
@@ -1,20 +1,20 @@
|
||||
# Setting Input Shapes {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model}
|
||||
|
||||
Paragraphs below provide details about specifying input shapes for model conversion.
|
||||
Model Optimizer provides the option of making models more efficient by providing additional shape definition.
|
||||
It is achieved with two parameters: `--input_shape` and `--static_shape`, used under certain conditions.
|
||||
|
||||
@anchor when_to_specify_input_shapes
|
||||
## When to Specify --input_shape Command-line Parameter
|
||||
Model Optimizer supports conversion of models with dynamic input shapes that contain undefined dimensions.
|
||||
However, if the shape of inference data is not going to change from one inference request to another,
|
||||
However, if the shape of data is not going to change from one inference to another,
|
||||
it is recommended to set up static shapes (when all dimensions are fully defined) for the inputs.
|
||||
It can be beneficial from a performance perspective and memory consumption.
|
||||
To set up static shapes, Model Optimizer provides the `--input_shape` parameter.
|
||||
This is an offline approach to set static shapes and it can save time and memory on runtime shape change.
|
||||
To learn more about runtime shape change please see a dedicated article about [reshape feature](../../../OV_Runtime_UG/ShapeInference.md).
|
||||
For more information about the dynamic shapes, refer to [Dynamic Shapes](../../../OV_Runtime_UG/ov_dynamic_shapes.md)
|
||||
The same functionality is also available in runtime via `reshape` method, please refer to [Changing input shapes](../../../OV_Runtime_UG/ShapeInference.md).
|
||||
For more information about dynamic shapes in runtime, refer to [Dynamic Shapes](../../../OV_Runtime_UG/ov_dynamic_shapes.md)
|
||||
|
||||
OpenVINO Runtime API can have limitations to infer models with undefined dimensions on some hardware (see [Features support matrix](../../../OV_Runtime_UG/supported_plugins/Device_Plugins.md) for reference).
|
||||
In this case, the `--input_shape` parameter and the [reshape method](../../../OV_Runtime_UG/ShapeInference.md) can help resolving undefined dimensions.
|
||||
In this case, the `--input_shape` parameter and the [reshape method](../../../OV_Runtime_UG/ShapeInference.md) can help to resolve undefined dimensions.
|
||||
|
||||
Sometimes Model Optimizer is unable to convert models out-of-the-box (only the `--input_model` parameter is specified).
|
||||
Such problem can relate to models with inputs of undefined ranks and a case of cutting off parts of a model.
|
||||
|
||||
@@ -67,8 +67,15 @@ The attribute names are self-explanatory or match the name in the `hparams_confi
|
||||
|
||||
> **NOTE**: The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the `RGB<->BGR` conversion specifying the command-line parameter: `--reverse_input_channels`. Otherwise, inference results may be incorrect. For more information about the parameter, refer to **When to Reverse Input Channels** section of [Converting a Model to Intermediate Representation (IR)](../Converting_Model.md).
|
||||
|
||||
OpenVINO™ toolkit provides samples that can be used to infer EfficientDet model. For more information, refer to
|
||||
[Open Model Zoo Demos](@ref omz_demos) and
|
||||
## OpenVINO™ Toolkit Samples and Open Model Zoo Demos
|
||||
OpenVINO™ toolkit provides samples that can be used to infer EfficientDet models. For more information, refer to the following pages:
|
||||
* [OpenVINO Samples](../../../../OV_Runtime_UG/Samples_Overview.md)
|
||||
* [Hello Reshape SSD - Python](../../../../../samples/python/hello_reshape_ssd/README.md)
|
||||
* [Hello Reshape SSD - C++](../../../../../samples/cpp/hello_reshape_ssd/README.md)
|
||||
* [Open Model Zoo Demos](@ref omz_demos)
|
||||
* [Object Detection Python Demo](https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/object_detection_demo/python)
|
||||
* [Object Detection C++ Demo](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos/object_detection_demo/cpp)
|
||||
* [Hello Object Detection Jupyter notebook](https://docs.openvino.ai/latest/notebooks/004-hello-detection-with-output.html)
|
||||
|
||||
## <a name="efficientdet-ir-results-interpretation"></a>Interpreting Results of the TensorFlow Model and the IR
|
||||
|
||||
@@ -90,4 +97,4 @@ The output of the IR is a list of 7-element tuples: `[image_id, class_id, confid
|
||||
* `x_max` -- normalized `x` coordinate of the upper right corner of the detected object.
|
||||
* `y_max` -- normalized `y` coordinate of the upper right corner of the detected object.
|
||||
|
||||
The first element with `image_id = -1` means end of data.
|
||||
The first element with `image_id = -1` means end of data.
|
||||
|
||||
@@ -64,7 +64,11 @@ Speech Recognition, Natural Language Processing and others. Refer to the links b
|
||||
|
||||
|
||||
* [OpenVINO Samples](../../../../OV_Runtime_UG/Samples_Overview.md)
|
||||
* [Hello Reshape SSD - Python](../../../../../samples/python/hello_reshape_ssd/README.md)
|
||||
* [Hello Reshape SSD - C++](../../../../../samples/cpp/hello_reshape_ssd/README.md)
|
||||
* [Open Model Zoo Demos](@ref omz_demos)
|
||||
* [Object Detection Python Demo](https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/object_detection_demo/python)
|
||||
* [Object Detection C++ Demo](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos/object_detection_demo/cpp)
|
||||
|
||||
## Important Notes About Feeding Input Images to the Samples
|
||||
|
||||
|
||||
@@ -229,3 +229,10 @@ The model was trained with input values in the range `[0,1]`. OpenVINO™ to
|
||||
For other applicable parameters, refer to [Convert Model from TensorFlow](../Convert_Model_From_TensorFlow.md).
|
||||
|
||||
> **NOTE**: The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the `RGB<->BGR` conversion specifying the command-line parameter: `--reverse_input_channels`. Otherwise, inference results may be incorrect. For more information about the parameter, refer to **When to Reverse Input Channels** section of [Converting a Model to Intermediate Representation (IR)](../Converting_Model.md).
|
||||
|
||||
|
||||
<a name="yolo-examples"></a>
|
||||
## YOLO Sample Application
|
||||
OpenVINO™ [Open Model Zoo Demos](@ref omz_demos) provide a sample application showing how to run inferencing on a video input with object detection models. The sample is compatible with YOLOv1, YOLOv2, YOLOv3, and YOLOv4 full-size and tiny-size models:
|
||||
* [Object Detection Python Demo](https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/object_detection_demo/python)
|
||||
* [Object Detection C++ Demo](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos/object_detection_demo/cpp)
|
||||
|
||||
@@ -60,7 +60,6 @@
|
||||
ExperimentalDetectronROIFeatureExtractor-6 <openvino_docs_ops_detection_ExperimentalDetectronROIFeatureExtractor_6>
|
||||
ExperimentalDetectronTopKROIs-6 <openvino_docs_ops_sort_ExperimentalDetectronTopKROIs_6>
|
||||
ExtractImagePatches-3 <openvino_docs_ops_movement_ExtractImagePatches_3>
|
||||
Eye-9 <openvino_docs_ops_generation_Eye_9>
|
||||
FakeQuantize-1 <openvino_docs_ops_quantization_FakeQuantize_1>
|
||||
FloorMod-1 <openvino_docs_ops_arithmetic_FloorMod_1>
|
||||
Floor-1 <openvino_docs_ops_arithmetic_Floor_1>
|
||||
@@ -76,7 +75,6 @@
|
||||
GatherND-8 <openvino_docs_ops_movement_GatherND_8>
|
||||
GELU-2 <openvino_docs_ops_activation_GELU_2>
|
||||
GELU-7 <openvino_docs_ops_activation_GELU_7>
|
||||
GenerateProposals-9 <openvino_docs_ops_detection_GenerateProposals_9>
|
||||
GreaterEqual-1 <openvino_docs_ops_comparison_GreaterEqual_1>
|
||||
Greater-1 <openvino_docs_ops_comparison_Greater_1>
|
||||
GroupConvolutionBackpropData-1 <openvino_docs_ops_convolution_GroupConvolutionBackpropData_1>
|
||||
@@ -87,7 +85,6 @@
|
||||
I420toBGR-8 <openvino_docs_ops_image_I420toBGR_8>
|
||||
I420toRGB-8 <openvino_docs_ops_image_I420toRGB_8>
|
||||
IDFT-7 <openvino_docs_ops_signals_IDFT_7>
|
||||
IRDFT-9 <openvino_docs_ops_signals_IRDFT_9>
|
||||
If-8 <openvino_docs_ops_infrastructure_If_8>
|
||||
Interpolate-1 <openvino_docs_ops_image_Interpolate_1>
|
||||
Interpolate-4 <openvino_docs_ops_image_Interpolate_4>
|
||||
@@ -114,14 +111,12 @@
|
||||
Mish-4 <openvino_docs_ops_activation_Mish_4>
|
||||
Mod-1 <openvino_docs_ops_arithmetic_Mod_1>
|
||||
MulticlassNonMaxSuppression-8 <openvino_docs_ops_sort_MulticlassNonMaxSuppression_8>
|
||||
MulticlassNonMaxSuppression-9 <openvino_docs_ops_sort_MulticlassNonMaxSuppression_9>
|
||||
Multiply-1 <openvino_docs_ops_arithmetic_Multiply_1>
|
||||
Negative-1 <openvino_docs_ops_arithmetic_Negative_1>
|
||||
NonMaxSuppression-1 <openvino_docs_ops_sort_NonMaxSuppression_1>
|
||||
NonMaxSuppression-3 <openvino_docs_ops_sort_NonMaxSuppression_3>
|
||||
NonMaxSuppression-4 <openvino_docs_ops_sort_NonMaxSuppression_4>
|
||||
NonMaxSuppression-5 <openvino_docs_ops_sort_NonMaxSuppression_5>
|
||||
NonMaxSuppression-9 <openvino_docs_ops_sort_NonMaxSuppression_9>
|
||||
NonZero-3 <openvino_docs_ops_condition_NonZero_3>
|
||||
NormalizeL2-1 <openvino_docs_ops_normalization_NormalizeL2_1>
|
||||
NotEqual-1 <openvino_docs_ops_comparison_NotEqual_1>
|
||||
@@ -141,7 +136,6 @@
|
||||
RandomUniform-8 <openvino_docs_ops_generation_RandomUniform_8>
|
||||
Range-1 <openvino_docs_ops_generation_Range_1>
|
||||
Range-4 <openvino_docs_ops_generation_Range_4>
|
||||
RDFT-9<openvino_docs_ops_signals_RDFT_9>
|
||||
ReadValue-3 <openvino_docs_ops_infrastructure_ReadValue_3>
|
||||
ReLU-1 <openvino_docs_ops_activation_ReLU_1>
|
||||
ReduceL1-4 <openvino_docs_ops_reduction_ReduceL1_4>
|
||||
@@ -162,7 +156,6 @@
|
||||
RNNCell-3 <openvino_docs_ops_sequence_RNNCell_3>
|
||||
RNNSequence-5 <openvino_docs_ops_sequence_RNNSequence_5>
|
||||
ROIAlign-3 <openvino_docs_ops_detection_ROIAlign_3>
|
||||
ROIAlign-9 <openvino_docs_ops_detection_ROIAlign_9>
|
||||
ROIPooling-1 <openvino_docs_ops_detection_ROIPooling_1>
|
||||
Roll-7 <openvino_docs_ops_movement_Roll_7>
|
||||
Round-5 <openvino_docs_ops_arithmetic_Round_5>
|
||||
@@ -182,7 +175,6 @@
|
||||
SoftMax-1 <openvino_docs_ops_activation_SoftMax_1>
|
||||
SoftMax-8 <openvino_docs_ops_activation_SoftMax_8>
|
||||
SoftPlus-4 <openvino_docs_ops_activation_SoftPlus_4>
|
||||
SoftSign-9 <openvino_docs_ops_activation_SoftSign_9>
|
||||
SpaceToBatch-2 <openvino_docs_ops_movement_SpaceToBatch_2>
|
||||
SpaceToDepth-1 <openvino_docs_ops_movement_SpaceToDepth_1>
|
||||
Split-1 <openvino_docs_ops_movement_Split_1>
|
||||
|
||||
@@ -28,6 +28,12 @@ Python API allows passing data as tensors. `Tensor` object holds a copy of the d
|
||||
|
||||
@snippet docs/snippets/ov_python_exclusives.py tensor_shared_mode
|
||||
|
||||
### Slices of array's memory
|
||||
|
||||
One of the `Tensor` class constructors allows to share the slice of array's memory. When `shape` is specified in the constructor that has the numpy array as first argument, it triggers the special shared memory mode.
|
||||
|
||||
@snippet docs/snippets/ov_python_exclusives.py tensor_slice_mode
|
||||
|
||||
## Running inference
|
||||
|
||||
Python API supports extra calling methods to synchronous and asynchronous modes for inference.
|
||||
@@ -69,44 +75,3 @@ Another feature of `AsyncInferQueue` is ability of setting callbacks. When callb
|
||||
The callback of `AsyncInferQueue` is uniform for every job. When executed, GIL is acquired to ensure safety of data manipulation inside the function.
|
||||
|
||||
@snippet docs/snippets/ov_python_exclusives.py asyncinferqueue_set_callback
|
||||
|
||||
### Working with u1, u4 and i4 element types
|
||||
|
||||
Since openvino supports low precision element types there are few ways how to handle them in python.
|
||||
To create an input tensor with such element types you may need to pack your data in new numpy array which byte size matches original input size:
|
||||
@snippet docs/snippets/ov_python_exclusives.py packing_data
|
||||
|
||||
To extract low precision values from tensor into numpy array you can use next helper:
|
||||
@snippet docs/snippets/ov_python_exclusives.py unpacking
|
||||
|
||||
### Releasing the GIL
|
||||
|
||||
Some functions in Python API release the Global Lock Interpreter (GIL) while running work-intensive code. It can help you to achieve more parallelism in your application using Python threads. For more information about GIL please refer to the Python documentation.
|
||||
|
||||
@snippet docs/snippets/ov_python_exclusives.py releasing_gil
|
||||
|
||||
> **NOTE**: While the GIL is released, functions can still modify and/or operate on Python objects in C++, thus there is no reference counting. The user is responsible for thread safety if these objects are shared with another thread. It can affect your code only if multiple threads are spawned in Python.
|
||||
|
||||
#### List of functions that release the GIL
|
||||
- openvino.runtime.AsyncInferQueue.start_async
|
||||
- openvino.runtime.AsyncInferQueue.is_ready
|
||||
- openvino.runtime.AsyncInferQueue.wait_all
|
||||
- openvino.runtime.AsyncInferQueue.get_idle_request_id
|
||||
- openvino.runtime.CompiledModel.create_infer_request
|
||||
- openvino.runtime.CompiledModel.infer_new_request
|
||||
- openvino.runtime.CompiledModel.__call__
|
||||
- openvino.runtime.CompiledModel.export
|
||||
- openvino.runtime.CompiledModel.get_runtime_model
|
||||
- openvino.runtime.Core.compile_model
|
||||
- openvino.runtime.Core.read_model
|
||||
- openvino.runtime.Core.import_model
|
||||
- openvino.runtime.Core.query_model
|
||||
- openvino.runtime.Core.get_available_devices
|
||||
- openvino.runtime.InferRequest.infer
|
||||
- openvino.runtime.InferRequest.start_async
|
||||
- openvino.runtime.InferRequest.wait
|
||||
- openvino.runtime.InferRequest.wait_for
|
||||
- openvino.runtime.InferRequest.get_profiling_info
|
||||
- openvino.runtime.InferRequest.query_state
|
||||
- openvino.runtime.Model.reshape
|
||||
- openvino.preprocess.PrePostProcessor.build
|
||||
|
||||
@@ -35,6 +35,8 @@ If you install OpenVINO™ Runtime, sample applications for С, C++, and Python
|
||||
* `<INSTALL_DIR>/samples/cpp`
|
||||
* `<INSTALL_DIR>/samples/python`
|
||||
|
||||
Source code for the samples is also available in the [OpenVINO™ samples repository on GitHub](https://github.com/openvinotoolkit/openvino/tree/master/samples). If you installed OpenVINO™ Runtime using PyPI, samples are not installed locally and must be accessed through GitHub.
|
||||
|
||||
The applications include:
|
||||
|
||||
- **Speech Sample** - Acoustic model inference based on Kaldi neural networks and speech feature vectors.
|
||||
|
||||
@@ -24,6 +24,7 @@ The logic behind the choice is as follows:
|
||||
3. Select the highest-priority device capable of supporting the given model, as listed in the table below.
|
||||
4. If model’s precision is FP32 but there is no device capable of supporting it, offload the model to a device supporting FP16.
|
||||
|
||||
@sphinxdirective
|
||||
+----------+------------------------------------------------------+-------------------------------------+
|
||||
| Device || Supported || Supported |
|
||||
| Priority || Device || model precision |
|
||||
@@ -40,6 +41,7 @@ The logic behind the choice is as follows:
|
||||
| 4 || Intel® CPU | FP32, FP16, INT8, BIN |
|
||||
| || (e.g. Intel® Core™ i7-1165G7) | |
|
||||
+----------+------------------------------------------------------+-------------------------------------+
|
||||
@endsphinxdirective
|
||||
|
||||
To put it simply, when loading the model to the first device on the list fails, AUTO will try to load it to the next device in line, until one of them succeeds.
|
||||
What is important, **AUTO always starts inference with the CPU**, as it provides very low latency and can start inference with no additional delays.
|
||||
@@ -53,26 +55,22 @@ Note that if you choose to exclude the CPU from the priority list, it will also
|
||||
|
||||
This mechanism can be easily observed in our Benchmark Application sample ([see here](#Benchmark App Info)), showing how the first-inference latency (the time it takes to compile the model and perform the first inference) is reduced when using AUTO. For example:
|
||||
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
```sh
|
||||
benchmark_app -m ../public/alexnet/FP32/alexnet.xml -d GPU -niter 128
|
||||
```
|
||||
|
||||
./benchmark_app -m ../public/alexnet/FP32/alexnet.xml -d GPU -niter 128
|
||||
@endsphinxdirective
|
||||
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
./benchmark_app -m ../public/alexnet/FP32/alexnet.xml -d AUTO -niter 128
|
||||
@endsphinxdirective
|
||||
```sh
|
||||
benchmark_app -m ../public/alexnet/FP32/alexnet.xml -d AUTO -niter 128
|
||||
```
|
||||
|
||||
|
||||
@sphinxdirective
|
||||
.. note::
|
||||
|
||||
The longer the process runs, the closer realtime performance will be to that of the best-suited device.
|
||||
@endsphinxdirective
|
||||
|
||||
## Using the Auto-Device Plugin
|
||||
|
||||
## Using the Auto-Device Mode
|
||||
|
||||
Following the OpenVINO™ naming convention, the Automatic Device Selection mode is assigned the label of “AUTO.” It may be defined with no additional parameters, resulting in defaults being used, or configured further with the following setup options:
|
||||
|
||||
@@ -106,7 +104,8 @@ Inference with AUTO is configured similarly to when device plugins are used:
|
||||
you compile the model on the plugin with configuration and execute inference.
|
||||
|
||||
### Device candidate list
|
||||
The device candidate list allows users to customize the priority and limit the choice of devices available to the AUTO plugin. If not specified, the plugin assumes all the devices present in the system can be used. Note, that OpenVINO™ Runtime lets you use “GPU” as an alias for “GPU.0” in function calls.
|
||||
The device candidate list allows users to customize the priority and limit the choice of devices available to the AUTO plugin. If not specified, the plugin assumes all the devices present in the system can be used. Note, that OpenVINO™ Runtime lets you use “GPU” as an alias for “GPU.0” in function calls. More detail on enumerating devices can be found in [Working with devices](supported_plugins/Device_Plugins.md).
|
||||
|
||||
The following commands are accepted by the API:
|
||||
|
||||
@sphinxdirective
|
||||
@@ -128,27 +127,24 @@ The following commands are accepted by the API:
|
||||
To check what devices are present in the system, you can use Device API. For information on how to do it, check [Query device properties and configuration](supported_plugins/config_properties.md)
|
||||
|
||||
For C++
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
ov::runtime::Core::get_available_devices() (see Hello Query Device C++ Sample)
|
||||
@endsphinxdirective
|
||||
```sh
|
||||
ov::runtime::Core::get_available_devices() (see Hello Query Device C++ Sample)
|
||||
```
|
||||
|
||||
For Python
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
openvino.runtime.Core.available_devices (see Hello Query Device Python Sample)
|
||||
@endsphinxdirective
|
||||
|
||||
```sh
|
||||
openvino.runtime.Core.available_devices (see Hello Query Device Python Sample)
|
||||
```
|
||||
|
||||
### Performance Hints
|
||||
The `ov::hint::performance_mode` property enables you to specify a performance mode for the plugin to be more efficient for particular use cases.
|
||||
|
||||
#### ov::hint::PerformanceMode::THROUGHPUT
|
||||
#### THROUGHPUT Mode
|
||||
This mode prioritizes high throughput, balancing between latency and power. It is best suited for tasks involving multiple jobs, like inference of video feeds or large numbers of images.
|
||||
|
||||
#### ov::hint::PerformanceMode::LATENCY
|
||||
#### LATENCY Mode
|
||||
This mode prioritizes low latency, providing short response time for each inference job. It performs best for tasks where inference is required for a single input image, like a medical analysis of an ultrasound scan image. It also fits the tasks of real-time or nearly real-time applications, such as an industrial robot's response to actions in its environment or obstacle avoidance for autonomous vehicles.
|
||||
Note that currently the `ov::hint` property is supported by CPU and GPU devices only.
|
||||
|
||||
@@ -169,8 +165,9 @@ To enable performance hints for your application, use the following code:
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
### ov::hint::model_priority
|
||||
The property enables you to control the priorities of models in the Auto-Device plugin. A high-priority model will be loaded to a supported high-priority device. A lower-priority model will not be loaded to a device that is occupied by a higher-priority model.
|
||||
### Model Priority
|
||||
|
||||
The `ov::hint::model_priority` property enables you to control the priorities of models in the Auto-Device plugin. A high-priority model will be loaded to a supported high-priority device. A lower-priority model will not be loaded to a device that is occupied by a higher-priority model.
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
@@ -189,8 +186,10 @@ The property enables you to control the priorities of models in the Auto-Device
|
||||
@endsphinxdirective
|
||||
|
||||
## Configuring Individual Devices and Creating the Auto-Device plugin on Top
|
||||
|
||||
Although the methods described above are currently the preferred way to execute inference with AUTO, the following steps can also be used as an alternative. It is currently available as a legacy feature and used if the device candidate list includes Myriad devices, incapable of utilizing the Performance Hints option.
|
||||
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: C++
|
||||
@@ -212,18 +211,16 @@ Although the methods described above are currently the preferred way to execute
|
||||
To see how the Auto-Device plugin is used in practice and test its performance, take a look at OpenVINO™ samples. All samples supporting the "-d" command-line option (which stands for "device") will accept the plugin out-of-the-box. The Benchmark Application will be a perfect place to start – it presents the optimal performance of the plugin without the need for additional settings, like the number of requests or CPU threads. To evaluate the AUTO performance, you can use the following commands:
|
||||
|
||||
For unlimited device choice:
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
benchmark_app –d AUTO –m <model> -i <input> -niter 1000
|
||||
@endsphinxdirective
|
||||
```sh
|
||||
benchmark_app –d AUTO –m <model> -i <input> -niter 1000
|
||||
```
|
||||
|
||||
For limited device choice:
|
||||
@sphinxdirective
|
||||
.. code-block:: sh
|
||||
|
||||
benchmark_app –d AUTO:CPU,GPU,MYRIAD –m <model> -i <input> -niter 1000
|
||||
@endsphinxdirective
|
||||
```sh
|
||||
benchmark_app –d AUTO:CPU,GPU,MYRIAD –m <model> -i <input> -niter 1000
|
||||
```
|
||||
|
||||
For more information, refer to the [C++](../../samples/cpp/benchmark_app/README.md) or [Python](../../tools/benchmark_tool/README.md) version instructions.
|
||||
|
||||
|
||||
@@ -124,10 +124,20 @@ The `benchmark_app`, that exists in both [C++](../../samples/cpp/benchmark_app/
|
||||
- Overriding the strict rules of implicit reshaping by the batch dimension via the explicit device notion:
|
||||
- benchmark_app **-hint none -d BATCH:GPU** -m 'path to your favorite model'
|
||||
- Finally, overriding the automatically-deduced batch size as well:
|
||||
- $benchmark_app -hint none -d **BATCH:GPU(16)** -m 'path to your favorite model'
|
||||
- benchmark_app -hint none -d **BATCH:GPU(16)** -m 'path to your favorite model'
|
||||
- This example is also applicable to the CPU or any other device that generally supports the batched execution.
|
||||
- notice that some shell versions (e.g. `bash`) may require adding quotes around complex device names, i.e. -d "BATCH:GPU(16)"
|
||||
|
||||
The last example is also applicable to the CPU or any other device that generally supports the batched execution.
|
||||
|
||||
Notice that the benchmark_app performs a warm-up run of a _single_ request. As the Auto-Batching requires significantly more requests to execute in batch, this warm up run hits the default timeout (1000 ms), which is reported as e.g.:
|
||||
```
|
||||
[ INFO ] First inference took 1000.18ms
|
||||
```
|
||||
This value is also exposed as the final execution statistics on the `benchmark_app` exit:
|
||||
```
|
||||
[ INFO ] Latency:
|
||||
[ INFO ] Max: 1000.18 ms
|
||||
```
|
||||
This is NOT the actual latency of the batched execution, so please refer to other metrics in the same log, e.g. "Median" or "Average" execution.
|
||||
|
||||
### See Also
|
||||
[Supported Devices](supported_plugins/Supported_Devices.md)
|
||||
|
||||
@@ -1,34 +1,34 @@
|
||||
# Heterogeneous execution {#openvino_docs_OV_UG_Hetero_execution}
|
||||
|
||||
Heterogeneous execution enables executing inference of one model on several devices. Its purpose is to:
|
||||
The Heterogeneous Execution mode, or HETERO for short, acts as a "virtual" or a "proxy" device, which does not bind to a specific type of hardware. Instead, it executes inference of one model on several devices. Its purpose is to utilize all available hardware more efficiently during one inference. This means that accelerators are used to process the heaviest parts of the model, while fallback devices, like the CPU, execute operations not supported by accelerators.
|
||||
|
||||
* Utilize the power of accelerators to process the heaviest parts of the model and to execute unsupported operations on fallback devices, like the CPU.
|
||||
* Utilize all available hardware more efficiently during one inference.
|
||||
Compiling a model to the Heterogeneous mode assumes splitting it into subgraphs. Each subgraph is compiled on a dedicated device and multiple `ov::CompiledModel` objects are created. The objects are connected via automatically allocated intermediate tensors.
|
||||
|
||||
Execution via the heterogeneous mode can be divided into two independent steps:
|
||||
Importantly, the model division is performed according to pre-defined affinities between hardware and operations. Every set of connected operations with the same affinity becomes a dedicated subgraph. Setting these affinities needs to be done as a separate step (`ov::Core::query_model` is used internally by HETERO), as described below.
|
||||
|
||||
1. Setting hardware affinity to operations (`ov::Core::query_model` is used internally by the Hetero device)
|
||||
2. Compiling a model to the Heterogeneous device assumes splitting the model to parts, compiling them on the specified devices (via `ov::device::priorities`), and executing them in the Heterogeneous mode. The model is split to subgraphs in accordance with the affinities, where a set of connected operations with the same affinity is to be a dedicated subgraph. Each subgraph is compiled on a dedicated device and multiple `ov::CompiledModel` objects are made, which are connected via automatically allocated intermediate tensors.
|
||||
|
||||
These two steps are not interconnected and affinities can be set in one of two ways, used separately or in combination (as described below): in the `manual` or the `automatic` mode.
|
||||
### Using the Hetero Mode
|
||||
|
||||
### Defining and Configuring the Hetero Device
|
||||
|
||||
Following the OpenVINO™ naming convention, the Hetero execution plugin is assigned the label of `"HETERO".` It may be defined with no additional parameters, resulting in defaults being used, or configured further with the following setup options:
|
||||
Following the OpenVINO™ naming convention, the Hetero execution mode is assigned the label of `"HETERO".` It may be defined with no additional parameters, resulting in defaults being used, or configured further with the following setup options:
|
||||
|
||||
@sphinxdirective
|
||||
+-------------------------------+--------------------------------------------+-----------------------------------------------------------+
|
||||
| Parameter Name & C++ property | Property values | Description |
|
||||
| Property | Property values | Description |
|
||||
+===============================+============================================+===========================================================+
|
||||
| | "MULTI_DEVICE_PRIORITIES" | | HETERO: <device names> | | Lists the devices available for selection. |
|
||||
| | `ov::device::priorities` | | comma-separated, no spaces | | The device sequence will be taken as priority |
|
||||
| | | | | | from high to low. |
|
||||
| <device list> | | HETERO: <device names> | | Specifies the devices available for selection. |
|
||||
| | | comma-separated, no spaces | | The device sequence will be taken as priority |
|
||||
+-------------------------------+--------------------------------------------+ | from high to low. |
|
||||
| `ov::device::priorities` | | device names | | |
|
||||
| | | comma-separated, no spaces | | |
|
||||
+-------------------------------+--------------------------------------------+-----------------------------------------------------------+
|
||||
@endsphinxdirective
|
||||
|
||||
### Manual and Automatic modes for assigning affinities
|
||||
|
||||
#### The Manual Mode
|
||||
### Assigning Affinities
|
||||
|
||||
Affinities can be set in one of two ways, used separately or in combination: with the `manual` or the `automatic` option.
|
||||
|
||||
#### The Manual Option
|
||||
It assumes setting affinities explicitly for all operations in the model using `ov::Node::get_rt_info` with the `"affinity"` key.
|
||||
|
||||
@sphinxtabset
|
||||
@@ -47,13 +47,10 @@ It assumes setting affinities explicitly for all operations in the model using `
|
||||
|
||||
@endsphinxtabset
|
||||
|
||||
|
||||
|
||||
|
||||
#### The Automatic Mode
|
||||
#### The Automatic Option
|
||||
It decides automatically which operation is assigned to which device according to the support from dedicated devices (`GPU`, `CPU`, `MYRIAD`, etc.), and the query model step is called implicitly by the Hetero device during model compilation.
|
||||
|
||||
The automatic mode causes "greedy" behavior and assigns all operations that can be executed on a given device to it, according to the priorities you specify (for example, `ov::device::priorities("GPU,CPU")`).
|
||||
The automatic option causes "greedy" behavior and assigns all operations that can be executed on a given device to it, according to the priorities you specify (for example, `ov::device::priorities("GPU,CPU")`).
|
||||
It does not take into account device peculiarities such as the inability to infer certain operations without other special operations placed before or after that layer. If the device plugin does not support the subgraph topology constructed by the HETERO device, then you should set affinity manually.
|
||||
|
||||
@sphinxtabset
|
||||
@@ -72,7 +69,7 @@ It does not take into account device peculiarities such as the inability to infe
|
||||
|
||||
@endsphinxtabset
|
||||
|
||||
#### Using Manual and Automatic Modes in Combination
|
||||
#### Using Manual and Automatic Options in Combination
|
||||
In some cases you may need to consider manually adjusting affinities which were set automatically. It usually serves minimizing the number of total subgraphs to optimize memory transfers. To do it, you need to "fix" the automatically assigned affinities like so:
|
||||
|
||||
@sphinxtabset
|
||||
@@ -91,10 +88,11 @@ In some cases you may need to consider manually adjusting affinities which were
|
||||
|
||||
@endsphinxtabset
|
||||
|
||||
Importantly, the automatic mode will not work if any operation in a model has its `"affinity"` already initialized.
|
||||
Importantly, the automatic option will not work if any operation in a model has its `"affinity"` already initialized.
|
||||
|
||||
> **NOTE**: `ov::Core::query_model` does not depend on affinities set by a user. Instead, it queries for an operation support based on device capabilities.
|
||||
|
||||
|
||||
### Configure fallback devices
|
||||
If you want different devices in Hetero execution to have different device-specific configuration options, you can use the special helper property `ov::device::properties`:
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
> **NOTE**: Before start using OpenVINO™ Runtime, make sure you set all environment variables during the installation. If you did not, follow the instructions from the _Set the Environment Variables_ section in the installation guides:
|
||||
> **NOTE**: Before start using OpenVINO™ Runtime, make sure you set all environment variables during the installation. To do so, follow the instructions from the _Set the Environment Variables_ section in the installation guides:
|
||||
> * [For Windows* 10](../install_guides/installing-openvino-windows.md)
|
||||
> * [For Linux*](../install_guides/installing-openvino-linux.md)
|
||||
> * [For macOS*](../install_guides/installing-openvino-macos.md)
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
## Use OpenVINO™ Runtime API to Implement Inference Pipeline
|
||||
|
||||
This section provides step-by-step instructions to implement a typical inference pipeline with the OpenVINO™ Runtime C++ API:
|
||||
This section provides step-by-step instructions to implement a typical inference pipeline with the OpenVINO™ Runtime C++ or Python API:
|
||||
|
||||
![ie_api_use_cpp]
|
||||
|
||||
@@ -64,7 +64,7 @@ Use the following code to create OpenVINO™ Core to manage available devices an
|
||||
|
||||
### Step 2. Compile the Model
|
||||
|
||||
`ov::CompiledModel` class represents a device specific compiled model. `ov::CompiledModel` allows you to get information inputs or output ports by a tensor name or index, this approach is aligned with the majority of frameworks.
|
||||
`ov::CompiledModel` class represents a device specific compiled model. `ov::CompiledModel` allows you to get information inputs or output ports by a tensor name or index. This approach is aligned with the majority of frameworks.
|
||||
|
||||
Compile the model for a specific device using `ov::Core::compile_model()`:
|
||||
|
||||
@@ -185,7 +185,7 @@ You can use external memory to create `ov::Tensor` and use the `ov::InferRequest
|
||||
|
||||
### Step 5. Start Inference
|
||||
|
||||
OpenVINO™ Runtime supports inference in either synchronous or asynchronous mode. Using the Async API can improve application's overall frame-rate, because rather than wait for inference to complete, the app can keep working on the host, while the accelerator is busy. You can use `ov::InferRequest::start_async` to start model inference in the asynchronous mode and call `ov::InferRequest::wait` to wait for the inference results:
|
||||
OpenVINO™ Runtime supports inference in either synchronous or asynchronous mode. Using the Async API can improve application's overall frame-rate: instead of waiting for inference to complete, the app can keep working on the host while the accelerator is busy. You can use `ov::InferRequest::start_async` to start model inference in the asynchronous mode and call `ov::InferRequest::wait` to wait for the inference results:
|
||||
|
||||
@sphinxtabset
|
||||
|
||||
@@ -203,7 +203,7 @@ OpenVINO™ Runtime supports inference in either synchronous or asynchronous mod
|
||||
|
||||
@endsphinxtabset
|
||||
|
||||
This section demonstrates a simple pipeline, to get more information about other ways to perform inference, read the dedicated ["Run inference" section](./ov_infer_request.md).
|
||||
This section demonstrates a simple pipeline. To get more information about other ways to perform inference, read the dedicated ["Run inference" section](./ov_infer_request.md).
|
||||
|
||||
### Step 6. Process the Inference Results
|
||||
|
||||
@@ -253,16 +253,20 @@ cd build/
|
||||
cmake ../project
|
||||
cmake --build .
|
||||
```
|
||||
It's allowed to specify additional build options (e.g. to build CMake project on Windows with a specific build tools). Please refer to the [CMake page](https://cmake.org/cmake/help/latest/manual/cmake.1.html#manual:cmake(1)) for details.
|
||||
You can also specify additional build options (e.g. to build CMake project on Windows with a specific build tools). Please refer to the [CMake page](https://cmake.org/cmake/help/latest/manual/cmake.1.html#manual:cmake(1)) for details.
|
||||
|
||||
## Run Your Application
|
||||
|
||||
Congratulations, you have made your first application with the OpenVINO™ toolkit. Now you may run it.
|
||||
|
||||
This page showed how to implement a typical inference pipeline with OpenVINO. See the [OpenVINO Samples](Samples_Overview.md) page or the [Open Model Zoo Demos](https://docs.openvino.ai/latest/omz_demos.html) page for specific examples of how OpenVINO pipelines are implemented for applications like image classification, text prediction, and many others.
|
||||
|
||||
## See also
|
||||
|
||||
- [OpenVINO™ Runtime Preprocessing](./preprocessing_overview.md)
|
||||
- [Using Encrypted Models with OpenVINO™](./protecting_model_guide.md)
|
||||
- [OpenVINO Samples](Samples_Overview.md)
|
||||
- [Open Model Zoo Demos](https://docs.openvino.ai/latest/omz_demos.html)
|
||||
|
||||
[ie_api_flow_cpp]: img/BASIC_IE_API_workflow_Cpp.svg
|
||||
[ie_api_use_cpp]: img/IMPLEMENT_PIPELINE_with_API_C.svg
|
||||
|
||||
@@ -41,7 +41,7 @@ IR v11 is supported by all OpenVINO Development tools including Post-Training Op
|
||||
API 2.0 also supports models in IR v10 for backward compatibility. If you have IR v10 files, they can be fed to OpenVINO Runtime as well (see [migration steps](common_inference_pipeline.md)).
|
||||
|
||||
Some OpenVINO Development Tools also support both IR v10 and IR v11 as an input:
|
||||
- Accuracy checker uses API 2.0 for model accuracy measurement by default, but also supports switching to the old API using the `--use_new_api False` command line parameter. Both launchers accept IR v10 and v11, but in some cases configuration files should be updated. More details can be found in [Accuracy Checker documentation](https://github.com/openvinotoolkit/open_model_zoo/blob/master/tools/accuracy_checker/openvino/tools/accuracy_checker/launcher/openvino_launcher_readme.md).
|
||||
- Accuracy checker uses API 2.0 for model accuracy measurement by default, but also supports switching to the old API using the `--use_new_api False` command line parameter. Both launchers accept IR v10 and v11, but in some cases configuration files should be updated. More details can be found in [Accuracy Checker documentation](https://github.com/openvinotoolkit/open_model_zoo/blob/releases/2022/1/tools/accuracy_checker/openvino/tools/accuracy_checker/launcher/openvino_launcher_readme.md).
|
||||
- [Compile tool](../../../tools/compile_tool/README.md) compiles the model to be used in API 2.0 by default. If you want to use the resulting compiled blob under the Inference Engine API, the additional `ov_api_1_0` option should be passed.
|
||||
|
||||
The following OpenVINO tools don't support IR v10 as an input and require the latest version of Model Optimizer to generate IR v11 files:
|
||||
|
||||
@@ -1,39 +1,41 @@
|
||||
# Running on multiple devices simultaneously {#openvino_docs_OV_UG_Running_on_multiple_devices}
|
||||
|
||||
## Introducing the Multi-Device Plugin (C++)
|
||||
|
||||
|
||||
The Multi-Device execution mode, or MULTI for short, acts as a "virtual" or a "proxy" device, which does not bind to a specific type of hardware. Instead, it assigns available computing devices to particular inference requests, which are then executed in parallel.
|
||||
|
||||
The potential gains from using Multi-Device execution are:
|
||||
* improved throughput from using multiple devices at once,
|
||||
* increase in performance stability due to multiple devices sharing inference workload.
|
||||
|
||||
Importantly, the Multi-Device mode does not change the application logic, so it does not require you to explicitly compile the model on every device or create and balance inference requests. It appears to use a typical device but internally handles the actual hardware.
|
||||
|
||||
Note that the performance increase in this mode comes from utilizing multiple devices at once. This means that you need to provide the devices with enough inference requests to keep them busy, otherwise you will not benefit much from using MULTI.
|
||||
|
||||
|
||||
## Using the Multi-Device Mode
|
||||
|
||||
Following the OpenVINO™ naming convention, the Multi-Device mode is assigned the label of “MULTI.” The only configuration option available for it is a prioritized list of devices to use:
|
||||
|
||||
@sphinxdirective
|
||||
.. raw:: html
|
||||
|
||||
<div id="switcher-cpp" class="switcher-anchor">C++</div>
|
||||
+---------------------------+---------------------------------+------------------------------------------------------------+
|
||||
| Property | Property values | Description |
|
||||
+===========================+=================================+============================================================+
|
||||
| <device list> | | MULTI: <device names> | | Specifies the devices available for selection. |
|
||||
| | | comma-separated, no spaces | | The device sequence will be taken as priority |
|
||||
+---------------------------+---------------------------------+ | from high to low. |
|
||||
| ov::device::priorities | | device names | | Priorities can be set directly as a string. |
|
||||
| | | comma-separated, no spaces | |
|
||||
+---------------------------+---------------------------------+------------------------------------------------------------+
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
The Multi-Device plugin automatically assigns inference requests to available computational devices to execute the requests in parallel. By contrast, the Heterogeneous plugin can run different layers on different devices but not in parallel. The potential gains with the Multi-Device plugin are:
|
||||
Specifying the device list explicitly is required by MULTI, as it defines the devices available for inference and sets their priorities. Importantly, the list may also specify the number of requests for MULTI to keep for each device, as described below.
|
||||
|
||||
* Improved throughput from using multiple devices (compared to single-device execution)
|
||||
* More consistent performance, since the devices share the inference burden (if one device is too busy, another can take more of the load)
|
||||
Note that OpenVINO™ Runtime enables you to use “GPU” as an alias for “GPU.0” in function calls. More details on enumerating devices can be found in [Working with devices](supported_plugins/Device_Plugins.md).
|
||||
|
||||
Note that with Multi-Device the application logic is left unchanged, so you don't need to explicitly compile the model on every device, create and balance the inference requests and so on. From the application point of view, this is just another device that handles the actual machinery. The only thing that is required to leverage performance is to provide the multi-device (and hence the underlying devices) with enough inference requests to process. For example, if you were processing 4 cameras on the CPU (with 4 inference requests), it might be desirable to process more cameras (with more requests in flight) to keep CPU and GPU busy via Multi-Device.
|
||||
|
||||
The setup of Multi-Device can be described in three major steps:
|
||||
|
||||
1. Prepare a configuration for each device.
|
||||
2. Compile the model on the Multi-Device plugin created on top of a (prioritized) list of the configured devices with the configuration prepared in step one.
|
||||
3. As with any other CompiledModel call (resulting from `compile_model`), you create as many requests as needed to saturate the devices.
|
||||
|
||||
These steps are covered below in detail.
|
||||
|
||||
### Defining and Configuring the Multi-Device Plugin
|
||||
|
||||
Following the OpenVINO™ convention of labeling devices, the Multi-Device plugin uses the name "MULTI". The only configuration option for the Multi-Device plugin is a prioritized list of devices to use:
|
||||
|
||||
| Parameter name | Parameter values | Default | Description |
|
||||
| -------------- | ---------------- | --- | --- |
|
||||
| ov::device::priorities | comma-separated device names with no spaces | N/A | Prioritized list of devices |
|
||||
|
||||
You can set the priorities directly as a string.
|
||||
|
||||
Basically, there are three ways to specify the devices to be used by "MULTI":
|
||||
The following commands are accepted by the API:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
@@ -43,9 +45,15 @@ Basically, there are three ways to specify the devices to be use by the "MULTI":
|
||||
:language: cpp
|
||||
:fragment: [part0]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [MULTI_0]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
Notice that the priorities of the devices can be changed in real time for the compiled model:
|
||||
Notice that MULTI allows you to **change device priorities on the fly**. You can alter the order, exclude a device, and bring an excluded device back. Still, it does not allow adding new devices.
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
@@ -55,59 +63,23 @@ Notice that the priorities of the devices can be changed in real time for the co
|
||||
:language: cpp
|
||||
:fragment: [part1]
|
||||
|
||||
@endsphinxdirective
|
||||
.. tab:: Python
|
||||
|
||||
Finally, there is a way to specify number of requests that the Multi-Device will internally keep for each device. Suppose your original app was running 4 cameras with 4 inference requests. You would probably want to share these 4 requests between 2 devices used in MULTI. The easiest way is to specify a number of requests for each device using parentheses: "MULTI:CPU(2),GPU(2)" and use the same 4 requests in your app. However, such an explicit configuration is not performance-portable and hence not recommended. Instead, the better way is to configure the individual devices and query the resulting number of requests to be used at the application level (see [Configuring the Individual Devices and Creating the Multi-Device On Top](#configuring-the-individual-devices-and-creating-the-multi-device-on-top)).
|
||||
|
||||
### Enumerating Available Devices
|
||||
The OpenVINO Runtime API features dedicated methods to enumerate devices and their capabilities. See the [Hello Query Device C++ Sample](../../samples/cpp/hello_query_device/README.md). This is example output from the sample (truncated to device names only):
|
||||
|
||||
```sh
|
||||
./hello_query_device
|
||||
Available devices:
|
||||
Device: CPU
|
||||
...
|
||||
Device: GPU.0
|
||||
...
|
||||
Device: GPU.1
|
||||
...
|
||||
Device: HDDL
|
||||
```
|
||||
|
||||
A simple programmatic way to enumerate the devices and use with the multi-device is as follows:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI2.cpp
|
||||
:language: cpp
|
||||
:fragment: [part2]
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [MULTI_1]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
Beyond the trivial "CPU", "GPU", "HDDL" and so on, when multiple instances of a device are available the names are more qualified. For example, this is how two Intel® Movidius™ Myriad™ X sticks are listed with the hello_query_sample:
|
||||
```
|
||||
...
|
||||
Device: MYRIAD.1.2-ma2480
|
||||
...
|
||||
Device: MYRIAD.1.4-ma2480
|
||||
```
|
||||
|
||||
So the explicit configuration to use both would be "MULTI:MYRIAD.1.2-ma2480,MYRIAD.1.4-ma2480". Accordingly, the code that loops over all available devices of "MYRIAD" type only is below:
|
||||
|
||||
@sphinxdirective
|
||||
One more thing you can define is the **number of requests to allocate for each device**. You can do it simply by adding the number to each device in parentheses, like this: `"MULTI:CPU(2),GPU(2)"`. However, this method is not recommended as it is not performance-portable. The suggested approach is to configure individual devices and query the resulting number of requests to be used at the application level, as described in [Configuring Individual Devices and Creating MULTI On Top](#configuring-the-individual-devices-and-creating-the-multi-device-on-top).
|
||||
|
||||
.. tab:: C++
|
||||
To check what devices are present in the system, you can use the Device API. For information on how to do it, check [Query device properties and configuration](supported_plugins/config_properties.md).
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI3.cpp
|
||||
:language: cpp
|
||||
:fragment: [part3]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
### Configuring the Individual Devices and Creating the Multi-Device On Top
|
||||
As discussed in the first section, you shall configure each individual device as usual and then just create the "MULTI" device on top:
|
||||
### Configuring Individual Devices and Creating the Multi-Device On Top
|
||||
As mentioned previously, executing inference with MULTI may be set up by configuring individual devices before creating the "MULTI" device on top. It may be considered for performance reasons.
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
@@ -117,15 +89,21 @@ As discussed in the first section, you shall configure each individual device as
|
||||
:language: cpp
|
||||
:fragment: [part4]
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [MULTI_4]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
An alternative is to combine all the individual device settings into a single config file and load that, allowing the Multi-Device plugin to parse and apply settings to the right devices. See the code example in the next section.
|
||||
Alternatively, you can combine all the individual device settings into a single config file and load it for MULTI to parse. See the code example in the next section.
|
||||
|
||||
|
||||
Note that while the performance of accelerators combines really well with Multi-Device, the CPU+GPU execution poses some performance caveats, as these devices share the power, bandwidth and other resources. For example it is recommended to enable the GPU throttling hint (which save another CPU thread for the CPU inference).
|
||||
See the [Using the Multi-Device with OpenVINO samples and benchmarking the performance](#using-the-multi-device-with-openvino-samples-and-benchmarking-the-performance) section below.
|
||||
|
||||
### Querying the Optimal Number of Inference Requests
|
||||
You can use the [configure devices](supported_plugins/config_properties.md) to query the optimal number of requests. Similarly, when using the Multi-Device you don't need to sum over included devices yourself, you can query property directly:
|
||||
When using MULTI, you don't need to sum over included devices yourself, you can query the optimal number of requests directly,
|
||||
using the [configure devices](supported_plugins/config_properties.md) property:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
@@ -137,186 +115,46 @@ You can use the [configure devices](supported_plugins/config_properties.md) to q
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
### Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance
|
||||
|
||||
Every OpenVINO sample that supports the `-d` (which stands for "device") command-line option transparently accepts Multi-Device. The [Benchmark Application](../../samples/cpp/benchmark_app/README.md) is the best reference for the optimal usage of Multi-Device. As discussed earlier, you do not need to set up the number of requests, CPU streams or threads because the application provides optimal performance out of the box. Below is an example command to evaluate HDDL+GPU performance with that:
|
||||
|
||||
## Using the Multi-Device with OpenVINO Samples and Benchmarking Performance
|
||||
|
||||
To see how the Multi-Device execution is used in practice and test its performance, take a look at OpenVINO's Benchmark Application which presents the optimal performance of the plugin without the need for additional settings, like the number of requests or CPU threads.
|
||||
Here is an example command to evaluate performance of HDDL+GPU:
|
||||
|
||||
```sh
|
||||
./benchmark_app -d MULTI:HDDL,GPU -m <model> -i <input> -niter 1000
|
||||
```
|
||||
|
||||
The Multi-Device plugin supports FP16 IR files. The CPU plugin automatically upconverts it to FP32 and the other devices support it natively. Note that no demos are (yet) fully optimized for Multi-Device, by means of supporting the ov::optimal_number_of_infer_requests property, using the GPU streams/throttling, and so on.
|
||||
|
||||
### Video: MULTI Plugin
|
||||
For more information, refer to the [C++](../../samples/cpp/benchmark_app/README.md) or [Python](../../tools/benchmark_tool/README.md) version instructions.
|
||||
|
||||
@sphinxdirective
|
||||
.. raw:: html
|
||||
.. note::
|
||||
|
||||
<iframe allowfullscreen mozallowfullscreen msallowfullscreen oallowfullscreen webkitallowfullscreen width="560" height="315" src="https://www.youtube.com/embed/xbORYFEmrqU" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
You can keep using the FP16 IR without converting it to FP32, even if some of the listed devices do not support it. The conversion will be done automatically for you.
|
||||
|
||||
No demos are yet fully optimized for MULTI, by means of supporting the ov::optimal_number_of_infer_requests property, using the GPU streams/throttling, and so on.
|
||||
@endsphinxdirective
|
||||
|
||||
### See Also
|
||||
[Supported Devices](supported_plugins/Supported_Devices.md)
|
||||
|
||||
## Performance Considerations for the Multi-Device Execution
|
||||
This section covers a few recommendations for the multi-device execution (applicable for both Python and C++):
|
||||
- MULTI usually performs best when the fastest device is specified first in the list of the devices.
|
||||
This is particularly important when the request-level parallelism is not sufficient
|
||||
(e.g. the number of requests in flight is not enough to saturate all devices).
|
||||
- Just like with any throughput-oriented execution, it is highly recommended to query the optimal number of inference requests directly from the instance of the `ov::compiled_model`.
|
||||
Please refer to the code of the `benchmark_app`, that exists in both [C++](../../samples/cpp/benchmark_app/README.md) and [Python](../../tools/benchmark_tool/README.md), for more details.
|
||||
- Notice that for example CPU+GPU execution performs better with certain knobs
|
||||
which you can find in the code of the same [Benchmark App](../../samples/cpp/benchmark_app/README.md) sample.
|
||||
One specific example is disabling GPU driver polling, which in turn requires multiple GPU streams to amortize slower
|
||||
communication of inference completion from the device to the host.
|
||||
- Multi-device logic always attempts to save on the (e.g. inputs) data copies between device-agnostic, user-facing inference requests
|
||||
and device-specific 'worker' requests that are being actually scheduled behind the scene.
|
||||
To facilitate the copy savings, it is recommended to run the requests in the order that they were created.
|
||||
|
||||
## Introducing the Multi-Device Plugin (Python)
|
||||
|
||||
@sphinxdirective
|
||||
.. raw:: html
|
||||
|
||||
<div id="switcher-python" class="switcher-anchor">Python</div>
|
||||
@endsphinxdirective
|
||||
|
||||
The Multi-Device plugin automatically assigns inference requests to available computational devices to execute the requests in parallel. By contrast, the Heterogeneous plugin can run different layers on different devices but not in parallel. The potential gains with the Multi-Device plugin are:
|
||||
|
||||
* Improved throughput from using multiple devices (compared to single-device execution)
|
||||
* More consistent performance, since the devices share the inference burden (if one device is too busy, another can take more of the load)
|
||||
|
||||
Note that with Multi-Device the application logic is left unchanged, so you don't need to explicitly compile the model on every device, create and balance the inference requests and so on. From the application point of view, this is just another device that handles the actual machinery. The only thing that is required to leverage performance is to provide the multi-device (and hence the underlying devices) with enough inference requests to process. For example, if you were processing 4 cameras on the CPU (with 4 inference requests), it might be desirable to process more cameras (with more requests in flight) to keep CPU and GPU busy via Multi-Device.
|
||||
|
||||
The setup of Multi-Device can be described in three major steps:
|
||||
|
||||
1. Configure each device (using the conventional [configure devices](supported_plugins/config_properties.md) method)
|
||||
2. Compile the model on the Multi-Device plugin created on top of a (prioritized) list of the configured devices. This is the only change needed in the application.
|
||||
3. As with any other CompiledModel call (resulting from `compile_model`), you create as many requests as needed to saturate the devices.
|
||||
|
||||
These steps are covered below in detail.
|
||||
|
||||
### Defining and Configuring the Multi-Device Plugin
|
||||
|
||||
Following the OpenVINO™ convention of labeling devices, the Multi-Device plugin uses the name "MULTI". The only configuration option for the Multi-Device plugin is a prioritized list of devices to use:
|
||||
|
||||
| Parameter name | Parameter values | Default | Description |
|
||||
| -------------- | ---------------- | --- | --- |
|
||||
| "MULTI_DEVICE_PRIORITIES" | comma-separated device names with no spaces | N/A | Prioritized list of devices |
|
||||
|
||||
You can set the configuration directly as a string, or use the metric key `MULTI_DEVICE_PRIORITIES` from the `multi/multi_device_config.hpp` file, which defines the same string.
|
||||
|
||||
#### The Three Ways to Specify Devices Targets for the MULTI plugin
|
||||
|
||||
* Option 1 - Pass a Prioritized List as a Parameter in ie.load_network()
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [Option_1]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
* Option 2 - Pass a List as a Parameter, and Dynamically Change Priorities during Execution
|
||||
Notice that the priorities of the devices can be changed in real time for the compiled model:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [Option_2]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
* Option 3 - Use Explicit Hints for Controlling Request Numbers Executed by Devices
|
||||
There is a way to specify the number of requests that Multi-Device will internally keep for each device. If the original app was running 4 cameras with 4 inference requests, it might be best to share these 4 requests between 2 devices used in the MULTI. The easiest way is to specify a number of requests for each device using parentheses: “MULTI:CPU(2),GPU(2)” and use the same 4 requests in the app. However, such an explicit configuration is not performance-portable and not recommended. The better way is to configure the individual devices and query the resulting number of requests to be used at the application level. See [Configuring the Individual Devices and Creating the Multi-Device On Top](#configuring-the-individual-devices-and-creating-the-multi-device-on-top).
|
||||
For best performance when using the MULTI execution mode you should consider a few recommendations:
|
||||
- MULTI usually performs best when the fastest device is specified first in the device candidate list.
|
||||
This is particularly important when the request-level parallelism is not sufficient
|
||||
(e.g. the number of requests is not enough to saturate all devices).
|
||||
- Just like with any throughput-oriented execution mode, it is highly recommended to query the optimal number of inference requests
|
||||
directly from the instance of the `ov::compiled_model`. Refer to the code of the previously mentioned `benchmark_app` for more details.
|
||||
- Execution on certain device combinations, for example CPU+GPU, performs better with certain knobs. Refer to the `benchmark_app` code for details. One specific example is disabling GPU driver polling, which in turn requires multiple GPU streams to balance out slower
|
||||
communication of inference completion from the device to the host.
|
||||
- The MULTI logic always attempts to save on copying data between device-agnostic and user-facing inference requests,
|
||||
and device-specific 'worker' requests that are being actually scheduled behind the scene.
|
||||
To facilitate the copy savings, it is recommended to run the requests in the order in which they were created.
|
||||
- While performance of accelerators combines well with MULTI, the CPU+GPU execution may introduce certain performance issues. It is due to the devices sharing some resources, like power or bandwidth. Enabling the GPU throttling hint, which saves a CPU thread for CPU inference, is an example of a recommended solution addressing this issue.
|
||||
|
||||
|
||||
### Enumerating Available Devices
|
||||
The OpenVINO Runtime API features dedicated methods to enumerate devices and their capabilities. See the [Hello Query Device Python Sample](../../samples/python/hello_query_device/README.md). This is example output from the sample (truncated to device names only):
|
||||
|
||||
```sh
|
||||
./hello_query_device
|
||||
Available devices:
|
||||
Device: CPU
|
||||
...
|
||||
Device: GPU.0
|
||||
...
|
||||
Device: GPU.1
|
||||
...
|
||||
Device: HDDL
|
||||
```
|
||||
|
||||
A simple programmatic way to enumerate the devices and use with the multi-device is as follows:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [available_devices_1]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
Beyond the trivial "CPU", "GPU", "HDDL" and so on, when multiple instances of a device are available the names are more qualified. For example, this is how two Intel® Movidius™ Myriad™ X sticks are listed with the hello_query_sample:
|
||||
|
||||
```bash
|
||||
...
|
||||
Device: MYRIAD.1.2-ma2480
|
||||
...
|
||||
Device: MYRIAD.1.4-ma2480
|
||||
```
|
||||
|
||||
So the explicit configuration to use both would be "MULTI:MYRIAD.1.2-ma2480,MYRIAD.1.4-ma2480". Accordingly, the code that loops over all available devices of "MYRIAD" type only is below:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [available_devices_2]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
### Configuring the Individual Devices and Creating the Multi-Device On Top
|
||||
|
||||
It is possible to configure each individual device as usual and then create the "MULTI" device on top:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: Python
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_multi.py
|
||||
:language: python
|
||||
:fragment: [set_property]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
An alternative is to combine all the individual device settings into a single config file and load that, allowing the Multi-Device plugin to parse and apply settings to the right devices. See the code example in the next section.
|
||||
|
||||
Note that while the performance of accelerators works well with Multi-Device, the CPU+GPU execution poses some performance caveats, as these devices share power, bandwidth and other resources. For example it is recommended to enable the GPU throttling hint (which saves another CPU thread for CPU inferencing). See the section below titled Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance.
|
||||
|
||||
|
||||
### Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance
|
||||
|
||||
Every OpenVINO sample that supports the `-d` (which stands for "device") command-line option transparently accepts Multi-Device. The [Benchmark application](../../tools/benchmark_tool/README.md) is the best reference for the optimal usage of Multi-Device. As discussed earlier, you do not need to set up the number of requests, CPU streams or threads because the application provides optimal performance out of the box. Below is an example command to evaluate CPU+GPU performance with the Benchmark application:
|
||||
|
||||
```sh
|
||||
benchmark_app -d MULTI:CPU,GPU -m <model>
|
||||
```
|
||||
|
||||
The Multi-Device plugin supports FP16 IR files. The CPU plugin automatically upconverts it to FP32 and the other devices support it natively. Note that no demos are (yet) fully optimized for Multi-Device, by means of supporting the ov::optimal_number_of_infer_requests property, using the GPU streams/throttling, and so on.
|
||||
|
||||
### Video: MULTI Plugin
|
||||
> **NOTE**: This video is currently available only for C++, but many of the same concepts apply to Python.
|
||||
## See Also
|
||||
[Supported Devices](supported_plugins/Supported_Devices.md)
|
||||
|
||||
@sphinxdirective
|
||||
.. raw:: html
|
||||
@@ -325,5 +163,4 @@ The Multi-Device plugin supports FP16 IR files. The CPU plugin automatically upc
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
### See Also
|
||||
[Supported Devices](supported_plugins/Supported_Devices.md)
|
||||
> **NOTE**: This video is currently available only for C++, but many of the same concepts apply to Python.
|
||||
|
||||
@@ -2,21 +2,22 @@
|
||||
|
||||
Each of OpenVINO's [supported devices](supported_plugins/Device_Plugins.md) offers low-level performance settings. Tweaking this detailed configuration requires deep architecture understanding.
|
||||
Also, while the performance may be optimal for the specific combination of the device and the inferred model, the resulting configuration is not necessarily optimal for another device or model.
|
||||
The OpenVINO performance hints are the new way to configure the performance with _portability_ in mind. As the hints are supported by every OpenVINO device, this is a future-proof solution that is fully compatible with the [automatic device selection](./auto_device_selection.md).
|
||||
The OpenVINO performance hints are the new way to configure the performance with _portability_ in mind.
|
||||
|
||||
The hints also "reverse" the direction of the configuration in the right fashion: rather than map the application needs to the low-level performance settings, and keep an associated application logic to configure each possible device separately, the idea is to express a target scenario with a single config key and let the *device* to configure itself in response.
|
||||
As the hints are supported by every OpenVINO device, this is a completely portable and future-proof solution that is fully compatible with the [automatic device selection](./auto_device_selection.md).
|
||||
|
||||
Previously, a certain level of automatic configuration was coming from the _default_ values of the parameters. For example, the number of CPU streams was deduced from the number of CPU cores, when `ov::streams::AUTO` (`CPU_THROUGHPUT_AUTO` in the pre-OpenVINO 2.0 parlance) is set. However, the resulting number of streams didn't account for actual compute requirements of the model to be inferred.
|
||||
The hints, in contrast, respect the actual model, so the parameters for optimal throughput are calculated for each model individually (based on its compute versus memory bandwidth requirements and capabilities of the device).
|
||||
Previously, a certain level of automatic configuration was coming from the _default_ values of the parameters. For example, number of the CPU streams was deduced from the number of CPU cores, when the `ov::streams::AUTO` (`CPU_THROUGHPUT_AUTO` in the pre-OpenVINO 2.0 parlance) is set. However, the resulting number of streams didn't account for actual compute requirements of the model to be inferred.
|
||||
The hints, in contrast, respect the actual model, so the parameters for optimal throughput are calculated for each model individually (based on its compute versus memory bandwidth requirements and capabilities of the device).
|
||||
|
||||
## Performance Hints: Latency and Throughput
|
||||
As discussed in the [Optimization Guide](../optimization_guide/dldt_optimization_guide.md) there are a few different metrics associated with inference speed.
|
||||
Throughput and latency are some of the most widely used metrics that measure the overall performance of an application.
|
||||
|
||||
This is why, to ease the configuration of the device, OpenVINO offers two dedicated hints, namely `ov::hint::PerformanceMode::THROUGHPUT` and `ov::hint::PerformanceMode::LATENCY`.
|
||||
This is why, to ease the configuration of the device, the OpenVINO already offers two dedicated hints, namely `ov::hint::PerformanceMode::THROUGHPUT` and `ov::hint::PerformanceMode::LATENCY`.
|
||||
A special `ov::hint::PerformanceMode::UNDEFINED` acts the same as specifying no hint.
|
||||
|
||||
Please also see the last section in this document on conducting performance measurements with the `benchmark_app`.
|
||||
Please also see the last section in the document on conducting performance measurements with the `benchmark_app`.
|
||||
|
||||
Note that a typical model may take significantly more time to load with `ov::hint::PerformanceMode::THROUGHPUT` and consume much more memory, compared with `ov::hint::PerformanceMode::LATENCY`.
|
||||
|
||||
|
||||
@@ -53,4 +53,53 @@ The table below demonstrates support of key features by OpenVINO device plugins.
|
||||
| [Stateful models](../network_state_intro.md) | Yes | No | Yes | No |
|
||||
| [Extensibility](@ref openvino_docs_Extensibility_UG_Intro) | Yes | Yes | No | No |
|
||||
|
||||
For more details on plugin-specific feature limitations, refer to the corresponding plugin pages.
|
||||
For more details on plugin-specific feature limitations, see the corresponding plugin pages.
|
||||
|
||||
|
||||
|
||||
## Enumerating Available Devices
|
||||
The OpenVINO Runtime API features dedicated methods of enumerating devices and their capabilities. See the [Hello Query Device C++ Sample](../../../samples/cpp/hello_query_device/README.md). This is an example output from the sample (truncated to device names only):
|
||||
|
||||
```sh
|
||||
./hello_query_device
|
||||
Available devices:
|
||||
Device: CPU
|
||||
...
|
||||
Device: GPU.0
|
||||
...
|
||||
Device: GPU.1
|
||||
...
|
||||
Device: HDDL
|
||||
```
|
||||
|
||||
A simple programmatic way to enumerate the devices and use with the multi-device is as follows:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI2.cpp
|
||||
:language: cpp
|
||||
:fragment: [part2]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
Beyond the typical "CPU", "GPU", "HDDL", and so on, when multiple instances of a device are available, the names are more qualified. For example, this is how two Intel® Movidius™ Myriad™ X sticks are listed with the hello_query_sample:
|
||||
```
|
||||
...
|
||||
Device: MYRIAD.1.2-ma2480
|
||||
...
|
||||
Device: MYRIAD.1.4-ma2480
|
||||
```
|
||||
|
||||
So, the explicit configuration to use both would be "MULTI:MYRIAD.1.2-ma2480,MYRIAD.1.4-ma2480". Accordingly, the code that loops over all available devices of the "MYRIAD" type only is as follows:
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. tab:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/MULTI3.cpp
|
||||
:language: cpp
|
||||
:fragment: [part3]
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
4
docs/_static/images/dataset.png
vendored
4
docs/_static/images/dataset.png
vendored
@@ -1,3 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:f5795ad0828f75cb660bea786b1aaa604ce442d7de23e461626212dc7c6cb139
|
||||
size 254
|
||||
oid sha256:b2c952f2699d08c7e0ba3fea8e3b6a13de2b2c3eb6055500cf203ade836ab47f
|
||||
size 82957
|
||||
|
||||
4
docs/_static/images/inputs.png
vendored
4
docs/_static/images/inputs.png
vendored
@@ -1,3 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:9c2d33cebe15397ef651521173832d3fed1733e465ec63f31db27c97329e9464
|
||||
size 253
|
||||
oid sha256:7f0b6892f759354381aba3a2e3c54dc0e6214cee244bee508fd83deb27e35c83
|
||||
size 40555
|
||||
|
||||
6
docs/_static/js/gsearch.js
vendored
6
docs/_static/js/gsearch.js
vendored
@@ -265,7 +265,7 @@ function addPagination(query, selectedVersion, currentPage, previousPage, nextPa
|
||||
var $previous = $('<a>', {
|
||||
'text': 'Previous',
|
||||
'class': 'gs-cursor-page-previous',
|
||||
'href': 'search.html?query=' + query + '&page=' + previousPage + '&version=' + selectedVersion
|
||||
'href': 'search.html?page=' + previousPage + '&query=' + query + '&version=' + selectedVersion
|
||||
});
|
||||
$cursor.append($previous);
|
||||
}
|
||||
@@ -274,7 +274,7 @@ function addPagination(query, selectedVersion, currentPage, previousPage, nextPa
|
||||
for(var i = 0; i < pageList.length; i++) {
|
||||
$page = $('<a>', {
|
||||
'class': 'gs-cursor-page',
|
||||
'href': 'search.html?query=' + query + '&page=' + pageList[i] + '&version=' + selectedVersion,
|
||||
'href': 'search.html?page=' + pageList[i] + '&query=' + query + '&version=' + selectedVersion,
|
||||
'text': pageList[i]
|
||||
});
|
||||
if (currentPage === pageList[i]) {
|
||||
@@ -287,7 +287,7 @@ function addPagination(query, selectedVersion, currentPage, previousPage, nextPa
|
||||
var $next = $('<a>', {
|
||||
'text': 'Next',
|
||||
'class': 'gs-cursor-page-next',
|
||||
'href': 'search.html?query=' + query + '&page=' + nextPage + '&version=' + selectedVersion
|
||||
'href': 'search.html?page=' + nextPage + '&query=' + query + '&version=' + selectedVersion
|
||||
});
|
||||
$cursor.append($next);
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
The [Intel® Distribution of OpenVINO™ toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html) helps accelerate deep learning inference across a variety of Intel® processors and accelerators.
|
||||
|
||||
The benchmarks below demonstrate high performance gains on several public neural networks on multiple Intel® CPUs, GPUs and VPUs covering a broad performance range. Use this data to help you decide which hardware is best for your applications and solutions, or to plan your AI workload on the Intel computing already included in your solutions.
|
||||
The benchmarks below demonstrate high performance gains on several public neural networks on multiple Intel® CPUs, GPUs and VPUs covering a broad performance range. Use this data to help you decide which hardware is best for your applications and solutions, or to plan your AI workload on the Intel computing already included in your solutions. To learn how to benchmark your own model in OpenVINO, visit the [benchmark application documentation](../../samples/cpp/benchmark_app/README.md).
|
||||
|
||||
Use the links below to review the benchmarking results for each alternative:
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
This benchmark setup includes a single machine on which both the benchmark application and the OpenVINO™ installation reside.
|
||||
|
||||
The benchmark application loads the OpenVINO™ Runtime and executes inferences on the specified hardware (CPU, GPU or VPU). The benchmark application measures the time spent on actual inferencing (excluding any pre or post processing) and then reports on the inferences per second (or Frames Per Second). For more information on the benchmark application, please also refer to the entry 5 of the [FAQ section](performance_benchmarks_faq.md).
|
||||
The benchmark application loads the OpenVINO™ Runtime and executes inferences on the specified hardware (CPU, GPU or VPU). The benchmark application measures the time spent on actual inferencing (excluding any pre or post processing) and then reports on the inferences per second (or Frames Per Second). To learn how to benchmark your own model in OpenVINO, visit the [benchmark application documentation](../../samples/cpp/benchmark_app/README.md).
|
||||
|
||||
Measuring inference performance involves many variables and is extremely use-case and application dependent. We use the below four parameters for measurements, which are key elements to consider for a successful deep learning inference application:
|
||||
|
||||
@@ -355,4 +355,4 @@ Testing by Intel done on: see test date for each HW platform below.
|
||||
| BIOS Release | September 21, 2018 | September 21, 2018 |
|
||||
| Test Date | March 17, 2022 | March 17, 2022 |
|
||||
|
||||
For more detailed configuration descriptions, see [Configuration Details](https://docs.openvino.ai/resources/benchmark_files/system_configurations_2022.1.html).
|
||||
For more detailed configuration descriptions, see [Configuration Details](https://docs.openvino.ai/resources/benchmark_files/system_configurations_2022.1.html).
|
||||
|
||||
@@ -95,13 +95,13 @@ repositories = {
|
||||
'openvino': {
|
||||
'github_user': 'openvinotoolkit',
|
||||
'github_repo': 'openvino',
|
||||
'github_version': 'master',
|
||||
'github_version': 'releases/2022/1',
|
||||
'host_url': 'https://github.com'
|
||||
},
|
||||
'pot': {
|
||||
'github_user': 'openvinotoolkit',
|
||||
'github_repo': 'openvino',
|
||||
'github_version': 'master',
|
||||
'github_version': 'releases/2022/1',
|
||||
'host_url': 'https://github.com'
|
||||
},
|
||||
'ote': {
|
||||
@@ -113,13 +113,13 @@ repositories = {
|
||||
'open_model_zoo': {
|
||||
'github_user': 'openvinotoolkit',
|
||||
'github_repo': 'open_model_zoo',
|
||||
'github_version': 'master',
|
||||
'github_version': 'releases/2022/1',
|
||||
'host_url': 'https://github.com'
|
||||
},
|
||||
'ovms': {
|
||||
'github_user': 'openvinotoolkit',
|
||||
'github_repo': 'model_server',
|
||||
'github_version': 'main',
|
||||
'github_version': 'develop',
|
||||
'host_url': 'https://github.com'
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
| LPR | License-Plate Recognition |
|
||||
| LRN | Local Response Normalization |
|
||||
| mAP | Mean Average Precision |
|
||||
| Intel(R) OneDNN | Intel(R) OneAPI Deep Neural Network Library |
|
||||
| Intel(R) MKL-DNN | Intel(R) Math Kernel Library Deep Neural Networks |
|
||||
| MO | Model Optimizer |
|
||||
| MVN | Mean Variance Normalization |
|
||||
| NCDHW | Number of images, Channels, Depth, Height, Width |
|
||||
|
||||
1
docs/img/badge_logo.svg
Normal file
1
docs/img/badge_logo.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="109" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="109" height="20" fill="#fff" rx="3"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h64v20H0z"/><path fill="#579aca" d="M64 0h45v20H64z"/><path fill="url(#b)" d="M0 0h109v20H0z"/></g><g fill="#fff" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110" text-anchor="middle"><image width="14" height="14" x="5" y="3" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz
3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC"/><text x="415" y="150" fill="#010101" fill-opacity=".3" textLength="370" transform="scale(.1)">launch</text><text x="415" y="140" textLength="370" transform="scale(.1)">launch</text><text x="855" y="150" fill="#010101" fill-opacity=".3" textLength="350" transform="scale(.1)">binder</text><text x="855" y="140" textLength="350" transform="scale(.1)">binder</text></g></svg>
|
||||
|
After Width: | Height: | Size: 3.3 KiB |
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:61e237b3ced7eaa0cf1f8c2688753867b172712925068a4a47e07b5c71e48bdf
|
||||
size 89866
|
||||
@@ -6,8 +6,6 @@
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
This page introduces additional configurations for Intel® Neural Compute Stick 2 with Intel® Distribution of OpenVINO™ toolkit on Linux, Raspbian OS and macOS.
|
||||
|
||||
## Linux
|
||||
|
||||
Once you have your Intel® Distribution of OpenVINO™ toolkit installed, follow the steps to be able to work on NCS2:
|
||||
@@ -85,9 +83,9 @@ Proceed to the [Get Started Guide](@ref get_started) section to learn the basic
|
||||
|
||||
These steps are required only if you want to perform inference on Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X VPU.
|
||||
|
||||
To perform inference on Intel® Neural Compute Stick 2, the `libusb` library is required. You can build it from the [source code](https://github.com/libusb/libusb) or install using the macOS package manager you prefer: [Homebrew](https://brew.sh/), [MacPorts](https://www.macports.org/) or other.
|
||||
To perform inference on Intel® Neural Compute Stick 2, the `libusb` library is required. You can build it from the [source code](https://github.com/libusb/libusb) or install using the macOS package manager you prefer: [Homebrew*](https://brew.sh/), [MacPorts*](https://www.macports.org/) or other.
|
||||
|
||||
For example, to install the `libusb` library using Homebrew, use the following command:
|
||||
For example, to install the `libusb` library using Homebrew\*, use the following command:
|
||||
```sh
|
||||
brew install libusb
|
||||
```
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
@endsphinxdirective
|
||||
|
||||
|
||||
The steps in this guide are only required if you want to perform inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs with OpenVINO™ on Linux or Windows.
|
||||
The steps in this guide are only required if you want to perform inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs.
|
||||
|
||||
For troubleshooting issues, please see the [Troubleshooting Guide](troubleshooting.md) for more information.
|
||||
|
||||
@@ -21,7 +21,7 @@ For troubleshooting issues, please see the [Troubleshooting Guide](troubleshooti
|
||||
|
||||
For Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the following additional installation steps are required.
|
||||
|
||||
> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default install directory, replace `/opt/intel` with the directory in which you installed the software.
|
||||
> **NOTE**: If you installed the Intel® Distribution of OpenVINO™ to the non-default install directory, replace `/opt/intel` with the directory in which you installed the software.
|
||||
|
||||
1. Set the environment variables:
|
||||
```sh
|
||||
@@ -52,13 +52,14 @@ For advanced configuration steps for your **IEI Mustang-V100-MX8-R10** accelerat
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
|
||||
## Windows
|
||||
|
||||
To enable inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the following additional installation steps are required:
|
||||
|
||||
1. Download and install <a href="https://www.microsoft.com/en-us/download/details.aspx?id=48145">Visual C++ Redistributable for Visual Studio 2017</a>
|
||||
2. Check with a support engineer if your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs card requires SMBUS connection to PCIe slot (most unlikely). Install the SMBUS driver only if confirmed (by default, it's not required):
|
||||
1. Go to the `<INSTALL_DIR>\runtime\3rdparty\hddl\drivers\SMBusDriver` directory, where `<INSTALL_DIR>` is the directory in which the Intel® Distribution of OpenVINO™ toolkit is installed.
|
||||
1. Go to the `<INSTALL_DIR>\runtime\3rdparty\hddl\drivers\SMBusDriver` directory, where `<INSTALL_DIR>` is the directory in which the Intel Distribution of OpenVINO toolkit is installed.
|
||||
2. Right click on the `hddlsmbus.inf` file and choose **Install** from the pop up menu.
|
||||
|
||||
You are done installing your device driver and are ready to use your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs.
|
||||
|
||||
@@ -8,7 +8,7 @@ Intel® Distribution of OpenVINO™ toolkit is a comprehensive toolkit for devel
|
||||
|
||||
## Installation Options
|
||||
|
||||
Since the 2022.1 release, the OpenVINO installation package has been distributed in two parts: OpenVINO Runtime and OpenVINO Development Tools. See the following instructions to choose your installation process.
|
||||
Since the 2022.1 release, the OpenVINO installation package has been separated into two parts: OpenVINO Runtime and OpenVINO Development Tools. See the following instructions to choose your installation process.
|
||||
### Decide What to Install
|
||||
|
||||
**If you have already finished your model development and want to deploy your applications on various devices, [install OpenVINO Runtime](installing-openvino-runtime.md)**, which contains a set of libraries for easy inference integration with your products.
|
||||
|
||||
@@ -16,10 +16,10 @@ The following Git repositories are required to build a Yocto image:
|
||||
|
||||
Clone these Git repositories to your host machine:
|
||||
```sh
|
||||
git clone https://git.yoctoproject.org/git/poky --branch honister
|
||||
git clone https://git.yoctoproject.org/git/meta-intel --branch honister
|
||||
git clone https://git.openembedded.org/meta-openembedded --branch honister
|
||||
git clone https://github.com/kraj/meta-clang.git --branch honister
|
||||
git clone https://git.yoctoproject.org/git/poky --branch kirkstone
|
||||
git clone https://git.yoctoproject.org/git/meta-intel --branch kirkstone
|
||||
git clone https://git.openembedded.org/meta-openembedded --branch kirkstone
|
||||
git clone https://github.com/kraj/meta-clang.git --branch kirkstone-clang12
|
||||
```
|
||||
|
||||
### Set up BitBake Layers
|
||||
@@ -88,7 +88,6 @@ openvino-inference-engine-dev
|
||||
openvino-inference-engine-python3
|
||||
openvino-inference-engine-samples
|
||||
openvino-inference-engine-src
|
||||
openvino-inference-engine-staticdev
|
||||
openvino-inference-engine-vpu-firmware
|
||||
openvino-model-optimizer
|
||||
openvino-model-optimizer-dbg
|
||||
|
||||
@@ -48,9 +48,14 @@ The complete list of supported hardware is available in the [Release Notes](http
|
||||
|
||||
|
||||
To list available OpenVINO packages, use the following command:
|
||||
```
|
||||
yum list 'openvino*'
|
||||
```
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
yum list 'openvino*'
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
### Step 2: Install OpenVINO Runtime Using the YUM Package Manager
|
||||
|
||||
@@ -82,9 +87,14 @@ sudo yum install openvino-2022.1.0
|
||||
#### To Check for Installed Packages and Version
|
||||
|
||||
Run the following command:
|
||||
```sh
|
||||
yum list installed 'openvino*'
|
||||
```
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
yum list installed 'openvino*'
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
#### To Uninstall the Latest Version
|
||||
|
||||
|
||||
@@ -4,15 +4,15 @@
|
||||
|
||||
|
||||
The IEI Mustang-V100-MX8 is an OEM version of the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs.
|
||||
This guide assumes you have installed the [Mustang-V100-MX8](https://download.ieiworld.com/) and the [Intel® Distribution of OpenVINO™ toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html).
|
||||
This guide assumes you have installed the [Mustang-V100-MX8](https://download.ieiworld.com/) and the [Intel® Distribution of OpenVINO™ Toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html).
|
||||
|
||||
Instructions in this guide for configuring your accelerator include:
|
||||
1. Installing the required IEI BSL reset software
|
||||
1. Installing the required IEI\* BSL reset software
|
||||
2. Configuration settings for the `hddldaemon` service
|
||||
|
||||
> **NOTE**: This guide does not apply to Uzel cards.
|
||||
> **NOTE**: This guide does not apply to Uzel\* cards.
|
||||
|
||||
## Installing IEI Reset Software
|
||||
## IEI Reset Software Installation
|
||||
|
||||
Using the IEI Mustang-V100-MX8 requires downloading and installing the most current software for your system.
|
||||
|
||||
@@ -21,14 +21,14 @@ Search for **Mustang-V100-MX8**.
|
||||
|
||||
Download the appropriate software for your system, decompress the downloaded archive, enter the newly created directory, and run the install script:
|
||||
|
||||
On **Linux**:
|
||||
On **Linux**\*:
|
||||
- Run the `install.sh script` with `sudo`, or as `root`.
|
||||
|
||||
On **Windows**, do one of the following:<br>
|
||||
On **Windows**\*, do one of the following:<br>
|
||||
- **GUI**: Double-click `install.bat`
|
||||
- **CLI**: Open a console with administrator privileges, cd into the directory, and run `install.bat`.
|
||||
|
||||
## Configuring Mustang-V100-MX8 Service
|
||||
## Mustang-V100-MX8 Service Configuration
|
||||
|
||||
The `hddldaemon` is a system service, a binary executable that is run to manage the computational workload on the board. It is a required abstraction layer that handles inference, graphics processing, and any type of computation that should be run on the video processing units (VPUs). Depending on the board configuration, there can be 8 or 16 VPUs.
|
||||
|
||||
@@ -58,7 +58,7 @@ Below are some possible configuration options.
|
||||
|
||||
> **NOTE**: After changing a configuration file, the `hddldaemon` must be restarted.
|
||||
|
||||
#### Recommended Settings
|
||||
### Recommended Settings
|
||||
|
||||
`device_snapshot_mode`
|
||||
Changes the output of the `hddldaemon` to display a table with individual VPU statistics.
|
||||
@@ -124,7 +124,7 @@ This setting reports the total FPS for the dispatching hddl_service (which will
|
||||
(default: `"true"`)
|
||||
|
||||
|
||||
## Additional Resources
|
||||
## Additional resources
|
||||
|
||||
- [Intel Distribution of OpenVINO Toolkit home page](https://software.intel.com/en-us/openvino-toolkit)
|
||||
- [Troubleshooting Guide](troubleshooting.md)
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
|
||||
> **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software.
|
||||
@@ -1,48 +0,0 @@
|
||||
# SoftSign {#openvino_docs_ops_activation_SoftSign_9}
|
||||
|
||||
**Versioned name**: *SoftSign-9*
|
||||
|
||||
**Category**: *Activation function*
|
||||
|
||||
**Short description**: *SoftSign* performs element-wise activation on a given input tensor.
|
||||
|
||||
**Detailed description**:
|
||||
|
||||
*SoftSign* operation is introduced in this [article](https://arxiv.org/abs/2010.09458).
|
||||
|
||||
*SoftSign Activation Function* is a neuron activation function based on the mathematical function:
|
||||
|
||||
\f[
|
||||
SoftSign(x) = \frac{x}{1+|x|}
|
||||
\f]
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `data`. Input tensor of type *T*
|
||||
|
||||
**Outputs**:
|
||||
|
||||
* **1**: The resulting tensor of the same shape and type as the input tensor.
|
||||
|
||||
**Types**:
|
||||
|
||||
* **T**: Arbitrary supported floating-point type.
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<layer ... type="SoftSign">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>256</dim>
|
||||
<dim>56</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1">
|
||||
<dim>256</dim>
|
||||
<dim>56</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
@@ -18,7 +18,7 @@ The result of division by zero is undefined.
|
||||
|
||||
**Attributes**:
|
||||
|
||||
* *m_pythondiv*
|
||||
* *pythondiv*
|
||||
|
||||
    * **Description**: specifies if floor division should be calculated. This attribute is supported only for integer data types.
|
||||
* **Range of values**:
|
||||
|
||||
@@ -1,147 +0,0 @@
|
||||
# GenerateProposals {#openvino_docs_ops_detection_GenerateProposals_9}
|
||||
|
||||
**Versioned name**: *GenerateProposals-9*
|
||||
|
||||
**Category**: *Object detection*
|
||||
|
||||
**Short description**: The *GenerateProposals* operation proposes ROIs and their scores
|
||||
based on input data for each image in the batch.
|
||||
|
||||
**Detailed description**: The operation performs the following steps for each image:
|
||||
|
||||
1. Transposes and reshapes predicted bounding boxes deltas and scores to get them into the same dimension order as the
|
||||
anchors.
|
||||
2. Transforms anchors and deltas into proposal bboxes and clips proposal bboxes to an image. The attribute *normalized*
|
||||
indicates whether the proposal bboxes are normalized or not.
|
||||
3. Sorts all `(proposal, score)` pairs by score from highest to lowest; order of pairs with equal scores is undefined.
|
||||
4. Takes top *pre_nms_count* proposals, if total number of proposals is less than *pre_nms_count* takes all proposals.
|
||||
5. Removes predicted boxes with either height or width < *min_size*.
|
||||
6. Applies non-maximum suppression with *adaptive_nms_threshold*. The initial value of *adaptive_nms_threshold* is
|
||||
*nms_threshold*. If `nms_eta < 1` and `adaptive_threshold > 0.5`, update `adaptive_threshold *= nms_eta`.
|
||||
7. Takes and returns top proposals after nms operation. The number of returned proposals in each image is dynamic and is specified by output port 3 `rpnroisnum`. And the max number of proposals in each image is specified by attribute *post_nms_count*.
|
||||
|
||||
All proposals of the whole batch are concatenated image by image, and distinguishable through outputs.
|
||||
|
||||
**Attributes**:
|
||||
|
||||
* *min_size*
|
||||
|
||||
* **Description**: The *min_size* attribute specifies minimum box width and height.
|
||||
* **Range of values**: non-negative floating-point number
|
||||
* **Type**: float
|
||||
* **Required**: *yes*
|
||||
|
||||
* *nms_threshold*
|
||||
|
||||
* **Description**: The *nms_threshold* attribute specifies threshold to be used in the NMS stage.
|
||||
* **Range of values**: non-negative floating-point number
|
||||
* **Type**: float
|
||||
* **Required**: *yes*
|
||||
|
||||
* *pre_nms_count*
|
||||
|
||||
* **Description**: The *pre_nms_count* attribute specifies number of top-n proposals before NMS.
|
||||
* **Range of values**: non-negative integer number
|
||||
* **Type**: int
|
||||
* **Required**: *yes*
|
||||
|
||||
* *post_nms_count*
|
||||
|
||||
* **Description**: The *post_nms_count* attribute specifies number of top-n proposals after NMS.
|
||||
* **Range of values**: non-negative integer number
|
||||
* **Type**: int
|
||||
* **Required**: *yes*
|
||||
|
||||
* *normalized*
|
||||
|
||||
* **Description**: *normalized* is a flag that indicates whether proposal bboxes are normalized or not.
|
||||
* **Range of values**: true or false
|
||||
* *true* - the bbox coordinates are normalized.
|
||||
* *false* - the bbox coordinates are not normalized.
|
||||
* **Type**: boolean
|
||||
* **Default value**: True
|
||||
* **Required**: *no*
|
||||
|
||||
* *nms_eta*
|
||||
|
||||
* **Description**: eta parameter for adaptive NMS.
|
||||
    * **Range of values**: a floating-point number in the closed range `[0, 1.0]`.
|
||||
* **Type**: float
|
||||
* **Default value**: `1.0`
|
||||
* **Required**: *no*
|
||||
|
||||
* *roi_num_type*
|
||||
|
||||
* **Description**: the type of element of output 3 `rpnroisnum`.
|
||||
* **Range of values**: i32, i64
|
||||
* **Type**: string
|
||||
* **Default value**: `i64`
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `im_info` - tensor of type *T* and shape `[num_batches, 3]` or `[num_batches, 4]` providing input image info. The image info is laid out as `[image_height, image_width, scale_height_and_width]` or as `[image_height, image_width, scale_height, scale_width]`. **Required.**
|
||||
|
||||
* **2**: `anchors` - tensor of type *T* with shape `[height, width, number_of_anchors, 4]` providing anchors. Each anchor is laid out as `[xmin, ymin, xmax, ymax]`. **Required.**
|
||||
|
||||
* **3**: `boxesdeltas` - tensor of type *T* with shape `[num_batches, number_of_anchors * 4, height, width]` providing deltas for anchors. The delta consists of 4 element tuples with layout `[dx, dy, log(dw), log(dh)]`. **Required.**
|
||||
|
||||
* **4**: `scores` - tensor of type *T* with shape `[num_batches, number_of_anchors, height, width]` providing proposals scores. **Required.**
|
||||
|
||||
The `height` and `width` from inputs `anchors`, `boxesdeltas` and `scores` are the height and width of feature maps.
|
||||
|
||||
**Outputs**
|
||||
|
||||
* **1**: `rpnrois` - tensor of type *T* with shape `[num_rois, 4]` providing proposed ROIs. The proposals are laid out as `[xmin, ymin, xmax, ymax]`. The `num_rois` means the total proposals number of all the images in one batch. `num_rois` is a dynamic dimension.
|
||||
|
||||
* **2**: `rpnscores` - tensor of type *T* with shape `[num_rois]` providing proposed ROIs scores.
|
||||
|
||||
* **3**: `rpnroisnum` - tensor of type *roi_num_type* with shape `[num_batches]` providing the number of proposed ROIs in each image.
|
||||
|
||||
**Types**
|
||||
|
||||
* *T*: any supported floating-point type.
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<layer ... type="GenerateProposals" version="opset9">
|
||||
<data min_size="0.0" nms_threshold="0.699999988079071" post_nms_count="1000" pre_nms_count="1000" roi_num_type="i32"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>8</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>50</dim>
|
||||
<dim>84</dim>
|
||||
<dim>3</dim>
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>8</dim>
|
||||
<dim>12</dim>
|
||||
<dim>50</dim>
|
||||
<dim>84</dim>
|
||||
</port>
|
||||
<port id="3">
|
||||
<dim>8</dim>
|
||||
<dim>3</dim>
|
||||
<dim>50</dim>
|
||||
<dim>84</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="4" precision="FP32">
|
||||
<dim>-1</dim>
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
<port id="5" precision="FP32">
|
||||
<dim>-1</dim>
|
||||
</port>
|
||||
<port id="6" precision="I32">
|
||||
<dim>8</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
@@ -1,119 +0,0 @@
|
||||
# ROIAlign {#openvino_docs_ops_detection_ROIAlign_9}
|
||||
|
||||
**Versioned name**: *ROIAlign-9*
|
||||
|
||||
**Category**: *Object detection*
|
||||
|
||||
**Short description**: *ROIAlign* is a *pooling layer* used over feature maps of non-uniform input sizes and outputs a feature map of a fixed size.
|
||||
|
||||
**Detailed description**: [Reference](https://arxiv.org/abs/1703.06870).
|
||||
|
||||
*ROIAlign* performs the following for each Region of Interest (ROI) for each input feature map:
|
||||
1. Multiply box coordinates with *spatial_scale* to produce box coordinates relative to the input feature map size based on *aligned_mode* attribute.
|
||||
2. Divide the box into bins according to the *sampling_ratio* attribute.
|
||||
3. Apply bilinear interpolation with 4 points in each bin and apply maximum or average pooling based on *mode* attribute to produce output feature map element.
|
||||
|
||||
**Attributes**
|
||||
|
||||
* *pooled_h*
|
||||
|
||||
* **Description**: *pooled_h* is the height of the ROI output feature map.
|
||||
* **Range of values**: a positive integer
|
||||
* **Type**: `int`
|
||||
* **Required**: *yes*
|
||||
|
||||
* *pooled_w*
|
||||
|
||||
* **Description**: *pooled_w* is the width of the ROI output feature map.
|
||||
* **Range of values**: a positive integer
|
||||
* **Type**: `int`
|
||||
* **Required**: *yes*
|
||||
|
||||
* *sampling_ratio*
|
||||
|
||||
* **Description**: *sampling_ratio* is the number of bins over height and width to use to calculate each output feature map element. If the value
|
||||
  is equal to 0, an adaptive number of elements over height and width is used: `ceil(roi_height / pooled_h)` and `ceil(roi_width / pooled_w)` respectively.
|
||||
* **Range of values**: a non-negative integer
|
||||
* **Type**: `int`
|
||||
* **Required**: *yes*
|
||||
|
||||
* *spatial_scale*
|
||||
|
||||
* **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling.
|
||||
* **Range of values**: a positive floating-point number
|
||||
* **Type**: `float`
|
||||
* **Required**: *yes*
|
||||
|
||||
* *mode*
|
||||
|
||||
* **Description**: *mode* specifies a method to perform pooling to produce output feature map elements.
|
||||
* **Range of values**:
|
||||
* *max* - maximum pooling
|
||||
* *avg* - average pooling
|
||||
* **Type**: string
|
||||
* **Required**: *yes*
|
||||
|
||||
* *aligned_mode*
|
||||
|
||||
* **Description**: *aligned_mode* specifies how to transform the coordinate in original tensor to the resized tensor.
|
||||
* **Range of values**: name of the transformation mode in string format (here spatial_scale is resized_shape[x] / original_shape[x], resized_shape[x] is the shape of resized tensor in axis x, original_shape[x] is the shape of original tensor in axis x and x_original is a coordinate in axis x, for any axis x from the input axes):
|
||||
* *asymmetric* - the coordinate in the resized tensor axis x is calculated according to the formula x_original * spatial_scale
|
||||
* *tf_half_pixel_for_nn* - the coordinate in the resized tensor axis x is x_original * spatial_scale - 0.5
|
||||
* *half_pixel* - the coordinate in the resized tensor axis x is calculated as ((x_original + 0.5) * spatial_scale) - 0.5
|
||||
* **Type**: string
|
||||
* **Default value**: asymmetric
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: 4D input tensor of shape `[N, C, H, W]` with feature maps of type *T*. **Required.**
|
||||
|
||||
* **2**: 2D input tensor of shape `[NUM_ROIS, 4]` describing box consisting of 4 element tuples: `[x_1, y_1, x_2, y_2]` in relative coordinates of type *T*.
|
||||
The box height and width are calculated the following way:
|
||||
* If *aligned_mode* equals *asymmetric*: `roi_width = max(spatial_scale * (x_2 - x_1), 1.0)`, `roi_height = max(spatial_scale * (y_2 - y_1), 1.0)`, so the malformed boxes are expressed as a box of size `1 x 1`.
|
||||
* else: `roi_width = spatial_scale * (x_2 - x_1)`, `roi_height = spatial_scale * (y_2 - y_1)`.
|
||||
* **Required.**
|
||||
|
||||
* **3**: 1D input tensor of shape `[NUM_ROIS]` with batch indices of type *IND_T*. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
* **1**: 4D output tensor of shape `[NUM_ROIS, C, pooled_h, pooled_w]` with feature maps of type *T*.
|
||||
|
||||
**Types**
|
||||
|
||||
* *T*: any supported floating-point type.
|
||||
|
||||
* *IND_T*: any supported integer type.
|
||||
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<layer ... type="ROIAlign" ... >
|
||||
<data pooled_h="6" pooled_w="6" spatial_scale="16.0" sampling_ratio="2" mode="avg" aligned_mode="half_pixel"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>7</dim>
|
||||
<dim>256</dim>
|
||||
<dim>200</dim>
|
||||
<dim>200</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1000</dim>
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>1000</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="3" precision="FP32">
|
||||
<dim>1000</dim>
|
||||
<dim>256</dim>
|
||||
<dim>6</dim>
|
||||
<dim>6</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
@@ -1,127 +0,0 @@
|
||||
## Eye <a name="Eye"></a> {#openvino_docs_ops_generation_Eye_9}
|
||||
|
||||
**Versioned name**: *Eye-9*
|
||||
|
||||
**Category**: *Generation*
|
||||
|
||||
**Short description**: *Eye* operation generates shift matrix or a batch of matrices.
|
||||
|
||||
**Detailed description**:
|
||||
|
||||
*Eye* operation generates an identity matrix or a batch of matrices with ones on the diagonal and zeros everywhere else. The index of the diagonal to be populated with ones is given by `diagonal_index`: `output[..., i, i + diagonal_index] = 1`.
|
||||
|
||||
|
||||
Example 1. *Eye* output with `output_type` = `i32`:
|
||||
|
||||
```
|
||||
num_rows = 3
|
||||
|
||||
num_columns = 4
|
||||
|
||||
diagonal_index = 2
|
||||
|
||||
output = [[0 0 1 0]
|
||||
[0 0 0 1]
|
||||
[0 0 0 0]]
|
||||
```
|
||||
|
||||
Example 2. *Eye* output with `output_type` = `i32`:
|
||||
|
||||
```
|
||||
num_rows = 3
|
||||
|
||||
num_columns = 4
|
||||
|
||||
diagonal_index = -1
|
||||
|
||||
output = [[0 0 0 0]
|
||||
[1 0 0 0]
|
||||
[0 1 0 0]]
|
||||
```
|
||||
|
||||
Example 3. *Eye* output with `output_type` = `f16`:
|
||||
|
||||
```
|
||||
num_rows = 2
|
||||
|
||||
diagonal_index = 5
|
||||
|
||||
batch_shape = [1, 2]
|
||||
|
||||
output = [[[[0. 0.]
|
||||
[0. 0.]]
|
||||
[[0. 0.]
|
||||
[0. 0.]]]]
|
||||
```
|
||||
|
||||
**Attributes**:
|
||||
|
||||
* *output_type*
|
||||
|
||||
* **Description**: the type of the output
|
||||
* **Range of values**: any numeric type
|
||||
* **Type**: string
|
||||
* **Required**: *Yes*
|
||||
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `num_rows` - scalar or 1D tensor with 1 non-negative element of type *T_NUM* describing the number of rows in matrix. **Required.**
|
||||
|
||||
* **2**: `num_columns` - scalar or 1D tensor with 1 non-negative element of type *T_NUM* describing the number of columns in matrix. **Required.**
|
||||
|
||||
* **3**: `diagonal_index` - scalar or 1D tensor with element of type *T_NUM* describing the index of the diagonal to be populated. A positive value refers to an upper diagonal and a negative value refers to a lower diagonal. Value `0` populates the main diagonal. If `diagonal_index` is a positive value and is not smaller than `num_rows` or if `diagonal_index` is a negative value and is not larger than `num_columns`, the matrix will be filled with only zeros. **Required.**
|
||||
|
||||
* **4**: `batch_shape` - 1D tensor with non-negative values of type *T_NUM* defines leading batch dimensions of output shape. If `batch_shape` is an empty list, *Eye* operation generates a 2D tensor (matrix). This input is optional, and its default value equal to an empty tensor.
|
||||
|
||||
|
||||
**Outputs**:
|
||||
|
||||
* **1**: A tensor with the type specified by the *output_type* attribute. The shape is `batch_shape + [num_rows, num_columns]`
|
||||
|
||||
**Types**
|
||||
|
||||
* *T_NUM*: `int32` or `int64`.
|
||||
|
||||
**Examples**
|
||||
|
||||
*Example 1*
|
||||
|
||||
```xml
|
||||
<layer ... name="Eye" type="Eye">
|
||||
<data output_type="i8"/>
|
||||
<input>
|
||||
<port id="0" precision="I32"/> <!-- num rows: 5 -->
|
||||
<port id="1" precision="I32"/> <!-- num columns: 5 -->
|
||||
<port id="2" precision="I32"/> <!-- diagonal index -->
|
||||
</input>
|
||||
<output>
|
||||
<port id="3" precision="I8" names="Eye:0">
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
*Example 2*
|
||||
|
||||
```xml
|
||||
<layer ... name="Eye" type="Eye">
|
||||
<data output_type="f32"/>
|
||||
<input>
|
||||
<port id="0" precision="I32"/> <!-- num rows -->
|
||||
<port id="1" precision="I32"/> <!-- num columns -->
|
||||
<port id="2" precision="I32"/> <!-- diagonal index -->
|
||||
<port id="3" precision="I32"/> <!-- batch_shape : [2, 3] -->
|
||||
</input>
|
||||
<output>
|
||||
<port id="3" precision="F32" names="Eye:0">
|
||||
<dim>2</dim>
|
||||
<dim>3</dim>
|
||||
<dim>-1</dim>
|
||||
<dim>-1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
@@ -5,8 +5,7 @@
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
openvino_docs_ops_opset9
|
||||
|
||||
openvino_docs_ops_opset8
|
||||
openvino_docs_ops_opset7
|
||||
openvino_docs_ops_opset6
|
||||
@@ -15,24 +14,24 @@
|
||||
openvino_docs_ops_opset3
|
||||
openvino_docs_ops_opset2
|
||||
openvino_docs_ops_opset1
|
||||
|
||||
|
||||
@endsphinxdirective
|
||||
|
||||
According to capabilities of supported deep learning frameworks and hardware capabilities of a target inference device, all operations are combined into operations sets each fully supported in a specific version of OpenVINO™ toolkit.
|
||||
According to capabilities of supported deep learning frameworks and hardware capabilities of a target inference device, all operations are combined into operations sets each fully supported in a specific version of OpenVINO™ toolkit.
|
||||
|
||||
This topic provides a complete list of available sets of operations supported in different versions of OpenVINO™ toolkit. Use the relevant version of the operations set for a particular release. For a list of operations included into an operations set, click a link in the table.
|
||||
This topic provides a complete list of available sets of operations supported in different versions of OpenVINO™ toolkit. It's highly recommended to use the actual version of the operations set for a particular release. For a list of operations included into an operations set, click a link in the table.
|
||||
|
||||
| OpenVINO™ Version | Actual Operations Set |
|
||||
| :---------------- | :------------------------------- |
|
||||
| OpenVINO™ Version | Actual Operations Set |
|
||||
| :---------------- | :------------------------------- |
|
||||
| 2022.1 | [opset8](opset8.md) |
|
||||
| 2021.4 | [opset7](opset7.md) |
|
||||
| 2021.3 | [opset6](opset6.md) |
|
||||
| 2021.2 | [opset5](opset5.md) |
|
||||
| 2021.1 | [opset4](opset4.md) |
|
||||
| 2021.4 | [opset7](opset7.md) |
|
||||
| 2021.3 | [opset6](opset6.md) |
|
||||
| 2021.2 | [opset5](opset5.md) |
|
||||
| 2021.1 | [opset4](opset4.md) |
|
||||
| 2020.4 | [opset3](opset3.md) |
|
||||
| 2020.3 | [opset2](opset2.md) |
|
||||
| 2020.2 | [opset2](opset2.md) |
|
||||
| 2020.1 | [opset1](opset1.md) |
|
||||
| 2020.2 | [opset2](opset2.md) |
|
||||
| 2020.1 | [opset1](opset1.md) |
|
||||
|
||||
## See Also
|
||||
[Deep Learning Network Intermediate Representation and Operations Sets in OpenVINO™](../MO_DG/IR_and_opsets.md)
|
||||
|
||||
@@ -103,7 +103,7 @@ declared in `namespace opset8`.
|
||||
* [Mish](activation/Mish_4.md)
|
||||
* [Mod](arithmetic/Mod_1.md)
|
||||
* [MVN](normalization/MVN_6.md)
|
||||
* [MulticlassNMS](sort/MulticlassNonMaxSuppression_8.md)
|
||||
* [MulticlassNMS](sort/MulticlassNMS_8.md)
|
||||
* [Multiply](arithmetic/Multiply_1.md)
|
||||
* [Negative](arithmetic/Negative_1.md)
|
||||
* [NonMaxSuppression](sort/NonMaxSuppression_5.md)
|
||||
|
||||
@@ -1,183 +0,0 @@
|
||||
# opset9 {#openvino_docs_ops_opset9}
|
||||
|
||||
This specification document describes the `opset9` operation set supported in OpenVINO™.
|
||||
Support for each particular operation from the list below depends on the capabilities of an inference plugin
|
||||
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR V10 xml
|
||||
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding nGraph operation classes
|
||||
declared in `namespace opset9`.
|
||||
|
||||
|
||||
## Table of Contents <a name="toc"></a>
|
||||
|
||||
* [Abs](arithmetic/Abs_1.md)
|
||||
* [Acos](arithmetic/Acos_1.md)
|
||||
* [Acosh](arithmetic/Acosh_3.md)
|
||||
* [AdaptiveAvgPool](pooling/AdaptiveAvgPool_8.md)
|
||||
* [AdaptiveMaxPool](pooling/AdaptiveMaxPool_8.md)
|
||||
* [Add](arithmetic/Add_1.md)
|
||||
* [Asin](arithmetic/Asin_1.md)
|
||||
* [Asinh](arithmetic/Asinh_3.md)
|
||||
* [Assign](infrastructure/Assign_3.md)
|
||||
* [Atan](arithmetic/Atan_1.md)
|
||||
* [Atanh](arithmetic/Atanh_3.md)
|
||||
* [AvgPool](pooling/AvgPool_1.md)
|
||||
* [BatchNormInference](normalization/BatchNormInference_5.md)
|
||||
* [BatchToSpace](movement/BatchToSpace_2.md)
|
||||
* [BinaryConvolution](convolution/BinaryConvolution_1.md)
|
||||
* [Broadcast](movement/Broadcast_3.md)
|
||||
* [Bucketize](condition/Bucketize_3.md)
|
||||
* [CTCGreedyDecoder](sequence/CTCGreedyDecoder_1.md)
|
||||
* [CTCGreedyDecoderSeqLen](sequence/CTCGreedyDecoderSeqLen_6.md)
|
||||
* [CTCLoss](sequence/CTCLoss_4.md)
|
||||
* [Ceiling](arithmetic/Ceiling_1.md)
|
||||
* [Clamp](activation/Clamp_1.md)
|
||||
* [Concat](movement/Concat_1.md)
|
||||
* [Constant](infrastructure/Constant_1.md)
|
||||
* [Convert](type/Convert_1.md)
|
||||
* [ConvertLike](type/ConvertLike_1.md)
|
||||
* [Convolution](convolution/Convolution_1.md)
|
||||
* [ConvolutionBackpropData](convolution/ConvolutionBackpropData_1.md)
|
||||
* [Cos](arithmetic/Cos_1.md)
|
||||
* [Cosh](arithmetic/Cosh_1.md)
|
||||
* [CumSum](arithmetic/CumSum_3.md)
|
||||
* [DeformableConvolution](convolution/DeformableConvolution_8.md)
|
||||
* [DeformablePSROIPooling](detection/DeformablePSROIPooling_1.md)
|
||||
* [DepthToSpace](movement/DepthToSpace_1.md)
|
||||
* [DetectionOutput](detection/DetectionOutput_8.md)
|
||||
* [DFT](signals/DFT_7.md)
|
||||
* [Divide](arithmetic/Divide_1.md)
|
||||
* [Einsum](matrix/Einsum_7.md)
|
||||
* [Elu](activation/Elu_1.md)
|
||||
* [EmbeddingBagOffsetsSum](sparse/EmbeddingBagOffsetsSum_3.md)
|
||||
* [EmbeddingBagPackedSum](sparse/EmbeddingBagPackedSum_3.md)
|
||||
* [EmbeddingSegmentsSum](sparse/EmbeddingSegmentsSum_3.md)
|
||||
* [Equal](comparison/Equal_1.md)
|
||||
* [Erf](arithmetic/Erf_1.md)
|
||||
* [Exp](activation/Exp_1.md)
|
||||
* [ExperimentalDetectronDetectionOutput_6](detection/ExperimentalDetectronDetectionOutput_6.md)
|
||||
* [ExperimentalDetectronGenerateProposalsSingleImage_6](detection/ExperimentalDetectronGenerateProposalsSingleImage_6.md)
|
||||
* [ExperimentalDetectronPriorGridGenerator_6](detection/ExperimentalDetectronPriorGridGenerator_6.md)
|
||||
* [ExperimentalDetectronROIFeatureExtractor_6](detection/ExperimentalDetectronROIFeatureExtractor_6.md)
|
||||
* [ExperimentalDetectronTopKROIs_6](sort/ExperimentalDetectronTopKROIs_6.md)
|
||||
* [ExtractImagePatches](movement/ExtractImagePatches_3.md)
|
||||
* [Eye](generation/Eye_9.md)
|
||||
* [FakeQuantize](quantization/FakeQuantize_1.md)
|
||||
* [Floor](arithmetic/Floor_1.md)
|
||||
* [FloorMod](arithmetic/FloorMod_1.md)
|
||||
* [Gather](movement/Gather_8.md)
|
||||
* [GatherElements](movement/GatherElements_6.md)
|
||||
* [GatherND](movement/GatherND_8.md)
|
||||
* [GatherTree](movement/GatherTree_1.md)
|
||||
* [Gelu](activation/GELU_7.md)
|
||||
* [GenerateProposals](detection/GenerateProposals_9.md)
|
||||
* [Greater](comparison/Greater_1.md)
|
||||
* [GreaterEqual](comparison/GreaterEqual_1.md)
|
||||
* [GRN](normalization/GRN_1.md)
|
||||
* [GroupConvolution](convolution/GroupConvolution_1.md)
|
||||
* [GroupConvolutionBackpropData](convolution/GroupConvolutionBackpropData_1.md)
|
||||
* [GRUCell](sequence/GRUCell_3.md)
|
||||
* [GRUSequence](sequence/GRUSequence_5.md)
|
||||
* [HardSigmoid](activation/HardSigmoid_1.md)
|
||||
* [HSigmoid](activation/HSigmoid_5.md)
|
||||
* [HSwish](activation/HSwish_4.md)
|
||||
* [IDFT](signals/IDFT_7.md)
|
||||
* [I420toBGR](image/I420toBGR_8.md)
|
||||
* [I420toRGB](image/I420toRGB_8.md)
|
||||
* [If](condition/If_8.md)
|
||||
* [Interpolate](image/Interpolate_4.md)
|
||||
* [IRDFT](signals/IRDFT_9.md)
|
||||
* [Less](comparison/Less_1.md)
|
||||
* [LessEqual](comparison/LessEqual_1.md)
|
||||
* [Log](arithmetic/Log_1.md)
|
||||
* [LogicalAnd](logical/LogicalAnd_1.md)
|
||||
* [LogicalNot](logical/LogicalNot_1.md)
|
||||
* [LogicalOr](logical/LogicalOr_1.md)
|
||||
* [LogicalXor](logical/LogicalXor_1.md)
|
||||
* [LogSoftmax](activation/LogSoftmax_5.md)
|
||||
* [Loop](infrastructure/Loop_5.md)
|
||||
* [LRN](normalization/LRN_1.md)
|
||||
* [LSTMCell](sequence/LSTMCell_1.md)
|
||||
* [LSTMSequence](sequence/LSTMSequence_1.md)
|
||||
* [MatMul](matrix/MatMul_1.md)
|
||||
* [MatrixNMS](sort/MatrixNMS_8.md)
|
||||
* [MaxPool](pooling/MaxPool_8.md)
|
||||
* [Maximum](arithmetic/Maximum_1.md)
|
||||
* [Minimum](arithmetic/Minimum_1.md)
|
||||
* [Mish](activation/Mish_4.md)
|
||||
* [Mod](arithmetic/Mod_1.md)
|
||||
* [MVN](normalization/MVN_6.md)
|
||||
* [MulticlassNMS](sort/MulticlassNonMaxSuppression_9.md)
|
||||
* [Multiply](arithmetic/Multiply_1.md)
|
||||
* [Negative](arithmetic/Negative_1.md)
|
||||
* [NonMaxSuppression](sort/NonMaxSuppression_5.md)
|
||||
* [NonZero](condition/NonZero_3.md)
|
||||
* [NormalizeL2](normalization/NormalizeL2_1.md)
|
||||
* [NotEqual](comparison/NotEqual_1.md)
|
||||
* [NV12toBGR](image/NV12toBGR_8.md)
|
||||
* [NV12toRGB](image/NV12toRGB_8.md)
|
||||
* [OneHot](sequence/OneHot_1.md)
|
||||
* [Pad](movement/Pad_1.md)
|
||||
* [Parameter](infrastructure/Parameter_1.md)
|
||||
* [Power](arithmetic/Power_1.md)
|
||||
* [PReLU](activation/PReLU_1.md)
|
||||
* [PriorBoxClustered](detection/PriorBoxClustered_1.md)
|
||||
* [PriorBox](detection/PriorBox_8.md)
|
||||
* [Proposal](detection/Proposal_4.md)
|
||||
* [PSROIPooling](detection/PSROIPooling_1.md)
|
||||
* [RandomUniform](generation/RandomUniform_8.md)
|
||||
* [Range](generation/Range_4.md)
|
||||
* [RDFT](signals/RDFT_9.md)
|
||||
* [ReLU](activation/ReLU_1.md)
|
||||
* [ReadValue](infrastructure/ReadValue_3.md)
|
||||
* [ReduceL1](reduction/ReduceL1_4.md)
|
||||
* [ReduceL2](reduction/ReduceL2_4.md)
|
||||
* [ReduceLogicalAnd](reduction/ReduceLogicalAnd_1.md)
|
||||
* [ReduceLogicalOr](reduction/ReduceLogicalOr_1.md)
|
||||
* [ReduceMax](reduction/ReduceMax_1.md)
|
||||
* [ReduceMean](reduction/ReduceMean_1.md)
|
||||
* [ReduceMin](reduction/ReduceMin_1.md)
|
||||
* [ReduceProd](reduction/ReduceProd_1.md)
|
||||
* [ReduceSum](reduction/ReduceSum_1.md)
|
||||
* [RegionYolo](detection/RegionYolo_1.md)
|
||||
* [ReorgYolo](detection/ReorgYolo_1.md)
|
||||
* [Reshape](shape/Reshape_1.md)
|
||||
* [Result](infrastructure/Result_1.md)
|
||||
* [ReverseSequence](movement/ReverseSequence_1.md)
|
||||
* [RNNCell](sequence/RNNCell_3.md)
|
||||
* [RNNSequence](sequence/RNNSequence_5.md)
|
||||
* [ROIAlign](detection/ROIAlign_9.md)
|
||||
* [ROIPooling](detection/ROIPooling_1.md)
|
||||
* [Roll](movement/Roll_7.md)
|
||||
* [Round](arithmetic/Round_5.md)
|
||||
* [ScatterElementsUpdate](movement/ScatterElementsUpdate_3.md)
|
||||
* [ScatterNDUpdate](movement/ScatterNDUpdate_3.md)
|
||||
* [ScatterUpdate](movement/ScatterUpdate_3.md)
|
||||
* [Select](condition/Select_1.md)
|
||||
* [Selu](activation/Selu_1.md)
|
||||
* [ShapeOf](shape/ShapeOf_3.md)
|
||||
* [ShuffleChannels](movement/ShuffleChannels_1.md)
|
||||
* [Sigmoid](activation/Sigmoid_1.md)
|
||||
* [Sign](arithmetic/Sign_1.md)
|
||||
* [Sin](arithmetic/Sin_1.md)
|
||||
* [Sinh](arithmetic/Sinh_1.md)
|
||||
* [Slice](movement/Slice_8.md)
|
||||
* [SoftMax](activation/SoftMax_8.md)
|
||||
* [SoftPlus](activation/SoftPlus_4.md)
|
||||
* [SoftSign](activation/SoftSign_9.md)
|
||||
* [SpaceToBatch](movement/SpaceToBatch_2.md)
|
||||
* [SpaceToDepth](movement/SpaceToDepth_1.md)
|
||||
* [Split](movement/Split_1.md)
|
||||
* [Sqrt](arithmetic/Sqrt_1.md)
|
||||
* [SquaredDifference](arithmetic/SquaredDifference_1.md)
|
||||
* [Squeeze](shape/Squeeze_1.md)
|
||||
* [StridedSlice](movement/StridedSlice_1.md)
|
||||
* [Subtract](arithmetic/Subtract_1.md)
|
||||
* [Swish](activation/Swish_4.md)
|
||||
* [Tan](arithmetic/Tan_1.md)
|
||||
* [Tanh](arithmetic/Tanh_1.md)
|
||||
* [TensorIterator](infrastructure/TensorIterator_1.md)
|
||||
* [Tile](movement/Tile_1.md)
|
||||
* [TopK](sort/TopK_3.md)
|
||||
* [Transpose](movement/Transpose_1.md)
|
||||
* [Unsqueeze](shape/Unsqueeze_1.md)
|
||||
* [VariadicSplit](movement/VariadicSplit_1.md)
|
||||
@@ -1,218 +0,0 @@
|
||||
# Inverse Discrete complex-to-real Fourier Transformation (IRDFT) {#openvino_docs_ops_signals_IRDFT_9}
|
||||
|
||||
**Versioned name**: *IRDFT-9*
|
||||
|
||||
**Category**: *Signal processing*
|
||||
|
||||
**Short description**: *IRDFT* operation performs the inverse complex-to-real discrete Fourier transformation of the input tensor by specified dimensions.
|
||||
|
||||
**Attributes**:
|
||||
|
||||
No attributes available.
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` - Input tensor of type *T* with data for the IRDFT transformation. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form `[D_0, D_1, ..., D_{N-1}, 2]`, representing the real and imaginary components of complex numbers in `[:, ..., :, 0]` and in `[:, ..., :, 1]` correspondingly. **Required.**
|
||||
* **2**: `axes` - 1D tensor of type *T_IND* specifying dimension indices where IRDFT is applied, and `axes` is any unordered list of indices of different dimensions of the input tensor, for example, `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`, `[-3, 0, -2]`. These indices should be integers from `-(r - 1)` to `(r - 2)` inclusively, where `r = rank(data)`. A negative axis `a` is interpreted as an axis `r - 1 + a`. Other dimensions do not change. The order of elements in the `axes` attribute matters, and is mapped directly to elements in the third input `signal_size`. **Required.**
|
||||
* **NOTE**: The following constraint must be satisfied: `rank(data) >= len(axes) + 1 and (rank(data) - 1) not in axes and (-1) not in axes`.
|
||||
* **3**: `signal_size` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input `axes`. If `signal_size[i] == -1`, then IRDFT is calculated for full size of the axis `axes[i]`. If `signal_size[i] > data_shape[: r - 1][axes[i]]`, then input data is zero-padded with respect to the axis `axes[i]` at the end. Finally, if `signal_size[i] < data_shape[: r - 1][axes[i]]`, then input data is trimmed with respect to the axis `axes[i]`. More precisely, if `signal_size[i] < data_shape[: r - 1][axes[i]]`, the slice `0: signal_size[i]` of the axis `axes[i]` is considered. Optionally, with default value `[data_shape[: r - 1][a] for a in axes]`.
|
||||
* **NOTE**: If the input `signal_size` is specified, then the size of `signal_size` must be the same as the size of `axes`.
|
||||
|
||||
**Outputs**
|
||||
|
||||
* **1**: Resulting tensor with elements of the same type as input `data` tensor and with rank `r - 1`, where `r = rank(data)`. The shape of the output has the form `[S_0, S_1, ..., S_{r-2}]`, where all `S_a` are calculated as follows:
|
||||
|
||||
1. Calculate `normalized_axes`, where each `normalized_axes[i] = axes[i]`, if `axes[i] >= 0`, and `normalized_axes[i] = axes[i] + r - 1` otherwise.
|
||||
|
||||
2. If `a not in normalized_axes`, then `S_a = data_shape[a]`.
|
||||
|
||||
3. If `a in normalized_axes`, then `a = normalized_axes[i]` for some `i`. In such case, `S_a` is calculated as follows:
|
||||
+ When `i != len(normalized_axes) - 1`, `S_a` is calculated as `S_a = data_shape[a]` if the `signal_size` input is not specified, or, if it is specified, `signal_size[i] = -1`; and `S_a = signal_size[a]` otherwise.
|
||||
+ When `i = len(normalized_axes) - 1`, `S_a` is calculated as `S_a = 2 * (data_shape[a] - 1)` if the `signal_size` input is not specified, or, if it is specified, `signal_size[i] = -1`; and `S_a = signal_size[a]` otherwise.
|
||||
|
||||
**Types**
|
||||
|
||||
* *T*: any supported floating-point type.
|
||||
|
||||
* *T_IND*: `int64` or `int32`.
|
||||
|
||||
* *T_SIZE*: `int64` or `int32`.
|
||||
|
||||
**Detailed description**: *IRDFT* performs the discrete Fourier transformation of the input tensor, according to the following rules.
|
||||
|
||||
For simplicity, assume that an input tensor `A` has the shape `[B_0, ..., B_{k-1}, M_0, ..., M_{q-1}, 2]`, `axes=[k,...,k + q - 1]`, and `signal_size=[S_0,...,S_{q-1}]`.
|
||||
|
||||
Let `D` be a value of the input tensor `A`.
|
||||
|
||||
Next, put
|
||||
\f[X[j_0,\dots,j_{k-1},j_k,\dots,j_{k+q-1}]=D[j_0,\dots,j_{k-1},j_k,\dots,j_{k+q-1},0]+iD[j_0,\dots,j_{k-1},j_k,\dots,j_{k+q-1},1]\f]
|
||||
for all indices `j_0,...,j_{k+q-1}`, where `i` is an imaginary unit, that is `X` is a complex tensor.
|
||||
|
||||
Define the complex tensor `F` with the shape `[B_0, ..., B_{k-1}, 2 * (M_0 - 1), ..., 2 * (M_{q-1} - 1)]` using the formula
|
||||
\f[F[j_0,\dots,j_{k-1},j_k,\dots,j_p,\dots,j_{k+q-1}] = \begin{cases}X[j_0,\dots,j_{k-1},j_k,\dots,j_p,\dots,j_{k+q-1}],\text{ when }j_p=0,\dots,M_p-1;\\ \overline{X[j_0,\dots,j_{k-1},j_k,\dots,2(M_p-1) - j_p,\dots,j_{k+q-1}]},\text{ otherwise.}\end{cases}\f]
|
||||
|
||||
Construct the complex tensor `G` with the shape `[B_0, ..., B_{k-1}, S_0, ..., S_{q-1}]` by the following way. If `S_a > 2 * (M_a - 1)`, then the axis `k + a` of `F` will be padded by zeros; if `S_a < 2 * (M_a - 1)`, then the axis `k + a` of `F` will be trimmed, that is, we will consider only the slice `0: S_a` of this axis; finally, if `S_a = 2 * (M_a - 1)`, then we consider the full axis `k + a` of `F`.
|
||||
|
||||
Let `Y` be a complex tensor with the shape `[B_0, ..., B_{k-1}, S_0, ..., S_{q-1}]` such that
|
||||
\f[Y[n_0,\dots,n_{k-1},m_0,\dots,m_{q-1}]=\frac{1}{\prod\limits_{j=0}^{q-1}S_j}\sum\limits_{p_0=0}^{S_0-1}\cdots\sum\limits_{p_{q-1}=0}^{S_{q-1}-1}G[n_0,\dots,n_{k-1},p_0,\dots,p_{q-1}]\exp\left(2\pi i\sum\limits_{b=0}^{q-1}\frac{m_bp_b}{S_b}\right)\f]
|
||||
for all indices `n_0,...,n_{k-1}`, `m_0,...,m_{q-1}`.
|
||||
|
||||
Finally, the result of the inverse discrete complex-to-real Fourier transform is a real part of the tensor `Y`.
|
||||
|
||||
Calculations for the generic case of axes and signal sizes are similar.
|
||||
|
||||
**Example**:
|
||||
|
||||
There is no `signal_size` input (4D input tensor):
|
||||
```xml
|
||||
<layer ... type="IRDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>161</dim>
|
||||
<dim>161</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- [1, 2] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="2">
|
||||
<dim>1</dim>
|
||||
<dim>161</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
There is no `signal_size` input (3D input tensor):
|
||||
```xml
|
||||
<layer ... type="IRDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>161</dim>
|
||||
<dim>161</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- [0, 1] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="2">
|
||||
<dim>161</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (4D input tensor):
|
||||
```xml
|
||||
<layer ... type="IRDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>161</dim>
|
||||
<dim>161</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- [1, 2] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>2</dim> <!-- [512, 100] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>1</dim>
|
||||
<dim>512</dim>
|
||||
<dim>100</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (3D input tensor):
|
||||
```xml
|
||||
<layer ... type="IRDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>161</dim>
|
||||
<dim>161</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- [0, 1] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>2</dim> <!-- [512, 100] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>512</dim>
|
||||
<dim>100</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (5D input tensor, `-1` in `signal_size`, unsorted axes):
|
||||
```xml
|
||||
<layer ... type="IRDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>580</dim>
|
||||
<dim>320</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>3</dim> <!-- axes input contains [3, 1, 2] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>3</dim> <!-- signal_size input contains [170, -1, 1024] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>1024</dim>
|
||||
<dim>170</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (5D input tensor, `-1` in `signal_size`, unsorted axes, the second example):
|
||||
```xml
|
||||
<layer ... type="IRDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>580</dim>
|
||||
<dim>320</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>3</dim> <!-- axes input contains [3, 0, 2] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>3</dim> <!-- signal_size input contains [258, -1, 2056] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>2056</dim>
|
||||
<dim>258</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
@@ -1,210 +0,0 @@
|
||||
# Discrete Fourier Transformation for real-valued input (RDFT) {#openvino_docs_ops_signals_RDFT_9}
|
||||
|
||||
**Versioned name**: *RDFT-9*
|
||||
|
||||
**Category**: *Signal processing*
|
||||
|
||||
**Short description**: *RDFT* operation performs the discrete real-to-complex Fourier transformation of the input tensor by specified dimensions.
|
||||
|
||||
**Attributes**:
|
||||
|
||||
No attributes available.
|
||||
|
||||
**Inputs**
|
||||
|
||||
* **1**: `data` - Input tensor of type *T* with data for the RDFT transformation. **Required.**
|
||||
* **2**: `axes` - 1D tensor of type *T_IND* specifying dimension indices where RDFT is applied, and `axes` is any unordered list of indices of different dimensions of input tensor, for example, `[0, 4]`, `[4, 0]`, `[4, 2, 1]`, `[1, 2, 3]`, `[-3, 0, -2]`. These indices should be integers from `-r` to `r - 1` inclusively, where `r = rank(data)`. A negative axis `a` is interpreted as an axis `r + a`. Other dimensions do not change. The order of elements in `axes` attribute matters, and is mapped directly to elements in the third input `signal_size`. **Required.**
|
||||
* **3**: `signal_size` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input `axes`. If `signal_size[i] == -1`, then RDFT is calculated for full size of the axis `axes[i]`. If `signal_size[i] > data_shape[axes[i]]`, then input data is zero-padded with respect to the axis `axes[i]` at the end. Finally, if `signal_size[i] < data_shape[axes[i]]`, then input data is trimmed with respect to the axis `axes[i]`. More precisely, if `signal_size[i] < data_shape[axes[i]]`, the slice `0: signal_size[i]` of the axis `axes[i]` is considered. Optionally, with default value `[data_shape[a] for a in axes]`.
|
||||
* **NOTE**: If the input `signal_size` is specified, the size of `signal_size` must be the same as the size of `axes`.
|
||||
|
||||
**Outputs**
|
||||
|
||||
* **1**: Resulting tensor with elements of the same type as input `data` tensor and with rank `r + 1`, where `r = rank(data)`. The shape of the output has the form `[S_0, S_1, ..., S_{r-1}, 2]`, where all `S_a` are calculated as follows:
|
||||
|
||||
1. Calculate `normalized_axes`, where each `normalized_axes[i] = axes[i]`, if `axes[i] >= 0`, and `normalized_axes[i] = axes[i] + r` otherwise.
|
||||
|
||||
2. If `a not in normalized_axes`, then `S_a = data_shape[a]`.
|
||||
|
||||
3. If `a in normalized_axes`, then `a = normalized_axes[i]` for some `i`.
|
||||
+ When `i != len(normalized_axes) - 1`, `S_a` is calculated as `S_a = data_shape[a]` if the `signal_size` input is not specified, or, if it is specified, `signal_size[i] = -1`; and `S_a = signal_size[a]` otherwise.
|
||||
+ When `i = len(normalized_axes) - 1`, `S_a` is calculated as `S_a = data_shape[a] // 2 + 1` if the `signal_size` input is not specified, or, if it is specified, `signal_size[i] = -1`; and `S_a = signal_size[a] // 2 + 1` otherwise.
|
||||
|
||||
**Types**
|
||||
|
||||
* *T*: any supported floating-point type.
|
||||
|
||||
* *T_IND*: `int64` or `int32`.
|
||||
|
||||
* *T_SIZE*: `int64` or `int32`.
|
||||
|
||||
**Detailed description**: *RDFT* performs the discrete Fourier transformation of real-valued input tensor with respect to specified axes. Calculations are performed according to the following rules.
|
||||
|
||||
For simplicity, assume that an input tensor `A` has the shape `[B_0, ..., B_{k-1}, M_0, ..., M_{q-1}]`, `axes=[k,...,k+q-1]`, and `signal_size=[S_0,...,S_{q-1}]`.
|
||||
|
||||
Let `D` be an input tensor `A`, taking into account the `signal_size`, and, hence, `D` has the shape `[B_0, ..., B_{k-1}, S_0, ..., S_{1-1}]`.
|
||||
|
||||
Next, let
|
||||
\f[X=X[j_0,\dots,j_{k-1},j_k,\dots,j_{k+q-1}]\f]
|
||||
for all indices `j_0,...,j_{k+q-1}`, be a real-valued input tensor.
|
||||
|
||||
Then the transformation RDFT of the tensor `X` is the tensor `Y` of the shape `[B_0, ..., B_{k-1}, S_0, ..., S_{q-2}, S_{q-1} // 2 + 1]`, such that
|
||||
\f[Y[n_0,\dots,n_{k-1},m_0,\dots,m_{q-1}]=\sum\limits_{p_0=0}^{S_0-1}\cdots\sum\limits_{p_{q-1}=0}^{S_{q-1}-1}X[n_0,\dots,n_{k-1},p_0,\dots,p_{q-1}]\exp\left(-2\pi i\sum\limits_{b=0}^{q-1}\frac{m_bp_b}{S_b}\right)\f]
|
||||
for all indices `n_0,...,n_{k-1}`, `m_0,...,m_{q-1}`.
|
||||
|
||||
Calculations for the generic case of axes and signal sizes are similar.
|
||||
|
||||
**Example**:
|
||||
|
||||
There is no `signal_size` input (3D input tensor):
|
||||
```xml
|
||||
<layer ... type="RDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>320</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- axes input contains [1, 2] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="2">
|
||||
<dim>1</dim>
|
||||
<dim>320</dim>
|
||||
<dim>161</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
There is no `signal_size` input (2D input tensor):
|
||||
```xml
|
||||
<layer ... type="RDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>320</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- axes input contains [0, 1] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="2">
|
||||
<dim>320</dim>
|
||||
<dim>161</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (3D input tensor):
|
||||
```xml
|
||||
<layer ... type="RDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>320</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- axes input contains [1, 2] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>2</dim> <!-- signal_size input contains [512, 100] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>1</dim>
|
||||
<dim>512</dim>
|
||||
<dim>51</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (2D input tensor):
|
||||
```xml
|
||||
<layer ... type="RDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>320</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim> <!-- axes input contains [0, 1] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>2</dim> <!-- signal_size input contains [512, 100] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>512</dim>
|
||||
<dim>51</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (4D input tensor, `-1` in `signal_size`, unsorted axes):
|
||||
```xml
|
||||
<layer ... type="RDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>580</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>3</dim> <!-- axes input contains [3, 1, 2] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>3</dim> <!-- signal_size input contains [170, -1, 1024] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>513</dim>
|
||||
<dim>170</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
|
||||
|
||||
There is `signal_size` input (4D input tensor, `-1` in `signal_size`, unsorted axes, the second example):
|
||||
```xml
|
||||
<layer ... type="RDFT" ... >
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>580</dim>
|
||||
<dim>320</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>3</dim> <!-- axes input contains [3, 0, 2] -->
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>3</dim> <!-- signal_size input contains [258, -1, 2056] -->
|
||||
</port>
|
||||
<output>
|
||||
<port id="3">
|
||||
<dim>16</dim>
|
||||
<dim>768</dim>
|
||||
<dim>1029</dim>
|
||||
<dim>258</dim>
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
@@ -1,208 +0,0 @@
|
||||
## MulticlassNonMaxSuppression<a name="MulticlassNonMaxSuppression"></a> {#openvino_docs_ops_sort_MulticlassNonMaxSuppression_9}
|
||||
|
||||
**Versioned name**: *MulticlassNonMaxSuppression-9*
|
||||
|
||||
**Category**: *Sorting and maximization*
|
||||
|
||||
**Short description**: *MulticlassNonMaxSuppression* performs multi-class non-maximum suppression of the boxes with predicted scores.
|
||||
|
||||
**Detailed description**: *MulticlassNonMaxSuppression* is a multi-phase operation. It implements non-maximum suppression algorithm as described below:
|
||||
|
||||
1. Let `B = [b_0,...,b_n]` be the list of initial detection boxes, `S = [s_0,...,s_n]` be the list of corresponding scores.
|
||||
2. Let `D = []` be an initial collection of resulting boxes. Let `adaptive_threshold = iou_threshold`.
|
||||
3. If `B` is empty, go to step 9.
|
||||
4. Take the box with highest score. Suppose that it is the box `b` with the score `s`.
|
||||
5. Delete `b` from `B`.
|
||||
6. If the score `s` is greater than or equal to `score_threshold`, add `b` to `D`, else go to step 9.
|
||||
7. If `nms_eta < 1` and `adaptive_threshold > 0.5`, update `adaptive_threshold *= nms_eta`.
|
||||
8. For each input box `b_i` from `B` and the corresponding score `s_i`, set `s_i = 0` when `iou(b, b_i) > adaptive_threshold`, and go to step 3.
|
||||
9. Return `D`, a collection of the corresponding scores `S`, and the number of elements in `D`.
|
||||
|
||||
This algorithm is applied independently to each class of each batch element. The operation feeds at most `nms_top_k` scoring candidate boxes to this algorithm.
|
||||
The total number of output boxes of each batch element must not exceed `keep_top_k`.
|
||||
Boxes of `background_class` are skipped and thus eliminated.
|
||||
|
||||
**Attributes**:
|
||||
|
||||
* *sort_result*
|
||||
|
||||
* **Description**: *sort_result* specifies the order of output elements.
|
||||
* **Range of values**: `class`, `score`, `none`
|
||||
* *class* - sort selected boxes by class id (ascending).
|
||||
* *score* - sort selected boxes by score (descending).
|
||||
* *none* - do not guarantee the order.
|
||||
* **Type**: `string`
|
||||
* **Default value**: `none`
|
||||
* **Required**: *no*
|
||||
|
||||
* *sort_result_across_batch*
|
||||
|
||||
    * **Description**: *sort_result_across_batch* is a flag that specifies whether it is necessary to sort selected boxes across batches or not.
|
||||
* **Range of values**: true or false
|
||||
* *true* - sort selected boxes across batches.
|
||||
* *false* - do not sort selected boxes across batches (boxes are sorted per batch element).
|
||||
* **Type**: boolean
|
||||
* **Default value**: false
|
||||
* **Required**: *no*
|
||||
|
||||
* *output_type*
|
||||
|
||||
* **Description**: the tensor type of outputs `selected_indices` and `valid_outputs`.
|
||||
* **Range of values**: `i64` or `i32`
|
||||
* **Type**: `string`
|
||||
* **Default value**: `i64`
|
||||
* **Required**: *no*
|
||||
|
||||
* *iou_threshold*
|
||||
|
||||
* **Description**: intersection over union threshold.
|
||||
* **Range of values**: a floating-point number
|
||||
* **Type**: `float`
|
||||
* **Default value**: `0`
|
||||
* **Required**: *no*
|
||||
|
||||
* *score_threshold*
|
||||
|
||||
* **Description**: minimum score to consider box for the processing.
|
||||
* **Range of values**: a floating-point number
|
||||
* **Type**: `float`
|
||||
* **Default value**: `0`
|
||||
* **Required**: *no*
|
||||
|
||||
* *nms_top_k*
|
||||
|
||||
* **Description**: maximum number of boxes to be selected per class.
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all boxes
|
||||
* **Required**: *no*
|
||||
|
||||
* *keep_top_k*
|
||||
|
||||
* **Description**: maximum number of boxes to be selected per batch element.
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all boxes
|
||||
* **Required**: *no*
|
||||
|
||||
* *background_class*
|
||||
|
||||
* **Description**: the background class id.
|
||||
* **Range of values**: an integer
|
||||
* **Type**: `int`
|
||||
* **Default value**: `-1` meaning to keep all classes.
|
||||
* **Required**: *no*
|
||||
|
||||
* *normalized*
|
||||
|
||||
* **Description**: *normalized* is a flag that indicates whether `boxes` are normalized or not.
|
||||
* **Range of values**: true or false
|
||||
* *true* - the box coordinates are normalized.
|
||||
* *false* - the box coordinates are not normalized.
|
||||
* **Type**: boolean
|
||||
    * **Default value**: true
|
||||
* **Required**: *no*
|
||||
|
||||
* *nms_eta*
|
||||
|
||||
* **Description**: eta parameter for adaptive NMS.
|
||||
    * **Range of values**: a floating-point number in the closed range `[0, 1.0]`.
|
||||
* **Type**: `float`
|
||||
* **Default value**: `1.0`
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
There are 2 kinds of input formats. The first one is of two inputs. The boxes are shared by all classes.
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. The box coordinates are laid out as `[xmin, ymin, xmax, ymax]`. **Required.**
|
||||
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. The tensor type should be the same as that of `boxes`. **Required.**
|
||||
|
||||
The second format is of three inputs. Each class has its own boxes that are not shared.
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_classes, num_boxes, 4]` with box coordinates. The box coordinates are laid out as `[xmin, ymin, xmax, ymax]`. **Required.**
|
||||
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_classes, num_boxes]` with box scores. The tensor type should be the same as that of `boxes`. **Required.**
|
||||
|
||||
* **3**: `roisnum` - tensor of type *T_IND* and shape `[num_batches]` with box numbers in each image. `num_batches` is the number of images. Each element in this tensor is the number of boxes for corresponding image. The sum of all elements is `num_boxes`. **Required.**
|
||||
|
||||
**Outputs**:
|
||||
|
||||
* **1**: `selected_outputs` - tensor of type *T*, which should be the same as that of `boxes`, and shape `[number of selected boxes, 6]` containing the selected boxes with score and class as tuples `[class_id, box_score, xmin, ymin, xmax, ymax]`.
|
||||
|
||||
* **2**: `selected_indices` - tensor of type *T_IND* and shape `[number of selected boxes, 1]` containing the selected indices in the flattened `boxes`, which are absolute values across batches. Therefore, possible valid values are in the range `[0, num_batches * num_boxes - 1]`.
|
||||
|
||||
* **3**: `selected_num` - 1D tensor of type *T_IND* and shape `[num_batches]` representing the number of selected boxes for each batch element.
|
||||
|
||||
When there is no box selected, `selected_num` is filled with `0`. `selected_outputs` is an empty tensor of shape `[0, 6]`, and `selected_indices` is an empty tensor of shape `[0, 1]`.
|
||||
|
||||
**Types**
|
||||
|
||||
* *T*: floating-point type.
|
||||
|
||||
* *T_IND*: `int64` or `int32`.
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<layer ... type="MulticlassNonMaxSuppression" ... >
|
||||
<data sort_result="score" output_type="i64" sort_result_across_batch="false" iou_threshold="0.2" score_threshold="0.5" nms_top_k="-1" keep_top_k="-1" background_class="-1" normalized="false" nms_eta="0.0"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>3</dim>
|
||||
<dim>100</dim>
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>100</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="5" precision="FP32">
|
||||
        <dim>-1</dim> <!-- "-1" means an undefined dimension calculated during the model inference -->
|
||||
<dim>6</dim>
|
||||
</port>
|
||||
<port id="6" precision="I64">
|
||||
<dim>-1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
<port id="7" precision="I64">
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
Another possible example with 3 inputs could be like:
|
||||
```xml
|
||||
<layer ... type="MulticlassNonMaxSuppression" ... >
|
||||
<data sort_result="score" output_type="i64" sort_result_across_batch="false" iou_threshold="0.2" score_threshold="0.5" nms_top_k="-1" keep_top_k="-1" background_class="-1" normalized="false" nms_eta="0.0"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>3</dim>
|
||||
<dim>100</dim>
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>3</dim>
|
||||
<dim>100</dim>
|
||||
</port>
|
||||
<port id="2">
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="5" precision="FP32">
|
||||
        <dim>-1</dim> <!-- "-1" means an undefined dimension calculated during the model inference -->
|
||||
<dim>6</dim>
|
||||
</port>
|
||||
<port id="6" precision="I64">
|
||||
<dim>-1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
<port id="7" precision="I64">
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
@@ -1,123 +0,0 @@
|
||||
# NonMaxSuppression {#openvino_docs_ops_sort_NonMaxSuppression_9}
|
||||
|
||||
**Versioned name**: *NonMaxSuppression-9*
|
||||
|
||||
**Category**: *Sorting and maximization*
|
||||
|
||||
**Short description**: *NonMaxSuppression* performs non maximum suppression of the boxes with predicted scores.
|
||||
|
||||
**Detailed description**: *NonMaxSuppression* performs non maximum suppression algorithm as described below:
|
||||
|
||||
1. Let `B = [b_0,...,b_n]` be the list of initial detection boxes, `S = [s_0,...,s_n]` be the list of corresponding scores.
|
||||
2. Let `D = []` be an initial collection of resulting boxes.
|
||||
3. If `B` is empty then go to step 8.
|
||||
4. Take the box with highest score. Suppose that it is the box `b` with the score `s`.
|
||||
5. Delete `b` from `B`.
|
||||
6. If the score `s` is greater than or equal to `score_threshold`, then add `b` to `D`; otherwise, go to step 8.
|
||||
7. For each input box `b_i` from `B` and the corresponding score `s_i`, set `s_i = s_i * func(IOU(b_i, b))` and go to step 3.
|
||||
8. Return `D`, a collection of the corresponding scores `S`, and the number of elements in `D`.
|
||||
|
||||
Here `func(iou) = 1 if iou <= iou_threshold else 0` when `soft_nms_sigma == 0`, else `func(iou) = exp(-0.5 * iou * iou / soft_nms_sigma)`.
|
||||
|
||||
This algorithm is applied independently to each class of each batch element. The total number of output boxes for each
|
||||
class must not exceed `max_output_boxes_per_class`.
|
||||
|
||||
**Attributes**:
|
||||
|
||||
* *box_encoding*
|
||||
|
||||
* **Description**: *box_encoding* specifies the format of boxes data encoding.
|
||||
* **Range of values**: "corner" or "center"
|
||||
* *corner* - the box data is supplied as `[y1, x1, y2, x2]` where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners.
|
||||
* *center* - the box data is supplied as `[x_center, y_center, width, height]`.
|
||||
* **Type**: string
|
||||
* **Default value**: "corner"
|
||||
* **Required**: *no*
|
||||
|
||||
* *sort_result_descending*
|
||||
|
||||
    * **Description**: *sort_result_descending* is a flag that specifies whether it is necessary to sort selected boxes across batches or not.
|
||||
    * **Range of values**: true or false
|
||||
* *true* - sort selected boxes across batches.
|
||||
* *false* - do not sort selected boxes across batches (boxes are sorted per class).
|
||||
* **Type**: boolean
|
||||
* **Default value**: true
|
||||
* **Required**: *no*
|
||||
|
||||
* *output_type*
|
||||
|
||||
* **Description**: the output tensor type
|
||||
* **Range of values**: "i64" or "i32"
|
||||
* **Type**: string
|
||||
* **Default value**: "i64"
|
||||
* **Required**: *no*
|
||||
|
||||
**Inputs**:
|
||||
|
||||
* **1**: `boxes` - tensor of type *T* and shape `[num_batches, num_boxes, 4]` with box coordinates. **Required.**
|
||||
|
||||
* **2**: `scores` - tensor of type *T* and shape `[num_batches, num_classes, num_boxes]` with box scores. **Required.**
|
||||
|
||||
* **3**: `max_output_boxes_per_class` - scalar or 1D tensor with 1 element of type *T_MAX_BOXES* specifying maximum number of boxes to be selected per class. Optional with default value 0 meaning select no boxes.
|
||||
|
||||
* **4**: `iou_threshold` - scalar or 1D tensor with 1 element of type *T_THRESHOLDS* specifying intersection over union threshold. Optional with default value 0 meaning keep all boxes.
|
||||
|
||||
* **5**: `score_threshold` - scalar or 1D tensor with 1 element of type *T_THRESHOLDS* specifying minimum score to consider box for the processing. Optional with default value 0.
|
||||
|
||||
* **6**: `soft_nms_sigma` - scalar or 1D tensor with 1 element of type *T_THRESHOLDS* specifying the sigma parameter for Soft-NMS; see [Bodla et al](https://arxiv.org/abs/1704.04503). Optional with default value 0.
|
||||
|
||||
**Outputs**:
|
||||
|
||||
* **1**: `selected_indices` - tensor of type *output_type* and shape `[number of selected boxes, 3]` containing information about selected boxes as triplets `[batch_index, class_index, box_index]`.
|
||||
|
||||
* **2**: `selected_scores` - tensor of type *T_THRESHOLDS* and shape `[number of selected boxes, 3]` containing information about scores for each selected box as triplets `[batch_index, class_index, box_score]`.
|
||||
|
||||
* **3**: `valid_outputs` - 1D tensor with 1 element of type *output_type* representing the total number of selected boxes.
|
||||
|
||||
Plugins which do not support dynamic output tensors produce `selected_indices` and `selected_scores` tensors of shape `[min(num_boxes, max_output_boxes_per_class) * num_batches * num_classes, 3]` which is an upper bound for the number of possible selected boxes. Output tensor elements following the really selected boxes are filled with value -1.
|
||||
|
||||
**Types**
|
||||
|
||||
* *T*: floating-point type.
|
||||
|
||||
* *T_MAX_BOXES*: integer type.
|
||||
|
||||
* *T_THRESHOLDS*: floating-point type.
|
||||
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<layer ... type="NonMaxSuppression" ... >
|
||||
<data box_encoding="corner" sort_result_descending="1" output_type="i64"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>3</dim>
|
||||
<dim>100</dim>
|
||||
<dim>4</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>100</dim>
|
||||
</port>
|
||||
<port id="2"/> <!-- 10 -->
|
||||
<port id="3"/>
|
||||
<port id="4"/>
|
||||
<port id="5"/>
|
||||
</input>
|
||||
<output>
|
||||
<port id="6" precision="I64">
|
||||
<dim>150</dim> <!-- min(100, 10) * 3 * 5 -->
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
<port id="7" precision="FP32">
|
||||
<dim>150</dim> <!-- min(100, 10) * 3 * 5 -->
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
<port id="8" precision="I64">
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
```
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user