Compare commits
4 Commits
dependabot
...
dependabot
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8cf742e67d | ||
|
|
d722fd82fb | ||
|
|
75a8cd8a0a | ||
|
|
321f426d52 |
5
.github/ISSUE_TEMPLATE/good_first_issue.yml
vendored
5
.github/ISSUE_TEMPLATE/good_first_issue.yml
vendored
@@ -41,7 +41,10 @@ body:
|
||||
Any materials related to the task, such as operator specifications,
|
||||
discussions, guides.
|
||||
value: |
|
||||
- [Contribution guide - start here!](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING.md)
|
||||
- [What is OpenVINO?](https://github.com/openvinotoolkit/openvino#what-is-openvino-toolkit)
|
||||
- [Contribution guide](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING.md)
|
||||
- [Blog post on contributing to OpenVINO](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING.md)
|
||||
- [User documentation](https://docs.openvino.ai/)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
|
||||
4
.github/actions/setup_python/action.yml
vendored
4
.github/actions/setup_python/action.yml
vendored
@@ -29,9 +29,9 @@ runs:
|
||||
run: apt-get update && apt-get install -y ca-certificates software-properties-common
|
||||
|
||||
- if: ${{ runner.os == 'Linux' && runner.arch == 'ARM64' }}
|
||||
name: Setup sudo and python3
|
||||
name: Setup sudo
|
||||
shell: bash
|
||||
run: apt-get update && apt-get install -y sudo python3 # Needed for the deadsnakes action
|
||||
run: apt-get update && apt-get install -y sudo # Needed for the deadsnakes action
|
||||
|
||||
- if: ${{ runner.os == 'Linux' && runner.arch == 'ARM64' }}
|
||||
name: Setup Python ${{ inputs.version }}
|
||||
|
||||
17
.github/workflows/android_arm64.yml
vendored
17
.github/workflows/android_arm64.yml
vendored
@@ -5,7 +5,6 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- 'releases/**'
|
||||
|
||||
concurrency:
|
||||
# github.ref is not unique in post-commit
|
||||
@@ -46,7 +45,7 @@ jobs:
|
||||
container:
|
||||
image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
|
||||
volumes:
|
||||
- /mount:/mount
|
||||
- /mount/caches:/mount/caches
|
||||
options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
@@ -170,17 +169,3 @@ jobs:
|
||||
|
||||
- name: Show ccache stats
|
||||
run: ${SCCACHE_PATH} --show-stats
|
||||
|
||||
Overall_Status:
|
||||
name: ci/gha_overall_status_android
|
||||
needs: [Smart_CI, Build]
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check status of all jobs
|
||||
if: >-
|
||||
${{
|
||||
contains(needs.*.result, 'failure') ||
|
||||
contains(needs.*.result, 'cancelled')
|
||||
}}
|
||||
run: exit 1
|
||||
|
||||
10
.github/workflows/build_doc.yml
vendored
10
.github/workflows/build_doc.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
||||
packages: graphviz texlive liblua5.2-0 libclang1-9 libclang-cpp9
|
||||
version: 3.0
|
||||
|
||||
- uses: actions/setup-python@v5
|
||||
- uses: actions/setup-python@v4
|
||||
id: cp310
|
||||
with:
|
||||
python-version: '3.10'
|
||||
@@ -68,19 +68,19 @@ jobs:
|
||||
echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV
|
||||
|
||||
- name: 'Upload doxygen.log'
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: doxygen_build_log_${{ env.PR_NUMBER }}.log
|
||||
path: build/docs/doxygen.log
|
||||
|
||||
- name: 'Upload sphinx.log'
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sphinx_build_log_${{ env.PR_NUMBER }}.log
|
||||
path: build/docs/sphinx.log
|
||||
|
||||
- name: 'Upload docs html'
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_docs_html_${{ env.PR_NUMBER }}.zip
|
||||
path: build/docs/openvino_docs_html.zip
|
||||
@@ -99,7 +99,7 @@ jobs:
|
||||
|
||||
- name: 'Upload test results'
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_docs_pytest
|
||||
path: build/docs/_artifacts/
|
||||
|
||||
2
.github/workflows/cleanup_pip_cache.yml
vendored
2
.github/workflows/cleanup_pip_cache.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
||||
container:
|
||||
image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
|
||||
volumes:
|
||||
- /mount:/mount
|
||||
- /mount/caches:/mount/caches
|
||||
env:
|
||||
PIP_CACHE_PATH: /mount/caches/pip
|
||||
|
||||
|
||||
2
.github/workflows/coverage.yml
vendored
2
.github/workflows/coverage.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Setup python
|
||||
uses: actions/setup-python@v5
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10.10'
|
||||
architecture: 'x64'
|
||||
|
||||
141
.github/workflows/coverity.yml
vendored
141
.github/workflows/coverity.yml
vendored
@@ -1,141 +0,0 @@
|
||||
name: Coverity (Ubuntu 20.04, Python 3.11)
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# run daily at 00:00
|
||||
- cron: '0 0 * * *'
|
||||
|
||||
concurrency:
|
||||
# github.ref is not unique in post-commit
|
||||
group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-linux-coverity
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
PIP_CACHE_PATH: /mount/caches/pip/linux
|
||||
PYTHON_VERSION: '3.11'
|
||||
|
||||
jobs:
|
||||
Build:
|
||||
timeout-minutes: 150
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
runs-on: aks-linux-16-cores-32gb
|
||||
container:
|
||||
image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
|
||||
volumes:
|
||||
- /mount/caches:/mount/caches
|
||||
options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
CMAKE_GENERATOR: 'Ninja Multi-Config'
|
||||
CMAKE_CXX_COMPILER_LAUNCHER: sccache
|
||||
CMAKE_C_COMPILER_LAUNCHER: sccache
|
||||
GITHUB_WORKSPACE: '/__w/openvino/openvino'
|
||||
OPENVINO_REPO: /__w/openvino/openvino/openvino
|
||||
OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib
|
||||
BUILD_DIR: /__w/openvino/openvino/openvino_build
|
||||
SCCACHE_AZURE_KEY_PREFIX: coverity_ubuntu20_x86_64
|
||||
COVERITY_TOOL_DIR: /__w/openvino/openvino/coverity_tool
|
||||
|
||||
steps:
|
||||
- name: Install git
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get install --assume-yes --no-install-recommends git ca-certificates
|
||||
|
||||
- name: Clone OpenVINO
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: ${{ env.OPENVINO_REPO }}
|
||||
submodules: 'true'
|
||||
|
||||
- name: Clone OpenVINO Contrib
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: 'openvinotoolkit/openvino_contrib'
|
||||
path: ${{ env.OPENVINO_CONTRIB_REPO }}
|
||||
submodules: 'true'
|
||||
ref: 'master'
|
||||
|
||||
#
|
||||
# Dependencies
|
||||
#
|
||||
|
||||
- name: Install build dependencies
|
||||
run: |
|
||||
bash ${OPENVINO_REPO}/install_build_dependencies.sh
|
||||
# default-jdk - Java API
|
||||
apt install --assume-yes --no-install-recommends default-jdk
|
||||
|
||||
- name: Install sccache
|
||||
uses: mozilla-actions/sccache-action@v0.0.3
|
||||
with:
|
||||
version: "v0.5.4"
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
pip-cache-path: ${{ env.PIP_CACHE_PATH }}
|
||||
should-setup-pip-paths: 'true'
|
||||
self-hosted-runner: 'true'
|
||||
|
||||
#
|
||||
# Build
|
||||
#
|
||||
|
||||
- name: CMake configure - OpenVINO
|
||||
run: |
|
||||
cmake \
|
||||
-G "${{ env.CMAKE_GENERATOR }}" \
|
||||
-DENABLE_CPPLINT=OFF \
|
||||
-DENABLE_STRICT_DEPENDENCIES=OFF \
|
||||
-DENABLE_SYSTEM_TBB=ON \
|
||||
-DENABLE_SYSTEM_OPENCL=ON \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON \
|
||||
-DCPACK_GENERATOR=TGZ \
|
||||
-DBUILD_nvidia_plugin=OFF \
|
||||
-DOPENVINO_EXTRA_MODULES=${OPENVINO_CONTRIB_REPO}/modules \
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \
|
||||
-S ${OPENVINO_REPO} \
|
||||
-B ${BUILD_DIR}
|
||||
|
||||
- name: Clean sccache stats
|
||||
run: ${SCCACHE_PATH} --zero-stats
|
||||
|
||||
- name: Install Coverity tool
|
||||
run: |
|
||||
rm -rf ${COVERITY_TOOL_DIR} && mkdir -p ${COVERITY_TOOL_DIR}
|
||||
pushd ${COVERITY_TOOL_DIR}
|
||||
wget https://scan.coverity.com/download/linux64 --progress=bar:force:noscroll --post-data "token=${{ secrets.COVERITY_TOKEN }}&project=openvino" -O coverity_tool.tgz
|
||||
tar xvf coverity_tool.tgz && rm coverity_tool.tgz
|
||||
popd
|
||||
|
||||
- name: Cmake build - OpenVINO with Coverity
|
||||
run: |
|
||||
${COVERITY_TOOL_DIR}/cov-analysis*/bin/cov-build --dir ${BUILD_DIR}/cov-int \
|
||||
cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }}
|
||||
|
||||
- name: Show sccache stats
|
||||
run: ${SCCACHE_PATH} --show-stats
|
||||
|
||||
- name: Pack Artefacts
|
||||
run: |
|
||||
pushd ${BUILD_DIR}
|
||||
tar -C ${BUILD_DIR} -czvf openvino.tgz cov-int
|
||||
popd
|
||||
|
||||
- name: Submit artefacts
|
||||
run: |
|
||||
apt-get update && apt-get install -y curl
|
||||
pushd ${BUILD_DIR}
|
||||
curl --form token=${{ secrets.COVERITY_TOKEN }} \
|
||||
--form email=${{ secrets.COVERITY_USER }} \
|
||||
--form file=@openvino.tgz \
|
||||
--form version="${{ github.sha }}" \
|
||||
--form description="https://github.com/openvinotoolkit/openvino/runs/${{ github.run_number }}" \
|
||||
https://scan.coverity.com/builds?project=openvino
|
||||
popd
|
||||
22
.github/workflows/fedora.yml
vendored
22
.github/workflows/fedora.yml
vendored
@@ -46,7 +46,7 @@ jobs:
|
||||
container:
|
||||
image: fedora:33
|
||||
volumes:
|
||||
- /mount:/mount
|
||||
- /mount/caches:/mount/caches
|
||||
options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
@@ -166,7 +166,7 @@ jobs:
|
||||
|
||||
- name: Upload openvino package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz
|
||||
@@ -174,7 +174,7 @@ jobs:
|
||||
|
||||
- name: Upload openvino RPM packages
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_rpm_packages
|
||||
path: ${{ env.BUILD_DIR }}/*.rpm
|
||||
@@ -182,7 +182,7 @@ jobs:
|
||||
|
||||
- name: Upload openvino tests package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz
|
||||
@@ -242,17 +242,3 @@ jobs:
|
||||
python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6'
|
||||
benchmark_app --help
|
||||
ovc --help
|
||||
|
||||
Overall_Status:
|
||||
name: ci/gha_overall_status_fedora
|
||||
needs: [Smart_CI, Build, RPM_Packages]
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check status of all jobs
|
||||
if: >-
|
||||
${{
|
||||
contains(needs.*.result, 'failure') ||
|
||||
contains(needs.*.result, 'cancelled')
|
||||
}}
|
||||
run: exit 1
|
||||
|
||||
124
.github/workflows/job_cpu_functional_tests.yml
vendored
124
.github/workflows/job_cpu_functional_tests.yml
vendored
@@ -1,124 +0,0 @@
|
||||
name: CPU functional tests
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
image:
|
||||
description: 'Docker image in which the tests would run'
|
||||
type: string
|
||||
required: false
|
||||
default: null
|
||||
|
||||
jobs:
|
||||
CPU_Functional_Tests:
|
||||
name: CPU functional tests
|
||||
timeout-minutes: 25
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container:
|
||||
image: ${{ inputs.image }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
PARALLEL_TEST_SCRIPT: ${{ github.workspace }}/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py
|
||||
PARALLEL_TEST_CACHE: ${{ github.workspace }}/install/tests/test_cache.lst
|
||||
steps:
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: |
|
||||
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
|
||||
echo "PARALLEL_TEST_SCRIPT=$GITHUB_WORKSPACE/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py" >> "$GITHUB_ENV"
|
||||
echo "PARALLEL_TEST_CACHE=$GITHUB_WORKSPACE/install/tests/test_cache.lst" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd $INSTALL_DIR
|
||||
tar -xzf openvino_package.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
|
||||
pushd $INSTALL_TEST_DIR
|
||||
tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
|
||||
- name: Install OpenVINO dependencies (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -c=gpu -y
|
||||
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Setup Python 3.11
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: '3.11'
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: ${{ runner.os == 'Linux' }}
|
||||
|
||||
- name: Install python dependencies for run_parallel.py
|
||||
run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt
|
||||
|
||||
- name: Restore tests execution time
|
||||
uses: actions/cache/restore@v3
|
||||
with:
|
||||
path: ${{ env.PARALLEL_TEST_CACHE }}
|
||||
key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp
|
||||
|
||||
- name: Intel CPU plugin func tests (parallel)
|
||||
run: |
|
||||
# Needed as the Linux CC does not require setupvars to work
|
||||
if [[ -f "${INSTALL_DIR}/setupvars.sh" ]]; then
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
fi
|
||||
|
||||
python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
|
||||
timeout-minutes: 20
|
||||
|
||||
- name: Save tests execution time
|
||||
uses: actions/cache/save@v3
|
||||
if: github.ref_name == 'master'
|
||||
with:
|
||||
path: ${{ env.PARALLEL_TEST_CACHE }}
|
||||
key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }}
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-functional-cpu
|
||||
path: |
|
||||
${{ env.INSTALL_TEST_DIR }}/temp/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv
|
||||
${{ env.PARALLEL_TEST_CACHE }}
|
||||
if-no-files-found: 'error'
|
||||
279
.github/workflows/job_cxx_unit_tests.yml
vendored
279
.github/workflows/job_cxx_unit_tests.yml
vendored
@@ -1,279 +0,0 @@
|
||||
name: Samples
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
image:
|
||||
description: 'Docker image in which the tests would run'
|
||||
type: string
|
||||
required: false
|
||||
default: null
|
||||
affected-components:
|
||||
description: 'Components that are affected by changes in the commit defined by the Smart CI Action'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
CXX_Unit_Tests:
|
||||
name: C++ unit tests
|
||||
timeout-minutes: 30
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container:
|
||||
image: ${{ inputs.image }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
steps:
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: |
|
||||
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd $INSTALL_DIR
|
||||
tar -xzf openvino_package.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
pushd $INSTALL_TEST_DIR
|
||||
tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
|
||||
- name: Install OpenVINO dependencies (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -c=gpu -y
|
||||
|
||||
#
|
||||
# Tests
|
||||
#
|
||||
|
||||
- name: OpenVINO Core Unit Tests
|
||||
if: fromJSON(inputs.affected-components).Core.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVCoreUT.xml
|
||||
|
||||
- name: OpenVINO Inference Functional Tests
|
||||
if: fromJSON(inputs.affected-components).inference.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_inference_functional_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceFunc.xml
|
||||
|
||||
- name: OpenVINO Inference Unit Tests
|
||||
if: fromJSON(inputs.affected-components).inference.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_inference_unit_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceUnit.xml
|
||||
|
||||
- name: Low Precision Transformations Tests
|
||||
if: fromJSON(inputs.affected-components).LP_transformations.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
|
||||
${INSTALL_TEST_DIR}/ov_lp_transformations_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LpTransformations.xml
|
||||
|
||||
- name: OpenVINO Conditional compilation tests
|
||||
if: fromJSON(inputs.affected-components).Core.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_conditional_compilation_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ConditionalCompilation.xml
|
||||
|
||||
- name: IR frontend tests
|
||||
if: fromJSON(inputs.affected-components).IR_FE.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_ir_frontend_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-IRFrontend.xml
|
||||
|
||||
- name: PaddlePaddle frontend tests
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/paddle_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-PaddleTests.xml
|
||||
|
||||
- name: ONNX frontend tests
|
||||
if: ${{ fromJSON(inputs.affected-components).ONNX_FE.test && runner.arch != 'ARM64' }} # Ticket for macOS ARM64: 122663, for Linux ARM64: 126280
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_onnx_frontend_tests --gtest_print_time=1 \
|
||||
--gtest_filter=-*IE_GPU* \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ONNXFrontend.xml
|
||||
|
||||
- name: TensorFlow Common frontend tests
|
||||
if: fromJSON(inputs.affected-components).TF_FE.test ||
|
||||
fromJSON(inputs.affected-components).TFL_FE.test &&
|
||||
(runner.os != 'macOS' && runner.arch != 'ARM64')
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_tensorflow_common_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowCommonFrontend.xml
|
||||
|
||||
- name: TensorFlow frontend tests
|
||||
if: fromJSON(inputs.affected-components).TF_FE.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
|
||||
${INSTALL_TEST_DIR}/ov_tensorflow_frontend_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowFrontend.xml
|
||||
|
||||
- name: TensorFlow Lite frontend tests
|
||||
if: fromJSON(inputs.affected-components).TFL_FE.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowLiteFrontend.xml
|
||||
|
||||
- name: Transformations func tests
|
||||
if: ${{ fromJSON(inputs.affected-components).transformations.test && runner.arch != 'ARM64' }} # Ticket: 126281
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
|
||||
${INSTALL_TEST_DIR}/ov_transformations_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-Transformations.xml
|
||||
|
||||
- name: Legacy Transformations func tests
|
||||
if: fromJSON(inputs.affected-components).GNA.test &&
|
||||
(runner.os != 'macOS' && runner.arch != 'ARM64')
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_legacy_transformations_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LegacyTransformations.xml
|
||||
|
||||
- name: Inference Engine 1.0 unit tests
|
||||
if: fromJSON(inputs.affected-components).GNA.test &&
|
||||
(runner.os != 'macOS' && runner.arch != 'ARM64')
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/InferenceEngineUnitTests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineUnitTests.xml
|
||||
|
||||
- name: Common test utils tests
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_util_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CommonUtilTests.xml
|
||||
|
||||
- name: Snippets func tests
|
||||
if: fromJSON(inputs.affected-components).CPU.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_snippets_func_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-SnippetsFuncTests.xml
|
||||
|
||||
- name: CPU plugin unit tests
|
||||
if: fromJSON(inputs.affected-components).CPU.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_cpu_unit_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CPUUnitTests.xml
|
||||
|
||||
- name: ov_subgraphs_dumper_tests tests
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_subgraphs_dumper_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_subgraphs_dumper_tests.xml
|
||||
|
||||
- name: Template OpImpl tests
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_op_conformance_tests --gtest_print_time=1 --device=TEMPLATE --gtest_filter=*OpImpl*\
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpImplTests.xml
|
||||
|
||||
- name: AUTO unit tests
|
||||
if: fromJSON(inputs.affected-components).AUTO.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml
|
||||
|
||||
- name: AUTO func Tests
|
||||
if: fromJSON(inputs.affected-components).AUTO.test
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml
|
||||
|
||||
- name: Template plugin func tests
|
||||
if: fromJSON(inputs.affected-components).TEMPLATE.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_template_func_tests --gtest_print_time=1 \
|
||||
--gtest_filter=*smoke* \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml
|
||||
|
||||
- name: Inference Engine C API tests
|
||||
if: fromJSON(inputs.affected-components).C_API.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/InferenceEngineCAPITests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineCAPITests.xml
|
||||
|
||||
- name: OpenVINO C API tests
|
||||
if: fromJSON(inputs.affected-components).C_API.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_capi_test --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpenVINOCAPITests.xml
|
||||
|
||||
- name: AutoBatch unit tests
|
||||
if: fromJSON(inputs.affected-components).AUTO_BATCH.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_auto_batch_unit_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_unit_tests.xml
|
||||
|
||||
- name: AutoBatch func tests
|
||||
if: fromJSON(inputs.affected-components).AUTO_BATCH.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_auto_batch_func_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_func_tests.xml
|
||||
|
||||
- name: Proxy Plugin func tests
|
||||
if: fromJSON(inputs.affected-components).PROXY.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVProxyTests.xml
|
||||
|
||||
- name: Hetero unit tests
|
||||
if: fromJSON(inputs.affected-components).HETERO.test
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml
|
||||
|
||||
- name: Hetero func tests
|
||||
if: fromJSON(inputs.affected-components).HETERO.test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
${INSTALL_TEST_DIR}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVHeteroFuncTests.xml
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-cpp
|
||||
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
if-no-files-found: 'warn'
|
||||
83
.github/workflows/job_debian_packages.yml
vendored
83
.github/workflows/job_debian_packages.yml
vendored
@@ -1,83 +0,0 @@
|
||||
name: Debian Packages
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
image:
|
||||
description: 'Docker image in which the tests would run'
|
||||
type: string
|
||||
required: false
|
||||
default: null
|
||||
|
||||
jobs:
|
||||
Debian_Packages:
|
||||
name: Debian Packages
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container:
|
||||
image: ${{ inputs.image }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
DEBIAN_PACKAGES_DIR: ${{ github.workspace }}/packages
|
||||
steps:
|
||||
|
||||
- name: Download OpenVINO debian packages
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_debian_packages
|
||||
path: ${{ env.DEBIAN_PACKAGES_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: echo "DEBIAN_PACKAGES_DIR=$GITHUB_WORKSPACE/packages" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Install debian packages & check conflicts
|
||||
run: |
|
||||
apt-get update -y
|
||||
|
||||
if [[ "${{ runner.arch }}" == "X64" ]]; then
|
||||
# Install debian packages from previous release
|
||||
apt-get install --no-install-recommends -y gnupg wget ca-certificates
|
||||
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
|
||||
apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
|
||||
echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | tee /etc/apt/sources.list.d/intel-openvino-2023.list
|
||||
apt-get update -y
|
||||
apt-get install -y openvino
|
||||
fi
|
||||
|
||||
# install our local one and make sure the conflicts are resolved
|
||||
apt-get install --no-install-recommends -y dpkg-dev
|
||||
dpkg-scanpackages . /dev/null | gzip -9c > Packages.gz
|
||||
echo "deb [trusted=yes] file:${DEBIAN_PACKAGES_DIR} ./" | tee /etc/apt/sources.list.d/openvino-local.list
|
||||
apt-get update -y
|
||||
apt-get install openvino -y
|
||||
working-directory: ${{ env.DEBIAN_PACKAGES_DIR }}
|
||||
|
||||
- name: Test debian packages
|
||||
run: |
|
||||
/usr/share/openvino/samples/cpp/build_samples.sh
|
||||
/usr/share/openvino/samples/c/build_samples.sh
|
||||
|
||||
[[ "${{ runner.arch }}" == "X64" ]] && path_by_arch="intel64" || path_by_arch="aarch64"
|
||||
~/openvino_cpp_samples_build/$path_by_arch/Release/hello_query_device
|
||||
|
||||
python3 /usr/share/openvino/samples/python/hello_query_device/hello_query_device.py
|
||||
python3 -c 'from openvino import Core; Core().get_property("CPU", "AVAILABLE_DEVICES")'
|
||||
|
||||
if [[ "${{ runner.arch }}" == "X64" ]]; then
|
||||
python3 -c 'from openvino import Core; Core().get_property("GPU", "AVAILABLE_DEVICES")'
|
||||
fi
|
||||
|
||||
python3 -c 'from openvino import Core; Core().get_property("AUTO", "SUPPORTED_METRICS")'
|
||||
python3 -c 'from openvino import Core; Core().get_property("MULTI", "SUPPORTED_METRICS")'
|
||||
python3 -c 'from openvino import Core; Core().get_property("HETERO", "SUPPORTED_METRICS")'
|
||||
python3 -c 'from openvino import Core; Core().get_property("BATCH", "SUPPORTED_METRICS")'
|
||||
python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6'
|
||||
benchmark_app --help
|
||||
ovc --help
|
||||
157
.github/workflows/job_onnx_runtime.yml
vendored
157
.github/workflows/job_onnx_runtime.yml
vendored
@@ -1,157 +0,0 @@
|
||||
name: ONNX Runtime Integration
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
container:
|
||||
description: 'JSON to be converted to the value of the "container" configuration for the job'
|
||||
type: string
|
||||
required: false
|
||||
default: '{"image": null}'
|
||||
sccache-azure-key-prefix:
|
||||
description: 'Key prefix for the cache folder on the Azure'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
ONNX_Runtime:
|
||||
name: ONNX Runtime Integration
|
||||
timeout-minutes: 60
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container: ${{ fromJSON(inputs.container) }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
CMAKE_GENERATOR: 'Ninja Multi-Config'
|
||||
CMAKE_CXX_COMPILER_LAUNCHER: sccache
|
||||
CMAKE_C_COMPILER_LAUNCHER: sccache
|
||||
SCCACHE_AZURE_KEY_PREFIX: ${{ inputs.sccache-azure-key-prefix }}
|
||||
ONNX_RUNTIME_REPO: ${{ github.workspace }}/onnxruntime
|
||||
ONNX_RUNTIME_UTILS: ${{ github.workspace }}/install/onnxruntime
|
||||
ONNX_RUNTIME_BUILD_DIR: ${{ github.workspace }}/onnxruntime/build
|
||||
steps:
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: |
|
||||
echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
|
||||
echo "ONNX_RUNTIME_REPO=$GITHUB_WORKSPACE/onnxruntime" >> "$GITHUB_ENV"
|
||||
echo "ONNX_RUNTIME_UTILS=$GITHUB_WORKSPACE/install/onnxruntime" >> "$GITHUB_ENV"
|
||||
echo "ONNX_RUNTIME_BUILD_DIR=$GITHUB_WORKSPACE/onnxruntime/build" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Fetch install_build_dependencies.sh and setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
install_build_dependencies.sh
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Install git
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get install --assume-yes --no-install-recommends git ca-certificates
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: '3.11'
|
||||
should-setup-pip-paths: 'false'
|
||||
|
||||
- name: Extract OpenVINO package
|
||||
run: |
|
||||
pushd ${INSTALL_DIR}
|
||||
tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
|
||||
popd
|
||||
|
||||
- name: Install OpenVINO dependencies
|
||||
run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y
|
||||
|
||||
- name: Clone ONNX Runtime
|
||||
run: |
|
||||
branch=`tr -s '\n ' < ${ONNX_RUNTIME_UTILS}/version`
|
||||
git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git ${ONNX_RUNTIME_REPO}
|
||||
|
||||
#
|
||||
# Tests
|
||||
#
|
||||
|
||||
- name: Install Build Dependencies
|
||||
run: bash ${OPENVINO_REPO}/install_build_dependencies.sh
|
||||
|
||||
- name: Install sccache
|
||||
uses: mozilla-actions/sccache-action@v0.0.3
|
||||
with:
|
||||
version: "v0.5.4"
|
||||
|
||||
- name: Build Lin ONNX Runtime
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
|
||||
${ONNX_RUNTIME_REPO}/build.sh \
|
||||
--config RelWithDebInfo \
|
||||
--use_openvino CPU_FP32 \
|
||||
--build_shared_lib \
|
||||
--parallel \
|
||||
--skip_tests \
|
||||
--compile_no_warning_as_error \
|
||||
--build_dir ${ONNX_RUNTIME_BUILD_DIR}
|
||||
env:
|
||||
CXXFLAGS: "-Wno-error=deprecated-declarations"
|
||||
|
||||
- name: Show sccache stats
|
||||
run: ${SCCACHE_PATH} --show-stats
|
||||
|
||||
- name: Run onnxruntime_test_all
|
||||
if: ${{ runner.arch != 'ARM64' }} # Ticket: 126277
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
skip_tests=$(tr -s '\n ' ':' < ${ONNX_RUNTIME_UTILS}/skip_tests)
|
||||
|
||||
./onnxruntime_test_all --gtest_filter=-$skip_tests
|
||||
working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
|
||||
|
||||
- name: Run onnxruntime_shared_lib_test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library
|
||||
working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
|
||||
|
||||
- name: Run onnxruntime_global_thread_pools_test
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
./onnxruntime_global_thread_pools_test
|
||||
working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
|
||||
|
||||
- name: Run onnxruntime_api_tests_without_env
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
./onnxruntime_api_tests_without_env
|
||||
working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
|
||||
|
||||
- name: Run pytorch-converted tests
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-converted"
|
||||
working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
|
||||
|
||||
- name: Run pytorch-operator tests
|
||||
run: |
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-operator"
|
||||
working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
|
||||
323
.github/workflows/job_python_unit_tests.yml
vendored
323
.github/workflows/job_python_unit_tests.yml
vendored
@@ -1,323 +0,0 @@
|
||||
name: Python unit tests
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
container:
|
||||
description: 'JSON to be converted to the value of the "container" configuration for the job'
|
||||
type: string
|
||||
required: false
|
||||
default: '{"image": null}'
|
||||
affected-components:
|
||||
description: 'Components that are affected by changes in the commit defined by the Smart CI Action'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
env:
|
||||
PIP_CACHE_PATH: /mount/caches/pip/linux
|
||||
PYTHON_VERSION: '3.11'
|
||||
|
||||
jobs:
|
||||
Python_Unit_Tests:
|
||||
name: Python unit tests
|
||||
timeout-minutes: 60
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container: ${{ fromJSON(inputs.container) }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests
|
||||
steps:
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: |
|
||||
echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
|
||||
echo "LAYER_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/layer_tests" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd $INSTALL_DIR
|
||||
tar -xzf openvino_package.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
pushd $INSTALL_TEST_DIR
|
||||
tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
|
||||
- name: Install OpenVINO dependencies (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y
|
||||
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
pip-cache-path: ${{ runner.os == 'Linux' && env.PIP_CACHE_PATH || '' }}
|
||||
should-setup-pip-paths: ${{ runner.os == 'Linux' }}
|
||||
self-hosted-runner: ${{ runner.os == 'Linux' }}
|
||||
|
||||
#
|
||||
# Tests
|
||||
#
|
||||
|
||||
- name: Install OpenVINO Python wheels
|
||||
run: |
|
||||
# Install the core OV wheel
|
||||
python3 -m pip install ${INSTALL_DIR}/tools/openvino-*.whl
|
||||
|
||||
extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch"
|
||||
|
||||
if [[ "${{ runner.arch }}" != "ARM64" ]]; then
|
||||
extras_to_install="mxnet,$extras_to_install"
|
||||
fi
|
||||
|
||||
# Find and install OV dev wheel
|
||||
pushd ${INSTALL_DIR}/tools
|
||||
ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl')
|
||||
python3 -m pip install $ov_dev_wheel_name[$extras_to_install]
|
||||
popd
|
||||
|
||||
- name: Install Python API tests dependencies
|
||||
run: |
|
||||
# To enable pytest parallel features
|
||||
python3 -m pip install pytest-xdist[psutil]
|
||||
# For torchvision to OpenVINO preprocessing converter
|
||||
python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt
|
||||
|
||||
# TODO: replace with Python API tests requirements
|
||||
python3 -m pip install -r ${INSTALL_TEST_DIR}/mo/requirements_dev.txt
|
||||
|
||||
#
|
||||
# Tests
|
||||
#
|
||||
|
||||
- name: Python API 1.0 Tests
|
||||
# if: fromJSON(inputs.affected-components).Python_API.test # Ticket: 127101
|
||||
run: |
|
||||
python3 -m pytest -s ${INSTALL_TEST_DIR}/pyngraph \
|
||||
--junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \
|
||||
--ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \
|
||||
--ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py
|
||||
|
||||
- name: Python API 2.0 Tests
|
||||
# if: ${{ fromJSON(inputs.affected-components).Python_API.test && runner.arch != 'ARM64' }} # Ticket: 126380, 127101
|
||||
run: |
|
||||
# for 'template' extension
|
||||
export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH
|
||||
python3 -m pytest -sv ${INSTALL_TEST_DIR}/pyopenvino \
|
||||
--junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \
|
||||
--ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_utils/test_utils.py
|
||||
|
||||
- name: Model Optimizer unit tests
|
||||
if: fromJSON(inputs.affected-components).MO.test
|
||||
run: |
|
||||
skip_filter=''
|
||||
if [[ "${{ runner.os }}" != "Linux" ]] && [[ "${{ runner.arch }} != "ARM64" ]] || [[ "${{ runner.os }} != "macOS" ]]; then
|
||||
# required for MxNet
|
||||
apt-get install -y libgomp1 libquadmath0
|
||||
else
|
||||
# Skips under Ticket: 122666
|
||||
skip_filter='--ignore-glob=**/mo/unit_tests/mo/front/mxnet/**'
|
||||
fi
|
||||
|
||||
python3 -m pytest -s ${INSTALL_TEST_DIR}/mo/unit_tests \
|
||||
--junitxml=${INSTALL_TEST_DIR}/TEST-ModelOptimizer.xml \
|
||||
"$skip_filter"
|
||||
|
||||
- name: Python ONNX operators tests
|
||||
if: fromJSON(inputs.affected-components).Python_API.test ||
|
||||
fromJSON(inputs.affected-components).ONNX_FE.test && runner.os != 'macOS' # Ticket: 123325
|
||||
run: |
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately
|
||||
python3 -m pytest -sv ${INSTALL_TEST_DIR}/onnx -k 'not cuda' \
|
||||
--junitxml=${INSTALL_TEST_DIR}/TEST-onnx_frontend.xml \
|
||||
--ignore=${INSTALL_TEST_DIR}/onnx/test_python/test_zoo_models.py
|
||||
|
||||
- name: OVC unit tests
|
||||
if: fromJSON(inputs.affected-components).MO.test
|
||||
run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml
|
||||
|
||||
- name: Install Python Layer tests dependencies
|
||||
run: |
|
||||
# layer test requirements
|
||||
python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt
|
||||
|
||||
- name: MO Python API Tests
|
||||
if: fromJSON(inputs.affected-components).MO.test
|
||||
run: |
|
||||
# Import 'test_utils' installed in '<package_test>/tests/python/openvino'
|
||||
export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH
|
||||
export PYTHONPATH=${INSTALL_TEST_DIR}/python
|
||||
|
||||
if [[ "${{ runner.os }}" == "Linux" ]] && [[ "${{ runner.arch }}" == "ARM64" ]]; then
|
||||
# Find gomp lib
|
||||
GOMP_LIB=$(find "${PIP_INSTALL_PATH}/torch/lib/../../torch.libs/" -name '*libgomp-*so*')
|
||||
export LD_PRELOAD=${GOMP_LIB}
|
||||
fi
|
||||
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/mo_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_mo_convert.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: OVC Python API Tests
|
||||
if: fromJSON(inputs.affected-components).MO.test
|
||||
run: |
|
||||
# Import 'test_utils' installed in '<package_test>/tests/python/openvino'
|
||||
export PYTHONPATH=${INSTALL_TEST_DIR}/python
|
||||
export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH
|
||||
|
||||
if [[ "${{ runner.os }}" == "Linux" ]] && [[ "${{ runner.arch }}" == "ARM64" ]]; then
|
||||
# Find gomp lib
|
||||
GOMP_LIB=$(find "${PIP_INSTALL_PATH}/torch/lib/../../torch.libs/" -name '*libgomp-*so*')
|
||||
export LD_PRELOAD=${GOMP_LIB}
|
||||
fi
|
||||
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/ovc_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_ovc_convert.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: Python Frontend tests
|
||||
if: fromJSON(inputs.affected-components).PyTorch_FE.test ||
|
||||
fromJSON(inputs.affected-components).PDPD_FE.test
|
||||
run: |
|
||||
# to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so'
|
||||
export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml
|
||||
|
||||
- name: PyTorch Layer Tests
|
||||
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.arch != 'ARM64' }} # Ticket: 126287
|
||||
run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -n logical -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP32
|
||||
|
||||
- name: PyTorch torch.compile TORCHFX Layer Tests
|
||||
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.os != 'macOS' }}
|
||||
run: |
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_fx_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP32
|
||||
PYTORCH_TRACING_MODE: TORCHFX
|
||||
|
||||
- name: PyTorch torch.compile TORCHSCRIPT Layer Tests
|
||||
if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.os != 'macOS' }}
|
||||
run: |
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_ts_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP32
|
||||
PYTORCH_TRACING_MODE: TORCHSCRIPT
|
||||
|
||||
- name: ONNX Layer Tests
|
||||
if: fromJSON(inputs.affected-components).ONNX_FE.test
|
||||
run: |
|
||||
# requires 'unit_tests' from 'tools/mo'
|
||||
export PYTHONPATH=${INSTALL_TEST_DIR}/mo:$PYTHONPATH
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow 1 Layer Tests - TF FE
|
||||
if: fromJSON(inputs.affected-components).TF_FE.test
|
||||
run: |
|
||||
# requires 'unit_tests' from 'mo'
|
||||
export PYTHONPATH=${INSTALL_TEST_DIR}/mo
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow 2 Layer Tests - TF FE
|
||||
if: fromJSON(inputs.affected-components).TF_FE.test && runner.os != 'macOS' # Ticket: 123322
|
||||
run: |
|
||||
# requires 'unit_tests' from 'mo'
|
||||
export PYTHONPATH=${INSTALL_TEST_DIR}/mo
|
||||
python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_fe.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: JAX Layer Tests - TF FE
|
||||
if: ${{ fromJSON(inputs.affected-components).TF_FE.test && runner.arch != 'ARM64' }}
|
||||
run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/jax_tests/ -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-jax.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
|
||||
- name: TensorFlow 1 Layer Tests - Legacy FE
|
||||
if: fromJSON(inputs.affected-components).TF_FE.test
|
||||
run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${INSTALL_TEST_DIR}/TEST-tf_Roll.xml
|
||||
|
||||
- name: TensorFlow 2 Layer Tests - Legacy FE
|
||||
if: fromJSON(inputs.affected-components).TF_FE.test
|
||||
run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/test_tf2_keras_activation.py --ir_version=11 -k "sigmoid" --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_Activation.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow Lite Layer Tests - TFL FE
|
||||
if: fromJSON(inputs.affected-components).TFL_FE.test
|
||||
run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_lite_tests/ --junitxml=${INSTALL_TEST_DIR}/TEST-tfl_fe.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: Clone API snippets
|
||||
if: runner.os != 'macOS'
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: openvino/docs/snippets
|
||||
path: ${{ env.OPENVINO_REPO }}
|
||||
submodules: 'false'
|
||||
|
||||
- name: Docs Python snippets
|
||||
if: runner.os != 'macOS'
|
||||
run: |
|
||||
# to find 'snippets' module in docs
|
||||
export PYTHONPATH=${OPENVINO_REPO}/docs
|
||||
# for 'template' extension
|
||||
export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH
|
||||
python3 ${OPENVINO_REPO}/docs/snippets/main.py
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-python
|
||||
path: |
|
||||
${{ env.INSTALL_TEST_DIR }}/TEST*.html
|
||||
${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
if-no-files-found: 'warn'
|
||||
132
.github/workflows/job_pytorch_models_tests.yml
vendored
132
.github/workflows/job_pytorch_models_tests.yml
vendored
@@ -1,132 +0,0 @@
|
||||
name: PyTorch Models tests
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
container:
|
||||
description: 'JSON to be converted to the value of the "container" configuration for the job'
|
||||
type: string
|
||||
required: false
|
||||
default: '{"image": null}'
|
||||
event:
|
||||
description: 'Event that triggered the workflow. E.g., "schedule" for nightly runs'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
PyTorch_Models_Tests:
|
||||
name: PyTorch Models tests
|
||||
timeout-minutes: ${{ inputs.event == 'schedule' && 400 || 30 }}
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container: ${{ fromJSON(inputs.container) }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
|
||||
steps:
|
||||
|
||||
- name: Check sudo
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi
|
||||
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: |
|
||||
echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
|
||||
echo "MODEL_HUB_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/model_hub_tests" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd ${INSTALL_DIR}
|
||||
tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
|
||||
popd
|
||||
|
||||
pushd ${INSTALL_TEST_DIR}
|
||||
tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
|
||||
popd
|
||||
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Install dependencies
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
run: |
|
||||
# install git (required to build pip deps from the sources)
|
||||
# install 'g++' to build 'detectron2' and 'natten' wheels
|
||||
sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates
|
||||
|
||||
- name: Setup Python 3.11
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: '3.11'
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: ${{ contains(inputs.runner, 'aks') }}
|
||||
|
||||
- name: Install OpenVINO Python wheels
|
||||
run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
|
||||
|
||||
- name: Install PyTorch tests requirements
|
||||
run: |
|
||||
python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements.txt
|
||||
python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements_secondary.txt
|
||||
echo "Available storage:"
|
||||
df -h
|
||||
env:
|
||||
CPLUS_INCLUDE_PATH: ${{ env.Python_ROOT_DIR }}/include/python${{ env.PYTHON_VERSION }}
|
||||
|
||||
- name: PyTorch Models Tests
|
||||
run: |
|
||||
export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
|
||||
python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v
|
||||
env:
|
||||
TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}}
|
||||
TEST_DEVICE: CPU
|
||||
USE_SYSTEM_CACHE: False
|
||||
OP_REPORT_FILE: ${{ env.INSTALL_TEST_DIR }}/TEST-torch_unsupported_ops.log
|
||||
|
||||
- name: Reformat unsupported ops file
|
||||
if: '!cancelled()'
|
||||
run: |
|
||||
python3 ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/scripts/process_op_report.py ${INSTALL_TEST_DIR}/TEST-torch_unsupported_ops.log
|
||||
|
||||
- name: Available storage after tests
|
||||
run: |
|
||||
echo "Available storage:"
|
||||
df -h
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-torch-models
|
||||
path: |
|
||||
${{ env.INSTALL_TEST_DIR }}/TEST-torch*
|
||||
if-no-files-found: 'error'
|
||||
132
.github/workflows/job_samples_tests.yml
vendored
132
.github/workflows/job_samples_tests.yml
vendored
@@ -1,132 +0,0 @@
|
||||
name: Samples
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
image:
|
||||
description: 'Docker image in which the tests would run'
|
||||
type: string
|
||||
required: false
|
||||
default: null
|
||||
affected-components:
|
||||
description: 'Components that are affected by changes in the commit defined by the Smart CI Action'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
Samples:
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container:
|
||||
image: ${{ inputs.image }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
BUILD_DIR: ${{ github.workspace }}/build
|
||||
steps:
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: |
|
||||
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
|
||||
echo "BUILD_DIR=$GITHUB_WORKSPACE/build" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd $INSTALL_DIR
|
||||
tar -xzf openvino_package.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
pushd $INSTALL_TEST_DIR
|
||||
tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR
|
||||
popd
|
||||
|
||||
- name: Install OpenVINO dependencies (Linux)
|
||||
if: runner.os == 'Linux'
|
||||
run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y
|
||||
|
||||
- name: Install OpenVINO dependencies (mac)
|
||||
if: runner.os == 'macOS'
|
||||
run: brew install coreutils
|
||||
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Setup Python 3.11
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: '3.11'
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: ${{ runner.os == 'Linux' }}
|
||||
|
||||
- name: Build cpp samples - GCC
|
||||
run: $INSTALL_DIR/samples/cpp/build_samples.sh -i $INSTALL_DIR -b $BUILD_DIR/cpp_samples
|
||||
env:
|
||||
CMAKE_COMPILE_WARNING_AS_ERROR: 'ON'
|
||||
|
||||
- name: Build cpp samples - Clang
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
apt-get install -y clang
|
||||
$INSTALL_DIR/samples/cpp/build_samples.sh -i $INSTALL_DIR -b $BUILD_DIR/cpp_samples_clang
|
||||
env:
|
||||
CMAKE_COMPILE_WARNING_AS_ERROR: 'ON'
|
||||
CC: clang
|
||||
CXX: clang++
|
||||
|
||||
- name: Build c samples
|
||||
run: $INSTALL_DIR/samples/c/build_samples.sh -i $INSTALL_DIR -b $BUILD_DIR/c_samples
|
||||
env:
|
||||
CMAKE_COMPILE_WARNING_AS_ERROR: 'ON'
|
||||
|
||||
#
|
||||
# Tests
|
||||
#
|
||||
|
||||
- name: Samples tests
|
||||
if: fromJSON(inputs.affected-components).samples.test
|
||||
run: |
|
||||
export WORKSPACE=$INSTALL_DIR
|
||||
export IE_APP_PATH=$INSTALL_DIR/samples_bin
|
||||
export IE_APP_PYTHON_PATH=$INSTALL_DIR/samples/python
|
||||
export SHARE=$INSTALL_TEST_DIR/smoke_tests/samples_smoke_tests_data
|
||||
|
||||
python3 -m pip install --ignore-installed PyYAML -r $INSTALL_TEST_DIR/smoke_tests/requirements.txt
|
||||
export LD_LIBRARY_PATH=${IE_APP_PATH}:$LD_LIBRARY_PATH
|
||||
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
|
||||
python3 -m pytest -sv $INSTALL_TEST_DIR/smoke_tests \
|
||||
--env_conf $INSTALL_TEST_DIR/smoke_tests/env_config.yml \
|
||||
--junitxml=$INSTALL_TEST_DIR/TEST-SamplesSmokeTests.xml
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-samples
|
||||
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
if-no-files-found: 'warn'
|
||||
@@ -1,113 +0,0 @@
|
||||
name: TensorFlow Hub Models tests
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'Machine on which the tests would run'
|
||||
type: string
|
||||
required: true
|
||||
container:
|
||||
description: 'JSON to be converted to the value of the "container" configuration for the job'
|
||||
type: string
|
||||
required: false
|
||||
default: '{"image": null}'
|
||||
event:
|
||||
description: 'Event that triggered the workflow. E.g., "schedule" for nightly runs'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
TensorFlow_Hub_Models_Tests:
|
||||
name: TensorFlow Hub Models tests
|
||||
timeout-minutes: ${{ inputs.event == 'schedule' && 400 || 25 }}
|
||||
runs-on: ${{ inputs.runner }}
|
||||
container: ${{ fromJSON(inputs.container) }}
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
|
||||
steps:
|
||||
|
||||
- name: Check sudo
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi
|
||||
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
# Needed as ${{ github.workspace }} is not working correctly when using Docker
|
||||
- name: Setup Variables
|
||||
run: |
|
||||
echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
|
||||
echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
|
||||
echo "MODEL_HUB_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/model_hub_tests" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd ${INSTALL_DIR}
|
||||
tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
|
||||
popd
|
||||
|
||||
pushd ${INSTALL_TEST_DIR}
|
||||
tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
|
||||
popd
|
||||
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Install dependencies
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
run: |
|
||||
# install git (required to build pip deps from the sources)
|
||||
sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates
|
||||
|
||||
- name: Setup Python 3.11
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: '3.11'
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: ${{ contains(inputs.runner, 'aks') }}
|
||||
|
||||
- name: Install OpenVINO Python wheels
|
||||
run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
|
||||
|
||||
- name: Install TF Hub tests requirements
|
||||
run: python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt
|
||||
|
||||
- name: TensorFlow Hub Tests - TF FE
|
||||
run: |
|
||||
export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
|
||||
python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-tf_hub_tf_fe.html --self-contained-html -v
|
||||
env:
|
||||
TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}}
|
||||
TEST_DEVICE: CPU
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-tensorflow-hub-models
|
||||
path: |
|
||||
${{ env.INSTALL_TEST_DIR }}/TEST*.html
|
||||
if-no-files-found: 'error'
|
||||
1225
.github/workflows/linux.yml
vendored
1225
.github/workflows/linux.yml
vendored
File diff suppressed because it is too large
Load Diff
1245
.github/workflows/linux_arm64.yml
vendored
1245
.github/workflows/linux_arm64.yml
vendored
File diff suppressed because it is too large
Load Diff
120
.github/workflows/linux_conditional_compilation.yml
vendored
120
.github/workflows/linux_conditional_compilation.yml
vendored
@@ -5,7 +5,6 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- 'releases/**'
|
||||
|
||||
concurrency:
|
||||
# github.ref is not unique in post-commit
|
||||
@@ -50,7 +49,7 @@ jobs:
|
||||
container:
|
||||
image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
|
||||
volumes:
|
||||
- /mount:/mount
|
||||
- /mount/caches:/mount/caches
|
||||
options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
@@ -61,7 +60,6 @@ jobs:
|
||||
GITHUB_WORKSPACE: '/__w/openvino/openvino'
|
||||
OPENVINO_REPO: /__w/openvino/openvino/openvino
|
||||
INSTALL_DIR: /__w/openvino/openvino/openvino_install
|
||||
INSTALL_TEST_DIR: /__w/openvino/openvino/tests_install
|
||||
BUILD_DIR: /__w/openvino/openvino/openvino_build
|
||||
SELECTIVE_BUILD_STAT_DIR: /__w/openvino/openvino/selective_build_stat
|
||||
MODELS_PATH: /__w/openvino/openvino/testdata
|
||||
@@ -165,9 +163,7 @@ jobs:
|
||||
run: ${SCCACHE_PATH} --show-stats
|
||||
|
||||
- name: Cmake install - OpenVINO
|
||||
run: |
|
||||
cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake
|
||||
cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_TEST_DIR} -DCOMPONENT=tests -P ${BUILD_DIR}/cmake_install.cmake
|
||||
run: cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake
|
||||
|
||||
- name: Build C++ samples - OpenVINO build tree
|
||||
run: |
|
||||
@@ -193,29 +189,16 @@ jobs:
|
||||
tar -czvf ${BUILD_DIR}/openvino_selective_build_stat.tar.gz *
|
||||
popd
|
||||
|
||||
pushd ${INSTALL_DIR}
|
||||
tar -czvf ${BUILD_DIR}/openvino_package.tar.gz \
|
||||
install_dependencies/install_openvino_dependencies.sh
|
||||
popd
|
||||
|
||||
pushd ${INSTALL_TEST_DIR}
|
||||
pushd ${OPENVINO_REPO}
|
||||
tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz \
|
||||
tests/ov_cpu_func_tests \
|
||||
tests/libtemplate_extension.so \
|
||||
tests/functional_test_utils/layer_tests_summary/*
|
||||
bin/intel64/Release/ov_cpu_func_tests \
|
||||
src/tests/test_utils/functional_test_utils/layer_tests_summary/* \
|
||||
scripts/install_dependencies/*
|
||||
popd
|
||||
|
||||
- name: Upload openvino package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz
|
||||
if-no-files-found: 'error'
|
||||
|
||||
- name: Upload selective build statistics package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_selective_build_stat
|
||||
path: ${{ env.BUILD_DIR }}/openvino_selective_build_stat.tar.gz
|
||||
@@ -223,7 +206,7 @@ jobs:
|
||||
|
||||
- name: Upload OpenVINO tests package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz
|
||||
@@ -240,7 +223,7 @@ jobs:
|
||||
container:
|
||||
image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
|
||||
volumes:
|
||||
- /mount:/mount
|
||||
- /mount/caches:/mount/caches
|
||||
options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
@@ -327,23 +310,74 @@ jobs:
|
||||
|
||||
CPU_Functional_Tests:
|
||||
name: CPU functional tests
|
||||
needs: [Build, Smart_CI]
|
||||
timeout-minutes: 25
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
runs-on: aks-linux-8-cores-32gb
|
||||
container:
|
||||
image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
|
||||
env:
|
||||
OPENVINO_REPO: /__w/openvino/openvino/openvino
|
||||
DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
|
||||
INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests
|
||||
PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py
|
||||
PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
|
||||
needs: [ Build, Smart_CI ]
|
||||
uses: ./.github/workflows/job_cpu_functional_tests.yml
|
||||
with:
|
||||
runner: 'aks-linux-8-cores-32gb'
|
||||
image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04'
|
||||
|
||||
Overall_Status:
|
||||
name: ci/gha_overall_status_linux_cc
|
||||
needs: [Smart_CI, Build, CC_Build, CPU_Functional_Tests]
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check status of all jobs
|
||||
if: >-
|
||||
${{
|
||||
contains(needs.*.result, 'failure') ||
|
||||
contains(needs.*.result, 'cancelled')
|
||||
}}
|
||||
run: exit 1
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
- name: Extract OpenVINO tests package
|
||||
run: tar -xvzf ${INSTALL_TEST_DIR}/openvino_tests.tar.gz -C ${INSTALL_TEST_DIR}
|
||||
|
||||
- name: Install OpenVINO dependencies
|
||||
run: bash ${INSTALL_TEST_DIR}/scripts/install_dependencies/install_openvino_dependencies.sh -c=core -c=gpu -y
|
||||
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: ${{ env.OPENVINO_REPO }}
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
should-setup-pip-paths: 'false'
|
||||
|
||||
- name: Install python dependencies for run_parallel.py
|
||||
run: python3 -m pip install -r ${INSTALL_TEST_DIR}/src/tests/test_utils/functional_test_utils/layer_tests_summary/requirements.txt
|
||||
|
||||
- name: Restore tests execution time
|
||||
uses: actions/cache/restore@v3
|
||||
with:
|
||||
path: ${{ env.PARALLEL_TEST_CACHE }}
|
||||
key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-tests-functional-cpu-stamp
|
||||
|
||||
- name: Intel CPU plugin func tests (parallel)
|
||||
run: python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/bin/intel64/Release/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
|
||||
timeout-minutes: 20
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-functional-cpu
|
||||
path: |
|
||||
${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/disabled_tests.log
|
||||
if-no-files-found: 'error'
|
||||
|
||||
16
.github/workflows/linux_riscv.yml
vendored
16
.github/workflows/linux_riscv.yml
vendored
@@ -49,7 +49,7 @@ jobs:
|
||||
container:
|
||||
image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
|
||||
volumes:
|
||||
- /mount:/mount
|
||||
- /mount/caches:/mount/caches
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
CMAKE_GENERATOR: 'Ninja'
|
||||
@@ -206,17 +206,3 @@ jobs:
|
||||
source ${OPENVINO_BUILD_DIR}/dependencies/deactivate_conanbuild.sh
|
||||
env:
|
||||
CMAKE_TOOLCHAIN_FILE: ${{ env.OPENVINO_BUILD_DIR }}/dependencies/conan_toolchain.cmake
|
||||
|
||||
Overall_Status:
|
||||
name: ci/gha_overall_status_linux_riscv
|
||||
needs: [Smart_CI, Build]
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check status of all jobs
|
||||
if: >-
|
||||
${{
|
||||
contains(needs.*.result, 'failure') ||
|
||||
contains(needs.*.result, 'cancelled')
|
||||
}}
|
||||
run: exit 1
|
||||
|
||||
709
.github/workflows/mac.yml
vendored
709
.github/workflows/mac.yml
vendored
@@ -33,41 +33,27 @@ env:
|
||||
PYTHON_VERSION: '3.11'
|
||||
|
||||
jobs:
|
||||
|
||||
Smart_CI:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
affected_components: "${{ steps.smart_ci.outputs.affected_components }}"
|
||||
skip_workflow: "${{ steps.smart_ci.outputs.skip_workflow }}"
|
||||
steps:
|
||||
- name: checkout action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: .github/actions/smart-ci
|
||||
|
||||
- name: Get affected components
|
||||
id: smart_ci
|
||||
uses: ./.github/actions/smart-ci
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
pr: ${{ github.event.number }}
|
||||
commit_sha: ${{ github.sha }}
|
||||
component_pattern: "category: (.*)"
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
skip_when_only_listed_labels_set: 'docs'
|
||||
skip_when_only_listed_files_changed: '*.md,*.rst,*.png,*.jpg,*.svg'
|
||||
|
||||
Build:
|
||||
needs: Smart_CI
|
||||
timeout-minutes: 150
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
runs-on: 'macos-13-large'
|
||||
strategy:
|
||||
max-parallel: 2
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- arhitecture: 'x86_64'
|
||||
machine: 'macos-13-large'
|
||||
macos_deployment_target: '10.12'
|
||||
- arhitecture: 'arm64'
|
||||
machine: 'macos-13-xlarge'
|
||||
macos_deployment_target: '11.0'
|
||||
runs-on: ${{ matrix.machine }}
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
CMAKE_GENERATOR: 'Ninja Multi-Config'
|
||||
MACOSX_DEPLOYMENT_TARGET: '10.12'
|
||||
MACOSX_DEPLOYMENT_TARGET: ${{ matrix.macos_deployment_target }}
|
||||
CMAKE_CXX_COMPILER_LAUNCHER: ccache
|
||||
CMAKE_C_COMPILER_LAUNCHER: ccache
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
@@ -139,9 +125,9 @@ jobs:
|
||||
# github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push
|
||||
save: ${{ github.ref_name == 'master' && 'true' || 'false' }}
|
||||
verbose: 2
|
||||
key: ${{ runner.os }}-${{ runner.arch }}-main
|
||||
key: ${{ runner.os }}-${{ matrix.arhitecture }}-main
|
||||
restore-keys: |
|
||||
${{ runner.os }}-${{ runner.arch }}-main
|
||||
${{ runner.os }}-${{ matrix.arhitecture }}-main
|
||||
|
||||
- name: CMake configure
|
||||
run: |
|
||||
@@ -196,49 +182,660 @@ jobs:
|
||||
|
||||
- name: Upload openvino package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
name: openvino_package_${{ matrix.arhitecture }}
|
||||
path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz
|
||||
if-no-files-found: 'error'
|
||||
|
||||
- name: Upload openvino tests package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
name: openvino_tests_${{ matrix.arhitecture }}
|
||||
path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz
|
||||
if-no-files-found: 'error'
|
||||
|
||||
Samples:
|
||||
needs: [ Build, Smart_CI ]
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).samples
|
||||
uses: ./.github/workflows/job_samples_tests.yml
|
||||
with:
|
||||
runner: 'macos-13'
|
||||
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
|
||||
needs: Build
|
||||
timeout-minutes: 5
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
max-parallel: 2
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- arhitecture: 'x86_64'
|
||||
machine: 'macos-13'
|
||||
- arhitecture: 'arm64'
|
||||
machine: 'macos-13-xlarge'
|
||||
runs-on: ${{ matrix.machine }}
|
||||
env:
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
BUILD_DIR: ${{ github.workspace }}/build
|
||||
|
||||
steps:
|
||||
|
||||
#
|
||||
# Initialize OpenVINO
|
||||
#
|
||||
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd ${INSTALL_DIR}
|
||||
tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
|
||||
popd
|
||||
|
||||
pushd ${INSTALL_TEST_DIR}
|
||||
tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
|
||||
popd
|
||||
|
||||
- name: Install dependencies
|
||||
run: brew install coreutils
|
||||
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Build cpp samples
|
||||
run: ${INSTALL_DIR}/samples/cpp/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/cpp_samples
|
||||
env:
|
||||
CMAKE_COMPILE_WARNING_AS_ERROR: 'ON'
|
||||
|
||||
- name: Build c samples
|
||||
run: ${INSTALL_DIR}/samples/c/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/c_samples
|
||||
env:
|
||||
CMAKE_COMPILE_WARNING_AS_ERROR: 'ON'
|
||||
|
||||
#
|
||||
# Tests
|
||||
#
|
||||
|
||||
- name: Samples tests
|
||||
run: |
|
||||
export WORKSPACE=${INSTALL_DIR}
|
||||
export IE_APP_PATH=${INSTALL_DIR}/samples_bin
|
||||
export IE_APP_PYTHON_PATH=${INSTALL_DIR}/samples/python
|
||||
export SHARE=${INSTALL_TEST_DIR}/smoke_tests/samples_smoke_tests_data
|
||||
|
||||
python3 -m pip install --ignore-installed PyYAML -r ${INSTALL_TEST_DIR}/smoke_tests/requirements.txt
|
||||
|
||||
source ${INSTALL_DIR}/setupvars.sh
|
||||
|
||||
python3 -m pytest -sv ${INSTALL_TEST_DIR}/smoke_tests \
|
||||
--env_conf ${INSTALL_TEST_DIR}/smoke_tests/env_config.yml \
|
||||
--junitxml=${INSTALL_TEST_DIR}/TEST-SamplesSmokeTests.xml
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-samples-${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
if-no-files-found: 'error'
|
||||
|
||||
CXX_Unit_Tests:
|
||||
name: C++ unit tests
|
||||
needs: [ Build, Smart_CI ]
|
||||
uses: ./.github/workflows/job_cxx_unit_tests.yml
|
||||
with:
|
||||
runner: 'macos-13'
|
||||
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
|
||||
name: C++ Unit tests
|
||||
needs: Build
|
||||
timeout-minutes: 20
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
max-parallel: 2
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- arhitecture: 'x86_64'
|
||||
machine: 'macos-13'
|
||||
- arhitecture: 'arm64'
|
||||
machine: 'macos-13-xlarge'
|
||||
runs-on: ${{ matrix.machine }}
|
||||
env:
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
|
||||
steps:
|
||||
#
|
||||
# Dependencies
|
||||
#
|
||||
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd ${{ env.INSTALL_DIR }}
|
||||
tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz || exit 1
|
||||
popd
|
||||
pushd ${{ env.INSTALL_TEST_DIR }}
|
||||
tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz || exit 1
|
||||
popd
|
||||
|
||||
#
|
||||
# Tests
|
||||
#
|
||||
|
||||
- name: OpenVINO Core Unit Tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml
|
||||
|
||||
- name: OpenVINO Inference Functional Tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml
|
||||
|
||||
- name: OpenVINO Inference Unit Tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml
|
||||
|
||||
- name: Low Precision Transformations Tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
|
||||
# Skips under Ticket: 122660
|
||||
skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*smoke_LPT/FoldFakeQuantizeInTransformations.CompareFunctions*' || '' }}
|
||||
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 "$skip_filter" \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml
|
||||
|
||||
- name: OpenVINO Conditional compilation tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml
|
||||
|
||||
- name: IR frontend tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml
|
||||
|
||||
- name: PaddlePaddle frontend tests
|
||||
if: ${{ 'false' }}
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml
|
||||
|
||||
- name: ONNX frontend tests
|
||||
if: ${{ matrix.arhitecture == 'x86_64' }} # Ticket for ARM64: 122663
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml
|
||||
|
||||
- name: TensorFlow Common tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml
|
||||
|
||||
- name: TensorFlow frontend tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
|
||||
# Skips under Ticket: 122666
|
||||
skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*CompileModelsTests.ModelWithSplitConvConcat*:*NgramCompilation*' || '' }}
|
||||
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 "$skip_filter" \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml
|
||||
|
||||
- name: TensorFlow Lite frontend tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml
|
||||
|
||||
- name: Transformations func tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
|
||||
# Skips under Ticket: 122668
|
||||
skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*TransformationTestsF.CompressQuantizeWeights*:*TransformationTests/CompressQuantizeWeightsTests.FusionTest*' || '' }}
|
||||
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 "$skip_filter" \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml
|
||||
|
||||
- name: Common test utils tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml
|
||||
|
||||
- name: Snippets func tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml
|
||||
|
||||
- name: CPU plugin unit tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml
|
||||
|
||||
- name: ov_subgraphs_dumper_tests tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_subgraphs_dumper_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_subgraphs_dumper_tests.xml
|
||||
|
||||
- name: Template OpImpl tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --device=TEMPLATE --gtest_filter="*OpImpl*" \
|
||||
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateOpImplTests.xml
|
||||
|
||||
- name: AUTO unit tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml
|
||||
|
||||
- name: AUTO func Tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml
|
||||
|
||||
- name: Template plugin func tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 \
|
||||
--gtest_filter=*smoke* \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml
|
||||
|
||||
- name: Inference Engine C API tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineCAPITests.xml
|
||||
|
||||
- name: OpenVINO C API tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 \
|
||||
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml
|
||||
|
||||
- name: AutoBatch unit tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml
|
||||
|
||||
- name: AutoBatch func tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml
|
||||
|
||||
- name: Proxy Plugin func tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml
|
||||
|
||||
- name: Hetero unit tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml
|
||||
|
||||
- name: Hetero func tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ always() }}
|
||||
with:
|
||||
name: test-results-cpp-${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
if-no-files-found: 'error'
|
||||
|
||||
Python_Unit_Tests:
|
||||
name: Python unit tests
|
||||
needs: [ Build, Smart_CI ]
|
||||
uses: ./.github/workflows/job_python_unit_tests.yml
|
||||
with:
|
||||
runner: 'macos-13'
|
||||
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
|
||||
needs: Build
|
||||
timeout-minutes: 55
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
max-parallel: 2
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- arhitecture: 'x86_64'
|
||||
machine: 'macos-13'
|
||||
- arhitecture: 'arm64'
|
||||
machine: 'macos-13-xlarge'
|
||||
runs-on: ${{ matrix.machine }}
|
||||
env:
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
OPENVINO_CONTRIB_REPO: ${{ github.workspace }}/openvino_contrib
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests
|
||||
steps:
|
||||
- name: Fetch setup_python action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/actions/setup_python/action.yml
|
||||
sparse-checkout-cone-mode: false
|
||||
path: 'openvino'
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
#
|
||||
# Dependencies
|
||||
#
|
||||
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd ${{ env.INSTALL_DIR }}
|
||||
tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }}
|
||||
popd
|
||||
|
||||
pushd ${{ env.INSTALL_TEST_DIR }}
|
||||
tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }}
|
||||
popd
|
||||
|
||||
- name: Install OpenVINO Python wheels
|
||||
run: |
|
||||
# Install the core OV wheel
|
||||
python3 -m pip install ${{ env.INSTALL_DIR }}/tools/openvino-*.whl
|
||||
|
||||
# mxnet is only available on x86_64
|
||||
extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch"
|
||||
if [[ "${{ matrix.arhitecture }}" == "x86_64" ]]; then
|
||||
extras_to_install="mxnet,$extras_to_install"
|
||||
fi
|
||||
|
||||
# Find and install OV dev wheel
|
||||
pushd ${{ env.INSTALL_DIR }}/tools
|
||||
ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl')
|
||||
python3 -m pip install $ov_dev_wheel_name[$extras_to_install]
|
||||
popd
|
||||
|
||||
- name: Install Python API tests dependencies
|
||||
run: |
|
||||
# For torchvision to OpenVINO preprocessing converter
|
||||
python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/python/preprocess/torchvision/requirements.txt
|
||||
|
||||
# TODO: replace with Python API tests requirements
|
||||
python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt
|
||||
|
||||
- name: Python API 1.0 Tests
|
||||
run: |
|
||||
python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \
|
||||
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \
|
||||
--ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \
|
||||
--ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_backend.py
|
||||
|
||||
- name: Python API 2.0 Tests
|
||||
run: |
|
||||
# For python imports to import pybind_mock_frontend
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH
|
||||
# for 'template' extension
|
||||
export DYLD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$DYLD_LIBRARY_PATH
|
||||
|
||||
python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino \
|
||||
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \
|
||||
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py
|
||||
|
||||
- name: MO Python API Tests
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
# Used for 'test_utils' installed in '<test_package>/python/openvino/test_utils'
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/python/openvino/test_utils:${{ env.INSTALL_TEST_DIR }}/python:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: OVC Python API Tests
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
# Used for 'test_utils' installed in '<test_package>/python/openvino/test_utils'
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/python/openvino/test_utils:${{ env.INSTALL_TEST_DIR }}/python:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/ovc_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_ovc_convert.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: Model Optimizer unit tests
|
||||
run: |
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH
|
||||
python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests \
|
||||
--ignore=${{ env.INSTALL_TEST_DIR }}/mo/unit_tests/mo/front/mxnet \
|
||||
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml
|
||||
|
||||
- name: PyTorch Layer Tests
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
export PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: ONNX Layer Tests
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-onnx.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow 1 Layer Tests - TF FE
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
|
||||
- name: TensorFlow 2 Layer Tests - TF FE
|
||||
if: ${{ 'false' }} # Ticket: 123322
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_fe.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
|
||||
- name: TensorFlow 1 Layer Tests - Legacy FE
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml
|
||||
|
||||
- name: TensorFlow 2 Layer Tests - Legacy FE
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/test_tf2_keras_activation.py \
|
||||
--ir_version=11 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_Activation.xml -k "sigmoid"
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow Lite Layer Tests - TFL FE
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_lite_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tfl_fe.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: Python ONNX operators tests
|
||||
if: ${{ 'false' }} # Ticket: 123325
|
||||
run: |
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately
|
||||
python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/onnx -k 'not cuda' \
|
||||
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-onnx_frontend.xml \
|
||||
--ignore=${{ env.INSTALL_TEST_DIR }}/onnx/test_python/test_zoo_models.py
|
||||
|
||||
- name: Python Frontend tests
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
|
||||
|
||||
# to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so'
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml
|
||||
|
||||
# TODO: install to 'tests' component via cpack
|
||||
- name: OVC unit tests
|
||||
run: python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/ovc/unit_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-OpenVinoConversion.xml
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ always() }}
|
||||
with:
|
||||
name: test-results-python-${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
if-no-files-found: 'error'
|
||||
|
||||
CPU_Functional_Tests:
|
||||
name: CPU functional tests
|
||||
# if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
|
||||
if: ${{ 'false' }} # Ticket: 122001
|
||||
needs: [ Build, Smart_CI ]
|
||||
uses: ./.github/workflows/job_cpu_functional_tests.yml
|
||||
with:
|
||||
runner: 'macos-13'
|
||||
needs: Build
|
||||
timeout-minutes: 25
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
max-parallel: 2
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
# ticket: 122001
|
||||
# - arhitecture: 'x86_64'
|
||||
# machine: 'macos-13'
|
||||
- arhitecture: 'arm64'
|
||||
machine: 'macos-13-xlarge'
|
||||
runs-on: ${{ matrix.machine }}
|
||||
env:
|
||||
INSTALL_DIR: ${{ github.workspace }}/install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
|
||||
|
||||
steps:
|
||||
- name: Create Directories
|
||||
run: mkdir -p ${{ env.INSTALL_DIR }} ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_package_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Download OpenVINO tests package
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: openvino_tests_${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}
|
||||
|
||||
- name: Extract OpenVINO packages
|
||||
run: |
|
||||
pushd ${{ env.INSTALL_DIR }}
|
||||
tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz
|
||||
popd
|
||||
pushd ${{ env.INSTALL_TEST_DIR }}
|
||||
tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz
|
||||
popd
|
||||
|
||||
- name: CPU plugin func tests
|
||||
run: |
|
||||
source ${{ env.INSTALL_DIR }}/setupvars.sh
|
||||
|
||||
# Skips under Ticket: 122769
|
||||
skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*smoke_nonzero/NonZeroLayerTest.Inference/IS*:*smoke_NormalizeL2_*:*Extension.XmlModelWithExtensionFromDSO*:*Extension.OnnxModelWithExtensionFromDSO*:*ONNXQuantizedModels/QuantizedModelsTests.MaxPool*:*ONNXQuantizedModels/QuantizedModelsTests.Convolution*:**' || '' }}
|
||||
|
||||
${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* "$skip_filter" --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml"
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ always() }}
|
||||
with:
|
||||
name: test-results-functional-cpu-${{ matrix.arhitecture }}
|
||||
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
|
||||
if-no-files-found: 'error'
|
||||
|
||||
241
.github/workflows/mac_arm64.yml
vendored
241
.github/workflows/mac_arm64.yml
vendored
@@ -1,241 +0,0 @@
|
||||
name: macOS ARM64 (Python 3.11)
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# at 00:00 on workdays
|
||||
- cron: '0 0 * * 1,2,3,4,5'
|
||||
# pull_request:
|
||||
# paths-ignore:
|
||||
# - '**/docs/**'
|
||||
# - 'docs/**'
|
||||
# - '**/**.md'
|
||||
# - '**.md'
|
||||
# - '**/layer_tests_summary/**'
|
||||
# - '**/conformance/**'
|
||||
# push:
|
||||
# paths-ignore:
|
||||
# - '**/docs/**'
|
||||
# - 'docs/**'
|
||||
# - '**/**.md'
|
||||
# - '**.md'
|
||||
# - '**/layer_tests_summary/**'
|
||||
# - '**/conformance/**'
|
||||
# branches:
|
||||
# - master
|
||||
# - 'releases/**'
|
||||
|
||||
concurrency:
|
||||
# github.ref is not unique in post-commit
|
||||
group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-mac-arm64
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
PYTHON_VERSION: '3.11'
|
||||
|
||||
jobs:
|
||||
Smart_CI:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
affected_components: "${{ steps.smart_ci.outputs.affected_components }}"
|
||||
skip_workflow: "${{ steps.smart_ci.outputs.skip_workflow }}"
|
||||
steps:
|
||||
- name: checkout action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: .github/actions/smart-ci
|
||||
|
||||
- name: Get affected components
|
||||
id: smart_ci
|
||||
uses: ./.github/actions/smart-ci
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
pr: ${{ github.event.number }}
|
||||
commit_sha: ${{ github.sha }}
|
||||
component_pattern: "category: (.*)"
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
skip_when_only_listed_labels_set: 'docs'
|
||||
skip_when_only_listed_files_changed: '*.md,*.rst,*.png,*.jpg,*.svg'
|
||||
|
||||
Build:
|
||||
needs: Smart_CI
|
||||
timeout-minutes: 150
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
runs-on: 'macos-13-xlarge'
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
CMAKE_GENERATOR: 'Ninja Multi-Config'
|
||||
MACOSX_DEPLOYMENT_TARGET: '11.0'
|
||||
CMAKE_CXX_COMPILER_LAUNCHER: ccache
|
||||
CMAKE_C_COMPILER_LAUNCHER: ccache
|
||||
OPENVINO_REPO: ${{ github.workspace }}/openvino
|
||||
OPENVINO_CONTRIB_REPO: ${{ github.workspace }}/openvino_contrib
|
||||
INSTALL_DIR: ${{ github.workspace }}/openvino_install
|
||||
INSTALL_TEST_DIR: ${{ github.workspace }}/tests_install
|
||||
BUILD_DIR: ${{ github.workspace }}/build
|
||||
steps:
|
||||
- name: Clone OpenVINO
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
path: 'openvino'
|
||||
submodules: 'true'
|
||||
|
||||
- name: Clone OpenVINO Contrib
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: 'openvinotoolkit/openvino_contrib'
|
||||
path: 'openvino_contrib'
|
||||
|
||||
#
|
||||
# Print system info
|
||||
#
|
||||
|
||||
- name: System info
|
||||
uses: ./openvino/.github/actions/system_info
|
||||
|
||||
#
|
||||
# Dependencies
|
||||
#
|
||||
|
||||
- name: Install build dependencies
|
||||
run: brew install coreutils ninja scons
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
# For Python API
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt
|
||||
|
||||
# For running Python API tests
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
|
||||
|
||||
# For running ONNX frontend unit tests
|
||||
python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt
|
||||
|
||||
# For running TensorFlow frontend unit tests
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt
|
||||
|
||||
# For running Paddle frontend unit tests
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt
|
||||
|
||||
#
|
||||
# Build
|
||||
#
|
||||
|
||||
- name: Setup ccache
|
||||
uses: hendrikmuhs/ccache-action@v1.2
|
||||
with:
|
||||
max-size: "2000M"
|
||||
# Should save cache only if run in the master branch of the base repo
|
||||
# github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push
|
||||
save: ${{ github.ref_name == 'master' && 'true' || 'false' }}
|
||||
verbose: 2
|
||||
key: ${{ runner.os }}-${{ runner.arch }}-main
|
||||
restore-keys: |
|
||||
${{ runner.os }}-${{ runner.arch }}-main
|
||||
|
||||
- name: CMake configure
|
||||
run: |
|
||||
cmake \
|
||||
-G "${{ env.CMAKE_GENERATOR }}" \
|
||||
-DENABLE_CPPLINT=OFF \
|
||||
-DENABLE_NCC_STYLE=OFF \
|
||||
-DENABLE_TESTS=ON \
|
||||
-DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \
|
||||
-DENABLE_STRICT_DEPENDENCIES=OFF \
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \
|
||||
-S ${{ env.OPENVINO_REPO }} \
|
||||
-B ${{ env.BUILD_DIR }}
|
||||
|
||||
- name: Cmake build - OpenVINO
|
||||
run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }}
|
||||
|
||||
- name: Show ccache stats
|
||||
run: ccache --show-stats
|
||||
|
||||
- name: Cmake install - OpenVINO
|
||||
run: |
|
||||
cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake
|
||||
cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_TEST_DIR }} -DCOMPONENT=tests -P ${{ env.BUILD_DIR }}/cmake_install.cmake
|
||||
cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -DCOMPONENT=python_wheels -P ${{ env.BUILD_DIR }}/cmake_install.cmake
|
||||
|
||||
- name: Pack Artifacts
|
||||
run: |
|
||||
pushd ${{ env.INSTALL_DIR }}
|
||||
tar -czvf ${{ env.BUILD_DIR }}/openvino_package.tar.gz *
|
||||
popd
|
||||
|
||||
pushd ${{ env.INSTALL_TEST_DIR }}
|
||||
tar -czvf ${{ env.BUILD_DIR }}/openvino_tests.tar.gz *
|
||||
popd
|
||||
|
||||
- name: Cmake & Build - OpenVINO Contrib
|
||||
run: |
|
||||
cmake \
|
||||
-DBUILD_nvidia_plugin=OFF \
|
||||
-DBUILD_java_api=OFF \
|
||||
-DCUSTOM_OPERATIONS="calculate_grid;complex_mul;fft;grid_sample;sparse_conv;sparse_conv_transpose" \
|
||||
-DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}/modules \
|
||||
-S ${{ env.OPENVINO_REPO }} \
|
||||
-B ${{ env.BUILD_DIR }}
|
||||
cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }}
|
||||
|
||||
#
|
||||
# Upload build artifacts
|
||||
#
|
||||
|
||||
- name: Upload openvino package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz
|
||||
if-no-files-found: 'error'
|
||||
|
||||
- name: Upload openvino tests package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz
|
||||
if-no-files-found: 'error'
|
||||
|
||||
Samples:
|
||||
needs: Build
|
||||
uses: ./.github/workflows/job_samples_tests.yml
|
||||
with:
|
||||
runner: 'macos-13-xlarge'
|
||||
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
|
||||
|
||||
CXX_Unit_Tests:
|
||||
name: C++ unit tests
|
||||
needs: [ Build, Smart_CI ]
|
||||
uses: ./.github/workflows/job_cxx_unit_tests.yml
|
||||
with:
|
||||
runner: 'macos-13-xlarge'
|
||||
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
|
||||
|
||||
Python_Unit_Tests:
|
||||
name: Python unit tests
|
||||
needs: [ Build, Smart_CI ]
|
||||
uses: ./.github/workflows/job_python_unit_tests.yml
|
||||
with:
|
||||
runner: 'macos-13-xlarge'
|
||||
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
|
||||
|
||||
CPU_Functional_Tests:
|
||||
name: CPU functional tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
|
||||
needs: [ Build, Smart_CI ]
|
||||
uses: ./.github/workflows/job_cpu_functional_tests.yml
|
||||
with:
|
||||
runner: 'macos-13-xlarge'
|
||||
2
.github/workflows/mo.yml
vendored
2
.github/workflows/mo.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v5
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
|
||||
12
.github/workflows/py_checks.yml
vendored
12
.github/workflows/py_checks.yml
vendored
@@ -28,7 +28,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v5
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.8'
|
||||
|
||||
@@ -47,7 +47,7 @@ jobs:
|
||||
git diff > samples_diff.diff
|
||||
working-directory: samples/python
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: samples_diff
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
git diff > ie_python_diff.diff
|
||||
working-directory: src/bindings/python/src/compatibility/openvino
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: ie_python_diff
|
||||
@@ -83,7 +83,7 @@ jobs:
|
||||
git diff > pyngraph_diff.diff
|
||||
working-directory: src/bindings/python/src/compatibility/ngraph
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: pyngraph_diff
|
||||
@@ -101,7 +101,7 @@ jobs:
|
||||
git diff > pyopenvino_diff.diff
|
||||
working-directory: src/bindings/python/src/openvino
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: pyopenvino_diff
|
||||
@@ -119,7 +119,7 @@ jobs:
|
||||
git diff > wheel_diff.diff
|
||||
working-directory: src/bindings/python/wheel
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: wheel_diff
|
||||
|
||||
2
.github/workflows/stale_prs_and_issues.yml
vendored
2
.github/workflows/stale_prs_and_issues.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v9
|
||||
- uses: actions/stale@v8
|
||||
with:
|
||||
stale-issue-message: 'This issue will be closed in a week because of 9 months of no activity.'
|
||||
stale-pr-message: 'This PR will be closed in a week because of 2 weeks of no activity.'
|
||||
|
||||
2
.github/workflows/webassembly.yml
vendored
2
.github/workflows/webassembly.yml
vendored
@@ -35,7 +35,7 @@ jobs:
|
||||
container:
|
||||
image: emscripten/emsdk
|
||||
volumes:
|
||||
- /mount:/mount
|
||||
- /mount/caches:/mount/caches
|
||||
options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
|
||||
179
.github/workflows/windows.yml
vendored
179
.github/workflows/windows.yml
vendored
@@ -1,46 +1,34 @@
|
||||
name: Windows (VS 2019, Python 3.11)
|
||||
on:
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
# pull_request:
|
||||
# paths-ignore:
|
||||
# - '**/docs/**'
|
||||
# - 'docs/**'
|
||||
# - '**/**.md'
|
||||
# - '**.md'
|
||||
# - '**/layer_tests_summary/**'
|
||||
# - '**/conformance/**'
|
||||
push:
|
||||
paths-ignore:
|
||||
- '**/docs/**'
|
||||
- 'docs/**'
|
||||
- '**/**.md'
|
||||
- '**.md'
|
||||
- '**/layer_tests_summary/**'
|
||||
- '**/conformance/**'
|
||||
branches:
|
||||
- master
|
||||
- 'releases/**'
|
||||
concurrency:
|
||||
# github.ref is not unique in post-commit
|
||||
group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-windows
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
PIP_CACHE_PATH: /mount/caches/pip/win
|
||||
PYTHON_VERSION: '3.11'
|
||||
|
||||
jobs:
|
||||
Smart_CI:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
affected_components: "${{ steps.smart_ci.outputs.affected_components }}"
|
||||
skip_workflow: "${{ steps.smart_ci.outputs.skip_workflow }}"
|
||||
steps:
|
||||
- name: checkout action
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: .github/actions/smart-ci
|
||||
|
||||
- name: Get affected components
|
||||
id: smart_ci
|
||||
uses: ./.github/actions/smart-ci
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
pr: ${{ github.event.number }}
|
||||
commit_sha: ${{ github.sha }}
|
||||
component_pattern: "category: (.*)"
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
skip_when_only_listed_labels_set: 'docs'
|
||||
skip_when_only_listed_files_changed: '*.md,*.rst,*.png,*.jpg,*.svg,*/layer_tests_summary/*,*/conformance/*'
|
||||
|
||||
Build:
|
||||
needs: Smart_CI
|
||||
timeout-minutes: 180
|
||||
defaults:
|
||||
run:
|
||||
@@ -58,8 +46,6 @@ jobs:
|
||||
BUILD_DIR: "${{ github.workspace }}\\openvino_build"
|
||||
# TODO: specify version of compiler here
|
||||
SCCACHE_AZURE_KEY_PREFIX: windows2022_x86_64_Release
|
||||
if: "!needs.smart_ci.outputs.skip_workflow"
|
||||
|
||||
steps:
|
||||
- name: Clone OpenVINO
|
||||
uses: actions/checkout@v4
|
||||
@@ -89,10 +75,8 @@ jobs:
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
pip-cache-path: ${{ env.PIP_CACHE_PATH }}
|
||||
should-setup-pip-paths: 'true'
|
||||
self-hosted-runner: 'true'
|
||||
show-cache-info: 'true'
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
@@ -109,13 +93,9 @@ jobs:
|
||||
# For running TensorFlow Lite frontend unit tests
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow_lite/tests/requirements.txt
|
||||
|
||||
# Disabled because of CVS-95904
|
||||
# For running Paddle frontend unit tests
|
||||
# python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt
|
||||
|
||||
# For getting rid of SSL issues during model downloading for unit tests
|
||||
python3 -m pip install certifi
|
||||
|
||||
- name: Install sccache
|
||||
uses: mozilla-actions/sccache-action@v0.0.3
|
||||
with:
|
||||
@@ -131,9 +111,6 @@ jobs:
|
||||
- name: Configure Developer Command Prompt for Microsoft Visual C++
|
||||
uses: ilammy/msvc-dev-cmd@v1
|
||||
|
||||
- name: Set SSL_CERT_FILE for model downloading for unit tests
|
||||
run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV
|
||||
|
||||
- name: CMake configure
|
||||
run: |
|
||||
cmake -G "${{ env.CMAKE_GENERATOR }}" `
|
||||
@@ -195,22 +172,21 @@ jobs:
|
||||
cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose
|
||||
|
||||
- name: Upload openvino package
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_package
|
||||
path: ${{ env.BUILD_DIR }}/openvino_package.zip
|
||||
if-no-files-found: 'error'
|
||||
|
||||
- name: Upload openvino tests package
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.BUILD_DIR }}/openvino_tests.zip
|
||||
if-no-files-found: 'error'
|
||||
|
||||
Samples:
|
||||
needs: [Build, Smart_CI]
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).samples
|
||||
needs: Build
|
||||
timeout-minutes: 20
|
||||
defaults:
|
||||
run:
|
||||
@@ -258,7 +234,7 @@ jobs:
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'true'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Build cpp samples
|
||||
run: |
|
||||
@@ -282,7 +258,7 @@ jobs:
|
||||
WORKSPACE: ${{ env.INSTALL_DIR }}
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-samples
|
||||
@@ -291,12 +267,12 @@ jobs:
|
||||
|
||||
Python_Unit_Tests:
|
||||
name: Python unit tests
|
||||
needs: [Build, Smart_CI]
|
||||
needs: Build
|
||||
timeout-minutes: 75
|
||||
defaults:
|
||||
run:
|
||||
shell: pwsh
|
||||
runs-on: aks-win-8-cores-16gb
|
||||
runs-on: aks-win-4-cores-8gb
|
||||
env:
|
||||
OPENVINO_REPO: "${{ github.workspace }}\\openvino"
|
||||
OPENVINO_CONTRIB_REPO: "${{ github.workspace }}\\openvino_contrib"
|
||||
@@ -339,9 +315,8 @@ jobs:
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
pip-cache-path: ${{ env.PIP_CACHE_PATH }}
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'true'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Install OpenVINO Python wheels
|
||||
run: |
|
||||
@@ -355,9 +330,6 @@ jobs:
|
||||
|
||||
- name: Install Python API tests dependencies
|
||||
run: |
|
||||
# To enable pytest parallel features
|
||||
python3 -m pip install pytest-xdist[psutil]
|
||||
|
||||
# For torchvision to OpenVINO preprocessing converter
|
||||
python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/python/preprocess/torchvision/requirements.txt
|
||||
|
||||
@@ -365,53 +337,48 @@ jobs:
|
||||
python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt
|
||||
|
||||
- name: Python API 1.0 Tests
|
||||
#if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py
|
||||
|
||||
- name: Python API 2.0 Tests
|
||||
#if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101
|
||||
shell: cmd
|
||||
run: |
|
||||
set PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH%
|
||||
python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py
|
||||
|
||||
- name: Model Optimizer UT
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests --ignore=${{ env.INSTALL_TEST_DIR }}/mo/unit_tests/mo/front/mxnet --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml
|
||||
|
||||
- name: Install Python Layer tests dependencies
|
||||
run: |
|
||||
# layer test requirements
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
# Ticket - 115085
|
||||
- name: PyTorch Layer Tests
|
||||
if: ${{ 'false' }}
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -n logical -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
|
||||
- name: ONNX Layer Tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
:: requires 'unit_tests' from 'tools/mo'
|
||||
set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH%
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/onnx_tests -n logical -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow 1 Layer Tests - TF FE
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
:: requires 'unit_tests' from 'tools/mo'
|
||||
set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH%
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml
|
||||
@@ -420,9 +387,10 @@ jobs:
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow 2 Layer Tests - TF FE
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
:: requires 'unit_tests' from 'tools/mo'
|
||||
set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH%
|
||||
|
||||
@@ -431,32 +399,30 @@ jobs:
|
||||
TEST_DEVICE: CPU
|
||||
|
||||
- name: TensorFlow 1 Layer Tests - Legacy FE
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml
|
||||
|
||||
- name: TensorFlow 2 Layer Tests - Legacy FE
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/test_tf2_keras_activation.py --ir_version=11 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_Activation.xml -k "sigmoid"
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: TensorFlow Lite Layer Tests - TFL FE
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_lite_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tfl_fe.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: Python ONNX operators tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test ||
|
||||
fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
:: Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately
|
||||
@@ -465,24 +431,26 @@ jobs:
|
||||
--ignore=${{ env.INSTALL_TEST_DIR }}/onnx/test_python/test_zoo_models.py
|
||||
|
||||
- name: MO Python API Tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
:: Used for 'test_utils' installed in '<test_package>\python\openvino\test_utils'
|
||||
set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\python\openvino\test_utils;${{ env.INSTALL_TEST_DIR }}\python;%PYTHONPATH%
|
||||
|
||||
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml
|
||||
env:
|
||||
TEST_DEVICE: CPU
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: OVC Python API Tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
:: Used for 'test_utils' installed in '<test_package>\python\openvino\test_utils'
|
||||
set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\python\openvino\test_utils;${{ env.INSTALL_TEST_DIR }}\python;%PYTHONPATH%
|
||||
|
||||
|
||||
:: Skip test ticket: 126319
|
||||
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/ovc_python_api_tests -k "not test_ovc_tool_non_existng_output_dir" --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_ovc_convert.xml
|
||||
env:
|
||||
@@ -490,19 +458,18 @@ jobs:
|
||||
TEST_PRECISION: FP16
|
||||
|
||||
- name: Python Frontend tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test ||
|
||||
fromJSON(needs.smart_ci.outputs.affected_components).PDPD_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
|
||||
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml
|
||||
|
||||
- name: OVC unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
|
||||
shell: cmd
|
||||
run: python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/ovc/unit_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-OpenVinoConversion.xml
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-python
|
||||
@@ -511,7 +478,7 @@ jobs:
|
||||
|
||||
CXX_Unit_Tests:
|
||||
name: C++ unit tests
|
||||
needs: [Build, Smart_CI]
|
||||
needs: Build
|
||||
timeout-minutes: 25
|
||||
defaults:
|
||||
run:
|
||||
@@ -544,87 +511,73 @@ jobs:
|
||||
popd
|
||||
|
||||
- name: OpenVINO Core unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml
|
||||
|
||||
- name: OpenVINO Inference functional tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml
|
||||
|
||||
- name: OpenVINO Inference unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml
|
||||
|
||||
- name: Low Precision Transformations Tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).LP_transformations.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml
|
||||
|
||||
- name: OpenVINO Conditional compilation tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml
|
||||
|
||||
- name: IR frontend tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).IR_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml
|
||||
|
||||
- name: PaddlePaddle frontend tests # Disabled because of CVS-95904
|
||||
- name: PaddlePaddle frontend tests # Disabled in Azure: https://github.com/openvinotoolkit/openvino/blob/master/.ci/azure/linux.yml#L403
|
||||
if: ${{ 'false' }}
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml
|
||||
|
||||
- name: ONNX frontend tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml
|
||||
|
||||
- name: TensorFlow Common frontend tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test ||
|
||||
fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml
|
||||
|
||||
- name: TensorFlow frontend tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml
|
||||
|
||||
- name: TensorFlow Lite frontend tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
:: Skip ticket: 126320
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_filter=-*test_decode_convert_equal_convert*:*test_convert_partially_equal_convert* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml
|
||||
|
||||
- name: Transformations func tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).transformations.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml
|
||||
|
||||
- name: Legacy Transformations func tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_legacy_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LegacyTransformations.xml
|
||||
|
||||
- name: Inference Engine 1.0 unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineUnitTests.xml
|
||||
@@ -635,13 +588,11 @@ jobs:
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml
|
||||
|
||||
- name: Snippets func tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml
|
||||
|
||||
- name: CPU plugin unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml
|
||||
@@ -657,31 +608,26 @@ jobs:
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml
|
||||
|
||||
- name: GNA plugin unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_gna_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-GNAUnitTests.xml
|
||||
|
||||
- name: AUTO unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml
|
||||
|
||||
- name: AUTO func Tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml
|
||||
|
||||
- name: Template plugin func tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).TEMPLATE.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml
|
||||
|
||||
- name: Inference Engine C API tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineCAPITests.xml
|
||||
@@ -693,37 +639,32 @@ jobs:
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml
|
||||
|
||||
- name: AutoBatch unit tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml
|
||||
|
||||
- name: AutoBatch func tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml
|
||||
|
||||
- name: Proxy Plugin func tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).PROXY.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml
|
||||
|
||||
- name: Hetero Unit Tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml
|
||||
|
||||
- name: Hetero Func Tests
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test
|
||||
shell: cmd
|
||||
run: |
|
||||
call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-cpp
|
||||
@@ -732,7 +673,7 @@ jobs:
|
||||
|
||||
CPU_Functional_Tests:
|
||||
name: CPU functional tests
|
||||
needs: [Build, Smart_CI]
|
||||
needs: Build
|
||||
timeout-minutes: 70
|
||||
defaults:
|
||||
run:
|
||||
@@ -744,7 +685,7 @@ jobs:
|
||||
INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests"
|
||||
PARALLEL_TEST_SCRIPT: "${{ github.workspace }}\\install\\tests\\functional_test_utils\\layer_tests_summary\\run_parallel.py"
|
||||
PARALLEL_TEST_CACHE: "${{ github.workspace }}\\install\\tests\\test_cache.lst"
|
||||
if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
|
||||
|
||||
steps:
|
||||
- name: Download OpenVINO package
|
||||
uses: actions/download-artifact@v3
|
||||
@@ -780,7 +721,7 @@ jobs:
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'true'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Install python dependencies
|
||||
shell: cmd
|
||||
@@ -808,7 +749,7 @@ jobs:
|
||||
key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }}
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-functional-cpu
|
||||
@@ -822,17 +763,3 @@ jobs:
|
||||
${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv
|
||||
${{ env.PARALLEL_TEST_CACHE }}
|
||||
if-no-files-found: 'error'
|
||||
|
||||
Overall_Status:
|
||||
name: ci/gha_overall_status_windows
|
||||
needs: [Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests]
|
||||
if: ${{ always() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check status of all jobs
|
||||
if: >-
|
||||
${{
|
||||
contains(needs.*.result, 'failure') ||
|
||||
contains(needs.*.result, 'cancelled')
|
||||
}}
|
||||
run: exit 1
|
||||
|
||||
@@ -4,25 +4,24 @@ on:
|
||||
schedule:
|
||||
# run daily at 00:00
|
||||
- cron: '0 0 * * *'
|
||||
# pull_request:
|
||||
# paths-ignore:
|
||||
# - '**/docs/**'
|
||||
# - 'docs/**'
|
||||
# - '**/**.md'
|
||||
# - '**.md'
|
||||
# - '**/layer_tests_summary/**'
|
||||
# - '**/conformance/**'
|
||||
push:
|
||||
paths-ignore:
|
||||
- '**/docs/**'
|
||||
- 'docs/**'
|
||||
- '**/**.md'
|
||||
- '**.md'
|
||||
- '**/layer_tests_summary/**'
|
||||
- '**/conformance/**'
|
||||
branches:
|
||||
- master
|
||||
- 'releases/**'
|
||||
# pull_request:
|
||||
# paths-ignore:
|
||||
# - '**/docs/**'
|
||||
# - 'docs/**'
|
||||
# - '**/**.md'
|
||||
# - '**.md'
|
||||
# - '**/layer_tests_summary/**'
|
||||
# - '**/conformance/**'
|
||||
# push:
|
||||
# paths-ignore:
|
||||
# - '**/docs/**'
|
||||
# - 'docs/**'
|
||||
# - '**/**.md'
|
||||
# - '**.md'
|
||||
# - '**/layer_tests_summary/**'
|
||||
# - '**/conformance/**'
|
||||
# branches:
|
||||
# - master
|
||||
|
||||
concurrency:
|
||||
# github.ref is not unique in post-commit
|
||||
@@ -38,7 +37,7 @@ jobs:
|
||||
defaults:
|
||||
run:
|
||||
shell: pwsh
|
||||
runs-on: aks-win-16-cores-32gb
|
||||
runs-on: windows-latest-8-cores
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
CMAKE_GENERATOR: 'Ninja Multi-Config'
|
||||
@@ -50,8 +49,6 @@ jobs:
|
||||
BUILD_DIR: "${{ github.workspace }}\\openvino_build"
|
||||
MODELS_PATH: "${{ github.workspace }}\\testdata"
|
||||
SELECTIVE_BUILD_STAT_DIR: "${{ github.workspace }}\\selective_build_stat"
|
||||
# TODO: specify version of compiler here
|
||||
SCCACHE_AZURE_KEY_PREFIX: windows2022_x86_64_itt_Release
|
||||
steps:
|
||||
- name: Clone OpenVINO
|
||||
uses: actions/checkout@v4
|
||||
@@ -85,11 +82,6 @@ jobs:
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Install sccache
|
||||
uses: mozilla-actions/sccache-action@v0.0.3
|
||||
with:
|
||||
version: "v0.5.4"
|
||||
|
||||
- name: Install build dependencies
|
||||
run: choco install --no-progress ninja
|
||||
|
||||
@@ -97,19 +89,15 @@ jobs:
|
||||
run: |
|
||||
# For running ONNX frontend unit tests
|
||||
python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt
|
||||
|
||||
|
||||
# For running TensorFlow frontend unit tests
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt
|
||||
|
||||
|
||||
# For running TensorFlow Lite frontend unit tests
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow_lite/tests/requirements.txt
|
||||
|
||||
# For getting rid of SSL issues during model downloading for unit tests
|
||||
python3 -m pip install certifi
|
||||
|
||||
# Disabled because of CVS-95904
|
||||
|
||||
# For running Paddle frontend unit tests
|
||||
# python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt
|
||||
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt
|
||||
|
||||
#
|
||||
# Build
|
||||
@@ -118,8 +106,17 @@ jobs:
|
||||
- name: Configure Developer Command Prompt for Microsoft Visual C++
|
||||
uses: ilammy/msvc-dev-cmd@v1
|
||||
|
||||
- name: Set SSL_CERT_FILE for model downloading for unit tests
|
||||
run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV
|
||||
- name: Setup sccache
|
||||
uses: hendrikmuhs/ccache-action@v1.2
|
||||
with:
|
||||
variant: sccache
|
||||
max-size: "2000M"
|
||||
# Should save cache only if run in the master branch of the base repo
|
||||
# github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push
|
||||
save: ${{ github.ref_name == 'master' && 'true' || 'false' }}
|
||||
key: ${{ github.job }}-${{ runner.os }}-itt
|
||||
restore-keys: |
|
||||
${{ github.job }}-${{ runner.os }}-itt
|
||||
|
||||
- name: CMake configure - CC COLLECT
|
||||
run: |
|
||||
@@ -136,29 +133,10 @@ jobs:
|
||||
-S ${{ env.OPENVINO_REPO }} `
|
||||
-B ${{ env.BUILD_DIR }}
|
||||
|
||||
- name: Clean sccache stats
|
||||
run: '& "$Env:SCCACHE_PATH" --zero-stats'
|
||||
|
||||
# to get more information on the issue
|
||||
# described in the next step
|
||||
- name: Show which network ports are used
|
||||
run: netstat -ban
|
||||
|
||||
# the case is the following:
|
||||
# sccache: error: An attempt was made to access a socket in a way forbidden by its access permissions. (os error 10013)
|
||||
# This looks like the attempt to use
|
||||
# a port below 1024 or a port
|
||||
# which is occupied by another app
|
||||
- name: Stop sccache server just in case
|
||||
run: '& "$Env:SCCACHE_PATH" --stop-server'
|
||||
|
||||
- name: Cmake build - CC COLLECT
|
||||
run: |
|
||||
cmake --build ${{ env.BUILD_DIR }} --parallel 8 --config ${{ env.CMAKE_BUILD_TYPE }} && `
|
||||
cmake --build ${{ env.BUILD_DIR }} --parallel 8 --config ${{ env.CMAKE_BUILD_TYPE }} --target sea_itt_lib
|
||||
|
||||
- name: Show sccache stats
|
||||
run: '& "$Env:SCCACHE_PATH" --show-stats'
|
||||
cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }}
|
||||
cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target sea_itt_lib
|
||||
|
||||
- name: Cmake install - OpenVINO
|
||||
run: cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake
|
||||
@@ -182,7 +160,7 @@ jobs:
|
||||
shell: cmd
|
||||
run: |
|
||||
set path=%path%;${{ env.OPENVINO_REPO }}\temp\tbb\bin
|
||||
|
||||
|
||||
python3 ${{ env.OPENVINO_REPO }}\thirdparty\itt_collector\runtool\sea_runtool.py ^
|
||||
--bindir ${{ env.OPENVINO_REPO }}\bin\intel64\${{ env.CMAKE_BUILD_TYPE }} ^
|
||||
-o ${{ env.SELECTIVE_BUILD_STAT_DIR }}\itt_stat ! ${{ env.OPENVINO_REPO }}\bin\intel64\${{ env.CMAKE_BUILD_TYPE }}\benchmark_app.exe ^
|
||||
@@ -218,7 +196,7 @@ jobs:
|
||||
|
||||
- name: Upload selective build statistics package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_selective_build_stat
|
||||
path: ${{ env.BUILD_DIR }}/openvino_selective_build_stat.zip
|
||||
@@ -226,7 +204,7 @@ jobs:
|
||||
|
||||
- name: Upload OpenVINO tests package
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: openvino_tests
|
||||
path: ${{ env.BUILD_DIR }}/openvino_tests.zip
|
||||
@@ -238,7 +216,7 @@ jobs:
|
||||
defaults:
|
||||
run:
|
||||
shell: pwsh
|
||||
runs-on: aks-win-16-cores-32gb
|
||||
runs-on: windows-latest-8-cores
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: 'Release'
|
||||
CMAKE_CXX_COMPILER_LAUNCHER: sccache
|
||||
@@ -247,7 +225,6 @@ jobs:
|
||||
BUILD_DIR: "${{ github.workspace }}\\openvino_build"
|
||||
MODELS_PATH: "${{ github.workspace }}\\testdata"
|
||||
SELECTIVE_BUILD_STAT_DIR: "${{ github.workspace }}\\selective_build_stat"
|
||||
SCCACHE_AZURE_KEY_PREFIX: windows2022_x86_64_cc_Release
|
||||
steps:
|
||||
- name: Clone OpenVINO
|
||||
uses: actions/checkout@v4
|
||||
@@ -272,18 +249,6 @@ jobs:
|
||||
- name: Extract selective build statistics package
|
||||
run: Expand-Archive ${{ env.SELECTIVE_BUILD_STAT_DIR }}/openvino_selective_build_stat.zip -DestinationPath "${{ env.SELECTIVE_BUILD_STAT_DIR }}"
|
||||
|
||||
- name: Setup Python ${{ env.PYTHON_VERSION }}
|
||||
uses: ./openvino/.github/actions/setup_python
|
||||
with:
|
||||
version: ${{ env.PYTHON_VERSION }}
|
||||
should-setup-pip-paths: 'false'
|
||||
self-hosted-runner: 'false'
|
||||
|
||||
- name: Install sccache
|
||||
uses: mozilla-actions/sccache-action@v0.0.3
|
||||
with:
|
||||
version: "v0.5.4"
|
||||
|
||||
- name: CMake configure - CC ON
|
||||
run: |
|
||||
cmake `
|
||||
@@ -302,15 +267,9 @@ jobs:
|
||||
-S ${{ env.OPENVINO_REPO }} `
|
||||
-B ${{ env.BUILD_DIR }}
|
||||
|
||||
- name: Clean sccache stats
|
||||
run: '& "$Env:SCCACHE_PATH" --zero-stats'
|
||||
|
||||
- name: Cmake build - CC ON
|
||||
run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target benchmark_app
|
||||
|
||||
- name: Show sccache stats
|
||||
run: '& "$Env:SCCACHE_PATH" --show-stats'
|
||||
|
||||
- name: List bin files
|
||||
shell: cmd
|
||||
run: dir ${{ env.OPENVINO_REPO }}\bin\ /s
|
||||
@@ -324,11 +283,10 @@ jobs:
|
||||
CPU_Functional_Tests:
|
||||
name: CPU functional tests
|
||||
needs: Build
|
||||
timeout-minutes: 70
|
||||
defaults:
|
||||
run:
|
||||
shell: pwsh
|
||||
runs-on: aks-win-8-cores-16gb
|
||||
runs-on: windows-latest-8-cores
|
||||
env:
|
||||
OPENVINO_REPO: "${{ github.workspace }}\\openvino"
|
||||
INSTALL_TEST_DIR: "${{ github.workspace }}\\tests_install"
|
||||
@@ -380,7 +338,7 @@ jobs:
|
||||
timeout-minutes: 45
|
||||
|
||||
- name: Upload Test Results
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ !cancelled() }}
|
||||
with:
|
||||
name: test-results-functional-cpu
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -7,7 +7,7 @@ cmake-build*
|
||||
!__init__.py
|
||||
!__main__.py
|
||||
# and sphinx documentation folders
|
||||
!docs/sphinx_setup/_*
|
||||
!docs/_*
|
||||
|
||||
# developer tools
|
||||
*.idea
|
||||
|
||||
@@ -4,14 +4,8 @@
|
||||
|
||||
OpenVINO™ is always looking for opportunities to improve and your contributions
|
||||
play a big role in this process. There are several ways you can make the
|
||||
product better.
|
||||
product better:
|
||||
|
||||
# Table of Contents
|
||||
1. [Forms of contribution](#Forms-of-contribution)
|
||||
2. [Technical guide](#Technical-guide)
|
||||
|
||||
|
||||
## Forms of contribution
|
||||
|
||||
### Provide Feedback
|
||||
|
||||
@@ -38,7 +32,7 @@ product better.
|
||||
If you want to help improving OpenVINO, choose one of the issues reported in
|
||||
[GitHub Issue Tracker](https://github.com/openvinotoolkit/openvino/issues) and
|
||||
[create a Pull Request](./CONTRIBUTING_PR.md) addressing it. Consider one of the
|
||||
tasks listed as [first-time contributions](https://github.com/orgs/openvinotoolkit/projects/3).
|
||||
tasks listed as [first-time contributions](https://github.com/openvinotoolkit/openvino/issues/17502).
|
||||
If the feature you want to develop is more complex or not well defined by the reporter,
|
||||
it is always a good idea to [discuss it](https://github.com/openvinotoolkit/openvino/discussions)
|
||||
with OpenVINO developers first. Before creating a new PR, check if nobody is already
|
||||
@@ -87,66 +81,6 @@ product better.
|
||||
share your expertise with the community. Check GitHub Discussions and
|
||||
Issues to see if you can help someone.
|
||||
|
||||
## Technical guide
|
||||
|
||||
This section lists all the necessary steps required to set up your environment, build OpenVINO locally, and run tests for specific components. It's a perfect place to start when you have just picked a Good First Issue and are wondering how to start working on it.
|
||||
|
||||
Keep in mind that we are here to help - **do not hesitate to ask the development team if something is not clear**. Such questions allow us to keep improving our documentation.
|
||||
|
||||
### 1. Prerequisites
|
||||
|
||||
You can start with the following links:
|
||||
- [What is OpenVINO?](https://github.com/openvinotoolkit/openvino#what-is-openvino-toolkit)
|
||||
- [OpenVINO architecture](https://github.com/openvinotoolkit/openvino/blob/master/src/docs/architecture.md)
|
||||
- [User documentation](https://docs.openvino.ai/)
|
||||
- [Blog post on contributing to OpenVINO](https://medium.com/openvino-toolkit/how-to-contribute-to-an-ai-open-source-project-c741f48e009e)
|
||||
- [Pick up a Good First Issue](https://github.com/orgs/openvinotoolkit/projects/3)
|
||||
|
||||
### 2. Building the project
|
||||
|
||||
In order to build the project, follow the [build instructions for your specific OS](https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/build.md).
|
||||
|
||||
### 3. Familiarize yourself with the component you'll be working with
|
||||
|
||||
Choose the component your Good First Issue is related to. You can run tests to make sure it works correctly.
|
||||
|
||||
##### APIs
|
||||
- [C API](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings/c)
|
||||
- [Core](https://github.com/openvinotoolkit/openvino/tree/master/src/core)
|
||||
- [Python API](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings/python)
|
||||
|
||||
##### Frontends
|
||||
- [IR Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/ir)
|
||||
- [ONNX Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/onnx)
|
||||
- [PaddlePaddle Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/paddle)
|
||||
- [PyTorch Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/pytorch)
|
||||
- [TensorFlow Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/tensorflow)
|
||||
|
||||
##### Plugins
|
||||
- [Auto plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/auto)
|
||||
- [CPU plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu)
|
||||
- [GPU plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu)
|
||||
- [Hetero plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/hetero)
|
||||
- [Template plugin](https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/template)
|
||||
|
||||
##### Tools
|
||||
- [Benchmark Tool](https://github.com/openvinotoolkit/openvino/tree/master/tools/benchmark_tool)
|
||||
- [Model Optimizer](https://github.com/openvinotoolkit/openvino/tree/master/tools/mo)
|
||||
|
||||
##### Others
|
||||
- [Documentation](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING_DOCS.md)
|
||||
|
||||
### 3. Start working on your Good First Issue
|
||||
|
||||
Use the issue description and locally built OpenVINO to complete the task. Remember that you can always ask users tagged in the "Contact points" section for help!
|
||||
|
||||
### 4. Submit a PR with your changes
|
||||
|
||||
Follow our [Good Pull Request guidelines](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING_PR.md).
|
||||
|
||||
### 5. Wait for a review
|
||||
|
||||
We'll make sure to review your Pull Request as soon as possible and provide you with our feedback. You can expect a merge once your changes are validated with automatic tests and approved by maintainers.
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@@ -87,6 +87,11 @@ function(ov_set_temp_directory temp_variable source_tree_dir)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
macro(set_temp_directory)
|
||||
message(WARNING "'set_temp_directory' is deprecated. Please, use 'ov_set_temp_directory'")
|
||||
ov_set_temp_directory(${ARGV})
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# For cross-compilation
|
||||
#
|
||||
@@ -289,6 +294,11 @@ function(ov_mark_target_as_cc TARGET_NAME)
|
||||
add_dependencies(${TARGET_NAME} conditional_compilation_gen)
|
||||
endfunction()
|
||||
|
||||
function(ie_mark_target_as_cc TARGET_NAME)
|
||||
message(WARNING "This function is deprecated. Please use ov_mark_target_as_cc(TARGET_NAME) instead.")
|
||||
ov_mark_target_as_cc(${TARGET_NAME})
|
||||
endfunction()
|
||||
|
||||
include(python_requirements)
|
||||
|
||||
# Code style utils
|
||||
|
||||
@@ -181,3 +181,15 @@ function(ov_add_test_target)
|
||||
COMPONENT ${ARG_COMPONENT}
|
||||
EXCLUDE_FROM_ALL)
|
||||
endfunction()
|
||||
|
||||
# deprecated
|
||||
|
||||
function(addIeTarget)
|
||||
message(WARNING "'addIeTarget' is deprecated, please, use 'ov_add_target' instead")
|
||||
ov_add_target(${ARGV})
|
||||
endfunction()
|
||||
|
||||
function(addIeTargetTest)
|
||||
message(WARNING "'addIeTargetTest' is deprecated, please, use 'ov_add_test_target' instead")
|
||||
ov_add_test_target(${ARGV})
|
||||
endfunction()
|
||||
|
||||
@@ -196,3 +196,10 @@ endfunction()
|
||||
function(ov_add_api_validator_post_build_step)
|
||||
_ov_add_api_validator_post_build_step(${ARGN})
|
||||
endfunction()
|
||||
|
||||
# deprecated
|
||||
|
||||
function(ie_add_api_validator_post_build_step)
|
||||
message(WARNING "'ie_add_api_validator_post_build_step' is deprecated, use 'ov_add_api_validator_post_build_step' instead")
|
||||
_ov_add_api_validator_post_build_step(${ARGN})
|
||||
endfunction()
|
||||
|
||||
@@ -130,3 +130,8 @@ function(ov_add_clang_format_target TARGET_NAME)
|
||||
add_dependencies(clang_format_check_all ${TARGET_NAME})
|
||||
add_dependencies(clang_format_fix_all ${TARGET_NAME}_fix)
|
||||
endfunction()
|
||||
|
||||
function(add_clang_format_target)
|
||||
message(WARNING "add_clang_format_target is deprecated, use ov_add_clang_format_target instead")
|
||||
ov_add_clang_format_target(${ARGV})
|
||||
endfunction()
|
||||
|
||||
@@ -32,6 +32,11 @@ macro(ov_disable_deprecated_warnings)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ov_c_cxx_deprecated}")
|
||||
endmacro()
|
||||
|
||||
macro(disable_deprecated_warnings)
|
||||
message(WARNING "'disable_deprecated_warnings' is deprecated, use 'ov_disable_deprecated_warnings' instead")
|
||||
ov_disable_deprecated_warnings()
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ov_deprecated_no_errors()
|
||||
#
|
||||
@@ -120,7 +125,7 @@ macro(ov_avx2_optimization_flags flags)
|
||||
set(${flags} -xCORE-AVX2)
|
||||
endif()
|
||||
elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
|
||||
set(${flags} -mavx2 -mfma -mf16c)
|
||||
set(${flags} -mavx2 -mfma)
|
||||
else()
|
||||
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
|
||||
endif()
|
||||
@@ -142,7 +147,7 @@ macro(ov_avx512_optimization_flags flags)
|
||||
set(${flags} -xCOMMON-AVX512)
|
||||
endif()
|
||||
elseif(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX)
|
||||
set(${flags} -mavx512f -mfma -mf16c)
|
||||
set(${flags} -mavx512f -mfma)
|
||||
else()
|
||||
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
|
||||
endif()
|
||||
@@ -208,6 +213,16 @@ function(ov_disable_all_warnings)
|
||||
endforeach()
|
||||
endfunction()
|
||||
|
||||
#
|
||||
# ie_enable_lto()
|
||||
#
|
||||
# Enables Link Time Optimization compilation
|
||||
#
|
||||
macro(ie_enable_lto)
|
||||
message(WARNING "'ie_enable_lto' is deprecated, set 'INTERPROCEDURAL_OPTIMIZATION_RELEASE' target property instead")
|
||||
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ON)
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ov_add_compiler_flags(<flag1 [flag2 flag3 ...>])
|
||||
#
|
||||
@@ -220,6 +235,11 @@ macro(ov_add_compiler_flags)
|
||||
endforeach()
|
||||
endmacro()
|
||||
|
||||
macro(ie_add_compiler_flags)
|
||||
message(WARNING "'ie_add_compiler_flags' is deprecated, use 'ov_add_compiler_flags' instead")
|
||||
ov_add_compiler_flags(${ARGN})
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ov_force_include(<target> <PUBLIC | PRIVATE | INTERFACE> <header file>)
|
||||
#
|
||||
|
||||
@@ -19,3 +19,10 @@ function(ov_build_target_faster TARGET_NAME)
|
||||
target_precompile_headers(${TARGET_NAME} ${FASTER_BUILD_PCH})
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
# deprecated
|
||||
|
||||
function(ie_faster_build)
|
||||
message(WARNING "ie_faster_build is deprecated, use ov_build_target_faster instead")
|
||||
ov_build_target_faster(${ARGV})
|
||||
endfunction()
|
||||
|
||||
@@ -57,10 +57,10 @@ function(ov_generate_frontends_hpp)
|
||||
# for some reason dependency on source files does not work
|
||||
# so, we have to use explicit target and make it dependency for frontend_common
|
||||
add_custom_target(_ov_frontends_hpp DEPENDS ${ov_frontends_hpp})
|
||||
add_dependencies(openvino_frontend_common_obj _ov_frontends_hpp)
|
||||
add_dependencies(frontend_common_obj _ov_frontends_hpp)
|
||||
|
||||
# add dependency for object files
|
||||
get_target_property(sources openvino_frontend_common_obj SOURCES)
|
||||
get_target_property(sources frontend_common_obj SOURCES)
|
||||
foreach(source IN LISTS sources)
|
||||
if("${source}" MATCHES "\\$\\<TARGET_OBJECTS\\:([A-Za-z0-9_]*)\\>")
|
||||
# object library
|
||||
@@ -220,7 +220,6 @@ macro(ov_add_frontend)
|
||||
PUBLIC
|
||||
$<BUILD_INTERFACE:${${TARGET_NAME}_INCLUDE_DIR}>
|
||||
PRIVATE
|
||||
$<TARGET_PROPERTY:openvino::frontend::common,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
${frontend_root_dir}/src
|
||||
${CMAKE_CURRENT_BINARY_DIR})
|
||||
|
||||
@@ -343,7 +342,6 @@ macro(ov_add_frontend)
|
||||
install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/openvino
|
||||
DESTINATION ${FRONTEND_INSTALL_INCLUDE}
|
||||
COMPONENT ${dev_component}
|
||||
${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}
|
||||
FILES_MATCHING PATTERN "*.hpp")
|
||||
|
||||
# public target name
|
||||
|
||||
@@ -55,3 +55,20 @@ function (ov_print_enabled_features)
|
||||
endforeach()
|
||||
message(STATUS "")
|
||||
endfunction()
|
||||
|
||||
# deprecated
|
||||
|
||||
macro (ie_option variable description value)
|
||||
message(WARNING "'ie_option' is deprecated, please, use 'ov_option' instead")
|
||||
ov_option(${variable} "${description}" ${value})
|
||||
endmacro()
|
||||
|
||||
macro(ie_dependent_option variable description def_value condition fallback_value)
|
||||
message(WARNING "'ie_dependent_option' is deprecated, please, use 'ov_dependent_option' instead")
|
||||
ov_dependent_option(${variable} "${description}" ${def_value} "${condition}" ${fallback_value})
|
||||
endmacro()
|
||||
|
||||
function(print_enabled_features)
|
||||
message(WARNING "'print_enabled_features' is deprecated, please, use 'ov_print_enabled_features' instead")
|
||||
ov_print_enabled_features()
|
||||
endfunction()
|
||||
|
||||
@@ -241,3 +241,10 @@ macro(ov_cpack)
|
||||
|
||||
include(CPack)
|
||||
endmacro()
|
||||
|
||||
# deprecated
|
||||
|
||||
macro(ie_cpack)
|
||||
message(WARNING "'ie_cpack' is deprecated. Please, use 'ov_cpack'")
|
||||
ov_cpack(${ARGV})
|
||||
endmacro()
|
||||
|
||||
@@ -135,6 +135,9 @@ function(ov_add_plugin)
|
||||
install(TARGETS ${OV_PLUGIN_NAME}
|
||||
LIBRARY DESTINATION ${OV_CPACK_PLUGINSDIR}
|
||||
COMPONENT ${install_component})
|
||||
install(TARGETS ${OV_PLUGIN_NAME}
|
||||
LIBRARY DESTINATION ${OV_CPACK_PLUGINSDIR}
|
||||
COMPONENT ${install_component})
|
||||
else()
|
||||
ov_install_static_lib(${OV_PLUGIN_NAME} ${OV_CPACK_COMP_CORE})
|
||||
endif()
|
||||
@@ -164,6 +167,11 @@ function(ov_add_plugin)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
function(ie_add_plugin)
|
||||
message(WARNING "'ie_add_plugin' is deprecated. Please, use 'ov_add_plugin'")
|
||||
ov_add_plugin(${ARGN})
|
||||
endfunction()
|
||||
|
||||
#
|
||||
# ov_register_in_plugins_xml(MAIN_TARGET <main target name>)
|
||||
#
|
||||
@@ -255,6 +263,14 @@ macro(ov_register_plugins)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ie_register_plugins()
|
||||
#
|
||||
macro(ie_register_plugins)
|
||||
message(WARNING "'ie_register_plugins' is deprecated. Please, use 'ov_register_plugins'")
|
||||
ov_register_plugins(${ARGN})
|
||||
endmacro()
|
||||
|
||||
#
|
||||
# ov_target_link_plugins(<TARGET_NAME>)
|
||||
#
|
||||
|
||||
@@ -166,6 +166,28 @@ macro(ov_parse_ci_build_number repo_root)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
macro (addVersionDefines FILE)
|
||||
message(WARNING "'addVersionDefines' is deprecated. Please, use 'ov_add_version_defines'")
|
||||
|
||||
set(__version_file ${FILE})
|
||||
if(NOT IS_ABSOLUTE ${__version_file})
|
||||
set(__version_file "${CMAKE_CURRENT_SOURCE_DIR}/${__version_file}")
|
||||
endif()
|
||||
if(NOT EXISTS ${__version_file})
|
||||
message(FATAL_ERROR "${FILE} does not exists in current source directory")
|
||||
endif()
|
||||
foreach (VAR ${ARGN})
|
||||
if (DEFINED ${VAR} AND NOT "${${VAR}}" STREQUAL "")
|
||||
set_property(
|
||||
SOURCE ${__version_file}
|
||||
APPEND
|
||||
PROPERTY COMPILE_DEFINITIONS
|
||||
${VAR}="${${VAR}}")
|
||||
endif()
|
||||
endforeach()
|
||||
unset(__version_file)
|
||||
endmacro()
|
||||
|
||||
macro (ov_add_version_defines FILE TARGET)
|
||||
set(__version_file ${FILE})
|
||||
if(NOT IS_ABSOLUTE ${__version_file})
|
||||
|
||||
@@ -51,3 +51,10 @@ function(ov_target_link_whole_archive targetName)
|
||||
target_link_libraries(${targetName} PRIVATE ${libs})
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
# deprecated
|
||||
|
||||
function(ieTargetLinkWholeArchive)
|
||||
message(WARNING "'ieTargetLinkWholeArchive' is deprecated, use 'ov_target_link_whole_archive' instead")
|
||||
ov_target_link_whole_archive(${ARGN})
|
||||
endfunction()
|
||||
|
||||
@@ -2,6 +2,38 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
function(ie_generate_dev_package_config)
|
||||
# dummy check that OpenCV is here
|
||||
find_package(OpenCV QUIET)
|
||||
if(OpenCV_VERSION VERSION_LESS 3.0)
|
||||
set(OpenCV_FOUND OFF)
|
||||
endif()
|
||||
|
||||
# export all targets with prefix and use them during extra modules build
|
||||
export(TARGETS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS} NAMESPACE IE::
|
||||
APPEND FILE "${CMAKE_BINARY_DIR}/inference_engine_developer_package_targets.cmake")
|
||||
add_custom_target(ie_dev_targets DEPENDS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS})
|
||||
|
||||
set(PATH_VARS "OpenVINO_SOURCE_DIR")
|
||||
if(ENABLE_SAMPLES OR ENABLE_TESTS)
|
||||
list(APPEND PATH_VARS "gflags_BINARY_DIR")
|
||||
# if we've found system gflags
|
||||
if(gflags_DIR)
|
||||
set(gflags_BINARY_DIR "${gflags_DIR}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in"
|
||||
"${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig.cmake"
|
||||
INSTALL_DESTINATION share # not used
|
||||
PATH_VARS ${PATH_VARS}
|
||||
NO_CHECK_REQUIRED_COMPONENTS_MACRO)
|
||||
|
||||
configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in"
|
||||
"${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake"
|
||||
@ONLY)
|
||||
endfunction()
|
||||
|
||||
function(ov_generate_dev_package_config)
|
||||
# dummy check that OpenCV is here
|
||||
find_package(OpenCV QUIET)
|
||||
@@ -175,6 +207,7 @@ endfunction()
|
||||
|
||||
# this OpenVINODeveloperPackageConfig.cmake is not used during extra modules build
|
||||
# since it's generated after modules are configured
|
||||
ie_generate_dev_package_config()
|
||||
ov_generate_dev_package_config()
|
||||
|
||||
# extra modules must be registered after inference_engine library
|
||||
|
||||
188
cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in
Normal file
188
cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in
Normal file
@@ -0,0 +1,188 @@
|
||||
# Copyright (C) 2018-2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
@PACKAGE_INIT@
|
||||
|
||||
include(CMakeFindDependencyMacro)
|
||||
|
||||
message(WARNING "find_package(InferenceEngineDeveloperPackage) is deprecated and will be removed in 2024.0 release. Please, use find_package(OpenVINODeveloperPackage)")
|
||||
|
||||
# TODO: remove after changing [private plugins]
|
||||
set_and_check(OpenVINO_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # NPU
|
||||
set_and_check(OpenVINO_MAIN_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # NPU
|
||||
|
||||
# Variables to export in plugin's projects
|
||||
|
||||
set(ov_options "@OV_OPTIONS@")
|
||||
list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER
|
||||
CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER
|
||||
CMAKE_INSTALL_PREFIX CPACK_GENERATOR)
|
||||
|
||||
if(APPLE)
|
||||
list(APPEND ov_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET)
|
||||
endif()
|
||||
|
||||
get_property(_OV_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(_OV_GENERATOR_MULTI_CONFIG)
|
||||
list(APPEND ov_options CMAKE_CONFIGURATION_TYPES)
|
||||
if(CMAKE_GENERATOR MATCHES "^Ninja Multi-Config$")
|
||||
list(APPEND ov_options CMAKE_DEFAULT_BUILD_TYPE)
|
||||
endif()
|
||||
else()
|
||||
list(APPEND ov_options CMAKE_BUILD_TYPE)
|
||||
endif()
|
||||
unset(_OV_GENERATOR_MULTI_CONFIG)
|
||||
|
||||
file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
|
||||
|
||||
message(STATUS "The following CMake options are exported from Inference Engine Developer package")
|
||||
message(" ")
|
||||
foreach(option IN LISTS ov_options)
|
||||
if(NOT DEFINED "${option}")
|
||||
load_cache("${cache_path}" READ_WITH_PREFIX "" ${option})
|
||||
endif()
|
||||
message(" ${option}: ${${option}}")
|
||||
endforeach()
|
||||
message(" ")
|
||||
|
||||
# for samples in 3rd party projects
|
||||
if(ENABLE_SAMPLES)
|
||||
set_and_check(gflags_DIR "@gflags_BINARY_DIR@")
|
||||
endif()
|
||||
|
||||
# Disable warning as error for private components
|
||||
set(CMAKE_COMPILE_WARNING_AS_ERROR OFF)
|
||||
|
||||
#
|
||||
# Content
|
||||
#
|
||||
|
||||
find_dependency(OpenVINODeveloperScripts
|
||||
PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package"
|
||||
NO_CMAKE_FIND_ROOT_PATH
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_dependency(InferenceEngine
|
||||
PATHS "${CMAKE_CURRENT_LIST_DIR}"
|
||||
NO_CMAKE_FIND_ROOT_PATH
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
find_dependency(ngraph
|
||||
PATHS "${CMAKE_CURRENT_LIST_DIR}"
|
||||
NO_CMAKE_FIND_ROOT_PATH
|
||||
NO_DEFAULT_PATH)
|
||||
|
||||
if(TARGET openvino::runtime AND NOT TARGET IE::runtime)
|
||||
add_library(IE::runtime INTERFACE IMPORTED)
|
||||
set_target_properties(IE::runtime PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES openvino::runtime)
|
||||
endif()
|
||||
|
||||
# WA for cmake: it exports ngraph as IE::ngraph in the IE export list
|
||||
# while we already have ngraph export in its own export list as ngraph::ngraph
|
||||
if(TARGET ngraph::ngraph AND NOT TARGET IE::ngraph)
|
||||
add_library(IE::ngraph INTERFACE IMPORTED)
|
||||
set_target_properties(IE::ngraph PROPERTIES INTERFACE_LINK_LIBRARIES ngraph::ngraph)
|
||||
endif()
|
||||
|
||||
_ov_find_tbb()
|
||||
|
||||
include("${CMAKE_CURRENT_LIST_DIR}/inference_engine_developer_package_targets.cmake")
|
||||
|
||||
if(TARGET IE::ov_core_dev AND NOT TARGET openvino::core::dev)
|
||||
add_library(openvino::core::dev INTERFACE IMPORTED)
|
||||
set_target_properties(openvino::core::dev PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES IE::ov_core_dev)
|
||||
endif()
|
||||
|
||||
if(TARGET IE::runtime::dev AND NOT TARGET openvino::runtime::dev)
|
||||
add_library(openvino::runtime::dev INTERFACE IMPORTED)
|
||||
set_target_properties(openvino::runtime::dev PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES IE::runtime::dev)
|
||||
endif()
|
||||
|
||||
if(TARGET IE::reference AND NOT TARGET IE::ngraph_reference)
|
||||
add_library(IE::ngraph_reference INTERFACE IMPORTED)
|
||||
set_target_properties(IE::ngraph_reference PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES IE::reference)
|
||||
endif()
|
||||
|
||||
if(ENABLE_SYSTEM_PUGIXML)
|
||||
set(_ov_pugixml_pkgconfig_interface "@pugixml_FOUND@")
|
||||
set(_ov_pugixml_cmake_interface "@PugiXML_FOUND@")
|
||||
if(_ov_pugixml_pkgconfig_interface)
|
||||
find_dependency(PkgConfig)
|
||||
elseif(_ov_pugixml_cmake_interface)
|
||||
find_dependency(PugiXML)
|
||||
endif()
|
||||
if(PugiXML_FOUND)
|
||||
set_property(TARGET pugixml PROPERTY IMPORTED_GLOBAL TRUE)
|
||||
add_library(IE::pugixml ALIAS pugixml)
|
||||
elseif(PkgConfig_FOUND)
|
||||
if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY)
|
||||
set(pkg_config_quiet_arg QUIET)
|
||||
endif()
|
||||
if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED)
|
||||
set(pkg_config_required_arg REQUIRED)
|
||||
endif()
|
||||
|
||||
pkg_search_module(pugixml
|
||||
${pkg_config_quiet_arg}
|
||||
${pkg_config_required_arg}
|
||||
IMPORTED_TARGET GLOBAL
|
||||
pugixml)
|
||||
|
||||
unset(pkg_config_quiet_arg)
|
||||
unset(pkg_config_required_arg)
|
||||
|
||||
if(pugixml_FOUND)
|
||||
add_library(IE::pugixml ALIAS PkgConfig::pugixml)
|
||||
|
||||
# PATCH: on Ubuntu 18.04 pugixml.pc contains incorrect include directories
|
||||
get_target_property(interface_include_dir PkgConfig::pugixml INTERFACE_INCLUDE_DIRECTORIES)
|
||||
if(interface_include_dir AND NOT EXISTS "${interface_include_dir}")
|
||||
set_target_properties(PkgConfig::pugixml PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES "")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# debian 9 case: no cmake, no pkg-config files
|
||||
if(NOT TARGET IE::pugixml)
|
||||
find_library(PUGIXML_LIBRARY NAMES pugixml DOC "Path to pugixml library")
|
||||
if(PUGIXML_LIBRARY)
|
||||
add_library(IE::pugixml INTERFACE IMPORTED GLOBAL)
|
||||
set_target_properties(IE::pugixml PROPERTIES INTERFACE_LINK_LIBRARIES "${PUGIXML_LIBRARY}")
|
||||
else()
|
||||
message(FATAL_ERROR "Failed to find system pugixml in OpenVINO Developer Package")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(_ov_nlohmann_json_FOUND "@nlohmann_json_FOUND@")
|
||||
if(_ov_nlohmann_json_FOUND)
|
||||
find_dependency(nlohmann_json)
|
||||
set_target_properties(nlohmann_json::nlohmann_json PROPERTIES IMPORTED_GLOBAL ON)
|
||||
add_library(IE::nlohmann_json ALIAS nlohmann_json::nlohmann_json)
|
||||
endif()
|
||||
unset(_ov_nlohmann_json_FOUND)
|
||||
|
||||
# inherit OpenCV from main IE project if enabled
|
||||
if("@OpenCV_FOUND@")
|
||||
# Use OpenCV_DIR from cache only if user doesn't define OpenCV_DIR
|
||||
if(NOT OpenCV_DIR)
|
||||
load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR)
|
||||
endif()
|
||||
find_dependency(OpenCV)
|
||||
endif()
|
||||
|
||||
#
|
||||
# Extra Compile Flags
|
||||
#
|
||||
|
||||
# don't fail on strict compilation options in 3rd party modules
|
||||
ov_dev_package_no_errors()
|
||||
|
||||
# Don't threat deprecated API warnings as errors in 3rd party apps
|
||||
ov_deprecated_no_errors()
|
||||
21
docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.rst
Normal file
21
docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.rst
Normal file
@@ -0,0 +1,21 @@
|
||||
.. {#openvino_docs_ie_plugin_dg_lp_representation}
|
||||
|
||||
Representation of low-precision models
|
||||
======================================
|
||||
The goal of this document is to describe how optimized models are represented in OpenVINO Intermediate Representation (IR) and provide guidance on interpretation rules for such models at runtime.
|
||||
Currently, there are two groups of optimization methods that can influence on the IR after applying them to the full-precision model:
|
||||
- **Sparsity**. It is represented by zeros inside the weights and this is up to the hardware plugin how to interpret these zeros (use weights as is or apply special compression algorithms and sparse arithmetic). No additional mask is provided with the model.
|
||||
- **Quantization**. The rest of this document is dedicated to the representation of quantized models.
|
||||
|
||||
## Representation of quantized models
|
||||
The OpenVINO Toolkit represents all the quantized models using the so-called FakeQuantize operation (see the description in [this document](@ref openvino_docs_ops_quantization_FakeQuantize_1)). This operation is very expressive and allows mapping values from arbitrary input and output ranges. The whole idea behind that is quite simple: we project (discretize) the input values to the low-precision data type using affine transformation (with clamp and rounding) and then reproject discrete values back to the original range and data type. It can be considered as an emulation of the quantization process which happens at runtime.
|
||||
In order to be able to execute a particular DL operation in low-precision all its inputs should be quantized i.e. should have FakeQuantize between operation and data blobs. The figure below shows an example of quantized Convolution which contains two FakeQuantize nodes: one for weights and one for activations (bias is quantized using the same parameters).
|
||||
![quantized_convolution]
|
||||
<div align="center">Figure 1. Example of quantized Convolution operation.</div>
|
||||
|
||||
Starting from OpenVINO 2020.2 release all the quantized models are represented in the compressed form. It means that the weights of low-precision operations are converted into the target precision (e.g. INT8). It helps to substantially reduce the model size. The rest of the parameters can be represented in FLOAT32 or FLOAT16 precision depending on the input full-precision model used in the quantization process. Fig. 2 below shows an example of the part of the compressed IR.
|
||||
![quantized_model_example]
|
||||
<div align="center">Figure 2. Example of compressed quantized model.</div>
|
||||
|
||||
[quantized_convolution]: images/quantized_convolution.png
|
||||
[quantized_model_example]: images/quantized_model_example.png
|
||||
@@ -0,0 +1,110 @@
|
||||
# [LEGACY] Extending Model Optimizer with Caffe Python Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers}
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to extract operator attributes in Model Optimizer to
|
||||
support a custom Caffe operation written only in Python.
|
||||
|
||||
.. danger::
|
||||
|
||||
The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications.
|
||||
|
||||
This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <openvino_docs_Extensibility_UG_Frontend_Extensions>` article.
|
||||
|
||||
This article provides instructions on how to support a custom Caffe operation written only in Python. For example, the
|
||||
`Faster-R-CNN model <https://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz?dl=0>`__ implemented in
|
||||
Caffe contains a custom proposal layer written in Python. The layer is described in the
|
||||
`Faster-R-CNN prototxt <https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt>`__ in the following way:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
layer {
|
||||
name: 'proposal'
|
||||
type: 'Python'
|
||||
bottom: 'rpn_cls_prob_reshape'
|
||||
bottom: 'rpn_bbox_pred'
|
||||
bottom: 'im_info'
|
||||
top: 'rois'
|
||||
python_param {
|
||||
module: 'rpn.proposal_layer'
|
||||
layer: 'ProposalLayer'
|
||||
param_str: "'feat_stride': 16"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
This article describes only a procedure on how to extract operator attributes in Model Optimizer. The rest of the
|
||||
operation enabling pipeline and information on how to support other Caffe operations (written in C++) is described in
|
||||
the :doc:`Customize Model Optimizer <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>` guide.
|
||||
|
||||
========================================
|
||||
Writing Extractor for Caffe Python Layer
|
||||
========================================
|
||||
|
||||
Custom Caffe Python layers have an attribute ``type`` (defining the type of the operation) equal to ``Python`` and two
|
||||
mandatory attributes ``module`` and ``layer`` in the ``python_param`` dictionary. The ``module`` defines the Python module name
|
||||
with the layer implementation, while ``layer`` value is an operation type defined by a user. In order to extract
|
||||
attributes for such an operation it is necessary to implement extractor class inherited from the
|
||||
``CaffePythonFrontExtractorOp`` class instead of ``FrontExtractorOp`` class, used for standard framework layers. The ``op``
|
||||
class attribute value should be set to the ``module + "." + layer`` value so the extractor is triggered for this kind of
|
||||
operation.
|
||||
|
||||
Below is a simplified example of the extractor for the custom operation Proposal from the mentioned Faster-R-CNN model.
|
||||
The full code with additional checks can be found `here <https://github.com/openvinotoolkit/openvino/blob/releases/2022/1/tools/mo/openvino/tools/mo/front/caffe/proposal_python_ext.py>`__.
|
||||
|
||||
The sample code uses operation ``ProposalOp`` which corresponds to ``Proposal`` operation described in the :doc:`Available Operations Sets <openvino_docs_ops_opset>`
|
||||
page. For a detailed explanation of the extractor, refer to the source code below.
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo.ops.proposal import ProposalOp
|
||||
from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp
|
||||
|
||||
|
||||
class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp):
|
||||
op = 'rpn.proposal_layer.ProposalLayer' # module + "." + layer
|
||||
enabled = True # extractor is enabled
|
||||
|
||||
@staticmethod
|
||||
def extract_proposal_params(node, defaults):
|
||||
param = node.pb.python_param # get the protobuf message representation of the layer attributes
|
||||
# parse attributes from the layer protobuf message to a Python dictionary
|
||||
attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str)
|
||||
update_attrs = defaults
|
||||
|
||||
# the operation expects ratio and scale values to be called "ratio" and "scale" while Caffe uses different names
|
||||
if 'ratios' in attrs:
|
||||
attrs['ratio'] = attrs['ratios']
|
||||
del attrs['ratios']
|
||||
if 'scales' in attrs:
|
||||
attrs['scale'] = attrs['scales']
|
||||
del attrs['scales']
|
||||
|
||||
update_attrs.update(attrs)
|
||||
ProposalOp.update_node_stat(node, update_attrs) # update the node attributes
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node):
|
||||
# define default values for the Proposal layer attributes
|
||||
defaults = {
|
||||
'feat_stride': 16,
|
||||
'base_size': 16,
|
||||
'min_size': 16,
|
||||
'ratio': [0.5, 1, 2],
|
||||
'scale': [8, 16, 32],
|
||||
'pre_nms_topn': 6000,
|
||||
'post_nms_topn': 300,
|
||||
'nms_thresh': 0.7
|
||||
}
|
||||
cls.extract_proposal_params(node, defaults)
|
||||
return cls.enabled
|
||||
|
||||
====================
|
||||
Additional Resources
|
||||
====================
|
||||
|
||||
* :doc:`Model Optimizer Extensibility <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>`
|
||||
* :doc:`Graph Traversal and Modification Using Ports and Connections <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections>`
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
|
||||
@@ -0,0 +1,60 @@
|
||||
# [LEGACY] Model Optimizer Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions}
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn about deprecated extensions, which enable injecting logic
|
||||
to the model conversion pipeline without changing the Model
|
||||
Optimizer core code.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation
|
||||
openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor
|
||||
openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions
|
||||
|
||||
.. danger::
|
||||
|
||||
The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications.
|
||||
|
||||
This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <openvino_docs_Extensibility_UG_Frontend_Extensions>` article.
|
||||
|
||||
Model Optimizer extensions enable you to inject some logic to the model conversion pipeline without changing the Model
|
||||
Optimizer core code. There are three types of the Model Optimizer extensions:
|
||||
|
||||
1. :doc:`Model Optimizer operation <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation>`.
|
||||
2. A :doc:`framework operation extractor <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor>`.
|
||||
3. A :doc:`model transformation <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions>`, which can be executed during front, middle or back phase of the model conversion.
|
||||
|
||||
An extension is just a plain text file with a Python code. The file should contain a class (or classes) inherited from
|
||||
one of extension base classes. Extension files should be saved to a directory with the following structure:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
./<MY_EXT>/
|
||||
ops/ - custom operations
|
||||
front/ - framework independent front transformations
|
||||
<FRAMEWORK_1>/ - front transformations for <FRAMEWORK_1> models only and extractors for <FRAMEWORK_1> operations
|
||||
<FRAMEWORK_2>/ - front transformations for <FRAMEWORK_2> models only and extractors for <FRAMEWORK_2> operations
|
||||
...
|
||||
middle/ - middle transformations
|
||||
back/ - back transformations
|
||||
|
||||
Model Optimizer uses the same layout internally to keep built-in extensions. The only exception is that the
|
||||
``mo/ops/`` directory is also used as a source of the Model Optimizer operations due to historical reasons.
|
||||
|
||||
.. note::
|
||||
The name of a root directory with extensions should not be equal to "extensions" because it will result in a name conflict with the built-in Model Optimizer extensions.
|
||||
|
||||
.. note::
|
||||
Model Optimizer itself is built by using these extensions, so there is a huge number of examples of their usage in the Model Optimizer code.
|
||||
|
||||
====================
|
||||
Additional Resources
|
||||
====================
|
||||
|
||||
* :doc:`Model Optimizer Extensibility <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>`
|
||||
* :doc:`Graph Traversal and Modification Using Ports and Connections <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@@ -0,0 +1,113 @@
|
||||
# [LEGACY] Operation Extractor {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor}
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn about a deprecated generic extension in Model Optimizer,
|
||||
which provides the operation extractor usable for all model
|
||||
frameworks.
|
||||
|
||||
|
||||
.. danger::
|
||||
|
||||
The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications.
|
||||
|
||||
This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <openvino_docs_Extensibility_UG_Frontend_Extensions>` article.
|
||||
|
||||
Model Optimizer runs specific extractor for each operation in the model during the model loading.
|
||||
|
||||
There are several types of Model Optimizer extractor extensions:
|
||||
|
||||
1. The generic one, which is described in this article.
|
||||
2. The special extractor for Caffe models with Python layers. This kind of extractor is described in the :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>` guide.
|
||||
|
||||
Generic extension provides a generic mechanism for the operation extractor applicable for all frameworks. Model Optimizer provides the ``mo.front.extractor.FrontExtractorOp`` class as a base class to implement the extractor. It has the ``extract`` class method, which gets the only parameter ``Node``, which corresponds to the graph node to extract data from. The operation description in the original framework format is stored in the attribute ``pb`` of the node. The extractor goal is to parse this attribute and save necessary attributes to the corresponding node of the graph. Consider the extractor for the ``Const`` TensorFlow operation (refer to the ``extensions/front/tf/const_ext.py`` file):
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo.front.extractor import FrontExtractorOp
|
||||
from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape, tf_tensor_content
|
||||
from openvino.tools.mo.ops.const import Const
|
||||
|
||||
|
||||
class ConstExtractor(FrontExtractorOp):
|
||||
# The "op" class attribute defines a type of the operation in the framework (in this case it is a TensorFlow),
|
||||
# for which the extractor should be triggered.
|
||||
op = 'Const'
|
||||
enabled = True # The flag that indicates that this extractor is enabled.
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node): # The entry point of the extractor.
|
||||
# The `node.pb` attribute stores the TensorFlow representation of the operation, which is a Protobuf message of the
|
||||
# specific format. In particular, the message contains the attribute called "value" containing the description of
|
||||
# the constant. The string "pb.attr["value"].tensor" is just a Python binding for Protobuf message parsing.
|
||||
pb_tensor = node.pb.attr["value"].tensor
|
||||
# Get the shape of the tensor from the protobuf message, using the helper function "tf_tensor_shape".
|
||||
shape = tf_tensor_shape(pb_tensor.tensor_shape)
|
||||
# Create a dictionary with necessary attributes.
|
||||
attrs = {
|
||||
'shape': shape,
|
||||
# Get the tensor value, using "tf_tensor_content" helper function.
|
||||
'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor),
|
||||
# Get the tensor data type, using "tf_dtype_extractor" helper function.
|
||||
'data_type': tf_dtype_extractor(pb_tensor.dtype),
|
||||
}
|
||||
# Update the node attributes, using default attributes from the "Const" operation and attributes saved to the
|
||||
# "attrs" dictionary.
|
||||
Const.update_node_stat(node, attrs)
|
||||
return cls.enabled
|
||||
|
||||
Consider another example with an extractor of the ``Constant`` ONNX operation (refer to the ``extensions/front/onnx/const_ext.py`` file):
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from onnx import numpy_helper
|
||||
from onnx.numpy_helper import to_array
|
||||
|
||||
from openvino.tools.mo.front.extractor import FrontExtractorOp
|
||||
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
|
||||
from openvino.tools.mo.ops.const import Const
|
||||
|
||||
|
||||
class ConstantExtractor(FrontExtractorOp):
|
||||
op = 'Constant'
|
||||
enabled = True
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node):
|
||||
# Use "onnx_attr" helper method, which parses the Protobuf representation of the operation saved in the "node".
|
||||
# Gets the value of the attribute with name "value" as "TensorProto" type (specified with a keyword "t").
|
||||
pb_value = onnx_attr(node, 'value', 't')
|
||||
# Use "numpy_helper.to_array()" ONNX helper method to convert "TensorProto" object to a numpy array.
|
||||
value = numpy_helper.to_array(pb_value)
|
||||
|
||||
attrs = {
|
||||
'data_type': value.dtype,
|
||||
'value': value,
|
||||
}
|
||||
# Update the node attributes, using default attributes from the "Const" operation and attributes saved to the
|
||||
# "attrs" dictionary.
|
||||
Const.update_node_stat(node, attrs)
|
||||
return cls.enabled
|
||||
|
||||
The extractors for operations from different frameworks work similarly. The only difference is in the helper methods used to parse operation attributes encoded with a framework-specific representation.
|
||||
|
||||
A common practice is to use ``update_node_stat()`` method of the dedicated ``Op`` class to update the node attributes. This method does the following:
|
||||
|
||||
1. Sets values for common attributes like ``op``, ``type``, ``infer``, ``in_ports_count``, ``out_ports_count``, ``version`` to values specific to the dedicated operation (``Const`` operation in this case).
|
||||
2. Uses ``supported_attrs()`` and ``backend_attrs()`` methods, defined in the ``Op`` class to update specific node attribute ``IE``. The IR emitter uses the value stored in the ``IE`` attribute to pre-process attribute values and save them to IR.
|
||||
3. Optionally sets additional attributes provided to the ``update_node_stat()`` function as a second parameter. Usually these attributes are parsed from the particular instance of the operation.
|
||||
|
||||
.. note::
|
||||
Model Optimizer uses numpy arrays to store values and numpy arrays of ``np.int64`` type to store shapes in the graph.
|
||||
|
||||
====================
|
||||
Additional Resources
|
||||
====================
|
||||
|
||||
* :doc:`Model Optimizer Extensibility <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>`
|
||||
* :doc:`Graph Traversal and Modification Using Ports and Connections <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections>`
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@@ -0,0 +1,110 @@
|
||||
# [LEGACY] Model Optimizer Operation {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation}
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn about the Op class, that contains operation attributes,
|
||||
which are set to a node of the graph created during model
|
||||
conversion with Model Optimizer.
|
||||
|
||||
.. danger::
|
||||
|
||||
The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications.
|
||||
|
||||
This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <openvino_docs_Extensibility_UG_Frontend_Extensions>` article.
|
||||
|
||||
Model Optimizer defines a ``mo.ops.Op`` class (``Op`` will be used later in the document to be short), which is a base class
|
||||
for an operation used in the Model Optimizer. The instance of the ``Op`` class serves several purposes:
|
||||
|
||||
1. Stores the operation attributes.
|
||||
2. Stores the operation shape/value and type inference functions.
|
||||
3. Defines operation attributes to be saved to the corresponding IR section.
|
||||
4. Contains convenient methods to create a graph node from an ``Op`` object instance and connect it with the existing graph.
|
||||
5. Used in the extractors to store parsed attributes and operation specific attributes in the dedicated graph node.
|
||||
|
||||
It is important to mention that there is no connection between the instance of the ``Op`` class and the ``Node`` object
|
||||
created from it. The ``Op`` class is just a container for attributes describing the operation. Model Optimizer uses the ``Op``
|
||||
class during a model conversion to create a node of the graph with attributes copied from the ``Op`` class instance. Graph
|
||||
manipulations are performed with graph ``Nodes`` and their attributes and do not involve ``Ops``.
|
||||
|
||||
There are a number of common attributes used in the operations. Below is the list of these attributes with description.
|
||||
|
||||
* ``id`` — **(Mandatory)** — unique identifier of a node in a graph. Generated automatically, equal to the number of nodes in the graph plus 1 if not specified.
|
||||
* ``name`` — **(Mandatory)** — name of the operation. Generated automatically, equal to the ``id`` if not specified.
|
||||
* ``type`` — **(Mandatory)** — type of the operation according to the :doc:`opset specification <openvino_docs_ops_opset>`. For the internal Model Optimizer operations, this attribute should be set to ``None``. The model conversion fails if an operation with ``type`` equal to ``None`` comes to the IR emitting phase.
|
||||
* ``version`` — **(Mandatory)** — the operation set (opset) name the operation belongs to. If not specified, Model Optimizer sets it equal to ``experimental``. For more information about operation sets, refer to :doc:`OpenVINO Model Representation <openvino_docs_OV_UG_Model_Representation>` section.
|
||||
* ``op`` — Model Optimizer type of the operation. In many cases, the value of ``type`` is equal to the value of ``op``. However, when Model Optimizer cannot instantiate the opset operation during model loading, it creates an instance of an internal operation. Thus, the attribute ``op`` is used as a type of this internal operation. Later in the pipeline, the node created from an internal operation will be replaced during front, middle or back phase with node(s) created from the opset.
|
||||
* ``infer`` — the attribute defines a function calculating output tensor(s) shape and optional value(s). The attribute may be set to ``None`` for the internal Model Optimizer operations used during the front phase only. For more information about the shape inference function, refer to the :ref:`Partial Inference <mo_partial_inference>`.
|
||||
* ``type_infer`` — the attribute defines a function calculating output tensor(s) data type. If the attribute is not defined, the default function is used. The function checks if the ``data_type`` node attribute is set and then propagates this type to the output tensor from the **port 0**. Otherwise, it propagates the data type of the tensor coming into the input **port 0** to the output tensor from the **port 0**.
|
||||
* ``in_ports_count`` — default number of input ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods.
|
||||
* ``out_ports_count`` — default number of output ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods.
|
||||
|
||||
Below is an example of the Model Optimizer class for the :doc:`SoftMax <openvino_docs_ops_activation_SoftMax_1>` operation from
|
||||
the ``mo/ops/softmax.py`` file with the comments in code.
|
||||
|
||||
.. code-block:: py
|
||||
|
||||
class Softmax(Op):
|
||||
# The class attribute defines a name of the operation so the operation class can be obtained using the
|
||||
# "Op.get_op_class_by_name()" static method
|
||||
op = 'SoftMax'
|
||||
|
||||
# The operation works as an extractor by default. This is a legacy behavior, currently not recommended for use,
|
||||
# thus "enabled" class attribute is set to False. The recommended approach is to use dedicated extractor extension.
|
||||
enabled = False
|
||||
|
||||
def __init__(self, graph: Graph, attrs: dict):
|
||||
super().__init__(graph, { # The constructor of the base class Op is called with additional default attributes.
|
||||
'type': __class__.op, # The operation is from the opset so the type is set to 'SoftMax'.
|
||||
'op': __class__.op, # Internal Model Optimizer operation has the same type.
|
||||
'version': 'opset1', # The operation corresponds to opset1.
|
||||
'infer': Softmax.infer, # Shape inference function is defined below.
|
||||
'axis': 1, # Default value for the "axis" attribute of the operation SoftMax.
|
||||
'in_ports_count': 1, # The operation has one input.
|
||||
'out_ports_count': 1, # The operation produces one output.
|
||||
}, attrs)
|
||||
|
||||
# The method returns operation specific attributes list. This method is important when implementing
|
||||
# extractor inherited from CaffePythonFrontExtractorOp class to extract attribute for Caffe Python operation.
|
||||
# However, it is currently used interchangeably with the "backend_attrs()" method. If the "backend_attrs()" is not used,
|
||||
# then the "supported_attrs()" is used instead. In this particular case, the operation has just one attribute "axis".
|
||||
def supported_attrs(self):
|
||||
return ['axis']
|
||||
|
||||
@staticmethod
|
||||
def infer(node: Node):
|
||||
"some code calculating output shape and values"
|
||||
|
||||
There is a dedicated method called ``backend_attrs()`` defining a list of attributes to be saved to the IR. Consider an
|
||||
example from the ``mo/ops/pooling.py`` file:
|
||||
|
||||
.. code-block:: py
|
||||
|
||||
def backend_attrs(self):
|
||||
return [
|
||||
('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
|
||||
('kernel', lambda node: ','.join(map(str, node['window'][node.spatial_dims]))),
|
||||
|
||||
('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))),
|
||||
('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))),
|
||||
|
||||
('pool-method', 'pool_method'),
|
||||
('exclude-pad', 'exclude_pad'),
|
||||
|
||||
'rounding_type',
|
||||
'auto_pad',
|
||||
]
|
||||
|
||||
The ``backend_attrs()`` function returns a list of records. A record can be of one of the following formats:
|
||||
1. A string defining the attribute to be saved to the IR. If the value of the attribute is ``None``, the attribute is not saved. Examples of this case are ``rounding_type`` and ``auto_pad``.
|
||||
2. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is a function to produce the value for this attribute. The function gets an instance of the ``Node`` as the only parameter and returns a string with the value to be saved to the IR. Examples of this case are ``strides``, ``kernel``, ``pads_begin`` and ``pads_end``.
|
||||
3. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is the name of the ``Node`` attribute to get the value from. Examples of this case are ``pool-method`` and ``exclude-pad``.
|
||||
|
||||
====================
|
||||
Additional Resources
|
||||
====================
|
||||
|
||||
* :doc:`Model Optimizer Extensibility <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>`
|
||||
* :doc:`Graph Traversal and Modification Using Ports and Connections <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections>`
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@@ -0,0 +1,186 @@
|
||||
# [LEGACY] Graph Traversal and Modification {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections}
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn about deprecated APIs and the Port and Connection classes
|
||||
in Model Optimizer used for graph traversal and transformation.
|
||||
|
||||
.. danger::
|
||||
|
||||
The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications.
|
||||
|
||||
This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <openvino_docs_Extensibility_UG_Frontend_Extensions>` article.
|
||||
|
||||
There are three APIs for a graph traversal and transformation used in the Model Optimizer:
|
||||
|
||||
1. The API provided with the ``networkx`` Python library for the ``networkx.MultiDiGraph`` class, which is the base class for
|
||||
the ``mo.graph.graph.Graph`` object. For example, the following methods belong to this API level:
|
||||
|
||||
* ``graph.add_edges_from([list])``,
|
||||
* ``graph.add_node(x, attrs)``,
|
||||
* ``graph.out_edges(node_id)``
|
||||
* other methods where ``graph`` is an instance of the ``networkx.MultiDiGraph`` class.
|
||||
|
||||
**This is the lowest-level API. Avoid using it in the Model Optimizer transformations**. For more details, refer to the :ref:`Model Representation in Memory <mo_model_representation_in_memory>` section.
|
||||
|
||||
2. The API built around the ``mo.graph.graph.Node`` class. The ``Node`` class is the primary class to work with graph nodes
|
||||
and their attributes. Examples of such methods and functions are:
|
||||
|
||||
* ``node.in_node(y)``,
|
||||
* ``node.out_node(x)``,
|
||||
* ``node.get_outputs()``,
|
||||
* ``node.insert_node_after(n1, y)``,
|
||||
* ``create_edge(n1, n2)``
|
||||
|
||||
**There are some "Node" class methods not recommended for use and some functions defined in the mo.graph.graph have been deprecated**. For more details, refer to the ``mo/graph/graph.py`` file.
|
||||
|
||||
3. The high-level API called Model Optimizer Graph API, which uses ``mo.graph.graph.Graph``, ``mo.graph.port.Port`` and
|
||||
``mo.graph.connection.Connection`` classes. For example, the following methods belong to this API level:
|
||||
|
||||
* ``node.in_port(x)``,
|
||||
* ``node.out_port(y)``,
|
||||
* ``port.get_connection()``,
|
||||
* ``connection.get_source()``,
|
||||
* ``connection.set_destination(dest_port)``
|
||||
|
||||
**This is the recommended API for the Model Optimizer transformations and operations implementation**.
|
||||
|
||||
The main benefit of using the Model Optimizer Graph API is that it hides some internal implementation details (the fact that
|
||||
the graph contains data nodes), provides API to perform safe and predictable graph manipulations, and adds operation
|
||||
semantic to the graph. This is achieved with introduction of concepts of ports and connections.
|
||||
|
||||
.. note::
|
||||
This article is dedicated to the Model Optimizer Graph API only and does not cover other two non-recommended APIs.
|
||||
|
||||
.. _mo_intro_ports:
|
||||
|
||||
=====
|
||||
Ports
|
||||
=====
|
||||
|
||||
An operation semantic describes how many inputs and outputs the operation has. For example,
|
||||
:doc:`Parameter <openvino_docs_ops_infrastructure_Parameter_1>` and :doc:`Const <openvino_docs_ops_infrastructure_Constant_1>` operations have no
|
||||
inputs and have one output, :doc:`ReLU <openvino_docs_ops_activation_ReLU_1>` operation has one input and one output,
|
||||
:doc:`Split <openvino_docs_ops_movement_Split_1>` operation has 2 inputs and a variable number of outputs depending on the value of the
|
||||
attribute ``num_splits``.
|
||||
|
||||
Each operation node in the graph (an instance of the ``Node`` class) has 0 or more input and output ports (instances of
|
||||
the ``mo.graph.port.Port`` class). The ``Port`` object has several attributes:
|
||||
|
||||
* ``node`` - the instance of the ``Node`` object the port belongs to.
|
||||
* ``idx`` - the port number. Input and output ports are numbered independently, starting from ``0``. Thus,
|
||||
:doc:`ReLU <openvino_docs_ops_activation_ReLU_1>` operation has one input port (with index ``0``) and one output port (with index ``0``).
|
||||
* ``type`` - the type of the port. Could be equal to either ``"in"`` or ``"out"``.
|
||||
* ``data`` - the object that should be used to get attributes of the corresponding data node. This object has methods ``get_shape()`` / ``set_shape()`` and ``get_value()`` / ``set_value()`` to get/set shape/value of the corresponding data node. For example, ``in_port.data.get_shape()`` returns an input shape of a tensor connected to input port ``in_port`` (``in_port.type == 'in'``), ``out_port.data.get_value()`` returns a value of a tensor produced from output port ``out_port`` (``out_port.type == 'out'``).
|
||||
|
||||
.. note::
|
||||
Functions ``get_shape()`` and ``get_value()`` return ``None`` until the partial inference phase. For more information about model conversion phases, refer to the :ref:`Model Conversion Pipeline <mo_model_conversion_pipeline>`. For information about partial inference phase, see the :ref:`Partial Inference <mo_partial_inference>`.
|
||||
|
||||
There are several methods of the ``Node`` class to get the instance of a corresponding port:
|
||||
|
||||
* ``in_port(x)`` and ``out_port(x)`` to get the input/output port with number ``x``.
|
||||
* ``in_ports()`` and ``out_ports()`` to get a dictionary, where key is a port number and the value is the corresponding input/output port.
|
||||
|
||||
Attributes ``in_ports_count`` and ``out_ports_count`` of the ``Op`` class instance define default number of input and output
|
||||
ports to be created for the ``Node``. However, additional input/output ports can be added using methods
|
||||
``add_input_port()`` and ``add_output_port()``. Port also can be removed, using the ``delete_input_port()`` and
|
||||
``delete_output_port()`` methods.
|
||||
|
||||
The ``Port`` class is just an abstraction that works with edges incoming/outgoing to/from a specific ``Node`` instance. For
|
||||
example, output port with ``idx = 1`` corresponds to the outgoing edge of a node with an attribute ``out = 1``, the input
|
||||
port with ``idx = 2`` corresponds to the incoming edge of a node with an attribute ``in = 2``.
|
||||
|
||||
Consider the example of a graph part with 4 operation nodes "Op1", "Op2", "Op3", and "Op4" and a number of data nodes
|
||||
depicted with light green boxes.
|
||||
|
||||
.. image:: _static/images/MO_ports_example_1.svg
|
||||
:scale: 80 %
|
||||
:align: center
|
||||
|
||||
Operation nodes have input ports (yellow squares) and output ports (light purple squares). Input port may not be
|
||||
connected. For example, the input **port 2** of node **Op1** does not have an incoming edge, while an output port always has an
|
||||
associated data node (after the partial inference when the data nodes are added to the graph), which may have no
|
||||
consumers.
|
||||
|
||||
Ports can be used to traverse a graph. The method ``get_source()`` of an input port returns an output port producing the
|
||||
tensor consumed by the input port. It is important that the method works the same during front, middle and back phases of a
|
||||
model conversion even though the graph structure changes (there are no data nodes in the graph during the front phase).
|
||||
|
||||
Let's assume that there are 4 instances of ``Node`` object ``op1, op2, op3``, and ``op4`` corresponding to nodes **Op1**, **Op2**,
|
||||
**Op3**, and **Op4**, respectively. The result of ``op2.in_port(0).get_source()`` and ``op4.in_port(1).get_source()`` is the
|
||||
same object ``op1.out_port(1)`` of type ``Port``.
|
||||
|
||||
The method ``get_destination()`` of an output port returns the input port of the node consuming this tensor. If there are
|
||||
multiple consumers of this tensor, the error is raised. The method ``get_destinations()`` of an output port returns a
|
||||
list of input ports consuming the tensor.
|
||||
|
||||
The method ``disconnect()`` removes a node incoming edge corresponding to the specific input port. The method removes
|
||||
several edges if it is applied during the front phase for a node output port connected with multiple nodes.
|
||||
|
||||
The method ``port.connect(another_port)`` connects output port ``port`` and input port ``another_port``. The method handles
|
||||
situations when the graph contains data nodes (middle and back phases) and does not create an edge between two nodes
|
||||
but also automatically creates data node or reuses existing data node. If the method is used during the front phase and
|
||||
data nodes do not exist, the method creates edge and properly sets ``in`` and ``out`` edge attributes.
|
||||
|
||||
For example, applying the following two methods to the graph above will result in the graph depicted below:
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
op4.in_port(1).disconnect()
|
||||
op3.out_port(0).connect(op4.in_port(1))
|
||||
|
||||
.. image:: _static/images/MO_ports_example_2.svg
|
||||
:scale: 80 %
|
||||
:align: center
|
||||
|
||||
.. note::
|
||||
For a full list of available methods, refer to the ``Node`` class implementation in the ``mo/graph/graph.py`` and ``Port`` class implementation in the ``mo/graph/port.py`` files.
|
||||
|
||||
===========
|
||||
Connections
|
||||
===========
|
||||
|
||||
Connection is a concept introduced to easily and reliably perform graph modifications. Connection corresponds to a
|
||||
link between a source output port with one or more destination input ports or a link between a destination input port
|
||||
and a source output port producing data. So, each port is connected with one or more ports with the help of a connection.
|
||||
Model Optimizer uses the ``mo.graph.connection.Connection`` class to represent a connection.
|
||||
|
||||
There is only one ``get_connection()`` method of the ``Port`` class to get the instance of the corresponding ``Connection``
|
||||
object. If the port is not connected, the returned value is ``None``.
|
||||
|
||||
For example, the ``op3.out_port(0).get_connection()`` method returns a ``Connection`` object encapsulating edges from node
|
||||
**Op3** to data node **data_3_0** and two edges from data node **data_3_0** to two ports of the node **Op4**.
|
||||
|
||||
The ``Connection`` class provides methods to get source and destination(s) ports the connection corresponds to:
|
||||
|
||||
* ``connection.get_source()`` - returns an output ``Port`` object producing the tensor.
|
||||
* ``connection.get_destinations()`` - returns a list of input ``Port`` consuming the data.
|
||||
* ``connection.get_destination()`` - returns a single input ``Port`` consuming the data. If there are multiple consumers, the exception is raised.
|
||||
|
||||
The ``Connection`` class provides methods to modify a graph by changing a source or destination(s) of a connection. For
|
||||
example, the function call ``op3.out_port(0).get_connection().set_source(op1.out_port(0))`` changes source port of edges
|
||||
consuming data from port ``op3.out_port(0)`` to ``op1.out_port(0)``. The transformed graph from the sample above is depicted
|
||||
below:
|
||||
|
||||
.. image:: _static/images/MO_connection_example_1.svg
|
||||
:scale: 80 %
|
||||
:align: center
|
||||
|
||||
Another example is the ``connection.set_destination(dest_port)`` method. It disconnects ``dest_port`` and all input ports to which
|
||||
the connection is currently connected and connects the connection source port to ``dest_port``.
|
||||
|
||||
Note that connection works seamlessly during front, middle, and back phases and hides the fact that the graph structure is
|
||||
different.
|
||||
|
||||
.. note::
|
||||
For a full list of available methods, refer to the ``Connection`` class implementation in the ``mo/graph/connection.py`` file.
|
||||
|
||||
====================
|
||||
Additional Resources
|
||||
====================
|
||||
|
||||
* :doc:`Model Optimizer Extensibility <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>`
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
@@ -0,0 +1,605 @@
|
||||
# [LEGACY] Graph Transformation Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions}
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn about various base classes for front, middle and back phase
|
||||
transformations applied during model conversion with Model Optimizer.
|
||||
|
||||
.. danger::
|
||||
|
||||
The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications.
|
||||
|
||||
This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions <openvino_docs_Extensibility_UG_Frontend_Extensions>` article.
|
||||
|
||||
Model Optimizer provides various base classes to implement :ref:`Front Phase Transformations <mo_front_phase_transformations>`,
|
||||
:ref:`Middle Phase Transformations <mo_middle_phase_transformations>`, and :ref:`Back Phase Transformations <mo_back_phase_transformations>`.
|
||||
All classes have the following common class attributes and methods:
|
||||
|
||||
1. The ``enabled`` attribute specifies whether the transformation is enabled or not. The value can be changed during runtime to enable or disable execution of the transformation during a model conversion. Default value is ``True``.
|
||||
2. The ``id`` attribute specifies a unique transformation string identifier. This transformation identifier can be used to enable (disable) the transformation by setting environment variable ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) with a comma-separated list of ``ids``. The environment variables override the value of the ``enabled`` attribute of the transformation. Instead of using the ``id`` attribute value, you can add a fully defined class name to the ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) variable, for example, ``extensions.back.NormalizeToNormalizeL2.NormalizeToNormalizeL2``. It is an optional attribute.
|
||||
3. The ``run_not_recursively`` attribute specifies whether the transformation should be executed in the sub-graphs, for example, body of the :doc:`TensorIterator <openvino_docs_ops_infrastructure_TensorIterator_1>` and the :doc:`Loop <openvino_docs_ops_infrastructure_Loop_5>`. Default value is ``True``.
|
||||
4. The ``force_clean_up`` attribute specifies whether the graph clean up should be executed after the transformation. The graph cleanup removes nodes of the graph not reachable from the model inputs. Default value is ``False``.
|
||||
5. The ``force_shape_inference`` attribute specifies whether the nodes marked with ``need_shape_inference`` attribute equal to ``True`` should be re-inferred after the transformation. Model Optimizer sets this attribute automatically for nodes, input(s) of which were changed during the transformation, or you can set this attribute manually in the transformation for the specific nodes. Default value is ``False``.
|
||||
6. The ``graph_condition`` attribute specifies a list of functions, each taking one parameter — a ``Graph`` object. The transformation is executed if and only if all functions return ``True``. If the attribute is not set, no check is performed.
|
||||
7. Method ``run_before()`` returns a list of transformation classes which this transformation should be executed before.
|
||||
8. Method ``run_after()`` returns a list of transformation classes which this transformation should be executed after.
|
||||
|
||||
.. note::
|
||||
Some of the transformation types have specific class attributes and methods, which are explained in the corresponding sections of this document.
|
||||
|
||||
Model Optimizer builds a graph of dependencies between registered transformations and executes them in the topological
|
||||
order. To execute the transformation during a proper model conversion phase, Model Optimizer defines several
|
||||
anchor transformations that do nothing. All transformations are ordered with respect to these anchor transformations.
|
||||
The diagram below shows anchor transformations, some of built-in transformations and dependencies between them:
|
||||
|
||||
.. image:: _static/images/MO_transformations_graph.svg
|
||||
|
||||
User-defined transformations are executed after the corresponding ``Start`` and before the corresponding ``Finish`` anchor
|
||||
transformations by default (if ``run_before()`` and ``run_after()`` methods have not been overridden).
|
||||
|
||||
.. note::
|
||||
The ``PreMiddleStart`` and ``PostMiddleStart`` anchors were introduced due to historical reasons to refactor the Model Optimizer pipeline, which initially had a hardcoded order of transformations.
|
||||
|
||||
.. _mo_front_phase_transformations:
|
||||
|
||||
===========================
|
||||
Front Phase Transformations
|
||||
===========================
|
||||
|
||||
There are several types of a front phase transformation:
|
||||
|
||||
1. :ref:`Pattern-Defined Front Phase Transformations <pattern_defined_front_phase_transformations>` triggered for each sub-graph of the original graph isomorphic to the specified pattern.
|
||||
2. :ref:`Specific Operation Front Phase Transformations <specific_operation_front_phase_transformations>` triggered for the node with a specific ``op`` attribute value.
|
||||
3. :ref:`Generic Front Phase Transformations <generic_front_phase_transformations>`.
|
||||
4. Manually enabled transformation, defined with a JSON configuration file (for TensorFlow, ONNX, Apache MXNet, and PaddlePaddle models), specified using the ``--transformations_config`` command-line parameter:
|
||||
|
||||
1. :ref:`Node Name Pattern Front Phase Transformations <node_name_pattern_front_phase_transformations>`.
|
||||
2. :ref:`Front Phase Transformations Using Start and End Points <start_end_points_front_phase_transformations>`.
|
||||
3. :ref:`Generic Front Phase Transformations Enabled with Transformations Configuration File <generic_transformations_config_front_phase_transformations>`.
|
||||
|
||||
.. _pattern_defined_front_phase_transformations:
|
||||
|
||||
Pattern-Defined Front Phase Transformations
|
||||
###########################################
|
||||
|
||||
This type of transformation is implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` and
|
||||
``mo.front.common.replacement.FrontReplacementPattern`` as base classes and works as follows:
|
||||
|
||||
1. Define a sub-graph to be matched, using a list of nodes with attributes and edges connecting them (edges may also have attributes).
|
||||
2. Model Optimizer searches for all sub-graphs of the original graph, isomorphic to the specified sub-graph (pattern).
|
||||
3. Model Optimizer executes the defined function performing graph transformation for each instance of a matched sub-graph. You can override different functions in the base transformation class so the Model Optimizer works differently:
|
||||
|
||||
1. Override the ``replace_sub_graph(self, graph, match)`` method. In this case, Model Optimizer only executes the overridden function, passing the ``graph`` object and a dictionary describing the matched sub-graph. You are required to write the transformation and connect the newly created nodes to the rest of the graph.
|
||||
2. Override the ``generate_sub_graph(self, graph, match)`` method. This case is not recommended for use because it is the most complicated approach. It can be effectively replaced with the previous approach.
|
||||
|
||||
The sub-graph pattern is defined in the ``pattern()`` function. This function should return a dictionary with two keys:
|
||||
``nodes`` and ``edges``:
|
||||
|
||||
* The value for the ``nodes`` key is a list of tuples with two elements.
|
||||
|
||||
* The first element is an alias name for a node that will be used to define edges between nodes and in the transformation function.
|
||||
* The second element is a dictionary with attributes. The key is a name of an attribute that should exist in the node. The value for the attribute can be some specific value to match or a function that gets a single parameter - the attribute value from the node. The function should return the result of attribute comparison with a dedicated value.
|
||||
|
||||
* The value for the ``edges`` key is a list of tuples with two or three elements.
|
||||
|
||||
* The first element is the alias name of the node producing a tensor.
|
||||
* The second element is the alias name of the node consuming the tensor.
|
||||
* The third element (optional) is the dictionary with expected edge attributes. This dictionary usually contains attributes like ``in`` and ``out``, defining input and output ports.
|
||||
|
||||
Consider the example of a front transformation implemented in the ``extensions/front/Mish_fusion.py`` file performing
|
||||
fusing of the sub-graph defining the :doc:`Mish <openvino_docs_ops_activation_Mish_4>` activation function into a single
|
||||
operation:
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo.front.Softplus_fusion import SoftplusFusion
|
||||
from openvino.tools.mo.ops.activation_ops import Mish
|
||||
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
|
||||
from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch
|
||||
from openvino.tools.mo.graph.graph import Graph, rename_nodes
|
||||
|
||||
|
||||
class MishFusion(FrontReplacementSubgraph):
|
||||
"""
|
||||
The transformation looks for the pattern with Softplus defining the Mish function: Mish(x) = x * tanh(SoftPlus(x)).
|
||||
"""
|
||||
enabled = True # Transformation is enabled.
|
||||
|
||||
def run_after(self): # Run this transformation after "SoftplusFusion" transformation.
|
||||
return [SoftplusFusion]
|
||||
|
||||
def pattern(self): # Define pattern according to formulae x * tanh(SoftPlus(x)).
|
||||
return dict(
|
||||
nodes=[
|
||||
('mul', dict(op='Mul')),
|
||||
('tanh', dict(op='Tanh')),
|
||||
('softplus', dict(op='SoftPlus')),
|
||||
],
|
||||
edges=[
|
||||
('softplus', 'tanh'),
|
||||
('tanh', 'mul'),
|
||||
])
|
||||
|
||||
def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): # Entry point for the transformation.
|
||||
mul = match['mul'] # Get the Node corresponding to matched "mul" node.
|
||||
mul_name = mul.soft_get('name', mul.id)
|
||||
softplus = match['softplus'] # Get the Node corresponding to the matched "softplus" node.
|
||||
|
||||
# Determine the input port of Mul which gets the 'input' node output.
|
||||
input_port_idx = int(mul.in_port(0).get_connection().get_source().node.soft_get('op') == 'Tanh')
|
||||
|
||||
# Check that the same tensor is provided as input to Mul and SoftPlus.
|
||||
if mul.in_port(input_port_idx).get_source() != softplus.in_port(0).get_source():
|
||||
return
|
||||
|
||||
mish = Mish(graph, {}).create_node() # Create Mish operation.
|
||||
mish.in_port(0).connect(mul.in_port(input_port_idx).get_source()) # Connect input to the Mish.
|
||||
mul.out_port(0).get_connection().set_source(mish.out_port(0)) # Reconnect outgoing edge from "mul" to Mish.
|
||||
|
||||
# Rename the created Mish operation to have the name of the "mul" node, which produced the value equal to the
|
||||
# Mish output.
|
||||
rename_nodes([(mul, mul_name + '/TBR'), (mish, mul_name)])
|
||||
|
||||
.. _specific_operation_front_phase_transformations:
|
||||
|
||||
Specific Operation Front Phase Transformations
|
||||
##############################################
|
||||
|
||||
This type of transformation is implemented using ``mo.front.common.replacement.FrontReplacementOp`` as base class and
|
||||
works as follows:
|
||||
|
||||
1. Define an operation type to trigger the transformation.
|
||||
2. Model Optimizer searches for all nodes in the graph with the attribute ``op`` equal to the specified value.
|
||||
3. Model Optimizer executes the defined function performing graph transformation for each instance of a matched node. You can override different functions in the base transformation class and Model Optimizer works differently:
|
||||
|
||||
1. The ``replace_sub_graph(self, graph, match)`` override method. In this case, Model Optimizer only executes the overridden function, passing the ``graph`` object and a dictionary with a single key ``op``, whose value is the matched node. You are required to write the transformation and connect the newly created nodes to the rest of the graph.
|
||||
2. The ``replace_op(self, graph, node)`` override method. In this case, Model Optimizer executes the overridden function, passing the ``graph`` object and the matched node as the ``node`` parameter. If the function returns an ``id`` of some node, then the ``Node`` with this ``id`` is connected to the consumers of the matched node. After applying the transformation, the matched node is removed from the graph.
|
||||
|
||||
The ``FrontReplacementOp`` class provides a simpler mechanism to match a single operation with specific value of the ``op``
|
||||
(write the ``op`` attribute in the class instead of defining a ``pattern()`` function) attribute and perform the
|
||||
transformation.
|
||||
|
||||
Consider an example transformation from the ``extensions/front/Pack.py`` file, which replaces ``Pack`` operation from
|
||||
the TensorFlow:
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
|
||||
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
|
||||
from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs
|
||||
from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes
|
||||
from openvino.tools.mo.ops.concat import Concat
|
||||
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
|
||||
|
||||
|
||||
class Pack(FrontReplacementOp):
|
||||
op = "Pack" # Trigger transformation for all nodes in the graph with the op = "Pack" attribute
|
||||
enabled = True # Transformation is enabled.
|
||||
|
||||
def replace_op(self, graph: Graph, node: Node): # Entry point for the transformation.
|
||||
# Create a Concat operation with a number of inputs equal to a number of inputs to Pack.
|
||||
out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node()
|
||||
pack_name = node.soft_get('name', node.id)
|
||||
|
||||
for ind in node.in_ports():
|
||||
# Add dimension of size 1 to all inputs of the Pack operation and add them as Concat inputs.
|
||||
unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])},
|
||||
{'name': node.soft_get('name', node.id) + '/Unsqueeze'})
|
||||
node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0))
|
||||
unsqueeze_node.out_port(0).connect(out_node.in_port(ind))
|
||||
|
||||
# Rename the created Concat operation to have the name of the "pack" node, which produced the value equal to the
|
||||
# Concat output.
|
||||
rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)])
|
||||
return [out_node.id] # Reconnect the Pack operation consumers to get input from Concat instead.
|
||||
|
||||
|
||||
.. _generic_front_phase_transformations:
|
||||
|
||||
Generic Front Phase Transformations
|
||||
###################################
|
||||
|
||||
Model Optimizer provides a mechanism to implement generic front phase transformation. This type of transformation is
|
||||
implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` or
|
||||
``mo.front.common.replacement.FrontReplacementPattern`` as base classes. Make sure the transformation is enabled before trying to execute it.
|
||||
Then, Model Optimizer executes the ``find_and_replace_pattern(self, graph)`` method and
|
||||
provides a ``Graph`` object as an input.
|
||||
|
||||
Consider the example of a generic front transformation from the ``extensions/front/SqueezeNormalize.py`` file performing
|
||||
normalization of the :doc:`Squeeze <openvino_docs_ops_shape_Squeeze_1>` operation. Older version of the operation had a list of
|
||||
axes to squeeze as an attribute, but now it is a separate input. For backward compatibility, the Model Optimizer
|
||||
operation supports both semantics. Before IR generation, however, the operation should be normalized according to the
|
||||
specification.
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import logging as log
|
||||
|
||||
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
|
||||
from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
|
||||
from openvino.tools.mo.graph.graph import Graph
|
||||
from openvino.tools.mo.ops.const import Const
|
||||
from openvino.tools.mo.utils.error import Error
|
||||
|
||||
|
||||
class SqueezeNormalize(FrontReplacementPattern):
|
||||
"""
|
||||
Normalizes inputs of the Squeeze layers. The layers should have two inputs: the input with data and input with the
|
||||
dimensions to squeeze. If the second input is omitted then all dimensions of size 1 should be removed.
|
||||
"""
|
||||
enabled = True # The transformation is enabled.
|
||||
|
||||
def find_and_replace_pattern(self, graph: Graph): # The function is called unconditionally.
|
||||
for squeeze_node in graph.get_op_nodes(op='Squeeze'): # Iterate over all nodes with op='Squeeze'.
|
||||
# If the operation has only 1 input node and no 'squeeze_dims' Node attribute, then convert the attribute to
|
||||
# the operation input.
|
||||
if len(squeeze_node.in_nodes()) == 1 and squeeze_node.has_valid('squeeze_dims'):
|
||||
dims_node = Const(graph, {'name': squeeze_node.id + '/Dims',
|
||||
'value': int64_array(squeeze_node.squeeze_dims)}).create_node()
|
||||
squeeze_node.in_port(1).connect(dims_node.out_port(0))
|
||||
del squeeze_node['squeeze_dims']
|
||||
# If two inputs already exist, that means the operation is already normalized.
|
||||
elif len(squeeze_node.in_nodes()) == 2:
|
||||
log.debug('The Squeeze node "{}" is already normalized'.format(squeeze_node.name))
|
||||
# In all other cases, raise an error.
|
||||
else:
|
||||
raise Error('The Squeeze layer "{}" should either have 2 inputs or one input and an "squeeze_dims" '
|
||||
'attribute'.format(squeeze_node.soft_get('name')))
|
||||
|
||||
For the details on implementation and how these front phase transformations work, refer to the ``mo/front/common/replacement.py``
|
||||
file.
|
||||
|
||||
.. _node_name_pattern_front_phase_transformations:
|
||||
|
||||
Node Name Pattern Front Phase Transformations
|
||||
#############################################
|
||||
|
||||
TensorFlow uses a mechanism of scope to group related operation nodes. It is a good practice to put nodes performing
|
||||
particular task into the same scope. This approach divides a graph into logical blocks that are easier to review in the
|
||||
TensorBoard. The scope, in fact, just defines a common name prefix for the nodes belonging to it.
|
||||
|
||||
For example, Inception topologies contain several types of so-called **Inception blocks**. Some of them are equal to each
|
||||
other, but located in different places of the network. For example, Inception V4 from the
|
||||
`TensorFlow-Slim image classification model library <https://github.com/tensorflow/models/tree/master/research/slim>`__ has
|
||||
``Mixed_5b``, ``Mixed_5c`` and ``Mixed_5d`` inception blocks with exactly the same nodes, with the same set of attributes.
|
||||
|
||||
Consider a situation when these Inception blocks are implemented extremely efficiently using a single Inference
|
||||
Engine operation called ``InceptionBlock`` and these blocks in the model need to be replaced with instances of this operation.
|
||||
Model Optimizer provides a mechanism to trigger the transformation for a sub-graph of operations defined by the node name
|
||||
regular expressions (scope). In this particular case, some of the patterns are: ``.*InceptionV4/Mixed_5b``,
|
||||
``.*InceptionV4/Mixed_5c`` and ``.*InceptionV4/Mixed_5d``. Each pattern starts with ``.*``, because the ``InceptionV4`` prefix
|
||||
is added to all nodes names during a model freeze.
|
||||
|
||||
This type of transformation is implemented using ``mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph`` as a
|
||||
base class and works as follows:
|
||||
|
||||
1. Prepare a JSON configuration file template defining node names patterns.
|
||||
2. Run Model Optimizer with the ``--tensorflow_custom_operations_config_update`` command-line parameter, and Model Optimizer adds information about input and output nodes of the specified sub-graphs.
|
||||
3. Model Optimizer executes the defined transformation **only** when you specify the path to the configuration file updated in step 2 using the ``--transformations_config`` command-line parameter.
|
||||
|
||||
Consider the following possible configuration file template for the Inception Block transformation:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
[
|
||||
{
|
||||
"custom_attributes": {
|
||||
"attr1_key": "attr1_value",
|
||||
"attr2_key": 123456
|
||||
},
|
||||
"id": "InceptionBlockTransformation",
|
||||
"instances": [
|
||||
".*InceptionV4/Mixed_5b",
|
||||
".*InceptionV4/Mixed_5c",
|
||||
".*InceptionV4/Mixed_5d"
|
||||
],
|
||||
"match_kind": "scope"
|
||||
}
|
||||
]
|
||||
|
||||
The configuration file contains a list of dictionaries. Each dictionary defines one transformation. Each transformation
|
||||
is defined with several parameters:
|
||||
|
||||
* ``id`` - **(Mandatory)** — is a unique identifier of the transformation. It is used in the Python code that implements the transformation to link the class and the transformation description from the configuration file.
|
||||
* ``match_kind`` - **(Mandatory)** — is a string that specifies the matching algorithm. For the node name pattern case, the value should be equal to ``scope``. Other possible values are described in the dedicated sections below.
|
||||
* ``instances`` - **(Mandatory)** — specifies instances of the sub-graph to be matched. It contains a list of node names prefixes patterns for the match kind of the ``scope`` type.
|
||||
* ``custom_attributes`` - **(Optional)** — is a dictionary with attributes that can be used in the transformation code.
|
||||
|
||||
After running Model Optimizer with additional ``--tensorflow_custom_operations_config_update`` parameter pointing to
|
||||
the template configuration file, the content of the file should be updated with two new sections ``inputs`` and ``outputs``.
|
||||
The file content after the update is as follows:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
[
|
||||
{
|
||||
"id": "InceptionBlockTransformation",
|
||||
"custom_attributes": {
|
||||
"attr1_key": "attr1_value",
|
||||
"attr2_key": 123456
|
||||
},
|
||||
"instances": [
|
||||
".*InceptionV4/Mixed_5b",
|
||||
".*InceptionV4/Mixed_5c",
|
||||
".*InceptionV4/Mixed_5d"
|
||||
],
|
||||
"match_kind": "scope",
|
||||
"inputs": [
|
||||
[
|
||||
{
|
||||
"node": "Branch_2/Conv2d_0a_1x1/Conv2D$",
|
||||
"port": 0
|
||||
},
|
||||
{
|
||||
"node": "Branch_3/AvgPool_0a_3x3/AvgPool$",
|
||||
"port": 0
|
||||
},
|
||||
{
|
||||
"node": "Branch_1/Conv2d_0a_1x1/Conv2D$",
|
||||
"port": 0
|
||||
},
|
||||
{
|
||||
"node": "Branch_0/Conv2d_0a_1x1/Conv2D$",
|
||||
"port": 0
|
||||
}
|
||||
]
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"node": "concat$",
|
||||
"port": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
The value for ``inputs`` key is a list of lists describing input tensors of the sub-graph. Each element of the top-level
|
||||
list corresponds to one unique input tensor of the sub-graph. Each internal list describes a list of nodes consuming
|
||||
this tensor and port numbers, where the tensor is consumed. Model Optimizer generates regular expressions for the input
|
||||
nodes names to uniquely identify them in each instance of the sub-graph, defined by the ``instances``. Denote these nodes
|
||||
as input nodes of the sub-graph.
|
||||
|
||||
In the InceptionV4 topology, the ``InceptionV4/Mixed_5b`` block has four input tensors from outside of the sub-graph,
|
||||
but all of them are produced by the ``InceptionV4/Mixed_5a/concat`` node. Therefore, the top-level list of the ``inputs``
|
||||
contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by
|
||||
``InceptionV4/Mixed_5a/concat`` node. In this case, all four input nodes consume input tensor into "port 0".
|
||||
|
||||
The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level
|
||||
list is important. This order defines how Model Optimizer attaches input tensors to a new generated
|
||||
node if the sub-graph is replaced with a single node. The ``i``-th input node of the sub-graph is obtained using
|
||||
``match.single_input_node(i)`` call in the sub-graph transformation code. More information about API is given below. If it is
|
||||
necessary to change the order of input tensors, the configuration file can be edited in the text editor.
|
||||
|
||||
The value for the ``outputs`` key is a list describing nodes of the sub-graph producing tensor, that goes outside of the
|
||||
sub-graph or does not have child nodes. Denote these nodes as output nodes of the sub-graph. The order of elements in
|
||||
the list is important. The ``i``-th element of the list describes the ``i``-th output tensor of the sub-graph, which could be
|
||||
obtained using ``match.output_node(i)`` call. The order of elements can be manually changed in the configuration file.
|
||||
Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a single node.
|
||||
|
||||
For more examples of this type of transformation, refer to the :doc:`Converting TensorFlow Object Detection API Models <openvino_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models>` guide.
|
||||
|
||||
.. _start_end_points_front_phase_transformations:
|
||||
|
||||
Front Phase Transformations Using Start and End Points
|
||||
######################################################
|
||||
|
||||
This type of transformation is implemented using ``mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph`` as a
|
||||
base class and works as follows:
|
||||
|
||||
1. Prepare a JSON configuration file that defines the sub-graph to match, using two lists of node names: "start" and "end" nodes.
|
||||
2. Model Optimizer executes the defined transformation **only** when you specify the path to the configuration file using the ``--transformations_config`` command-line parameter. Model Optimizer performs the following steps to match the sub-graph:
|
||||
|
||||
1. Starts a graph traversal from every start node following the direction of the graph edges. The search stops in an end node or in the case of a node without consumers. All visited nodes are added to the matched sub-graph.
|
||||
2. Starts another graph traversal from each non-start node of the sub-graph, i.e. every node except nodes from the "start" list. In this step, the edges are traversed in the opposite edge direction. All newly visited nodes are added to the matched sub-graph. This step is needed to add nodes required for calculation values of internal nodes of the matched sub-graph.
|
||||
3. Checks that all "end" nodes were reached from "start" nodes. If not, it exits with an error.
|
||||
4. Checks that there are no :doc:`Parameter <openvino_docs_ops_infrastructure_Parameter_1>` operations among added nodes. If they exist, the sub-graph depends on the inputs of the model. Such configuration is considered incorrect so Model Optimizer exits with an error.
|
||||
|
||||
This algorithm finds all nodes "between" start and end nodes and nodes needed for calculation of non-input nodes of the
|
||||
matched sub-graph.
|
||||
|
||||
The example of a JSON configuration file for a transformation with start and end points is
|
||||
``extensions/front/tf/ssd_support_api_v1.15.json``:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
[
|
||||
{
|
||||
"custom_attributes": {
|
||||
"code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
|
||||
"pad_mode": "caffe.ResizeParameter.CONSTANT",
|
||||
"resize_mode": "caffe.ResizeParameter.WARP",
|
||||
"clip_before_nms": false,
|
||||
"clip_after_nms": true
|
||||
},
|
||||
"id": "ObjectDetectionAPISSDPostprocessorReplacement",
|
||||
"include_inputs_to_sub_graph": true,
|
||||
"include_outputs_to_sub_graph": true,
|
||||
"instances": {
|
||||
"end_points": [
|
||||
"detection_boxes",
|
||||
"detection_scores",
|
||||
"num_detections"
|
||||
],
|
||||
"start_points": [
|
||||
"Postprocessor/Shape",
|
||||
"Postprocessor/scale_logits",
|
||||
"Postprocessor/Tile",
|
||||
"Postprocessor/Reshape_1",
|
||||
"Postprocessor/Cast_1"
|
||||
]
|
||||
},
|
||||
"match_kind": "points"
|
||||
}
|
||||
]
|
||||
|
||||
The format of the file is similar to the one provided as an example in the
|
||||
:ref:`Node Name Pattern Front Phase Transformations <node_name_pattern_front_phase_transformations>` section. The difference is in
|
||||
the value of the ``match_kind`` parameter, which should be equal to the ``points`` and the format of the ``instances`` parameter,
|
||||
which should be a dictionary with two keys ``start_points`` and ``end_points``, defining start and end node names
|
||||
respectively.
|
||||
|
||||
.. note::
|
||||
The ``include_inputs_to_sub_graph`` and ``include_outputs_to_sub_graph`` parameters are redundant and should be always equal to ``true``.
|
||||
|
||||
.. note::
|
||||
This sub-graph match algorithm has a limitation that each start node must have only one input. Therefore, it is not possible to specify, for example, the :doc:`Convolution <openvino_docs_ops_convolution_Convolution_1>` node as input because it has two inputs: data tensor and tensor with weights.
|
||||
|
||||
For other examples of transformations with points, refer to the
|
||||
:doc:`Converting TensorFlow Object Detection API Models <openvino_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models>` guide.
|
||||
|
||||
.. _generic_transformations_config_front_phase_transformations:
|
||||
|
||||
Generic Front Phase Transformations Enabled with Transformations Configuration File
|
||||
###################################################################################
|
||||
|
||||
This type of transformation works similarly to the :ref:`Generic Front Phase Transformations <generic_front_phase_transformations>`
|
||||
but requires a JSON configuration file to enable it, similarly to
|
||||
:ref:`Node Name Pattern Front Phase Transformations <node_name_pattern_front_phase_transformations>` and
|
||||
:ref:`Front Phase Transformations Using Start and End Points <start_end_points_front_phase_transformations>`.
|
||||
|
||||
The base class for this type of transformation is
|
||||
``mo.front.common.replacement.FrontReplacementFromConfigFileGeneral``. Model Optimizer executes the
|
||||
``transform_graph(self, graph, replacement_descriptions)`` method and provides the ``Graph`` object and dictionary with values
|
||||
parsed from the ``custom_attributes`` attribute of the provided JSON configuration file.
|
||||
|
||||
The example of the configuration file for this type of transformation is ``extensions/front/tf/yolo_v1_tiny.json``:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
[
|
||||
{
|
||||
"id": "TFYOLO",
|
||||
"match_kind": "general",
|
||||
"custom_attributes": {
|
||||
"classes": 20,
|
||||
"coords": 4,
|
||||
"num": 2,
|
||||
"do_softmax": 0
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
and the corresponding transformation file is ``./extensions/front/YOLO.py``:
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo.front.no_op_eraser import NoOpEraser
|
||||
from openvino.tools.mo.front.standalone_const_eraser import StandaloneConstEraser
|
||||
from openvino.tools.mo.ops.regionyolo import RegionYoloOp
|
||||
from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
|
||||
from openvino.tools.mo.graph.graph import Node, Graph
|
||||
from openvino.tools.mo.ops.result import Result
|
||||
from openvino.tools.mo.utils.error import Error
|
||||
|
||||
|
||||
class YoloRegionAddon(FrontReplacementFromConfigFileGeneral):
|
||||
"""
|
||||
Replaces all Result nodes in graph with YoloRegion->Result nodes chain.
|
||||
YoloRegion node attributes are taken from configuration file
|
||||
"""
|
||||
replacement_id = 'TFYOLO' # The identifier matching the "id" attribute in the JSON file.
|
||||
|
||||
def run_after(self):
|
||||
return [NoOpEraser, StandaloneConstEraser]
|
||||
|
||||
def transform_graph(self, graph: Graph, replacement_descriptions):
|
||||
op_outputs = [n for n, d in graph.nodes(data=True) if 'op' in d and d['op'] == 'Result']
|
||||
for op_output in op_outputs:
|
||||
last_node = Node(graph, op_output).in_node(0)
|
||||
op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1)
|
||||
op_params.update(replacement_descriptions)
|
||||
region_layer = RegionYoloOp(graph, op_params)
|
||||
region_layer_node = region_layer.create_node([last_node])
|
||||
# In here, 'axis' from 'dim_attrs' can be removed to avoid permutation from axis = 1 to axis = 2.
|
||||
region_layer_node.dim_attrs.remove('axis')
|
||||
Result(graph).create_node([region_layer_node])
|
||||
graph.remove_node(op_output)
|
||||
|
||||
The configuration file has only 3 parameters: the ``id`` identifier of the transformation, ``match_kind`` (which should be equal
|
||||
to ``general``) and the ``custom_attributes`` dictionary with custom attributes accessible in the transformation.
|
||||
|
||||
.. _mo_middle_phase_transformations:
|
||||
|
||||
============================
|
||||
Middle Phase Transformations
|
||||
============================
|
||||
|
||||
There are two types of middle phase transformations:
|
||||
|
||||
1. :ref:`Pattern-Defined Middle Phase Transformations <pattern_defined_middle_phase_transformations>` triggered for each sub-graph of the original graph, isomorphic to the specified pattern.
|
||||
2. :ref:`Generic Middle Phase Transformations <generic_middle_phase_transformations>`.
|
||||
|
||||
.. _pattern_defined_middle_phase_transformations:
|
||||
|
||||
Pattern-Defined Middle Phase Transformations
|
||||
############################################
|
||||
|
||||
This type of transformation is implemented using ``mo.middle.replacement.MiddleReplacementPattern`` as a base class and
|
||||
works similarly to the :ref:`Pattern-Defined Front Phase Transformations <pattern_defined_front_phase_transformations>`.
|
||||
There are two differences:
|
||||
|
||||
1. The transformation entry function name is ``replace_pattern(self, graph, match)``.
|
||||
2. The pattern defining the graph should contain data nodes because the structure of the graph is different between front and middle phases. For more information about the graph structure changes, refer to the :ref:`Partial Inference <mo_partial_inference>`.
|
||||
|
||||
For the example of a pattern-defined middle transformation, refer to the ``extensions/middle/L2NormToNorm.py`` file.
|
||||
|
||||
.. _generic_middle_phase_transformations:
|
||||
|
||||
Generic Middle Phase Transformations
|
||||
####################################
|
||||
|
||||
Model Optimizer provides a mechanism to implement generic middle phase transformations. This type of transformation is
|
||||
implemented using ``mo.middle.replacement.MiddleReplacementPattern`` as a base class and works similarly to the
|
||||
:ref:`Generic Front Phase Transformations <generic_front_phase_transformations>`. The only difference is that the
|
||||
transformation entry function name is ``find_and_replace_pattern(self, graph: Graph)``.
|
||||
|
||||
For the example of this transformation, refer to the ``extensions/middle/CheckForCycle.py`` file.
|
||||
|
||||
.. _mo_back_phase_transformations:
|
||||
|
||||
==========================
|
||||
Back Phase Transformations
|
||||
==========================
|
||||
|
||||
There are two types of back phase transformations:
|
||||
|
||||
1. :ref:`Pattern-Defined Back Phase Transformations <pattern_defined_back_phase_transformations>` triggered for each sub-graph of the original graph, isomorphic to the specified pattern.
|
||||
2. :ref:`Generic Back Phase Transformations <generic_back_phase_transformations>`.
|
||||
|
||||
.. note::
|
||||
The graph layout during the back phase is always NCHW. However, during the front and middle phases it could be NHWC if the original model was using it. For more details, refer to :ref:`Model Conversion Pipeline <mo_model_conversion_pipeline>`.
|
||||
|
||||
.. _pattern_defined_back_phase_transformations:
|
||||
|
||||
Pattern-Defined Back Phase Transformations
|
||||
##########################################
|
||||
|
||||
This type of transformation is implemented using ``mo.back.replacement.BackReplacementPattern`` as a base class and
|
||||
works the same way as :ref:`Pattern-Defined Middle Phase Transformations <pattern_defined_middle_phase_transformations>`.
|
||||
|
||||
For the example of a pattern-defined back transformation, refer to the ``extensions/back/ShufflenetReLUReorder.py`` file.
|
||||
|
||||
.. _generic_back_phase_transformations:
|
||||
|
||||
Generic Back Phase Transformations
|
||||
##################################
|
||||
|
||||
Model Optimizer provides a mechanism to implement generic back phase transformations. This type of transformation is
|
||||
implemented using ``mo.back.replacement.BackReplacementPattern`` as a base class and works the same way as
|
||||
:ref:`Generic Middle Phase Transformations <generic_middle_phase_transformations>`.
|
||||
|
||||
For the example of this transformation, refer to the ``extensions/back/GatherNormalizer.py`` file.
|
||||
|
||||
====================
|
||||
Additional Resources
|
||||
====================
|
||||
|
||||
* :doc:`Model Optimizer Extensibility <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer>`
|
||||
* :doc:`Graph Traversal and Modification Using Ports and Connections <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections>`
|
||||
* :doc:`Model Optimizer Extensions <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions>`
|
||||
* :doc:`Extending Model Optimizer with Caffe Python Layers <openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers>`
|
||||
|
||||
59
docs/OV_Runtime_UG/Int8Inference.md
Normal file
59
docs/OV_Runtime_UG/Int8Inference.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# Low-Precision 8-bit Integer Inference
|
||||
|
||||
## Disclaimer
|
||||
|
||||
Low-precision 8-bit inference is optimized for:
|
||||
- Intel® architecture processors with the following instruction set architecture extensions:
|
||||
- Intel® Advanced Vector Extensions 512 Vector Neural Network Instructions (Intel® AVX-512 VNNI)
|
||||
- Intel® Advanced Vector Extensions 512 (Intel® AVX-512)
|
||||
- Intel® Advanced Vector Extensions 2.0 (Intel® AVX2)
|
||||
- Intel® Streaming SIMD Extensions 4.2 (Intel® SSE4.2)
|
||||
- Intel® processor graphics:
|
||||
- Intel® Iris® Xe Graphics
|
||||
- Intel® Iris® Xe MAX Graphics
|
||||
|
||||
## Introduction
|
||||
|
||||
For 8-bit integer computation, a model must be quantized. You can use a quantized model from [OpenVINO™ Toolkit Intel's Pre-Trained Models](@ref omz_models_group_intel) or quantize a model yourself. For more details on how to get a quantized model, please refer to the [Model Optimization](@ref openvino_docs_model_optimization_guide) document.
|
||||
|
||||
The quantization process adds [FakeQuantize](../ops/quantization/FakeQuantize_1.md) layers on activations and weights for most layers. Read more about mathematical computations in the [Uniform Quantization with Fine-Tuning](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md).
|
||||
|
||||
When you pass the quantized IR to the OpenVINO™ plugin, the plugin automatically recognizes it as a quantized model and performs 8-bit inference. Note that if you pass a quantized model to another plugin that does not support 8-bit inference but supports all operations from the model, the model is inferred in precision that this plugin supports.
|
||||
|
||||
At runtime, the quantized model is loaded to the plugin. The plugin uses the `Low Precision Transformation` component to update the model to infer it in low precision:
|
||||
- Update `FakeQuantize` layers to have quantized output tensors in low-precision range and add dequantization layers to compensate for the update. Dequantization layers are pushed through as many layers as possible to have more layers in low precision. After that, most layers have quantized input tensors in low-precision range and can be inferred in low precision. Ideally, dequantization layers should be fused in the next `FakeQuantize` layer.
|
||||
- Weights are quantized and stored in `Constant` layers.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Let's explore the quantized [TensorFlow* implementation of the ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model. Use [Model Downloader](@ref omz_tools_downloader) to download the `FP16` model from [OpenVINO™ Toolkit - Open Model Zoo repository](https://github.com/openvinotoolkit/open_model_zoo):
|
||||
|
||||
```sh
|
||||
omz_downloader --name resnet-50-tf --precisions FP16-INT8
|
||||
```
|
||||
After that you should quantize the model with the [Model Quantizer](@ref omz_tools_downloader) tool.
|
||||
```sh
|
||||
omz_quantizer --model_dir public/resnet-50-tf --dataset_dir <DATASET_DIR> --precisions=FP16-INT8
|
||||
```
|
||||
|
||||
The simplest way to infer the model and collect performance counters is the [Benchmark Application](../../samples/cpp/benchmark_app/README.md):
|
||||
```sh
|
||||
./benchmark_app -m resnet-50-tf.xml -d CPU -niter 1 -api sync -report_type average_counters -report_folder pc_report_dir
|
||||
```
|
||||
If you infer the model with the OpenVINO™ CPU plugin and collect performance counters, all operations (except the last non-quantized SoftMax) are executed in INT8 precision.
|
||||
|
||||
## Low-Precision 8-bit Integer Inference Workflow
|
||||
|
||||
For 8-bit integer computations, a model must be quantized. Quantized models can be downloaded from [Overview of OpenVINO™ Toolkit Intel's Pre-Trained Models](@ref omz_models_group_intel). If the model is not quantized, you can use the [Post-Training Optimization Tool](@ref pot_introduction) to quantize the model. The quantization process adds [FakeQuantize](../ops/quantization/FakeQuantize_1.md) layers on activations and weights for most layers. Read more about mathematical computations in the [Uniform Quantization with Fine-Tuning](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md).
|
||||
|
||||
8-bit inference pipeline includes two stages (also refer to the figure below):
|
||||
1. *Offline stage*, or *model quantization*. During this stage, [FakeQuantize](../ops/quantization/FakeQuantize_1.md) layers are added before most layers to have quantized tensors before layers in a way that low-precision accuracy drop for 8-bit integer inference satisfies the specified threshold. The output of this stage is a quantized model. Quantized model precision is not changed, quantized tensors are in the original precision range (`fp32`). `FakeQuantize` layer has `levels` attribute which defines quants count. Quants count defines precision which is used during inference. For `int8` range `levels` attribute value has to be 255 or 256. To quantize the model, you can use the [Post-Training Optimization Tool](@ref pot_introduction) delivered with the Intel® Distribution of OpenVINO™ toolkit release package.
|
||||
|
||||
When you pass the quantized IR to the OpenVINO™ plugin, the plugin automatically recognizes it as a quantized model and performs 8-bit inference. Note that if you pass a quantized model to another plugin that does not support 8-bit inference but supports all operations from the model, the model is inferred in precision that this plugin supports.
|
||||
|
||||
2. *Runtime stage*. This stage is an internal procedure of the OpenVINO™ plugin. During this stage, the quantized model is loaded to the plugin. The plugin uses `Low Precision Transformation` component to update the model to infer it in low precision:
|
||||
- Update `FakeQuantize` layers to have quantized output tensors in low precision range and add dequantization layers to compensate for the update. Dequantization layers are pushed through as many layers as possible to have more layers in low precision. After that, most layers have quantized input tensors in low precision range and can be inferred in low precision. Ideally, dequantization layers should be fused in the next `FakeQuantize` layer.
|
||||
- Weights are quantized and stored in `Constant` layers.
|
||||
|
||||
![int8_flow]
|
||||
|
||||
@@ -3,11 +3,6 @@
|
||||
Automatic Device Selection
|
||||
==========================
|
||||
|
||||
.. meta::
|
||||
:description: The Automatic Device Selection mode in OpenVINO™ Runtime
|
||||
detects available devices and selects the optimal processing
|
||||
unit for inference automatically.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
@@ -15,25 +10,34 @@ Automatic Device Selection
|
||||
|
||||
Debugging Auto-Device Plugin <openvino_docs_OV_UG_supported_plugins_AUTO_debugging>
|
||||
|
||||
.. meta::
|
||||
:description: The Automatic Device Selection mode in OpenVINO™ Runtime
|
||||
detects available devices and selects the optimal processing
|
||||
unit for inference automatically.
|
||||
|
||||
|
||||
This article introduces how Automatic Device Selection works and how to use it for inference.
|
||||
|
||||
|
||||
.. _how-auto-works:
|
||||
|
||||
The Automatic Device Selection mode, or AUTO for short, uses a "virtual" or a "proxy" device,
|
||||
which does not bind to a specific type of hardware, but rather selects the processing unit
|
||||
for inference automatically. It detects available devices, picks the one best-suited for the
|
||||
task, and configures its optimization settings. This way, you can write the application once
|
||||
and deploy it anywhere.
|
||||
|
||||
The selection also depends on your performance requirements, defined by the “hints”
|
||||
configuration API, as well as device priority list limitations, if you choose to exclude
|
||||
some hardware from the process.
|
||||
How AUTO Works
|
||||
##############
|
||||
|
||||
The Automatic Device Selection mode, or AUTO for short, uses a "virtual" or a "proxy" device,
|
||||
which does not bind to a specific type of hardware, but rather selects the processing unit for inference automatically.
|
||||
It detects available devices, picks the one best-suited for the task, and configures its optimization settings.
|
||||
This way, you can write the application once and deploy it anywhere.
|
||||
|
||||
The selection also depends on your performance requirements, defined by the “hints” configuration API, as well as device priority list limitations, if you choose to exclude some hardware from the process.
|
||||
|
||||
The logic behind the choice is as follows:
|
||||
|
||||
1. Check what supported devices are available.
|
||||
2. Check precisions of the input model (for detailed information on precisions read more on the ``ov::device::capabilities``).
|
||||
3. Select the highest-priority device capable of supporting the given model, as listed in the table below.
|
||||
4. If model's precision is FP32 but there is no device capable of supporting it, offload the model to a device supporting FP16.
|
||||
|
||||
|
||||
+----------+-----------------------------------------------------+------------------------------------+
|
||||
@@ -49,18 +53,7 @@ The logic behind the choice is as follows:
|
||||
| 3 | Intel® CPU | FP32, FP16, INT8, BIN |
|
||||
| | (e.g. Intel® Core™ i7-1165G7) | |
|
||||
+----------+-----------------------------------------------------+------------------------------------+
|
||||
| 4 | Intel® NPU | |
|
||||
| | (e.g. Intel® Core™ Ultra) | |
|
||||
+----------+-----------------------------------------------------+------------------------------------+
|
||||
|
||||
.. note::
|
||||
|
||||
Note that NPU is currently excluded from the default priority list. To use it for inference, you
|
||||
need to specify it explicitly
|
||||
|
||||
|
||||
How AUTO Works
|
||||
##############
|
||||
|
||||
To put it simply, when loading the model to the first device on the list fails, AUTO will try to load it to the next device in line, until one of them succeeds.
|
||||
What is important, **AUTO starts inference with the CPU of the system by default**, as it provides very low latency and can start inference with no additional delays.
|
||||
@@ -68,19 +61,12 @@ While the CPU is performing inference, AUTO continues to load the model to the d
|
||||
This way, the devices which are much slower in compiling models, GPU being the best example, do not impact inference at its initial stages.
|
||||
For example, if you use a CPU and a GPU, the first-inference latency of AUTO will be better than that of using GPU alone.
|
||||
|
||||
Note that if you choose to exclude CPU from the priority list or disable the initial
|
||||
CPU acceleration feature via ``ov::intel_auto::enable_startup_fallback``, it will be
|
||||
unable to support the initial model compilation stage. The models with dynamic
|
||||
input/output or :doc:`stateful<openvino_docs_OV_UG_model_state_intro>`
|
||||
operations will be loaded to the CPU if it is in the candidate list. Otherwise,
|
||||
these models will follow the normal flow and be loaded to the device based on priority.
|
||||
Note that if you choose to exclude CPU from the priority list or disable the initial CPU acceleration feature via ``ov::intel_auto::enable_startup_fallback``, it will be unable to support the initial model compilation stage. The models with dynamic input/output or :doc:`stateful<openvino_docs_OV_UG_model_state_intro>` operations will be loaded to the CPU if it is in the candidate list. Otherwise, these models will follow the normal flow and be loaded to the device based on priority.
|
||||
|
||||
.. image:: _static/images/autoplugin_accelerate.svg
|
||||
|
||||
|
||||
This mechanism can be easily observed in the :ref:`Using AUTO with Benchmark app sample <using-auto-with-openvino-samples-and-benchmark-app>`
|
||||
section, showing how the first-inference latency (the time it takes to compile the
|
||||
model and perform the first inference) is reduced when using AUTO. For example:
|
||||
This mechanism can be easily observed in the :ref:`Using AUTO with Benchmark app sample <using-auto-with-openvino-samples-and-benchmark-app>` section, showing how the first-inference latency (the time it takes to compile the model and perform the first inference) is reduced when using AUTO. For example:
|
||||
|
||||
|
||||
.. code-block:: sh
|
||||
@@ -102,9 +88,8 @@ model and perform the first inference) is reduced when using AUTO. For example:
|
||||
Using AUTO
|
||||
##########
|
||||
|
||||
Following the OpenVINO™ naming convention, the Automatic Device Selection mode is assigned the label of "AUTO".
|
||||
It may be defined with no additional parameters, resulting in defaults being used, or configured further with
|
||||
the following setup options:
|
||||
Following the OpenVINO™ naming convention, the Automatic Device Selection mode is assigned the label of "AUTO". It may be defined with no additional parameters, resulting in defaults being used, or configured further with the following setup options:
|
||||
|
||||
|
||||
+----------------------------------------------+--------------------------------------------------------------------+
|
||||
| Property(C++ version) | Values and Description |
|
||||
@@ -182,17 +167,6 @@ the following setup options:
|
||||
| | |
|
||||
| | The default value is ``true``. |
|
||||
+----------------------------------------------+--------------------------------------------------------------------+
|
||||
| ``ov::intel_auto::schedule_policy`` | **Values**: |
|
||||
| | |
|
||||
| | ``ROUND_ROBIN`` |
|
||||
| | |
|
||||
| | ``DEVICE_PRIORITY`` |
|
||||
| | |
|
||||
| | Specify the schedule policy of infer request assigned to hardware |
|
||||
| | plugin for AUTO cumulative mode (MULTI). |
|
||||
| | |
|
||||
| | The default value is ``DEVICE_PRIORITY``. |
|
||||
+----------------------------------------------+--------------------------------------------------------------------+
|
||||
|
||||
Inference with AUTO is configured similarly to when device plugins are used:
|
||||
you compile the model on the plugin with configuration and execute inference.
|
||||
@@ -220,6 +194,7 @@ The code samples on this page assume following import(Python)/using (C++) are in
|
||||
Device Candidates and Priority
|
||||
++++++++++++++++++++++++++++++
|
||||
|
||||
|
||||
The device candidate list enables you to customize the priority and limit the choice of devices available to AUTO.
|
||||
|
||||
* If <device candidate list> is not specified, AUTO assumes all the devices present in the system can be used.
|
||||
@@ -510,4 +485,3 @@ Additional Resources
|
||||
- :doc:`Running on Multiple Devices Simultaneously <openvino_docs_OV_UG_Running_on_multiple_devices>`
|
||||
- :doc:`Supported Devices <openvino_docs_OV_UG_supported_plugins_Supported_Devices>`
|
||||
|
||||
|
||||
@@ -3,11 +3,7 @@
|
||||
Quantized models compute and restrictions
|
||||
=========================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
openvino_docs_ie_plugin_dg_lp_representation
|
||||
|
||||
.. meta::
|
||||
:description: Learn about the support for quantized models with different
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
.. {#openvino_docs_ie_plugin_dg_lp_representation}
|
||||
|
||||
Representation of low-precision models
|
||||
======================================
|
||||
|
||||
The goal of this document is to describe how optimized models are represented in OpenVINO Intermediate Representation (IR) and provide guidance
|
||||
on interpretation rules for such models at runtime.
|
||||
|
||||
Currently, there are two groups of optimization methods that can influence the IR after applying them to the full-precision model:
|
||||
|
||||
- **Sparsity**. It is represented by zeros inside the weights and this is up to the hardware plugin how to interpret these zeros
|
||||
(use weights as is or apply special compression algorithms and sparse arithmetic). No additional mask is provided with the model.
|
||||
- **Quantization**. The rest of this document is dedicated to the representation of quantized models.
|
||||
|
||||
Representation of quantized models
|
||||
###################################
|
||||
|
||||
The OpenVINO Toolkit represents all the quantized models using the so-called FakeQuantize operation (see the description in
|
||||
:doc:`this document <openvino_docs_ops_quantization_FakeQuantize_1>`). This operation is very expressive and allows mapping values from
|
||||
arbitrary input and output ranges. The whole idea behind that is quite simple: we project (discretize) the input values to the low-precision
|
||||
data type using affine transformation (with clamp and rounding) and then reproject discrete values back to the original range and data type.
|
||||
It can be considered as an emulation of the quantization process which happens at runtime.
|
||||
In order to be able to execute a particular DL operation in low precision, all its inputs should be quantized, i.e., should have FakeQuantize
|
||||
between operation and data blobs. The figure below shows an example of quantized Convolution which contains two FakeQuantize nodes: one for
|
||||
weights and one for activations (bias is quantized using the same parameters).
|
||||
|
||||
.. .. image:: _static/images/quantized_convolution.png
|
||||
|
||||
Starting from OpenVINO 2020.2 release all the quantized models are represented in the compressed form. It means that the weights
|
||||
of low-precision operations are converted into the target precision (e.g. INT8). It helps to substantially reduce the model size.
|
||||
The rest of the parameters can be represented in FLOAT32 or FLOAT16 precision depending on the input full-precision model used in
|
||||
the quantization process. Fig. 2 below shows an example of the part of the compressed IR.
|
||||
|
||||
.. .. image:: _static/images/quantized_model_example.png
|
||||
@@ -30,8 +30,6 @@ Particular cases:
|
||||
1. If ``axes`` is an empty list, *ReduceMax* corresponds to the identity operation.
|
||||
2. If ``axes`` contains all dimensions of input ``data``, a single reduction value is calculated for the entire input tensor.
|
||||
|
||||
Reducing empty tensor results in an undefined behavior.
|
||||
|
||||
**Attributes**
|
||||
|
||||
* *keep_dims*
|
||||
|
||||
@@ -30,8 +30,6 @@ Particular cases:
|
||||
1. If ``axes`` is an empty list, *ReduceMin* corresponds to the identity operation.
|
||||
2. If ``axes`` contains all dimensions of input ``data``, a single reduction value is calculated for the entire input tensor.
|
||||
|
||||
Reducing empty tensor results in an undefined behavior.
|
||||
|
||||
**Attributes**
|
||||
|
||||
* *keep_dims*
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers}
|
||||
# [LEGACY] Extending Model Optimizer with Caffe Python Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers}
|
||||
|
||||
[LEGACY] Extending Model Optimizer with Caffe Python Layers
|
||||
============================================================
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to extract operator attributes in Model Optimizer to
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions}
|
||||
# [LEGACY] Model Optimizer Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions}
|
||||
|
||||
[LEGACY] Model Optimizer Extensions
|
||||
=====================================
|
||||
|
||||
.. meta::
|
||||
:description: Learn about deprecated extensions, which enable injecting logic
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor}
|
||||
# [LEGACY] Operation Extractor {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor}
|
||||
|
||||
[LEGACY] Operation Extractor
|
||||
=============================
|
||||
|
||||
.. meta::
|
||||
:description: Learn about a deprecated generic extension in Model Optimizer,
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation}
|
||||
# [LEGACY] Model Optimizer Operation {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation}
|
||||
|
||||
[LEGACY] Model Optimizer Operation
|
||||
===================================
|
||||
|
||||
.. meta::
|
||||
:description: Learn about the Op class, that contains operation attributes,
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions}
|
||||
# [LEGACY] Graph Transformation Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions}
|
||||
|
||||
[LEGACY] Graph Transformation Extensions
|
||||
==========================================
|
||||
|
||||
.. meta::
|
||||
:description: Learn about various base classes for front, middle and back phase
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections}
|
||||
# [LEGACY] Graph Traversal and Modification {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections}
|
||||
|
||||
[LEGACY] Graph Traversal and Modification
|
||||
===========================================
|
||||
|
||||
.. meta::
|
||||
:description: Learn about deprecated APIs and the Port and Connection classes
|
||||
|
||||
@@ -3,43 +3,70 @@
|
||||
Configurations for Intel® NPU with OpenVINO™
|
||||
===============================================
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to provide additional configuration for Intel®
|
||||
NPU to work with the OpenVINO™ toolkit on your system.
|
||||
|
||||
|
||||
The Intel® NPU device requires a proper driver to be installed in the system.
|
||||
Make sure you use the most recent supported driver for your hardware setup.
|
||||
|
||||
Drivers and Dependencies
|
||||
########################
|
||||
|
||||
|
||||
.. tab-set::
|
||||
The Intel® NPU device requires a proper driver to be installed on the system.
|
||||
|
||||
.. tab-item:: Linux
|
||||
|
||||
The driver is maintained as open source and may be found in the following repository,
|
||||
together with comprehensive information on installation and system requirements:
|
||||
`github.com/intel/linux-npu-driver <https://github.com/intel/linux-npu-driver>`__
|
||||
|
||||
It is recommended to check for the latest version of the driver.
|
||||
|
||||
Make sure you use a supported OS version, as well as install make, gcc,
|
||||
and Linux kernel headers. To check the NPU state, use the ``dmesg``
|
||||
command in the console. A successful boot-up of the NPU should give you
|
||||
a message like this one:
|
||||
|
||||
``[ 797.193201] [drm] Initialized intel_vpu 0.<version number> for 0000:00:0b.0 on minor 0``
|
||||
Linux
|
||||
####################
|
||||
|
||||
The current requirement for inference on NPU is Ubuntu 22.04 with the kernel
|
||||
version of 6.6 or higher.
|
||||
Prerequisites
|
||||
++++++++++++++++++++
|
||||
|
||||
.. tab-item:: Windows
|
||||
Ensure that make, gcc, and Linux kernel headers are installed. Use the following command to install the required software:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
sudo apt-get install gcc make linux-headers-generic
|
||||
|
||||
|
||||
Configuration steps
|
||||
++++++++++++++++++++
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Windows
|
||||
####################
|
||||
|
||||
Intel® NPU driver for Windows is available through Windows Update.
|
||||
|
||||
|
||||
|
||||
|
||||
What’s Next?
|
||||
####################
|
||||
|
||||
Now you are ready to try out OpenVINO™. You can use the following tutorials to write your applications using Python and C/C++.
|
||||
|
||||
* Developing in Python:
|
||||
|
||||
* `Start with tensorflow models with OpenVINO™ <notebooks/101-tensorflow-to-openvino-with-output.html>`__
|
||||
* `Start with ONNX and PyTorch models with OpenVINO™ <notebooks/102-pytorch-onnx-to-openvino-with-output.html>`__
|
||||
* `Start with PaddlePaddle models with OpenVINO™ <notebooks/103-paddle-to-openvino-classification-with-output.html>`__
|
||||
|
||||
* Developing in C/C++:
|
||||
|
||||
* :doc:`Image Classification Async C++ Sample <openvino_inference_engine_samples_classification_sample_async_README>`
|
||||
* :doc:`Hello Classification C++ Sample <openvino_inference_engine_samples_hello_classification_README>`
|
||||
* :doc:`Hello Reshape SSD C++ Sample <openvino_inference_engine_samples_hello_reshape_ssd_README>`
|
||||
|
||||
The Intel® NPU driver for Windows is available through Windows Update but
|
||||
it may also be installed manually by downloading the
|
||||
`NPU driver package <https://www.intel.com/content/www/us/en/download-center/home.html>`__ and following the
|
||||
`Windows driver installation guide <https://support.microsoft.com/en-us/windows/update-drivers-manually-in-windows-ec62f46c-ff14-c91d-eead-d7126dc1f7b6>`__.
|
||||
|
||||
If a driver has already been installed you should be able to find
|
||||
'Intel(R) NPU Accelerator' in Windows Device Manager. If you
|
||||
cannot find such a device, the NPU is most likely listed in "Other devices"
|
||||
as "Multimedia Video Controller."
|
||||
|
||||
@@ -23,7 +23,7 @@ Install OpenVINO™ 2023.2
|
||||
|
||||
<script type="module" crossorigin src="_static/selector-tool/assets/index-f34d1fad.js"></script>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<iframe id="selector" src="_static/selector-tool/selector-5cca680.html" style="width: 100%; border: none" title="Download Intel® Distribution of OpenVINO™ Toolkit"></iframe>
|
||||
<iframe id="selector" src="_static/selector-tool/selector-c1c409a.html" style="width: 100%; border: none" title="Download Intel® Distribution of OpenVINO™ Toolkit"></iframe>
|
||||
|
||||
.. warning::
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ Installing OpenVINO Runtime with Conan Package Manager
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
python3 -m pip install 'conan>=2.0.8'
|
||||
python3 -m pip install conan>=2.0.8
|
||||
|
||||
2. Create a ``conanfile.txt`` file for your OpenVINO project and add "*openvino*" dependency in there:
|
||||
|
||||
|
||||
@@ -30,5 +30,6 @@ as well as an experienced user.
|
||||
| :doc:`OpenVINO Samples <openvino_docs_OV_UG_Samples_Overview>`
|
||||
| The OpenVINO samples (Python and C++) are simple console applications that show how to use specific OpenVINO API features. They can assist you in executing tasks such as loading a model, running inference, querying particular device capabilities, etc.
|
||||
|
||||
| :doc:`Optimize and Deploy Generative AI Models <gen_ai_guide>`
|
||||
| Detailed information on how OpenVINO accelerates Generative AI use cases and what models it supports. This tutorial provides instructions for running Generative AI models using Hugging Face Optimum Intel and Native OpenVINO APIs.
|
||||
| :doc:`OpenVINO™ API 2.0 Transition Guide <openvino_2_0_transition_guide>`
|
||||
| With the release of 2022.1 OpenVINO introduced its improved API 2.0 and its new OpenVINO IR model format: IR v11. This tutorial will instruct you on how to adopt the new solution, as well as show you the benefits of the new logic of working with models.
|
||||
|
||||
|
||||
@@ -58,37 +58,30 @@ The Jupyter notebooks are categorized into following classes:
|
||||
|
||||
Below you will find a selection of recommended tutorials that demonstrate inference on a particular model. These tutorials are guaranteed to provide a great experience with inference in OpenVINO:
|
||||
|
||||
|
||||
.. showcase::
|
||||
:title: 272-paint-by-example
|
||||
:img: https://camo.githubusercontent.com/79d83ce8fc6813a503f372bacb7dc709c79d9560060df3dc92196b9849cc33a9/68747470733a2f2f757365722d696d616765732e67697468756275736572636f6e74656e742e636f6d2f3130333232363538302f3233363935343931382d66333634623232372d323933632d346637382d613962662d3964636562636231303334612e706e67
|
||||
|
||||
Paint by Example using Stable Diffusion and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 271-sdxl-turbo
|
||||
:img: _images/271-sdxl-turbo-with-output_30_1.png
|
||||
|
||||
Single step image generation using SDXL-turbo and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 270-sound-generation-audioldm2
|
||||
:img: https://github.com/openvinotoolkit/openvino_notebooks/assets/76463150/c93a0f86-d9cf-4bd1-93b9-e27532170d75
|
||||
|
||||
Sound Generation with AudioLDM2 and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 269-film-slowmo
|
||||
:img: https://github.com/googlestaging/frame-interpolation/raw/main/moment.gif
|
||||
|
||||
Frame interpolation using FILM and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 268-table-question-answering
|
||||
:img: _static/images/notebook_eye.png
|
||||
|
||||
Table Question Answering using TAPAS and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 267-distil-whisper-asr
|
||||
:img: _static/images/notebook_eye.png
|
||||
|
||||
Automatic speech recognition using Distil-Whisper and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 266-speculative-sampling
|
||||
:img: _static/images/notebook_eye.png
|
||||
|
||||
Text Generation via Speculative Sampling, KV Caching, and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 265-wuerstchen-image-generation
|
||||
:img: https://user-images.githubusercontent.com/76161256/277724498-6917c558-d74c-4cc9-b81a-679ce0a299ee.png
|
||||
@@ -107,18 +100,60 @@ Below you will find a selection of recommended tutorials that demonstrate infere
|
||||
|
||||
Image generation with Latent Consistency Model and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 263-lcm-lora-controlnet
|
||||
:img: https://user-images.githubusercontent.com/29454499/284292122-f146e16d-7233-49f7-a401-edcb714b5288.png
|
||||
|
||||
Text-to-Image Generation with LCM LoRA and ControlNet Conditioning.
|
||||
|
||||
.. showcase::
|
||||
:title: 262-softvc-voice-conversion
|
||||
:img: _static/images/notebook_eye.png
|
||||
|
||||
SoftVC VITS Singing Voice Conversion and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 261-fast-segment-anything
|
||||
:img: https://user-images.githubusercontent.com/26833433/248551984-d98f0f6d-7535-45d0-b380-2e1440b52ad7.jpg
|
||||
|
||||
Object segmentation with FastSAM and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 259-decidiffusion-image-generation
|
||||
:img: https://user-images.githubusercontent.com/29454499/274927904-cd734349-9954-4656-ab96-08a903e846ef.png
|
||||
|
||||
Image generation with DeciDiffusion and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 258-blip-diffusion-subject-generation
|
||||
:img: https://user-images.githubusercontent.com/76161256/275485611-0ecf621f-b544-44ae-8258-8a49be704989.png
|
||||
|
||||
Subject-driven image generation and editing using BLIP Diffusion and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 257-llava-multimodal-chatbot
|
||||
:img: https://raw.githubusercontent.com/haotian-liu/LLaVA/main/images/llava_logo.png
|
||||
|
||||
Visual-language assistant with LLaVA and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 256-bark-text-to-audio
|
||||
:img: https://user-images.githubusercontent.com/29454499/269278630-9a770279-0045-480e-95f2-1a2f2d0a5115.png
|
||||
|
||||
Text-to-speech generation using Bark and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 254-llm-chatbot
|
||||
:img: _static/images/notebook_eye.png
|
||||
|
||||
Create an LLM-powered Chatbot using OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 253-zeroscope-text2video
|
||||
:img: https://camo.githubusercontent.com/64eec6e52d060ca971c5a3be3f0d60e712907c98b4661b454d7e3e9575c2bc6b/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f68756767696e67666163652f646f63756d656e746174696f6e2d696d616765732f7265736f6c76652f6d61696e2f6469666675736572732f646172746876616465725f63657270656e73652e676966
|
||||
|
||||
Video generation with ZeroScope and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 251-tiny-sd-image-generation
|
||||
:img: https://user-images.githubusercontent.com/29454499/260904650-274fc2f9-24d2-46a3-ac3d-d660ec3c9a19.png
|
||||
|
||||
Image Generation with Tiny-SD and OpenVINO.
|
||||
|
||||
|
||||
.. note::
|
||||
If there are any issues while running the notebooks, refer to the **Troubleshooting** and **FAQ** sections in the :doc:`Installation Guide <notebooks_installation>` or start a GitHub
|
||||
|
||||
@@ -17,12 +17,6 @@ Tutorials that explain how to optimize and quantize models with OpenVINO tools.
|
||||
|
||||
Convert TensorFlow Hub models to OpenVINO Intermediate Representation (IR).
|
||||
|
||||
.. showcase::
|
||||
:title: 125-lraspp-segmentation
|
||||
:img: _static/images/notebook_eye.png
|
||||
|
||||
Semantic segmentation with LRASPP MobileNet v3 and OpenVINO
|
||||
|
||||
.. showcase::
|
||||
:title: 125-convnext-classification
|
||||
:img: _static/images/notebook_eye.png
|
||||
|
||||
@@ -11,22 +11,6 @@ Model Demos
|
||||
|
||||
Demos that demonstrate inference on a particular model.
|
||||
|
||||
.. showcase::
|
||||
:title: 272-paint-by-example
|
||||
:img: https://camo.githubusercontent.com/79d83ce8fc6813a503f372bacb7dc709c79d9560060df3dc92196b9849cc33a9/68747470733a2f2f757365722d696d616765732e67697468756275736572636f6e74656e742e636f6d2f3130333232363538302f3233363935343931382d66333634623232372d323933632d346637382d613962662d3964636562636231303334612e706e67
|
||||
|
||||
Paint by Example using Stable Diffusion and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 271-sdxl-turbo
|
||||
:img: _images/271-sdxl-turbo-with-output_30_1.png
|
||||
|
||||
Single step image generation using SDXL-turbo and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 270-sound-generation-audioldm2
|
||||
:img: https://github.com/openvinotoolkit/openvino_notebooks/assets/76463150/c93a0f86-d9cf-4bd1-93b9-e27532170d75
|
||||
|
||||
.. showcase::
|
||||
:title: 269-film-slowmo
|
||||
:img: https://github.com/googlestaging/frame-interpolation/raw/main/moment.gif
|
||||
@@ -69,12 +53,6 @@ Demos that demonstrate inference on a particular model.
|
||||
|
||||
Image generation with Latent Consistency Model and OpenVINO.
|
||||
|
||||
.. showcase::
|
||||
:title: 263-lcm-lora-controlnet
|
||||
:img: https://user-images.githubusercontent.com/29454499/284292122-f146e16d-7233-49f7-a401-edcb714b5288.png
|
||||
|
||||
Text-to-Image Generation with LCM LoRA and ControlNet Conditioning.
|
||||
|
||||
.. showcase::
|
||||
:title: 262-softvc-voice-conversion
|
||||
:img: _static/images/notebook_eye.png
|
||||
@@ -147,12 +125,6 @@ Demos that demonstrate inference on a particular model.
|
||||
|
||||
Universal segmentation with OneFormer and OpenVINO™.
|
||||
|
||||
.. showcase::
|
||||
:title: 248-ssd-b1
|
||||
:img: https://user-images.githubusercontent.com/29454499/258651862-28b63016-c5ff-4263-9da8-73ca31100165.jpeg
|
||||
|
||||
Image generation with Stable Diffusion XL and OpenVINO™.
|
||||
|
||||
.. showcase::
|
||||
:title: 248-stable-diffusion-xl
|
||||
:img: https://user-images.githubusercontent.com/29454499/258651862-28b63016-c5ff-4263-9da8-73ca31100165.jpeg
|
||||
|
||||
@@ -14,18 +14,17 @@ Inference Device Support
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
openvino_docs_OV_UG_query_api
|
||||
openvino_docs_OV_UG_supported_plugins_CPU
|
||||
openvino_docs_OV_UG_supported_plugins_GPU
|
||||
openvino_docs_OV_UG_supported_plugins_NPU
|
||||
openvino_docs_OV_UG_supported_plugins_GNA
|
||||
openvino_docs_OV_UG_query_api
|
||||
|
||||
|
||||
OpenVINO™ Runtime can infer deep learning models using the following device types:
|
||||
|
||||
* :doc:`CPU <openvino_docs_OV_UG_supported_plugins_CPU>`
|
||||
* :doc:`GPU <openvino_docs_OV_UG_supported_plugins_GPU>`
|
||||
* :doc:`NPU <openvino_docs_OV_UG_supported_plugins_NPU>`
|
||||
* :doc:`GNA <openvino_docs_OV_UG_supported_plugins_GNA>`
|
||||
* :doc:`Arm® CPU <openvino_docs_OV_UG_supported_plugins_CPU>`
|
||||
|
||||
@@ -34,25 +33,26 @@ For a more detailed list of hardware, see :doc:`Supported Devices <openvino_docs
|
||||
.. _devicesupport-feature-support-matrix:
|
||||
|
||||
|
||||
|
||||
Feature Support Matrix
|
||||
#######################################
|
||||
|
||||
The table below demonstrates support of key features by OpenVINO device plugins.
|
||||
|
||||
========================================================================================= ============================ ========== =========== ===========
|
||||
Capability CPU GPU NPU GNA
|
||||
========================================================================================= ============================ ========== =========== ===========
|
||||
:doc:`Heterogeneous execution <openvino_docs_OV_UG_Hetero_execution>` Yes Yes No
|
||||
:doc:`Multi-device execution <openvino_docs_OV_UG_Running_on_multiple_devices>` Yes Yes Partial
|
||||
:doc:`Automatic batching <openvino_docs_OV_UG_Automatic_Batching>` No Yes No
|
||||
:doc:`Multi-stream execution <openvino_docs_deployment_optimization_guide_tput>` Yes (Intel® x86-64 only) Yes No
|
||||
:doc:`Models caching <openvino_docs_OV_UG_Model_caching_overview>` Yes Partial Yes
|
||||
:doc:`Dynamic shapes <openvino_docs_OV_UG_DynamicShapes>` Yes Partial No
|
||||
:doc:`Import/Export <openvino_ecosystem>` Yes No Yes
|
||||
:doc:`Preprocessing acceleration <openvino_docs_OV_UG_Preprocessing_Overview>` Yes Yes No
|
||||
:doc:`Stateful models <openvino_docs_OV_UG_model_state_intro>` Yes No Yes
|
||||
:doc:`Extensibility <openvino_docs_Extensibility_UG_Intro>` Yes Yes No
|
||||
========================================================================================= ============================ ========== =========== ===========
|
||||
========================================================================================= ============================ =============== ===============
|
||||
Capability CPU GPU GNA
|
||||
========================================================================================= ============================ =============== ===============
|
||||
:doc:`Heterogeneous execution <openvino_docs_OV_UG_Hetero_execution>` Yes Yes No
|
||||
:doc:`Multi-device execution <openvino_docs_OV_UG_Running_on_multiple_devices>` Yes Yes Partial
|
||||
:doc:`Automatic batching <openvino_docs_OV_UG_Automatic_Batching>` No Yes No
|
||||
:doc:`Multi-stream execution <openvino_docs_deployment_optimization_guide_tput>` Yes (Intel® x86-64 only) Yes No
|
||||
:doc:`Models caching <openvino_docs_OV_UG_Model_caching_overview>` Yes Partial Yes
|
||||
:doc:`Dynamic shapes <openvino_docs_OV_UG_DynamicShapes>` Yes Partial No
|
||||
:doc:`Import/Export <openvino_ecosystem>` Yes No Yes
|
||||
:doc:`Preprocessing acceleration <openvino_docs_OV_UG_Preprocessing_Overview>` Yes Yes No
|
||||
:doc:`Stateful models <openvino_docs_OV_UG_model_state_intro>` Yes No Yes
|
||||
:doc:`Extensibility <openvino_docs_Extensibility_UG_Intro>` Yes Yes No
|
||||
========================================================================================= ============================ =============== ===============
|
||||
|
||||
For more details on plugin-specific feature limitations, see the corresponding plugin pages.
|
||||
|
||||
@@ -359,7 +359,7 @@ and *W* is limited to 87 when there are 64 input channels.
|
||||
|
||||
:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i16) <../../../docs/OV_Runtime_UG/supported_plugins/files/GNA_Maximum_Input_Tensor_Widths_i16.csv>`
|
||||
|
||||
:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i8) <../../../docs/OV_Runtime_UG/supported_plugins/files/GNA_Maximum_Input_Tensor_Widths_i8.csv>`
|
||||
:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i8) <../../../docs/OV_Runtime_UG/supported_plugins/files/GNA_Maximum_Input_Tensor_Widths_i8.csv>`
|
||||
|
||||
|
||||
.. note::
|
||||
@@ -452,15 +452,6 @@ Below is a list of such operations:
|
||||
|
||||
The behavior depends on specific parameters of the operations and hardware configuration.
|
||||
|
||||
.. important::
|
||||
|
||||
While working on a fine tuned model, inference may give an inaccuracy and performance drop
|
||||
on GPU if winograd convolutions are selected. This issue can be fixed by disabling winograd
|
||||
convolutions:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
compiled_model = core.compile_model(ov_model, device_name=devStr1, config={ "GPU_DISABLE_WINOGRAD_CONVOLUTION": True })
|
||||
|
||||
GPU Performance Checklist: Summary
|
||||
#######################################
|
||||
@@ -0,0 +1,29 @@
|
||||
.. {#openvino_docs_OV_UG_supported_plugins_NPU}
|
||||
|
||||
NPU Device
|
||||
==========
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: The NPU plugin in the Intel® Distribution of OpenVINO™ toolkit
|
||||
aims at high performance inference of neural
|
||||
networks on the low-power NPU processing device.
|
||||
|
||||
|
||||
NPU is a new generation of low-power processing unit dedicated to processing neural networks.
|
||||
The NPU plugin is a core part of the OpenVINO™ toolkit. For its in-depth description, see:
|
||||
|
||||
..
|
||||
- `NPU plugin developer documentation < cmake_options_for_custom_compilation.md ??? >`__.
|
||||
- `NPU plugin source files < ??? >`__.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -3,37 +3,35 @@
|
||||
Query Device Properties - Configuration
|
||||
=======================================
|
||||
|
||||
|
||||
|
||||
.. meta::
|
||||
:description: Learn the details on the process of querying different device
|
||||
properties and configuration values at runtime.
|
||||
|
||||
|
||||
This article provides an overview of how to query different device properties
|
||||
and configuration values at runtime.
|
||||
The OpenVINO™ toolkit supports inference with several types of devices (processors or accelerators).
|
||||
This section provides a high-level description of the process of querying of different device properties and configuration values at runtime.
|
||||
|
||||
OpenVINO runtime has two types of properties:
|
||||
|
||||
- **Read only properties** which provide information about devices, such as device
|
||||
name and execution capabilities, and information about configuration values
|
||||
used to compile the model - ``ov::CompiledModel``.
|
||||
- **Mutable properties**, primarily used to configure the ``ov::Core::compile_model``
|
||||
process and affect final inference on a specific set of devices. Such properties
|
||||
can be set globally per device via ``ov::Core::set_property`` or locally for a
|
||||
particular model in the ``ov::Core::compile_model`` and ``ov::Core::query_model``
|
||||
calls.
|
||||
- Read only properties which provide information about the devices (such as device name or execution capabilities, etc.)
|
||||
and information about configuration values used to compile the model (``ov::CompiledModel``) .
|
||||
- Mutable properties which are primarily used to configure the ``ov::Core::compile_model`` process and affect final
|
||||
inference on a specific set of devices. Such properties can be set globally per device via ``ov::Core::set_property``
|
||||
or locally for particular model in the ``ov::Core::compile_model`` and the ``ov::Core::query_model`` calls.
|
||||
|
||||
|
||||
An OpenVINO property is represented as a named constexpr variable with a given string
|
||||
name and a type. The following example represents a read-only property with the C++ name
|
||||
of ``ov::available_devices``, the string name of ``AVAILABLE_DEVICES`` and the type of
|
||||
``std::vector<std::string>``:
|
||||
An OpenVINO property is represented as a named constexpr variable with a given string name and a type.
|
||||
The following example represents a read-only property with a C++ name of ``ov::available_devices``,
|
||||
a string name of ``AVAILABLE_DEVICES`` and a type of ``std::vector<std::string>``:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
static constexpr Property<std::vector<std::string>, PropertyMutability::RO> available_devices{"AVAILABLE_DEVICES"};
|
||||
|
||||
|
||||
Refer to the :doc:`Hello Query Device C++ Sample <openvino_inference_engine_samples_hello_query_device_README>` sources and
|
||||
Refer to the :doc:`Hello Query Device С++ Sample <openvino_inference_engine_samples_hello_query_device_README>` sources and
|
||||
the :doc:`Multi-Device execution <openvino_docs_OV_UG_Running_on_multiple_devices>` documentation for examples of using
|
||||
setting and getting properties in user applications.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user