Merge branch 'master' into github_actions/tests_parallel

Mikhail Ryzhov 2023-08-31 10:59:10 +02:00 committed by GitHub
commit b97bda1401
611 changed files with 5254 additions and 3293 deletions

.github/workflows/linux_debian.yml vendored Normal file

@@ -0,0 +1,438 @@
name: Linux Debian (Ubuntu 20.04, Python 3.11)
on:
schedule:
# run daily at 00:00
- cron: '0 0 * * *'
workflow_dispatch:
# pull_request:
# paths-ignore:
# - '**/docs/**'
# - 'docs/**'
# - '**/**.md'
# - '**.md'
# - '**/layer_tests_summary/**'
# - '**/conformance/**'
# push:
# paths-ignore:
# - '**/docs/**'
# - 'docs/**'
# - '**/**.md'
# - '**.md'
# - '**/layer_tests_summary/**'
# - '**/conformance/**'
# branches:
# - master
concurrency:
group: ${{ github.head_ref || github.run_id }}-linux-debian
cancel-in-progress: true
jobs:
Build:
# TODO: remove. Temporary measure to prevent the workflow from scheduling on forks.
if: ${{ github.repository_owner == 'openvinotoolkit' }}
defaults:
run:
shell: bash
runs-on: ubuntu-20.04-8-cores
env:
CMAKE_BUILD_TYPE: 'Release'
CMAKE_GENERATOR: 'Ninja'
CMAKE_CXX_COMPILER_LAUNCHER: ccache
CMAKE_C_COMPILER_LAUNCHER: ccache
CMAKE_CXX_LINKER_LAUNCHER: ccache
CMAKE_C_LINKER_LAUNCHER: ccache
BUILD_TYPE: Release
OPENVINO_REPO: ${{ github.workspace }}/openvino
BUILD_DIR: ${{ github.workspace }}/build
INSTALL_DIR: ${{ github.workspace }}/install
INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests
DATA_PATH: ${{ github.workspace }}/testdata
MODELS_PATH: ${{ github.workspace }}/testdata
OV_TEMP: ${{ github.workspace }}/openvino_temp
SAMPLES_INSTALL_DIR: /usr/share/openvino/samples
PYTHON_STATIC_ARGS: -m "not dynamic_library and not template_plugin"
steps:
- name: Clone OpenVINO
uses: actions/checkout@v3
with:
path: 'openvino'
submodules: 'recursive'
- name: Clone testdata for C API tests
uses: actions/checkout@v3
with:
repository: 'openvinotoolkit/testdata'
path: 'testdata'
submodules: 'recursive'
lfs: 'true'
- name: Create Directories
run: |
mkdir -p ${{ env.BUILD_DIR }}
mkdir -p ${{ env.INSTALL_DIR }}
- name: Setup Python 3.11
uses: actions/setup-python@v4
with:
python-version: '3.11'
#
# Dependencies
#
- name: Install build dependencies
run: |
sudo -E apt update
sudo -E ${{ env.OPENVINO_REPO }}/install_build_dependencies.sh
# 'clang' is used as the default compiler
sudo apt --assume-yes install clang
sudo apt --assume-yes install --no-install-recommends libopencv-imgproc-dev libopencv-imgcodecs-dev
# Speed up build
sudo apt -y --no-install-recommends install unzip
wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
unzip ninja-linux.zip
sudo cp -v ninja /usr/local/bin/
# Speed up tests
git clone https://github.com/google/gtest-parallel.git
- name: Install python dependencies
run: |
python3 -m pip install --upgrade pip
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt
# For running Python API tests
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
# For running Paddle frontend unit tests
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt
# For running ONNX frontend unit tests
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt
# For running TensorFlow frontend unit tests
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt
# For MO unit tests
python3 -m pip install -U pip
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt
# for Python API tests
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements_test.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements.txt
- name: Setup ccache
uses: hendrikmuhs/ccache-action@v1.2
with:
max-size: "2000M"
# Should save the cache only when run on the master branch of the base repo
# github.ref_name is 'ref/PR_#' for a PR, and 'branch_name' when executed on push
save: ${{ github.ref_name == 'master' && 'true' || 'false' }}
verbose: 2
key: ${{ github.job }}-linux-debian
restore-keys: |
${{ github.job }}-linux-debian
- name: Get tools versions
run: |
ninja --version
ccache --version
python3 --version
cmake --version
#
# Build
#
- name: Get number of CPU cores
uses: SimenB/github-actions-cpu-cores@v1
id: cpu-cores
- name: CMake configure
run: |
cmake \
-GNinja \
-DENABLE_CPPLINT=OFF \
-DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \
-DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \
-DENABLE_PYTHON=ON \
-DENABLE_INTEL_GNA=OFF \
-DENABLE_TESTS=ON \
-DENABLE_FASTER_BUILD=ON \
-DENABLE_STRICT_DEPENDENCIES=OFF \
-DENABLE_SYSTEM_SNAPPY=ON \
-DENABLE_PYTHON_PACKAGING=ON \
-DCPACK_GENERATOR=DEB \
-S ${{ env.OPENVINO_REPO }} \
-B ${{ env.BUILD_DIR }}
- name: Clean ccache stats
run: ccache --zero-stats --show-config
- name: Build
run: cmake --build ${{ env.BUILD_DIR }} --parallel ${{ steps.cpu-cores.outputs.count }} --config ${{ env.BUILD_TYPE }}
- name: Show ccache stats
run: ccache --show-stats
- name: CMake Layer Tests
run: cmake -GNinja -S ${{ env.OPENVINO_REPO }}/tests/layer_tests -B ${{ env.BUILD_DIR }}/layer_tests
- name: Build Layer Tests
run: cmake --build ${{ env.BUILD_DIR }}/layer_tests --parallel --config ${{ env.BUILD_TYPE }}
# to check that the wheel packages tested later contain all the dependencies, like TBB or pugixml
- name: Remove debian dependencies
run: sudo apt-get remove libtbb2 libpugixml1v5 -y
- name: Install wheel packages
run: cmake -DCOMPONENT=python_wheels -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake
- name: Install Python Samples
run: cmake -DCOMPONENT=python_samples -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake
- name: Install Layer Tests
run: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/layer_tests/cmake_install.cmake
- name: Install tests
run: cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -DCOMPONENT=tests -P ${{ env.BUILD_DIR }}/cmake_install.cmake
- name: List install test files
run: ls -alR ${{ env.INSTALL_DIR }}
- name: Install python wheels
run: python3 -m pip install openvino-dev --find-links=${{ env.INSTALL_DIR }}/tools
- name: Build Debian packages
run: |
sudo apt-get install libtbb-dev libpugixml-dev -y
cmake --build ${{ env.BUILD_DIR }} --config ${{ env.BUILD_TYPE }} --target package --parallel
- name: Install Debian packages
run: |
pushd ${{ env.BUILD_DIR }}
# install Debian packages from the previous release
sudo apt-get -y update
sudo apt-get install --no-install-recommends gnupg wget -y
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2023.list
sudo apt-get update -o Dir::Etc::sourcelist=/etc/apt/sources.list.d/intel-openvino-2023.list
sudo apt-get install openvino -y
# install our locally built openvino package and make sure the conflicts are resolved
sudo apt-get install --no-install-recommends dpkg-dev -y
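# drop CPack's staging directory so it is not indexed, then build a flat local apt repo over the freshly built .deb files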
rm -r _CPack_Packages
dpkg-scanpackages . /dev/null | gzip -9c > Packages.gz
echo "deb [trusted=yes] file:${{ env.BUILD_DIR }} ./" | sudo tee /etc/apt/sources.list.d/openvino-local.list
sudo apt-get update
sudo apt-get install openvino -y
popd
- name: List install files
run: ls -alR ${{ env.INSTALL_DIR }}
- name: Build cpp samples - gcc
run: ${{ env.SAMPLES_INSTALL_DIR }}/cpp/build_samples.sh -i ${{ env.INSTALL_DIR }}
- name: Build c samples
run: ${{ env.SAMPLES_INSTALL_DIR }}/c/build_samples.sh -i ${{ env.INSTALL_DIR }}
- name: OpenVINO Core Unit Tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVCoreUT.xml
- name: Proxy Plugin Tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml
- name: Hetero Func Tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml
- name: ONNX frontend tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU*:*FrontEndLoadFromTest.testLoadFromTwoStreams*:*FrontEndLoadFromTest.testLoadFromTwoFiles* \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml
- name: TensorFlow frontend tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml
# Disabled in Azure: https://github.com/openvinotoolkit/openvino/blob/master/.ci/azure/linux.yml#L403
# - name: PaddlePaddle frontend tests
# run: |
# ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* \
# --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml
- name: TensorFlow Common tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml
- name: TensorFlow Lite frontend tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml
- name: Snippets func tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml
- name: CPU plugin unit tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml
- name: AUTO UT
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml
- name: Template plugin tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 \
--gtest_filter=*smoke* \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml
- name: Inference Engine C API tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineCAPITests.xml
- name: OpenVINO C API tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 \
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml
- name: nGraph and IE Python Bindings Tests
run: |
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} \
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \
--ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_zoo_models.py \
--ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests/test_onnx/test_backend.py
- name: Python API 2.0 Tests
run: |
# For python imports to import pybind_mock_frontend
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:${{ env.OPENVINO_REPO }}/tools/mo:$PYTHONPATH
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino \
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \
--ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py
- name: ONNX Frontend Python Tests
run: |
# For python imports to import pybind_mock_frontend
export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:${{ env.OPENVINO_REPO }}/tools/mo:$PYTHONPATH
export LD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$LD_LIBRARY_PATH
python3 -m pytest -sv ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests \
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ONNX-FE-PYTHON.xml \
--ignore=${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/test_python/test_zoo_models.py \
--ignore=${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/test_python/test_backend.py
- name: Model Optimizer UT
run: |
export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.OPENVINO_REPO }}/tools/ovc/:${{ env.LAYER_TESTS_INSTALL_DIR }}:${{ env.INSTALL_TEST_DIR }}:${{ env.INSTALL_DIR }}/python/python3.11:$PYTHONPATH
# Needs to be reinstalled to get the correct numpy version
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt
python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests \
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml
# run only a subset of the smoke tests to save time in post-commit
- name: CPU FuncTests
run: ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_filter=*OVCLass*:*CoreThreadingTests* --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_cpu_func_tests.xml
- name: CMake Samples Tests
run: cmake -GNinja -S ${{ env.OPENVINO_REPO }}/tests/samples_tests -B ${{ env.BUILD_DIR }}/samples_tests
- name: Install Samples Tests
run: cmake -DCOMPONENT=tests -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/samples_tests/cmake_install.cmake
- name: Samples Smoke Tests
run: |
python3 -m pip install --ignore-installed PyYAML -r ${{ env.INSTALL_TEST_DIR }}/smoke_tests/requirements.txt
export LD_LIBRARY_PATH=${{ env.IE_APP_PATH }}:$LD_LIBRARY_PATH
python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/smoke_tests -k "not GNA" \
--env_conf ${{ env.INSTALL_TEST_DIR }}/smoke_tests/env_config.yml \
--junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-SamplesSmokeTests.xml
env:
IE_APP_PATH: ${{ env.INSTALL_DIR }}/samples_bin
IE_APP_PYTHON_PATH: ${{ env.INSTALL_DIR }}/share/openvino/samples/python
LD_LIBRARY_PATH: ${{ env.INSTALL_DIR }}/samples_bin
SHARE: ${{ env.INSTALL_TEST_DIR }}/smoke_tests/samples_smoke_tests_data
WORKSPACE: ${{ env.INSTALL_DIR }}
- name: TensorFlow 1 Layer Tests - Legacy FE
run: |
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml
- name: TensorFlow Lite Layer Tests - TFL FE
run: |
python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH
# Needs to be reinstalled to get the correct numpy version
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_caffe.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_kaldi.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_onnx.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_tf2.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_dev.txt
python3 -m pip install -r ${{ env.OPENVINO_REPO }}/tools/mo/requirements_mxnet.txt
python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_lite_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tfl_fe.xml
env:
TEST_DEVICE: CPU
- name: Upload Test Results
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: test-results
path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
if-no-files-found: 'error'

@@ -0,0 +1,896 @@
Video generation with ZeroScope and OpenVINO
============================================
.. _top:
The ZeroScope model is a free and open-source text-to-video model that
can generate realistic and engaging videos from text descriptions. It is
based on the
`Modelscope <https://modelscope.cn/models/damo/text-to-video-synthesis/summary>`__
model, but it has been improved to produce higher-quality videos with a
16:9 aspect ratio and no Shutterstock watermark. The ZeroScope model is
available in two versions: ZeroScope_v2 576w, which is optimized for
rapid content creation at a resolution of 576x320 pixels, and
ZeroScope_v2 XL, which upscales videos to a high-definition resolution
of 1024x576.
The ZeroScope model is trained on a dataset of over 9,000 videos and
29,000 tagged frames. It uses a diffusion model to generate videos,
which means that it starts with a random noise image and gradually adds
detail to it until it matches the text description. The ZeroScope model
is still under development, but it has already been used to create some
impressive videos. For example, it has been used to create videos of
people dancing, playing sports, and even driving cars.
The ZeroScope model is a powerful tool that can be used to create
various videos, from simple animations to complex scenes. It is still
under development, but it has the potential to revolutionize the way we
create and consume video content.
Both versions of the ZeroScope model are available on Hugging Face:
- `ZeroScope_v2 576w <https://huggingface.co/cerspense/zeroscope_v2_576w>`__
- `ZeroScope_v2 XL <https://huggingface.co/cerspense/zeroscope_v2_XL>`__
We will use the first one.
**Table of contents**:
- `Install and import required packages <#install-and-import-required-packages>`__
- `Load the model <#load-the-model>`__
- `Convert the model <#convert-the-model>`__
- `Define the conversion function <#define-the-conversion-function>`__
- `UNet <#unet>`__
- `VAE <#vae>`__
- `Text encoder <#text-encoder>`__
- `Build a pipeline <#build-a-pipeline>`__
- `Inference with OpenVINO <#inference-with-openvino>`__
- `Select inference device <#select-inference-device>`__
- `Define a prompt <#define-a-prompt>`__
- `Video generation <#video-generation>`__
- `Interactive demo <#interactive-demo>`__
.. important::
This tutorial requires at least 24GB of free memory to generate a video with
a frame size of 432x240 and 16 frames. Increasing either of these values will
require more memory and take more time.
Install and import required packages `⇑ <#top>`__
###############################################################################################################################
To work with the text-to-video synthesis model, we will use Hugging Face's
`Diffusers <https://github.com/huggingface/diffusers>`__ library. It
provides an already pretrained model from ``cerspense``.
.. code:: ipython3
!pip install -q "diffusers[torch]>=0.15.0" transformers "openvino==2023.1.0.dev20230811" numpy gradio
.. code:: ipython3
import gc
from pathlib import Path
from typing import Optional, Union, List, Callable
import base64
import tempfile
import warnings
import diffusers
import transformers
import numpy as np
import IPython
import ipywidgets as widgets
import torch
import PIL
import gradio as gr
import openvino as ov
.. parsed-literal::
2023-08-16 21:15:40.145184: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2023-08-16 21:15:40.146998: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
2023-08-16 21:15:40.179214: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
2023-08-16 21:15:40.180050: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-08-16 21:15:40.750499: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
Original 576x320 inference requires a lot of RAM (>100GB), so let's run
our example at a smaller frame size, keeping the same aspect ratio. Try
reducing the values below to reduce memory consumption.
.. code:: ipython3
WIDTH = 432 # must be divisible by 8
HEIGHT = 240 # must be divisible by 8
NUM_FRAMES = 16
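The VAE downscales each spatial dimension by a factor of 8, which is why both
values must be divisible by 8. As a minimal optional guard (our addition, not
part of the original notebook), you can assert this up front; the pipeline's
``check_inputs`` method enforces the same rule later.
.. code:: ipython3
assert WIDTH % 8 == 0 and HEIGHT % 8 == 0, "frame size must be divisible by 8"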
Load the model `⇑ <#top>`__
###############################################################################################################################
The model is loaded from Hugging Face using the ``from_pretrained`` method
of ``diffusers.DiffusionPipeline``.
.. code:: ipython3
pipe = diffusers.DiffusionPipeline.from_pretrained('cerspense/zeroscope_v2_576w')
.. parsed-literal::
vae/diffusion_pytorch_model.safetensors not found
.. parsed-literal::
Loading pipeline components...: 0%| | 0/5 [00:00<?, ?it/s]
.. code:: ipython3
unet = pipe.unet
unet.eval()
vae = pipe.vae
vae.eval()
text_encoder = pipe.text_encoder
text_encoder.eval()
tokenizer = pipe.tokenizer
scheduler = pipe.scheduler
vae_scale_factor = pipe.vae_scale_factor
unet_in_channels = pipe.unet.config.in_channels
sample_width = WIDTH // vae_scale_factor
sample_height = HEIGHT // vae_scale_factor
del pipe
gc.collect();
Convert the model `⇑ <#top>`__
###############################################################################################################################
The architecture for generating videos from text comprises three
distinct sub-networks: one for extracting text features, another for
translating text features into the video latent space using a diffusion
model, and a final one for mapping the video latent space to the visual
space. The collective parameters of the entire model amount to
approximately 1.7 billion. It is capable of processing English input. The
diffusion model is built upon the Unet3D model and achieves video
generation by iteratively denoising a starting point of pure Gaussian
noise video.
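As a small optional sanity check (our addition, assuming the ``unet``, ``vae``
and ``text_encoder`` objects extracted above are still in memory), we can
confirm the parameter count quoted above:
.. code:: ipython3
total = sum(sum(p.numel() for p in m.parameters()) for m in (unet, vae, text_encoder))
print(f"~{total / 1e9:.1f}B parameters")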
.. image:: 253-zeroscope-text2video-with-output_files/253-zeroscope-text2video-with-output_01_02.png
Define the conversion function `⇑ <#top>`__
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Model components are PyTorch modules that can be converted directly with
the ``ov.convert_model`` function. We also use the ``ov.save_model``
function to serialize the conversion result.
.. code:: ipython3
warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
.. code:: ipython3
def convert(model: torch.nn.Module, xml_path: str, **convert_kwargs) -> Path:
xml_path = Path(xml_path)
if not xml_path.exists():
xml_path.parent.mkdir(parents=True, exist_ok=True)
with torch.no_grad():
converted_model = ov.convert_model(model, **convert_kwargs)
ov.save_model(converted_model, xml_path)
del converted_model
gc.collect()
torch._C._jit_clear_class_registry()
torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
torch.jit._state._clear_class_state()
return xml_path
UNet `⇑ <#top>`__
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The main component of the text-to-video generation pipeline is a
conditional 3D UNet model that takes a noisy sample, a conditional state,
and a timestep, and returns a sample-shaped output.
.. code:: ipython3
unet_xml_path = convert(
unet,
"models/unet.xml",
example_input={
"sample": torch.randn(2, 4, 2, 32, 32),
"timestep": torch.tensor(1),
"encoder_hidden_states": torch.randn(2, 77, 1024),
},
input=[
("sample", (2, 4, NUM_FRAMES, sample_height, sample_width)),
("timestep", ()),
("encoder_hidden_states", (2, 77, 1024)),
],
)
del unet
gc.collect();
.. parsed-literal::
WARNING:tensorflow:Please fix your imports. Module tensorflow.python.training.tracking.base has been moved to tensorflow.python.trackable.base. The old module will be deleted in version 2.11.
.. parsed-literal::
[ WARNING ] Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s.
VAE `⇑ <#top>`__
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The variational autoencoder (VAE) decodes the latents produced by the UNet
into visual representations. Our VAE model uses a KL loss for encoding
images into latents and decoding latent representations back into images.
For inference, we need only the decoder part.
.. code:: ipython3
class VaeDecoderWrapper(torch.nn.Module):
def __init__(self, vae):
super().__init__()
self.vae = vae
def forward(self, z: torch.FloatTensor):
return self.vae.decode(z)
.. code:: ipython3
vae_decoder_xml_path = convert(
VaeDecoderWrapper(vae),
"models/vae.xml",
example_input=torch.randn(2, 4, 32, 32),
input=((NUM_FRAMES, 4, sample_height, sample_width)),
)
del vae
gc.collect();
Text encoder `⇑ <#top>`__
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The text encoder encodes the input prompt into a tensor. The default
tensor length is 77.
.. code:: ipython3
text_encoder_xml = convert(
text_encoder,
"models/text_encoder.xml",
example_input=torch.ones(1, 77, dtype=torch.int64),
input=((1, 77), (ov.Type.i64,)),
)
del text_encoder
gc.collect();
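To see where the fixed length of 77 comes from, here is an optional check (our
addition): the CLIP tokenizer kept from the original pipeline pads or
truncates every prompt to ``model_max_length`` tokens.
.. code:: ipython3
ids = tokenizer("a prompt", padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt").input_ids
print(ids.shape)  # torch.Size([1, 77])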
Build a pipeline `⇑ <#top>`__
###############################################################################################################################
.. code:: ipython3
def tensor2vid(video: torch.Tensor, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -> List[np.ndarray]:
# This code is copied from https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78
# reshape to ncfhw
mean = torch.tensor(mean, device=video.device).reshape(1, -1, 1, 1, 1)
std = torch.tensor(std, device=video.device).reshape(1, -1, 1, 1, 1)
# unnormalize back to [0,1]
video = video.mul_(std).add_(mean)
video.clamp_(0, 1)
# prepare the final outputs
i, c, f, h, w = video.shape
images = video.permute(2, 3, 0, 4, 1).reshape(
f, h, i * w, c
) # 1st (frames, h, batch_size, w, c) 2nd (frames, h, batch_size * w, c)
images = images.unbind(dim=0) # prepare a list of individual (consecutive) frames
images = [(image.cpu().numpy() * 255).astype("uint8") for image in images] # f h w c
return images
.. code:: ipython3
class OVTextToVideoSDPipeline(diffusers.DiffusionPipeline):
def __init__(
self,
vae_decoder: ov.CompiledModel,
text_encoder: ov.CompiledModel,
tokenizer: transformers.CLIPTokenizer,
unet: ov.CompiledModel,
scheduler: diffusers.schedulers.DDIMScheduler,
):
super().__init__()
self.vae_decoder = vae_decoder
self.text_encoder = text_encoder
self.tokenizer = tokenizer
self.unet = unet
self.scheduler = scheduler
self.vae_scale_factor = vae_scale_factor
self.unet_in_channels = unet_in_channels
self.width = WIDTH
self.height = HEIGHT
self.num_frames = NUM_FRAMES
def __call__(
self,
prompt: Union[str, List[str]] = None,
num_inference_steps: int = 50,
guidance_scale: float = 9.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "np",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`
instead.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to higher quality videos at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 9.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate videos that are closely linked to the text `prompt`,
usually at the expense of lower video quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the video generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`. Latents should be of shape
`(batch_size, num_channel, num_frames, height, width)`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"np"`):
The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Returns:
`List[np.ndarray]`: generated video frames
"""
num_images_per_prompt = 1
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
prompt_embeds = self._encode_prompt(
prompt,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet_in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
prompt_embeds.dtype,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = {"generator": generator, "eta": eta}
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = (
torch.cat([latents] * 2) if do_classifier_free_guidance else latents
)
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
{
"sample": latent_model_input,
"timestep": t,
"encoder_hidden_states": prompt_embeds,
}
)[0]
noise_pred = torch.tensor(noise_pred)
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (
noise_pred_text - noise_pred_uncond
)
# reshape latents
bsz, channel, frames, width, height = latents.shape
latents = latents.permute(0, 2, 1, 3, 4).reshape(
bsz * frames, channel, width, height
)
noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(
bsz * frames, channel, width, height
)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred, t, latents, **extra_step_kwargs
).prev_sample
# reshape latents back
latents = (
latents[None, :]
.reshape(bsz, frames, channel, width, height)
.permute(0, 2, 1, 3, 4)
)
# call the callback, if provided
if i == len(timesteps) - 1 or (
(i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
video_tensor = self.decode_latents(latents)
if output_type == "pt":
video = video_tensor
else:
video = tensor2vid(video_tensor)
if not return_dict:
return (video,)
return {"frames": video}
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
def _encode_prompt(
self,
prompt,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
"""
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(
prompt, padding="longest", return_tensors="pt"
).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
print(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = self.text_encoder(text_input_ids)
prompt_embeds = prompt_embeds[0]
prompt_embeds = torch.tensor(prompt_embeds)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = self.text_encoder(uncond_input.input_ids)
negative_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = torch.tensor(negative_prompt_embeds)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(
batch_size * num_images_per_prompt, seq_len, -1
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
def prepare_latents(
self,
batch_size,
num_channels_latents,
dtype,
generator,
latents=None,
):
shape = (
batch_size,
num_channels_latents,
self.num_frames,
self.height // self.vae_scale_factor,
self.width // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = diffusers.utils.randn_tensor(shape, generator=generator, dtype=dtype)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def check_inputs(
self,
prompt,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if self.height % 8 != 0 or self.width % 8 != 0:
raise ValueError(
f"`height` and `width` have to be divisible by 8 but are {self.height} and {self.width}."
)
if (callback_steps is None) or (
callback_steps is not None
and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def decode_latents(self, latents):
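# 0.18215 is the standard latent scaling factor of Stable-Diffusion-style VAEs;
# dividing by it undoes the scaling applied during training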
scale_factor = 0.18215
latents = 1 / scale_factor * latents
batch_size, channels, num_frames, height, width = latents.shape
latents = latents.permute(0, 2, 1, 3, 4).reshape(
batch_size * num_frames, channels, height, width
)
image = self.vae_decoder(latents)[0]
image = torch.tensor(image)
video = (
image[None, :]
.reshape(
(
batch_size,
num_frames,
-1,
)
+ image.shape[2:]
)
.permute(0, 2, 1, 3, 4)
)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
video = video.float()
return video
Inference with OpenVINO `⇑ <#top>`__
###############################################################################################################################
.. code:: ipython3
core = ov.Core()
Select inference device `⇑ <#top>`__
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Select the device from the dropdown list to run inference with OpenVINO.
.. code:: ipython3
device = widgets.Dropdown(
options=core.available_devices + ["AUTO"],
value='AUTO',
description='Device:',
disabled=False,
)
device
.. parsed-literal::
Dropdown(description='Device:', index=4, options=('CPU', 'GPU.0', 'GPU.1', 'GPU.2', 'AUTO'), value='AUTO')
.. code:: ipython3
%%time
ov_unet = core.compile_model(unet_xml_path, device_name=device.value)
.. parsed-literal::
CPU times: user 14.1 s, sys: 5.62 s, total: 19.7 s
Wall time: 10.6 s
.. code:: ipython3
%%time
ov_vae_decoder = core.compile_model(vae_decoder_xml_path, device_name=device.value)
.. parsed-literal::
CPU times: user 456 ms, sys: 320 ms, total: 776 ms
Wall time: 328 ms
.. code:: ipython3
%%time
ov_text_encoder = core.compile_model(text_encoder_xml, device_name=device.value)
.. parsed-literal::
CPU times: user 1.78 s, sys: 1.44 s, total: 3.22 s
Wall time: 1.13 s
Here we replace the pipeline parts with versions converted to OpenVINO
IR and compiled for a specific device. Note that we use the original
pipeline tokenizer and scheduler.
.. code:: ipython3
ov_pipe = OVTextToVideoSDPipeline(ov_vae_decoder, ov_text_encoder, tokenizer, ov_unet, scheduler)
Define a prompt `⇑ <#top>`__
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. code:: ipython3
prompt = "A panda eating bamboo on a rock."
Let's generate a video for our prompt. For the full list of arguments, see
the ``__call__`` function definition of the ``OVTextToVideoSDPipeline`` class in
the `Build a pipeline <#build-a-pipeline>`__ section.
Video generation `⇑ <#top>`__
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. code:: ipython3
frames = ov_pipe(prompt, num_inference_steps=25)['frames']
.. parsed-literal::
0%| | 0/25 [00:00<?, ?it/s]
.. code:: ipython3
images = [PIL.Image.fromarray(frame) for frame in frames]
images[0].save("output.gif", save_all=True, append_images=images[1:], duration=125, loop=0)
with open("output.gif", "rb") as gif_file:
b64 = f'data:image/gif;base64,{base64.b64encode(gif_file.read()).decode()}'
IPython.display.HTML(f"<img src=\"{b64}\" />")
.. image:: 253-zeroscope-text2video-with-output_files/253-zeroscope-text2video-with-output_01_03.gif
Interactive demo `⇑ <#top>`__
###############################################################################################################################
.. code:: ipython3
def generate(
prompt, seed, num_inference_steps, _=gr.Progress(track_tqdm=True)
):
generator = torch.Generator().manual_seed(seed)
frames = ov_pipe(
prompt,
num_inference_steps=num_inference_steps,
generator=generator,
)["frames"]
out_file = tempfile.NamedTemporaryFile(suffix=".gif", delete=False)
images = [PIL.Image.fromarray(frame) for frame in frames]
images[0].save(
out_file, save_all=True, append_images=images[1:], duration=125, loop=0
)
return out_file.name
demo = gr.Interface(
generate,
[
gr.Textbox(label="Prompt"),
gr.Slider(0, 1000000, value=42, label="Seed", step=1),
gr.Slider(10, 50, value=25, label="Number of inference steps", step=1),
],
gr.Image(label="Result"),
examples=[
["An astronaut riding a horse.", 0, 25],
["A panda eating bamboo on a rock.", 0, 25],
["Spiderman is surfing.", 0, 25],
],
allow_flagging="never"
)
try:
demo.queue().launch(debug=True)
except Exception:
demo.queue().launch(share=True, debug=True)
# if you are launching remotely, specify server_name and server_port
# demo.launch(server_name='your server name', server_port='server port in int')
# Read more in the docs: https://gradio.app/docs/

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9b3abdf1818a885d159961285a1ef96a2c0c0c99d26eac96435b7813e28198d
size 41341

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0786f897470a25d935d1f5e096132f086c7f96f42d441102f598828d6d39452
size 1366066

@@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset1 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset1 operation set, supported in OpenVINO™
toolkit.
This specification document describes ``opset1`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities available in a inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are expressed as IR V10 xml
This specification document describes ``opset1`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset1``.
@@ -126,4 +126,3 @@ Table of Contents
@endsphinxdirective

@@ -3,14 +3,14 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset10 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset10 operation set, supported in OpenVINO™
toolkit.
This specification document describes the ``opset10`` operation set supported in OpenVINO™.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR V10 xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding nGraph operation classes
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset10``.
@@ -195,4 +195,3 @@ Table of Contents
* :doc:`VariadicSplit <openvino_docs_ops_movement_VariadicSplit_1>`
@endsphinxdirective

@@ -3,14 +3,14 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset11 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset11 operation set, supported in OpenVINO™
toolkit.
This specification document describes the ``opset11`` operation set supported in OpenVINO™.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR V10 xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding nGraph operation classes
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset11``.
@@ -195,4 +195,3 @@ Table of Contents
* :doc:`VariadicSplit <openvino_docs_ops_movement_VariadicSplit_1>`
@endsphinxdirective

@@ -3,14 +3,14 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset12 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset12 operation set, supported in OpenVINO™
toolkit.
This specification document describes the ``opset12`` operation set supported in OpenVINO™.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR V10 xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding nGraph operation classes
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset12``.

@@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset2 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset2 operation set, supported in OpenVINO™
toolkit.
This specification document describes ``opset2`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities available in a inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are expressed as IR V10 xml
This specification document describes ``opset2`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset2``.
@@ -132,4 +132,3 @@ Table of Contents
@endsphinxdirective

@@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset3 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset3 operation set, supported in OpenVINO™
toolkit.
This specification document describes ``opset3`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities available in a inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are expressed as IR V10 xml
This specification document describes ``opset3`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset3``.
@@ -147,4 +147,3 @@ Table of Contents
* :doc:`VariadicSplit <openvino_docs_ops_movement_VariadicSplit_1>`
@endsphinxdirective

@@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset4 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset4 operation set, supported in OpenVINO™
toolkit.
This specification document describes ``opset4`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities available in a inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are expressed as IR V10 xml
This specification document describes ``opset4`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset4``.
@@ -157,4 +157,3 @@ Table of Contents
* :doc:`VariadicSplit <openvino_docs_ops_movement_VariadicSplit_1>`
@endsphinxdirective

@@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR V10
XML snippets in the opset5 operation set, supported in OpenVINO™
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset5 operation set, supported in OpenVINO™
toolkit.
This specification document describes ``opset5`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities available in a inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are expressed as IR V10 xml
This specification document describes ``opset5`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR xml
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset5``.
@@ -166,4 +166,3 @@ Table of Contents
@endsphinxdirective

@@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset6 operation set, supported in OpenVINO™
toolkit.
This specification document describes the ``opset6`` operation set supported in OpenVINO.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR XML
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset6``.
@ -171,4 +171,3 @@ Table of Contents
* :doc:`VariadicSplit <openvino_docs_ops_movement_VariadicSplit_1>`
@endsphinxdirective

View File

@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset7 operation set, supported in OpenVINO™
toolkit.
This specification document describes the ``opset7`` operation set supported in OpenVINO™.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR XML
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset7``.
@ -175,4 +175,3 @@ Table of Contents
* :doc:`VariadicSplit <openvino_docs_ops_movement_VariadicSplit_1>`
@endsphinxdirective

View File

@ -3,13 +3,13 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset8 operation set, supported in OpenVINO™
toolkit.
This specification document describes the ``opset8`` operation set supported in OpenVINO™.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR XML
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset8``.

View File

@ -3,14 +3,14 @@
@sphinxdirective
.. meta::
:description: Explore the examples of operation instances expressed as IR
XML snippets in the opset9 operation set, supported in OpenVINO™
toolkit.
This specification document describes the ``opset9`` operation set supported in OpenVINO™.
Support for each particular operation from the list below depends on the capabilities of an inference plugin
and may vary among different hardware platforms and devices. Examples of operation instances are provided as IR XML
snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding OpenVINO operation classes
declared in ``namespace opset9``.
@ -192,4 +192,3 @@ Table of Contents
* :doc:`VariadicSplit <openvino_docs_ops_movement_VariadicSplit_1>`
@endsphinxdirective

View File

@ -16,7 +16,8 @@ calibration_dataset = nncf.Dataset(calibration_loader, transform_fn)
#! [dataset]
#! [quantization]
import onnx
model = onnx.load("model_path")  # an onnx.ModelProto object
quantized_model = nncf.quantize(model, calibration_dataset)
#! [quantization]
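The ``calibration_dataset`` used above wraps a data loader together with a ``transform_fn``. As an illustration for the ONNX case, a ``transform_fn`` might map each batch to the model's input name; here ``"input"`` and the (images, labels) batch layout are assumptions, not part of the original snippet:

def transform_fn(data_item):
    images, _ = data_item              # assumes (images, labels) batches
    return {"input": images.numpy()}   # ONNX models take {input_name: ndarray}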

View File

@ -15,14 +15,13 @@ calibration_dataset = nncf.Dataset(calibration_loader, transform_fn)
#! [dataset]
#! [quantization]
import openvino.runtime as ov
model = ov.Core().read_model("model_path")  # an openvino.runtime.Model object
quantized_model = nncf.quantize(model, calibration_dataset)
#! [quantization]
#! [inference]
# compile the model to transform quantized operations to int8
model_int8 = ov.compile_model(quantized_model)
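Once compiled, the int8 model can be called directly on input data. A minimal usage sketch, where the input shape below is a placeholder for whatever the real model expects:

import numpy as np

example_input = np.zeros((1, 3, 224, 224), dtype=np.float32)  # placeholder shape
output_key = model_int8.output(0)
result = model_int8([example_input])[output_key]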

View File

@ -15,7 +15,8 @@ calibration_dataset = nncf.Dataset(calibration_loader, transform_fn)
#! [dataset]
#! [quantization]
import tensorflow as tf
model = tf.saved_model.load("model_path")  # a tensorflow.Module object
quantized_model = nncf.quantize(model, calibration_dataset)
#! [quantization]
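Loading a SavedModel is one way to obtain the ``tensorflow.Module`` object; assuming NNCF's TensorFlow backend also accepts Keras models (as its examples suggest), a hypothetical alternative starting point would be:

import tensorflow as tf

# Hypothetical alternative: a pre-trained Keras model (reuses the
# calibration_dataset and nncf import from the snippet above)
model = tf.keras.applications.MobileNetV2(weights="imagenet")
quantized_model = nncf.quantize(model, calibration_dataset)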

View File

@ -15,7 +15,8 @@ calibration_dataset = nncf.Dataset(calibration_loader, transform_fn)
#! [dataset]
#! [quantization]
import torchvision
model = torchvision.models.resnet50(pretrained=True)  # a torch.nn.Module object
quantized_model = nncf.quantize(model, calibration_dataset)
#! [quantization]
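Unlike the ONNX case, the PyTorch backend passes the transformed item straight to the model's ``forward``. A hypothetical calibration setup, where ``my_dataset`` is a placeholder for any dataset yielding (image, label) pairs:

import torch

calibration_loader = torch.utils.data.DataLoader(my_dataset, batch_size=1)

def transform_fn(data_item):
    images, _ = data_item   # forward() receives the tensor directly
    return images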

View File

@ -154,115 +154,117 @@ Demos that demonstrate inference on a particular model.
.. dropdown:: Explore more notebooks below.
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| Notebook | Description | Preview |
+===============================================================================================================================+============================================================================================================================================+====================================================+
| `201-vision-monodepth <notebooks/201-vision-monodepth-with-output.html>`__ |br| |n201| |br| |c201| | Monocular depth estimation with images and video. | |n201-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `202-vision-superresolution-image <notebooks/202-vision-superresolution-image-with-output.html>`__ |br| |n202i| |br| |c202i| | Upscale raw images with a super resolution model. | |n202i-img1| → |n202i-img2| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `202-vision-superresolution-video <notebooks/202-vision-superresolution-video-with-output.html>`__ |br| |n202v| |br| |c202v| | Turn 360p into 1080p video using a super resolution model. | |n202v-img1| → |n202v-img2| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `203-meter-reader <notebooks/203-meter-reader-with-output.html>`__ |br| |n203| | PaddlePaddle pre-trained models to read industrial meter's value. | |n203-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `204-segmenter-semantic-segmentation <notebooks/204-segmenter-semantic-segmentation-with-output.html>`__ |br| |c204| | Semantic segmentation with OpenVINO™ using Segmenter. | |n204-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `206-vision-paddlegan-anime <notebooks/206-vision-paddlegan-anime-with-output.html>`__ | Turn an image into anime using a GAN. | |n206-img1| → |n206-img2| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `207-vision-paddlegan-superresolution <notebooks/207-vision-paddlegan-superresolution-with-output.html>`__ | Upscale small images with superresolution using a PaddleGAN model. | |n207-img1| → |n207-img2| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `208-optical-character-recognition <notebooks/208-optical-character-recognition-with-output.html>`__ | Annotate text on images using text recognition resnet. | |n208-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `212-pyannote-speaker-diarization <notebooks/212-pyannote-speaker-diarization-with-output.html>`__ | Run inference on speaker diarization pipeline. | |n212-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `210-slowfast-video-recognition <notebooks/210-slowfast-video-recognition-with-output.html>`__ |br| |n210| | Video Recognition using SlowFast and OpenVINO™ | |n210-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `213-question-answering <notebooks/213-question-answering-with-output.html>`__ |br| |n213|                                      | Answer your questions based on a context.                                                                                                  | |n213-img1|                                        |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `214-grammar-correction <notebooks/214-grammar-correction-with-output.html>`__ | Grammatical error correction with OpenVINO. | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `216-attention-center <notebooks/216-attention-center-with-output.html>`__ | The attention center model with OpenVINO™ | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `217-vision-deblur <notebooks/217-vision-deblur-with-output.html>`__ |br| |n217| | Deblur images with DeblurGAN-v2. | |n217-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `219-knowledge-graphs-conve <notebooks/219-knowledge-graphs-conve-with-output.html>`__ |br| |n219| | Optimize the knowledge graph embeddings model (ConvE) with OpenVINO. | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `220-cross-lingual-books-alignment <notebooks/220-cross-lingual-books-alignment-with-output.html>`__ |br| |n220| |br| |c220| | Cross-lingual Books Alignment With Transformers and OpenVINO™ | |n220-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `221-machine-translation <notebooks/221-machine-translation-with-output.html>`__ |br| |n221| |br| |c221| | Real-time translation from English to German. | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `222-vision-image-colorization <notebooks/222-vision-image-colorization-with-output.html>`__ |br| |n222| | Use pre-trained models to colorize black & white images using OpenVINO. | |n222-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `223-text-prediction <notebooks/223-text-prediction-with-output.html>`__ |br| |c223| | Use pre-trained models to perform text prediction on an input sequence. | |n223-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `224-3D-segmentation-point-clouds <notebooks/224-3D-segmentation-point-clouds-with-output.html>`__ | Process point cloud data and run 3D Part Segmentation with OpenVINO. | |n224-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `225-stable-diffusion-text-to-image <notebooks/225-stable-diffusion-text-to-image-with-output.html>`__ | Text-to-image generation with Stable Diffusion method. | |n225-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `226-yolov7-optimization <notebooks/226-yolov7-optimization-with-output.html>`__ | Optimize YOLOv7, using NNCF PTQ API. | |n226-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `227-whisper-subtitles-generation <notebooks/227-whisper-subtitles-generation-with-output.html>`__ |br| |c227| | Generate subtitles for video with OpenAI Whisper and OpenVINO. | |n227-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `228-clip-zero-shot-convert <notebooks/228-clip-zero-shot-convert-with-output.html>`__ | Zero-shot Image Classification with OpenAI CLIP and OpenVINO™ | |n228-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `228-clip-zero-shot-quantize <notebooks/228-clip-zero-shot-quantize-with-output.html>`__ | Post-Training Quantization of OpenAI CLIP model with NNCF | |n228-img2| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `229-distilbert-sequence-classification <notebooks/229-distilbert-sequence-classification-with-output.html>`__ |br| |n229| | Sequence classification with OpenVINO. | |n229-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `230-yolov8-optimization <notebooks/230-yolov8-optimization-with-output.html>`__ |br| |c230| | Optimize YOLOv8, using NNCF PTQ API. | |n230-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `231-instruct-pix2pix-image-editing <notebooks/231-instruct-pix2pix-image-editing-with-output.html>`__ | Image editing with InstructPix2Pix. | |n231-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `232-clip-language-saliency-map <notebooks/232-clip-language-saliency-map-with-output.html>`__ |br| |c232| | Language-visual saliency with CLIP and OpenVINO™. | |n232-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `233-blip-visual-language-processing <notebooks/233-blip-visual-language-processing-with-output.html>`__ | Visual question answering and image captioning using BLIP and OpenVINO™. | |n233-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `234-encodec-audio-compression <notebooks/234-encodec-audio-compression-with-output.html>`__ | Audio compression with EnCodec and OpenVINO™. | |n234-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `235-controlnet-stable-diffusion <notebooks/235-controlnet-stable-diffusion-with-output.html>`__ | A text-to-image generation with ControlNet Conditioning and OpenVINO™. | |n235-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `236-stable-diffusion-v2 <notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.html>`__ | Text-to-image generation and Infinite Zoom with Stable Diffusion v2 and OpenVINO™. | |n236-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `236-stable-diffusion-v2 <notebooks/236-stable-diffusion-v2-optimum-demo-comparison-with-output.html>`__ | Stable Diffusion v2.1 using Optimum-Intel OpenVINO and multiple Intel Hardware. | |n236-img4| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `236-stable-diffusion-v2 <notebooks/236-stable-diffusion-v2-optimum-demo-with-output.html>`__ | Stable Diffusion v2.1 using Optimum-Intel OpenVINO. | |n236-img4| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `236-stable-diffusion-v2 <notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.html>`__ | Stable Diffusion Text-to-Image Demo. | |n236-img4| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `236-stable-diffusion-v2 <notebooks/236-stable-diffusion-v2-text-to-image-with-output.html>`__ | Text-to-image generation with Stable Diffusion v2 and OpenVINO™. | |n236-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `237-segment-anything <notebooks/237-segment-anything-with-output.html>`__                                                      | Prompt-based object segmentation mask generation, using Segment Anything and OpenVINO™.                                                    | |n237-img1|                                        |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `238-deep-floyd-if <notebooks/238-deep-floyd-if-with-output.html>`__ | Text-to-image generation with DeepFloyd IF and OpenVINO™. | |n238-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `239-image-bind <notebooks/239-image-bind-convert-with-output.html>`__ | Binding multimodal data, using ImageBind and OpenVINO™. | |n239-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `240-dolly-2-instruction-following <notebooks/240-dolly-2-instruction-following-with-output.html>`__ | Instruction following using Databricks Dolly 2.0 and OpenVINO™. | |n240-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `241-riffusion-text-to-music <notebooks/241-riffusion-text-to-music-with-output.html>`__ | Text-to-Music generation using Riffusion and OpenVINO™. | |n241-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `242-freevc-voice-conversion <notebooks/242-freevc-voice-conversion-with-output.html>`__ | High-Quality Text-Free One-Shot Voice Conversion with FreeVC and OpenVINO™ | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `243-tflite-selfie-segmentation <notebooks/243-tflite-selfie-segmentation-with-output.html>`__ |br| |n243| |br| |c243| | Selfie Segmentation using TFLite and OpenVINO™. | |n243-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `244-named-entity-recognition <notebooks/244-named-entity-recognition-with-output.html>`__ |br| |c244| | Named entity recognition with OpenVINO™. | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `245-typo-detector <notebooks/245-typo-detector-with-output.html>`__ | English Typo Detection in sentences with OpenVINO™. | |n245-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `246-depth-estimation-videpth <notebooks/246-depth-estimation-videpth-with-output.html>`__ | Monocular Visual-Inertial Depth Estimation with OpenVINO™. | |n246-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `247-code-language-id <notebooks/247-code-language-id-with-output.html>`__ |br| |n247| | Identify the programming language used in an arbitrary code snippet. | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `248-stable-diffusion-xl <notebooks/248-stable-diffusion-xl-with-output.html>`__ | Image generation with Stable Diffusion XL and OpenVINO™. | |n248-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `249-oneformer-segmentation <notebooks/249-oneformer-segmentation-with-output.html>`__ | Universal segmentation with OneFormer and OpenVINO™. | |n249-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `250-music-generation <notebooks/250-music-generation-with-output.html>`__ |br| |n250| |br| |c250| | Controllable Music Generation with MusicGen and OpenVINO™. | |n250-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `251-tiny-sd-image-generation <notebooks/251-tiny-sd-image-generation-with-output.html>`__ |br| |c251| | Image Generation with Tiny-SD and OpenVINO™. | |n251-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `252-fastcomposer-image-generation <notebooks/252-fastcomposer-image-generation-with-output.html>`__ | Image generation with FastComposer and OpenVINO™. | |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
| `253-zeroscope-text2video <notebooks/253-zeroscope-text2video-with-output.html>`__                                           | Text-to-video synthesis with ZeroScope and OpenVINO™.                                                                                      | A panda eating bamboo on a rock. |br| |n253-img1| |
+-------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------+
Model Training
@ -501,6 +503,8 @@ Made with `contributors-img <https://contrib.rocks>`__.
:target: https://user-images.githubusercontent.com/76463150/260439306-81c81c8d-1f9c-41d0-b881-9491766def8e.png
.. |n251-img1| image:: https://user-images.githubusercontent.com/29454499/260904650-274fc2f9-24d2-46a3-ac3d-d660ec3c9a19.png
:target: https://user-images.githubusercontent.com/29454499/260904650-274fc2f9-24d2-46a3-ac3d-d660ec3c9a19.png
.. |n253-img1| image:: https://user-images.githubusercontent.com/76161256/261102399-500956d5-4aac-4710-a77c-4df34bcda3be.gif
:target: https://user-images.githubusercontent.com/76161256/261102399-500956d5-4aac-4710-a77c-4df34bcda3be.gif
.. |n301-img1| image:: https://user-images.githubusercontent.com/15709723/127779607-8fa34947-1c35-4260-8d04-981c41a2a2cc.png
:target: https://user-images.githubusercontent.com/15709723/127779607-8fa34947-1c35-4260-8d04-981c41a2a2cc.png
.. |n401-img1| image:: https://user-images.githubusercontent.com/4547501/141471665-82b28c86-cf64-4bfe-98b3-c314658f2d96.gif

View File

@ -20,7 +20,3 @@ tensorflow>=1.15.5,<2.14.0
six~=1.16.0
protobuf>=3.18.1,<4.0.0
onnx==1.13.1
# torchvision -> OpenVINO preprocessing converter
pillow>=9.0
torch>=1.13

View File

@ -1,5 +1,4 @@
-c ../../../../constraints.txt
torch
torch>=1.13
torchvision; platform_machine == 'arm64' and python_version >= '3.8'
torchvision; platform_machine != 'arm64'
pillow
pillow>=9.0

View File

@ -59,7 +59,7 @@ void regmodule_offline_transformations(py::module m) {
"apply_pot_transformations",
[](std::shared_ptr<ov::Model> model, std::string device) {
ov::pass::Manager manager;
manager.register_pass<ngraph::pass::POTTransformations>(std::move(device));
manager.register_pass<ov::pass::POTTransformations>(std::move(device));
manager.run_passes(model);
},
py::arg("model"),
@ -79,7 +79,7 @@ void regmodule_offline_transformations(py::module m) {
"apply_pruning_transformation",
[](std::shared_ptr<ov::Model> model) {
ov::pass::Manager manager;
manager.register_pass<ngraph::pass::Pruning>();
manager.register_pass<ov::pass::Pruning>();
manager.run_passes(model);
},
py::arg("model"));
@ -87,7 +87,7 @@ void regmodule_offline_transformations(py::module m) {
m_offline_transformations.def(
"apply_make_stateful_transformation",
[](std::shared_ptr<ov::Model> model, const std::map<std::string, std::string>& param_res_names) {
ngraph::pass::Manager manager;
ov::pass::Manager manager;
manager.register_pass<ov::pass::MakeStateful>(param_res_names);
manager.run_passes(model);
},
@ -108,8 +108,8 @@ void regmodule_offline_transformations(py::module m) {
"compress_quantize_weights_transformation",
[](std::shared_ptr<ov::Model> model) {
ov::pass::Manager manager;
manager.register_pass<ngraph::pass::CompressQuantizeWeights>();
manager.register_pass<ngraph::pass::ZeroPointOptimizer>();
manager.register_pass<ov::pass::CompressQuantizeWeights>();
manager.register_pass<ov::pass::ZeroPointOptimizer>();
manager.run_passes(model);
},
py::arg("model"));

View File

@ -6,7 +6,7 @@
#include <string>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "common_test_utils/ov_test_utils.hpp"
namespace py = pybind11;

View File

@ -5,74 +5,15 @@
import os
import pytest
from sys import platform
from openvino.runtime import Core
def get_model_with_template_extension():
core = Core()
ir = bytes(b"""<net name="Activation" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data shape="1,3,22,22" element_type="f32"/>
<output>
<port id="0" precision="FP32" names="in_data">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="Identity" version="extension">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="out_data">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>""")
if platform == "win32":
core.add_extension(library_path="openvino_template_extension.dll")
else:
core.add_extension(library_path="libopenvino_template_extension.so")
return core, core.read_model(ir)
def model_path(is_fp16=False):
base_path = os.path.dirname(__file__)
if is_fp16:
test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.xml")
test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.bin")
test_xml = os.path.join(base_path, "utils", "utils", "test_model_fp16.xml")
test_bin = os.path.join(base_path, "utils", "utils", "test_model_fp16.bin")
else:
test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.xml")
test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.bin")
test_xml = os.path.join(base_path, "utils", "utils", "test_model_fp32.xml")
test_bin = os.path.join(base_path, "utils", "utils", "test_model_fp32.bin")
return (test_xml, test_bin)

View File

@ -13,7 +13,7 @@ from openvino.runtime import Core, Model
from openvino.runtime.passes import Manager, Serialize, ConstantFolding, Version
from tests.test_graph.util import count_ops_of_type
from tests.test_utils.test_utils import create_filename_for_test, compare_models
from tests.utils.helpers import create_filename_for_test, compare_models
def create_model():
shape = [100, 100, 2]

View File

@ -2,17 +2,18 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
import numpy as np
from tests.conftest import model_path
from tests.test_utils.test_utils import get_relu_model, generate_image, generate_model_and_image, generate_relu_compiled_model
from openvino import Model, Shape, Core, Tensor
from tests.utils.helpers import (
get_relu_model,
generate_image,
generate_model_and_image,
generate_relu_compiled_model,
create_filename_for_test)
from openvino import Model, Shape, Core, Tensor, serialize
from openvino.runtime import ConstOutput
test_net_xml, test_net_bin = model_path()
def test_get_property(device):
model = get_relu_model([1, 3, 32, 32])
@ -23,9 +24,7 @@ def test_get_property(device):
def test_get_runtime_model(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
compiled_model = generate_relu_compiled_model(device)
runtime_model = compiled_model.get_runtime_model()
assert isinstance(runtime_model, Model)
@ -36,8 +35,7 @@ def test_export_import(device):
if "EXPORT_IMPORT" not in core.get_property(device, "OPTIMIZATION_CAPABILITIES"):
pytest.skip(f"{core.get_property(device, 'FULL_DEVICE_NAME')} plugin due-to export, import model API isn't implemented.")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
compiled_model = generate_relu_compiled_model(device)
user_stream = compiled_model.export_model()
@ -46,7 +44,7 @@ def test_export_import(device):
img = generate_image()
res = new_compiled.infer_new_request({"data": img})
assert np.argmax(res[new_compiled.outputs[0]]) == 9
assert np.argmax(res[new_compiled.outputs[0]]) == 531
def test_export_import_advanced(device):
@ -57,8 +55,7 @@ def test_export_import_advanced(device):
if "EXPORT_IMPORT" not in core.get_property(device, "OPTIMIZATION_CAPABILITIES"):
pytest.skip(f"{core.get_property(device, 'FULL_DEVICE_NAME')} plugin due-to export, import model API isn't implemented.")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
compiled_model = generate_relu_compiled_model(device)
user_stream = io.BytesIO()
@ -69,7 +66,7 @@ def test_export_import_advanced(device):
img = generate_image()
res = new_compiled.infer_new_request({"data": img})
assert np.argmax(res[new_compiled.outputs[0]]) == 9
assert np.argmax(res[new_compiled.outputs[0]]) == 531
@pytest.mark.parametrize("input_arguments", [[0], ["data"], []])
@ -222,11 +219,15 @@ def test_direct_infer(device, shared_flag):
assert np.array_equal(ref[compiled_model.outputs[0]], res[compiled_model.outputs[0]])
def test_compiled_model_after_core_destroyed(device):
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_compiled_model_after_core_destroyed(request, tmp_path, device):
core = Core()
with open(test_net_bin, "rb") as f:
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path)
model = get_relu_model()
serialize(model, xml_path, bin_path)
with open(bin_path, "rb") as f:
weights = f.read()
with open(test_net_xml, "rb") as f:
with open(xml_path, "rb") as f:
xml = f.read()
model = core.read_model(model=xml, weights=weights)
compiled = core.compile_model(model, device)
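The refactor above replaces the checked-in test IR files with models generated and serialized per test. A condensed sketch of the new pattern, assuming pytest's request and tmp_path fixtures:

from openvino import Core, serialize
from tests.utils.helpers import create_filename_for_test, get_relu_model

def test_example(request, tmp_path):
    # Unique file names derived from the test name, created under tmp_path.
    xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path)
    serialize(get_relu_model(), xml_path, bin_path)
    model = Core().read_model(model=xml_path, weights=bin_path)
    assert model is not None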

View File

@ -16,29 +16,21 @@ from openvino import (
CompiledModel,
tensor_from_file,
compile_model,
serialize,
)
from openvino.runtime import Extension
from tests.conftest import (
model_path,
model_onnx_path,
get_model_with_template_extension,
)
from tests.test_utils.test_utils import (
from tests.utils.helpers import (
generate_image,
generate_relu_compiled_model,
get_relu_model,
plugins_path,
compare_models,
create_filename_for_test,
get_model_with_template_extension,
)
test_net_xml, test_net_bin = model_path()
test_net_onnx = model_onnx_path()
def test_compact_api_xml():
img = generate_image()
@ -48,8 +40,12 @@ def test_compact_api_xml():
assert np.argmax(results[list(results)[0]]) == 531
def test_compact_api_xml_posix_path():
compiled_model = compile_model(Path(test_net_xml))
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_compact_api_xml_posix_path(request, tmp_path):
xml_path, _ = create_filename_for_test(request.node.name, tmp_path, True)
model = get_relu_model()
serialize(model, xml_path)
compiled_model = compile_model(Path(xml_path))
assert isinstance(compiled_model, CompiledModel)
@ -80,33 +76,51 @@ def test_core_class(device):
assert np.allclose(results[list(results)[0]], expected_output)
def test_compile_model(device):
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_compile_model(request, tmp_path, device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path)
relu_model = get_relu_model()
serialize(relu_model, xml_path, bin_path)
model = core.read_model(model=xml_path, weights=bin_path)
compiled_model = core.compile_model(model, device)
assert isinstance(compiled_model, CompiledModel)
def test_compile_model_without_device():
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_compile_model_without_device(request, tmp_path):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path)
relu_model = get_relu_model()
serialize(relu_model, xml_path, bin_path)
model = core.read_model(model=xml_path, weights=bin_path)
compiled_model = core.compile_model(model)
assert isinstance(compiled_model, CompiledModel)
def test_read_model_from_ir():
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_read_model_from_ir(request, tmp_path):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path)
relu_model = get_relu_model()
serialize(relu_model, xml_path, bin_path)
model = core.read_model(model=xml_path, weights=bin_path)
assert isinstance(model, Model)
model = core.read_model(model=test_net_xml)
model = core.read_model(model=xml_path)
assert isinstance(model, Model)
def test_read_model_from_tensor():
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_read_model_from_tensor(request, tmp_path):
core = Core()
model = open(test_net_xml).read()
tensor = tensor_from_file(test_net_bin)
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path, is_xml_path=True, is_bin_path=True)
relu_model = get_relu_model()
serialize(relu_model, xml_path, bin_path)
arr = np.ones(shape=(10,), dtype=np.int8)
arr.tofile(bin_path)
model = open(xml_path).read()
tensor = tensor_from_file(bin_path)
model = core.read_model(model=model, weights=tensor)
assert isinstance(model, Model)
@ -118,48 +132,49 @@ def test_read_model_with_wrong_input():
assert "Provided python object type <class 'int'> isn't supported as 'model' argument." in str(e.value)
def test_read_model_as_path():
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_read_model_as_path(request, tmp_path):
core = Core()
model = core.read_model(model=Path(test_net_xml), weights=Path(test_net_bin))
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path, True, True)
relu_model = get_relu_model()
serialize(relu_model, xml_path, bin_path)
model = core.read_model(model=Path(xml_path), weights=Path(bin_path))
assert isinstance(model, Model)
model = core.read_model(model=test_net_xml, weights=Path(test_net_bin))
model = core.read_model(model=xml_path, weights=Path(bin_path))
assert isinstance(model, Model)
model = core.read_model(model=Path(test_net_xml))
model = core.read_model(model=Path(xml_path))
assert isinstance(model, Model)
def test_read_model_from_onnx():
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_read_model_from_buffer(request, tmp_path):
core = Core()
model = core.read_model(model=test_net_onnx)
assert isinstance(model, Model)
def test_read_model_from_onnx_as_path():
core = Core()
model = core.read_model(model=Path(test_net_onnx))
assert isinstance(model, Model)
def test_read_model_from_buffer():
core = Core()
with open(test_net_bin, "rb") as f:
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path)
relu_model = get_relu_model()
serialize(relu_model, xml_path, bin_path)
with open(bin_path, "rb") as f:
weights = f.read()
with open(model_path()[0], "rb") as f:
with open(xml_path, "rb") as f:
xml = f.read()
model = core.read_model(model=xml, weights=weights)
assert isinstance(model, Model)
def test_model_from_buffer_valid():
# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
def test_model_from_buffer_valid(request, tmp_path):
core = Core()
with open(test_net_bin, "rb") as f:
xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path)
relu_model = get_relu_model()
serialize(relu_model, xml_path, bin_path)
with open(bin_path, "rb") as f:
weights = f.read()
with open(model_path()[0], "rb") as f:
with open(xml_path, "rb") as f:
xml = f.read()
model = core.read_model(model=xml, weights=weights)
ref_model = core.read_model(model=test_net_xml, weights=test_net_bin)
ref_model = core.read_model(model=xml_path, weights=bin_path)
assert compare_models(model, ref_model)
@ -255,7 +270,7 @@ def test_get_property_str():
def test_query_model(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
query_model = core.query_model(model=model, device_name=device)
ops_model = model.get_ordered_ops()
ops_func_names = [op.friendly_name for op in ops_model]
@ -326,7 +341,7 @@ def test_add_extension():
core = Core()
core.add_extension(EmptyExtension())
core.add_extension([EmptyExtension(), EmptyExtension()])
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
assert isinstance(model, Model)
@ -398,7 +413,7 @@ def test_read_model_from_buffer_no_weights():
def test_infer_new_request_return_type(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
img = generate_image()
compiled_model = core.compile_model(model, device)
res = compiled_model.infer_new_request({"data": img})
@ -406,6 +421,6 @@ def test_infer_new_request_return_type(device):
assert isinstance(arr, np.ndarray)
assert arr.itemsize == 4
assert arr.shape == (10,)
assert arr.shape == (3, 32, 32)
assert arr.dtype == "float32"
assert arr.nbytes == 40
assert arr.nbytes == 12288
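The updated numbers follow from the relu model's output: a float32 array of shape (3, 32, 32) has 3 * 32 * 32 = 3072 elements of 4 bytes each, i.e. 12288 bytes. A quick standalone check:

import numpy as np

arr = np.zeros((3, 32, 32), dtype=np.float32)
assert arr.itemsize == 4
assert arr.nbytes == 3 * 32 * 32 * 4  # 12288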

View File

@ -27,10 +27,7 @@ from openvino.runtime import ProfilingInfo
from openvino.preprocess import PrePostProcessor
from tests import skip_need_mock_op
from tests.conftest import model_path
from tests.test_utils.test_utils import generate_image, get_relu_model
test_net_xml, test_net_bin = model_path()
from tests.utils.helpers import generate_image, get_relu_model
def create_model_with_memory(input_shape, data_type):
@ -100,7 +97,10 @@ def abs_model_with_data(device, ov_type, numpy_dtype):
def test_get_profiling_info(device):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
param = ops.parameter([1, 3, 32, 32], np.float32, name="data")
softmax = ops.softmax(param, 1, name="fc_out")
model = Model([softmax], [param], "test_model")
core.set_property(device, {"PERF_COUNT": "YES"})
compiled_model = core.compile_model(model, device)
img = generate_image()
@ -164,22 +164,28 @@ def test_tensor_setter(device):
def test_set_tensors(device):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
param = ops.parameter([1, 3, 32, 32], np.float32, name="data")
softmax = ops.softmax(param, 1, name="fc_out")
res = ops.result(softmax, name="res")
res.output(0).get_tensor().set_names({"res"})
model = Model([res], [param], "test_model")
compiled_model = core.compile_model(model, device)
data1 = generate_image()
tensor1 = Tensor(data1)
data2 = np.ones(shape=(1, 10), dtype=np.float32)
data2 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
tensor2 = Tensor(data2)
data3 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
tensor3 = Tensor(data3)
data4 = np.zeros(shape=(1, 10), dtype=np.float32)
data4 = np.zeros(shape=(1, 3, 32, 32), dtype=np.float32)
tensor4 = Tensor(data4)
request = compiled_model.create_infer_request()
request.set_tensors({"data": tensor1, "fc_out": tensor2})
request.set_tensors({"data": tensor1, "res": tensor2})
t1 = request.get_tensor("data")
t2 = request.get_tensor("fc_out")
t2 = request.get_tensor("res")
assert np.allclose(tensor1.data, t1.data, atol=1e-2, rtol=1e-2)
assert np.allclose(tensor2.data, t2.data, atol=1e-2, rtol=1e-2)
@ -295,7 +301,7 @@ def test_inputs_outputs_property_and_method(device):
@pytest.mark.skip(reason="Sporadically failed. Need further investigation. Ticket - 95967")
def test_cancel(device):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
img = generate_image()
request = compiled_model.create_infer_request()
@ -316,7 +322,7 @@ def test_cancel(device):
@pytest.mark.parametrize("share_inputs", [True, False])
def test_start_async(device, share_inputs):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
img = generate_image()
jobs = 3
@ -482,7 +488,7 @@ def test_infer_queue(device, share_inputs):
jobs = 8
num_request = 4
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, num_request)
jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)]
@ -563,7 +569,7 @@ def test_infer_queue_fail_on_cpp_model(device):
jobs = 6
num_request = 4
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, num_request)
@ -585,7 +591,7 @@ def test_infer_queue_fail_on_py_model(device):
jobs = 1
num_request = 1
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, num_request)
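For reference, a minimal AsyncInferQueue round trip matching the tests above; the device name and the no-op callback are placeholders:

from openvino.runtime import AsyncInferQueue
from tests.utils.helpers import generate_relu_compiled_model, generate_image

compiled_model = generate_relu_compiled_model("CPU")  # assumed device
infer_queue = AsyncInferQueue(compiled_model, 2)
infer_queue.set_callback(lambda request, userdata: None)  # no-op callback
for i in range(2):
    infer_queue.start_async({"data": generate_image()}, userdata=i)
infer_queue.wait_all()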

View File

@ -4,19 +4,17 @@
import os
from ..conftest import model_path
from openvino.runtime import Input, RTMap
from openvino._pyopenvino import DescriptorTensor
import openvino.runtime.opset12 as ops
from openvino import Core, OVAny, Shape, PartialShape, Type
test_net_xml, test_net_bin = model_path()
from tests.utils.helpers import get_relu_model
def test_input_type(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
@ -25,7 +23,7 @@ def test_input_type(device):
def test_const_output_docs(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
@ -35,7 +33,7 @@ def test_const_output_docs(device):
def test_input_get_index(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
@ -44,7 +42,7 @@ def test_input_get_index(device):
def test_input_element_type(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
@ -53,36 +51,36 @@ def test_input_element_type(device):
def test_input_get_shape(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
assert str(input_node.get_shape()) == str(Shape([1, 10]))
assert str(input_node.get_shape()) == str(Shape([1, 3, 32, 32]))
def test_input_get_partial_shape(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
expected_partial_shape = PartialShape([1, 10])
expected_partial_shape = PartialShape([1, 3, 32, 32])
assert input_node.get_partial_shape() == expected_partial_shape
def test_input_get_source_output(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
name = input_node.get_source_output().get_node().get_friendly_name()
assert name == "fc_out"
assert name == "relu"
def test_input_get_tensor(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
@ -92,7 +90,7 @@ def test_input_get_tensor(device):
def test_input_get_rt_info(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
@ -102,7 +100,7 @@ def test_input_get_rt_info(device):
def test_input_rt_info(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
@ -126,7 +124,7 @@ def test_input_replace_source_output(device):
def test_input_update_rt_info(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]

View File

@ -9,7 +9,7 @@ import openvino.runtime as ov
from openvino import Tensor
from openvino.runtime.op import Constant
from tests.test_utils.test_utils import generate_image
from tests.utils.helpers import generate_image
@pytest.mark.parametrize(("cls", "cls_str"), [

View File

@ -23,7 +23,7 @@ from openvino import (
)
from openvino.runtime import Output
from tests.test_utils.test_utils import generate_add_model, create_filename_for_test
from tests.utils.helpers import generate_add_model, create_filename_for_test
def test_test_descriptor_tensor():

View File

@ -3,9 +3,9 @@
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
from copy import copy, deepcopy
from ..conftest import model_path
import openvino.runtime.opset12 as ops
from openvino import (
Shape,
@ -19,16 +19,12 @@ from openvino.runtime import (
Output,
RTMap,
)
import pytest
test_net_xml, test_net_bin = model_path()
from tests.utils.helpers import get_relu_model
def test_const_output_type(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
node = compiled_model.input(0)
assert isinstance(node, ConstOutput)
@ -36,7 +32,7 @@ def test_const_output_type(device):
def test_const_output_docs(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
node = compiled_model.input(0)
expected_string = "openvino.runtime.ConstOutput represents port/node output."
@ -45,7 +41,7 @@ def test_const_output_docs(device):
def test_const_output_get_index(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
node = compiled_model.input("data")
assert node.get_index() == 0
@ -54,7 +50,7 @@ def test_const_output_get_index(device):
def test_const_output_get_element_type(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
node = compiled_model.input("data")
assert node.get_element_type() == Type.f32
@ -63,7 +59,7 @@ def test_const_output_get_element_type(device):
def test_const_output_get_shape(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
node = compiled_model.input("data")
expected_shape = Shape([1, 3, 32, 32])
@ -73,7 +69,7 @@ def test_const_output_get_shape(device):
def test_const_output_get_partial_shape(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
exec_net = core.compile_model(model, device)
node = exec_net.input("data")
expected_partial_shape = PartialShape([1, 3, 32, 32])
@ -83,7 +79,7 @@ def test_const_output_get_partial_shape(device):
def test_const_output_get_target_inputs(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
outputs = compiled_model.outputs
for node in outputs:
@ -93,7 +89,7 @@ def test_const_output_get_target_inputs(device):
def test_const_output_get_names(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
input_name = "data"
node = compiled_model.input(input_name)
@ -107,7 +103,7 @@ def test_const_output_get_names(device):
def test_const_get_rt_info(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
output_node = compiled_model.output(0)
rt_info = output_node.get_rt_info()
@ -116,7 +112,7 @@ def test_const_get_rf_info(device):
def test_const_output_runtime_info(device):
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
model = get_relu_model()
compiled_model = core.compile_model(model, device)
input_name = "data"
output_node = compiled_model.input(input_name)

View File

@ -4,7 +4,6 @@
import os
from ..conftest import model_path
import openvino.runtime.opset12 as ops
from openvino import Type

View File

@ -442,7 +442,7 @@ def test_properties_capability_gpu():
def test_properties_hint_model():
# Temporary imports
from tests.test_utils.test_utils import generate_add_model
from tests.utils.helpers import generate_add_model
model = generate_add_model()

View File

@ -15,7 +15,7 @@ from openvino.helpers import pack_data, unpack_data
import pytest
from tests.test_utils.test_utils import generate_image, generate_relu_compiled_model
from tests.utils.helpers import generate_image, generate_relu_compiled_model
@pytest.mark.parametrize(("ov_type", "numpy_dtype"), [

View File

@ -9,7 +9,7 @@ from openvino.runtime.op import Parameter, Constant
from openvino.runtime.opset12 import add, multiply
import openvino as ov
from tests.test_utils.test_utils import create_filename_for_test
from tests.utils.helpers import create_filename_for_test
def make_constant(values, transposed):

View File

@ -19,7 +19,7 @@ from openvino._offline_transformations import (
from openvino.runtime import Model, PartialShape, Core, serialize, save_model
import openvino.runtime as ov
from tests.test_utils.test_utils import create_filename_for_test, compare_models, _compare_models
from tests.utils.helpers import create_filename_for_test, compare_models, _compare_models
def get_relu_model():

View File

@ -17,7 +17,7 @@ from openvino.runtime.passes import (
)
from tests.test_transformations.utils.utils import count_ops, get_relu_model
from tests.test_utils.test_utils import create_filename_for_test, compare_models
from tests.utils.helpers import create_filename_for_test, compare_models
def get_model():

View File

@ -6,13 +6,11 @@ import os
import pytest
import numpy as np
from tests.conftest import model_path
from tests.test_utils.test_utils import generate_relu_compiled_model
from tests.utils.helpers import generate_relu_compiled_model
from openvino.runtime import Model, ConstOutput, Type, Shape, Core, Tensor
from openvino.runtime.utils.data_helpers import _data_dispatch
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
def _get_value(value):

View File

@ -2,99 +2,19 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from typing import Tuple, Union, List
import os
import sys
import numpy as np
import pytest
from pathlib import Path
import openvino
import openvino.runtime.opset12 as ops
from openvino.runtime import Model, Core, Shape
from openvino.utils import deprecated
from tests.utils.helpers import compare_models, get_relu_model
def _compare_models(model_one: Model, model_two: Model, compare_names: bool = True) -> Tuple[bool, str]: # noqa: C901 the function is too complex
"""Function to compare OpenVINO model (ops names, types and shapes).
Note that the functions uses get_ordered_ops, so the topological order of ops should be also preserved.
:param model_one: The first model to compare.
:param model_two: The second model to compare.
:param compare_names: Flag to control friendly names checking. Default: True
:return: Tuple which consists of bool value (True if models are equal, otherwise False)
and string with the message to reuse for debug/testing purposes. The string value
is empty when models are equal.
"""
result = True
msg = ""
# Check friendly names of models
if compare_names and model_one.get_friendly_name() != model_two.get_friendly_name():
result = False
msg += "Friendly names of models are not equal "
msg += f"model_one: {model_one.get_friendly_name()}, model_two: {model_two.get_friendly_name()}.\n"
model_one_ops = model_one.get_ordered_ops()
model_two_ops = model_two.get_ordered_ops()
# Check overall number of operators
if len(model_one_ops) != len(model_two_ops):
result = False
msg += "Not equal number of ops "
msg += f"model_one: {len(model_one_ops)}, model_two: {len(model_two_ops)}.\n"
for i in range(len(model_one_ops)):
op_one_name = model_one_ops[i].get_friendly_name() # op from model_one
op_two_name = model_two_ops[i].get_friendly_name() # op from model_two
# Check friendly names
if (compare_names and op_one_name != op_two_name and model_one_ops[i].get_type_name() != "Constant"):
result = False
msg += "Not equal op names "
msg += f"model_one: {op_one_name}, "
msg += f"model_two: {op_two_name}.\n"
# Check output sizes
if model_one_ops[i].get_output_size() != model_two_ops[i].get_output_size():
result = False
msg += f"Not equal output sizes of {op_one_name} and {op_two_name}.\n"
for idx in range(model_one_ops[i].get_output_size()):
# Check partial shapes of outputs
op_one_partial_shape = model_one_ops[i].get_output_partial_shape(idx)
op_two_partial_shape = model_two_ops[i].get_output_partial_shape(idx)
if op_one_partial_shape != op_two_partial_shape:
result = False
msg += f"Not equal op partial shapes of {op_one_name} and {op_two_name} on {idx} index "
msg += f"model_one: {op_one_partial_shape}, "
msg += f"model_two: {op_two_partial_shape}.\n"
# Check element types of outputs
op_one_element_type = model_one_ops[i].get_output_element_type(idx)
op_two_element_type = model_two_ops[i].get_output_element_type(idx)
if op_one_element_type != op_two_element_type:
result = False
msg += f"Not equal output element types of {op_one_name} and {op_two_name} on {idx} index "
msg += f"model_one: {op_one_element_type}, "
msg += f"model_two: {op_two_element_type}.\n"
return result, msg
def compare_models(model_one: Model, model_two: Model, compare_names: bool = True):
"""Function to compare OpenVINO model (ops names, types and shapes).
:param model_one: The first model to compare.
:param model_two: The second model to compare.
:param compare_names: Flag to control friendly names checking. Default: True
:return: True if models are equal, otherwise raises an error with a report of mismatches.
"""
result, msg = _compare_models(model_one, model_two, compare_names=compare_names)
if not result:
raise RuntimeError(msg)
return result
def test_compare_functions():
try:
from openvino.test_utils import compare_functions
model = get_relu_model()
status, _ = compare_functions(model, model)
assert status
except RuntimeError:
print("openvino.test_utils.compare_functions is not available") # noqa: T201
def test_compare_models_pass():
@ -113,63 +33,6 @@ def test_compare_models_fail():
assert "Not equal op names model_one: data, model_two: ABC." in str(e.value)
def plugins_path(device, lib_path):
plugin_xml = f"""<ie>
<plugins>
<plugin location="{lib_path}" name="{device}">
</plugin>
</plugins>
</ie>"""
with open("plugin_path.xml", "w") as f:
f.write(plugin_xml)
plugins_paths = os.path.join(os.getcwd(), "plugin_path.xml")
return plugins_paths
def generate_image(shape: Tuple = (1, 3, 32, 32), dtype: Union[str, np.dtype] = "float32") -> np.ndarray:
np.random.seed(42)
return np.random.rand(*shape).astype(dtype)
def get_relu_model(input_shape: List[int] = None, input_dtype=np.float32) -> openvino.runtime.Model:
if input_shape is None:
input_shape = [1, 3, 32, 32]
param = ops.parameter(input_shape, input_dtype, name="data")
relu = ops.relu(param, name="relu")
model = Model([relu], [param], "test_model")
model.get_ordered_ops()[2].friendly_name = "friendly"
assert model is not None
return model
def generate_relu_compiled_model(
device,
input_shape: List[int] = None,
input_dtype=np.float32,
) -> openvino.runtime.CompiledModel:
if input_shape is None:
input_shape = [1, 3, 32, 32]
model = get_relu_model(input_shape, input_dtype)
core = Core()
return core.compile_model(model, device, {})
def generate_model_and_image(device, input_shape: List[int] = None):
if input_shape is None:
input_shape = [1, 3, 32, 32]
return (generate_relu_compiled_model(device, input_shape), generate_image(input_shape))
def generate_add_model() -> openvino._pyopenvino.Model:
param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
add = ops.add(param1, param2)
return Model(add, [param1, param2], "TestFunction")
def test_deprecation_decorator():
@deprecated()
def deprecated_function1(param1, param2=None):
@ -195,21 +58,3 @@ def test_deprecation_decorator():
deprecated_function3()
with pytest.warns(DeprecationWarning, match="deprecated_function4 is deprecated and will be removed in version 2025.4. Use another function instead"):
deprecated_function4()
def create_filename_for_test(test_name, tmp_path, is_xml_path=False, is_bin_path=False):
"""Return a tuple with automatically generated paths for xml and bin files.
:param test_name: Name used in generating.
:param is_xml_path: True if xml file should be pathlib.Path object, otherwise return string.
:param is_bin_path: True if bin file should be pathlib.Path object, otherwise return string.
:return: Tuple with two objects representing xml and bin files.
"""
python_version = str(sys.version_info.major) + "_" + str(sys.version_info.minor)
filename = test_name.replace("test_", "").replace("[", "_").replace("]", "_")
filename = filename + "_" + python_version
path_to_xml = tmp_path / Path(filename + ".xml")
path_to_bin = tmp_path / Path(filename + ".bin")
_xml = path_to_xml if is_xml_path else str(path_to_xml)
_bin = path_to_bin if is_bin_path else str(path_to_bin)
return (_xml, _bin)

View File

@ -0,0 +1,227 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from typing import Tuple, Union, List
import os
import sys
import numpy as np
from sys import platform
from pathlib import Path
import openvino
import openvino.runtime.opset12 as ops
from openvino.runtime import Model, Core, Shape
def _compare_models(model_one: Model, model_two: Model, compare_names: bool = True) -> Tuple[bool, str]: # noqa: C901 the function is too complex
"""Function to compare OpenVINO model (ops names, types and shapes).
Note that the functions uses get_ordered_ops, so the topological order of ops should be also preserved.
:param model_one: The first model to compare.
:param model_two: The second model to compare.
:param compare_names: Flag to control friendly names checking. Default: True
:return: Tuple which consists of bool value (True if models are equal, otherwise False)
and string with the message to reuse for debug/testing purposes. The string value
is empty when models are equal.
"""
result = True
msg = ""
# Check friendly names of models
if compare_names and model_one.get_friendly_name() != model_two.get_friendly_name():
result = False
msg += "Friendly names of models are not equal "
msg += f"model_one: {model_one.get_friendly_name()}, model_two: {model_two.get_friendly_name()}.\n"
model_one_ops = model_one.get_ordered_ops()
model_two_ops = model_two.get_ordered_ops()
# Check overall number of operators
if len(model_one_ops) != len(model_two_ops):
result = False
msg += "Not equal number of ops "
msg += f"model_one: {len(model_one_ops)}, model_two: {len(model_two_ops)}.\n"
for i in range(len(model_one_ops)):
op_one_name = model_one_ops[i].get_friendly_name() # op from model_one
op_two_name = model_two_ops[i].get_friendly_name() # op from model_two
# Check friendly names
if (compare_names and op_one_name != op_two_name and model_one_ops[i].get_type_name() != "Constant"):
result = False
msg += "Not equal op names "
msg += f"model_one: {op_one_name}, "
msg += f"model_two: {op_two_name}.\n"
# Check output sizes
if model_one_ops[i].get_output_size() != model_two_ops[i].get_output_size():
result = False
msg += f"Not equal output sizes of {op_one_name} and {op_two_name}.\n"
for idx in range(model_one_ops[i].get_output_size()):
# Check partial shapes of outputs
op_one_partial_shape = model_one_ops[i].get_output_partial_shape(idx)
op_two_partial_shape = model_two_ops[i].get_output_partial_shape(idx)
if op_one_partial_shape != op_two_partial_shape:
result = False
msg += f"Not equal op partial shapes of {op_one_name} and {op_two_name} on {idx} index "
msg += f"model_one: {op_one_partial_shape}, "
msg += f"model_two: {op_two_partial_shape}.\n"
# Check element types of outputs
op_one_element_type = model_one_ops[i].get_output_element_type(idx)
op_two_element_type = model_two_ops[i].get_output_element_type(idx)
if op_one_element_type != op_two_element_type:
result = False
msg += f"Not equal output element types of {op_one_name} and {op_two_name} on {idx} index "
msg += f"model_one: {op_one_element_type}, "
msg += f"model_two: {op_two_element_type}.\n"
return result, msg
def compare_models(model_one: Model, model_two: Model, compare_names: bool = True):
"""Function to compare OpenVINO model (ops names, types and shapes).
:param model_one: The first model to compare.
:param model_two: The second model to compare.
:param compare_names: Flag to control friendly names checking. Default: True
:return: True if models are equal, otherwise raises an error with a report of mismatches.
"""
result, msg = _compare_models(model_one, model_two, compare_names=compare_names)
if not result:
raise RuntimeError(msg)
return result
def plugins_path(device, lib_path):
plugin_xml = f"""<ie>
<plugins>
<plugin location="{lib_path}" name="{device}">
</plugin>
</plugins>
</ie>"""
with open("plugin_path.xml", "w") as f:
f.write(plugin_xml)
plugins_paths = os.path.join(os.getcwd(), "plugin_path.xml")
return plugins_paths
def generate_image(shape: Tuple = (1, 3, 32, 32), dtype: Union[str, np.dtype] = "float32") -> np.ndarray:
np.random.seed(42)
return np.random.rand(*shape).astype(dtype)
def get_model_with_template_extension():
core = Core()
ir = bytes(b"""<net name="Activation" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data shape="1,3,22,22" element_type="f32"/>
<output>
<port id="0" precision="FP32" names="in_data">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="Identity" version="extension">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="out_data">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>""")
if platform == "win32":
core.add_extension(library_path="openvino_template_extension.dll")
else:
core.add_extension(library_path="libopenvino_template_extension.so")
return core, core.read_model(ir)
def get_relu_model(input_shape: List[int] = None, input_dtype=np.float32) -> openvino.runtime.Model:
if input_shape is None:
input_shape = [1, 3, 32, 32]
param = ops.parameter(input_shape, input_dtype, name="data")
relu = ops.relu(param, name="relu")
model = Model([relu], [param], "test_model")
model.get_ordered_ops()[2].friendly_name = "friendly"
assert model is not None
return model
def generate_relu_compiled_model(
device,
input_shape: List[int] = None,
input_dtype=np.float32,
) -> openvino.runtime.CompiledModel:
if input_shape is None:
input_shape = [1, 3, 32, 32]
model = get_relu_model(input_shape, input_dtype)
core = Core()
return core.compile_model(model, device, {})
def generate_model_and_image(device, input_shape: List[int] = None):
if input_shape is None:
input_shape = [1, 3, 32, 32]
return (generate_relu_compiled_model(device, input_shape), generate_image(input_shape))
def generate_add_model() -> openvino._pyopenvino.Model:
param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
add = ops.add(param1, param2)
return Model(add, [param1, param2], "TestFunction")
def create_filename_for_test(test_name, tmp_path, is_xml_path=False, is_bin_path=False):
"""Return a tuple with automatically generated paths for xml and bin files.
:param test_name: Name used in generating.
:param is_xml_path: True if xml file should be pathlib.Path object, otherwise return string.
:param is_bin_path: True if bin file should be pathlib.Path object, otherwise return string.
:return: Tuple with two objects representing xml and bin files.
"""
python_version = str(sys.version_info.major) + "_" + str(sys.version_info.minor)
filename = test_name.replace("test_", "").replace("[", "_").replace("]", "_")
filename = filename + "_" + python_version
path_to_xml = tmp_path / Path(filename + ".xml")
path_to_bin = tmp_path / Path(filename + ".bin")
_xml = path_to_xml if is_xml_path else str(path_to_xml)
_bin = path_to_bin if is_bin_path else str(path_to_bin)
return (_xml, _bin)
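Taken together, a short usage sketch of the relocated helpers (hypothetical, for illustration only):

from tests.utils.helpers import (
    compare_models,
    generate_add_model,
    generate_image,
    get_relu_model,
)

model = get_relu_model()
assert compare_models(model, get_relu_model())  # identical models compare equal
img = generate_image()            # deterministic random NCHW tensor, seed 42
add_model = generate_add_model()  # two-input add graph for property tests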

View File

@ -29,7 +29,7 @@ class LP_TRANSFORMATIONS_API AlignQuantizationParameters;
* [AlignQuantizationParameters](@ref openvino_docs_OV_UG_lpt_AlignQuantizationParameters) page
* in the Inference Engine Developer Guide.
*/
class ngraph::pass::low_precision::AlignQuantizationParameters : public ngraph::pass::FunctionPass {
class ngraph::pass::low_precision::AlignQuantizationParameters : public ov::pass::ModelPass {
public:
OPENVINO_RTTI("AlignQuantizationParameters", "0");
AlignQuantizationParameters(const std::vector<ngraph::element::Type> defaultPrecisions = ngraph::pass::low_precision::precision_set::int8_support);

View File

@ -17,7 +17,7 @@ class LP_TRANSFORMATIONS_API BaseMatcherPass;
} // namespace pass
} // namespace ngraph
class LP_TRANSFORMATIONS_API ngraph::pass::low_precision::BaseMatcherPass : public ngraph::pass::MatcherPass {
class LP_TRANSFORMATIONS_API ngraph::pass::low_precision::BaseMatcherPass : public ov::pass::MatcherPass {
public:
BaseMatcherPass(const AttributeParameters& params = AttributeParameters());
AttributeParameters params;

View File

@ -38,7 +38,7 @@ class CreatePrecisionsDependentAttribute;
* in the Inference Engine Developer Guide.
*/
template <typename AttributeType, typename OperationType>
class ngraph::pass::low_precision::CreatePrecisionsDependentAttribute : public ngraph::pass::MatcherPass {
class ngraph::pass::low_precision::CreatePrecisionsDependentAttribute : public ov::pass::MatcherPass {
public:
CreatePrecisionsDependentAttribute() {
auto operation = pattern::wrap_type<OperationType>();

View File

@ -28,7 +28,7 @@ class LP_TRANSFORMATIONS_API MarkupCanBeQuantized;
* [MarkupCanBeQuantized](@ref openvino_docs_OV_UG_lpt_MarkupCanBeQuantized) page
* in the Inference Engine Developer Guide.
*/
class ngraph::pass::low_precision::MarkupCanBeQuantized : public ngraph::pass::FunctionPass {
class ngraph::pass::low_precision::MarkupCanBeQuantized : public ov::pass::ModelPass {
public:
OPENVINO_RTTI("MarkupCanBeQuantized", "0");
MarkupCanBeQuantized(const std::vector<ngraph::element::Type> defaultPrecisions = { ngraph::element::u8, ngraph::element::i8 });

View File

@ -32,7 +32,7 @@ class LP_TRANSFORMATIONS_API MarkupQuantizationGranularity;
* [MarkupPerTensorQuantization](@ref openvino_docs_OV_UG_lpt_MarkupPerTensorQuantization) page
* in the Inference Engine Developer Guide.
*/
class ngraph::pass::low_precision::MarkupQuantizationGranularity : public ngraph::pass::FunctionPass {
class ngraph::pass::low_precision::MarkupQuantizationGranularity : public ov::pass::ModelPass {
public:
class PerTensorQuantization {
public:

View File

@ -36,7 +36,7 @@ class LP_TRANSFORMATIONS_API PropagateSharedValue;
* in the Inference Engine Developer Guide.
*/
template <class AttributeType>
class ngraph::pass::low_precision::PropagateSharedValue : public ngraph::pass::FunctionPass {
class ngraph::pass::low_precision::PropagateSharedValue : public ov::pass::ModelPass {
public:
bool run_on_model(const std::shared_ptr<ngraph::Function>& f) override {
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "PropagateSharedValue");

View File

@ -36,7 +36,7 @@ class PropagateThroughPrecisionPreserved;
* in the Inference Engine Developer Guide.
*/
template <typename AttributeType>
class ngraph::pass::low_precision::PropagateThroughPrecisionPreserved : public ngraph::pass::MatcherPass {
class ngraph::pass::low_precision::PropagateThroughPrecisionPreserved : public ov::pass::MatcherPass {
public:
PropagateThroughPrecisionPreserved(const std::vector<ngraph::element::Type>& defaultPrecisions = precision_set::int8_support) {
ngraph::graph_rewrite_callback callback = [&](pattern::Matcher& m) {

View File

@ -35,7 +35,7 @@ class PropagateToInput;
* in the Inference Engine Developer Guide.
*/
template <typename AttributeType>
class ngraph::pass::low_precision::PropagateToInput : public ngraph::pass::MatcherPass {
class ngraph::pass::low_precision::PropagateToInput : public ov::pass::MatcherPass {
public:
PropagateToInput(const std::vector<ngraph::element::Type>& defaultPrecisions = { ngraph::element::u8, ngraph::element::i8 }) {
ngraph::graph_rewrite_callback callback = [&](pattern::Matcher& m) {

View File

@ -28,7 +28,7 @@ class LP_TRANSFORMATIONS_API PullReshapeThroughDequantization;
* [PullReshapeThroughDequantization](@ref openvino_docs_OV_UG_lpt_PullReshapeThroughDequantization) page
* in the Inference Engine Developer Guide.
*/
class ngraph::pass::low_precision::PullReshapeThroughDequantization : public ngraph::pass::MatcherPass {
class ngraph::pass::low_precision::PullReshapeThroughDequantization : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("PullReshapeThroughDequantization", "0");
PullReshapeThroughDequantization(const std::vector<ngraph::element::Type>& inputPrecisions = {});

View File

@ -28,7 +28,7 @@ class LP_TRANSFORMATIONS_API PullTransposeThroughDequantization;
* [PullTransposeThroughDequantization](@ref openvino_docs_OV_UG_lpt_PullTransposeThroughDequantization) page
* in the Inference Engine Developer Guide.
*/
class ngraph::pass::low_precision::PullTransposeThroughDequantization : public ngraph::pass::MatcherPass {
class ngraph::pass::low_precision::PullTransposeThroughDequantization : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("PullTransposeThroughDequantization", "0");
PullTransposeThroughDequantization(const std::vector<ngraph::element::Type>& inputPrecisions = {});

View File

@ -22,7 +22,7 @@ bool ngraph::pass::low_precision::AlignQuantizationIntervals::run_on_model(const
RUN_ON_FUNCTION_SCOPE(AlignQuantizationIntervals);
ov::pass::Manager manager;
manager.set_per_pass_validation(false);
std::shared_ptr<ngraph::pass::GraphRewrite> intervalsAlignment = manager.register_pass<ngraph::pass::GraphRewrite>();
std::shared_ptr<ov::pass::GraphRewrite> intervalsAlignment = manager.register_pass<ov::pass::GraphRewrite>();
intervalsAlignment->add_matcher<low_precision::CreateAttribute<IntervalsAlignmentAttribute, opset1::FakeQuantize>>(
AttributeParameters(ngraph::element::f32, defaultPrecisions));
intervalsAlignment->add_matcher<low_precision::PropagateThroughPrecisionPreserved<IntervalsAlignmentAttribute>>(defaultPrecisions);

View File

@ -23,7 +23,7 @@ bool ngraph::pass::low_precision::AlignQuantizationParameters::run_on_model(cons
RUN_ON_FUNCTION_SCOPE(AlignQuantizationParameters);
ov::pass::Manager manager;
manager.set_per_pass_validation(false);
std::shared_ptr<ngraph::pass::GraphRewrite> propagation = manager.register_pass<ngraph::pass::GraphRewrite>();
std::shared_ptr<ov::pass::GraphRewrite> propagation = manager.register_pass<ov::pass::GraphRewrite>();
propagation->add_matcher<low_precision::CreateAttribute<QuantizationAlignmentAttribute>>();
propagation->add_matcher<low_precision::PropagateThroughPrecisionPreserved<QuantizationAlignmentAttribute>>();
propagation->add_matcher<low_precision::UpdateSharedPrecisionPreserved<QuantizationAlignmentAttribute, QuantizationGranularityAttribute>>();

View File

@ -97,7 +97,7 @@ ngraph::pass::low_precision::LowPrecision::LowPrecision(
using namespace ngraph::pass::low_precision;
template <typename BaseOp>
void make_matcher_type_relaxed(ngraph::pass::GraphRewrite* transformation) {
void make_matcher_type_relaxed(ov::pass::GraphRewrite* transformation) {
MATCHER_SCOPE(TypeRelaxedReplacer);
using namespace ngraph;
@ -187,7 +187,7 @@ MarkupOptimizations::MarkupOptimizations(
bool ngraph::pass::low_precision::MarkupOptimizations::run_on_model(const std::shared_ptr<ngraph::Function>& f) {
RUN_ON_FUNCTION_SCOPE(MarkupOptimizations);
ngraph::pass::Manager markup(get_pass_config());
ov::pass::Manager markup(get_pass_config());
markup.set_per_pass_validation(false);
markup.register_pass<low_precision::MarkupCanBeQuantized>(params.defaultPrecisions);
if (!precisionRestrictions.empty()) {
@ -214,9 +214,9 @@ bool ngraph::pass::low_precision::LowPrecision::run_on_model(const std::shared_p
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "LowPrecision");
auto passConfig = get_pass_config();
ngraph::pass::Manager manager(passConfig);
ov::pass::Manager manager(passConfig);
auto prerequisites = manager.register_pass<ngraph::pass::GraphRewrite>();
auto prerequisites = manager.register_pass<ov::pass::GraphRewrite>();
const std::vector<ngraph::element::Type> supportedTypes = {ngraph::element::i8, ngraph::element::u8};
ADD_MATCHER(prerequisites, PullReshapeThroughDequantization, supportedTypes)
ADD_MATCHER(prerequisites, PullTransposeThroughDequantization, supportedTypes)
@@ -232,7 +232,7 @@ bool ngraph::pass::low_precision::LowPrecision::run_on_model(const std::shared_p
         quantizationRestrictions,
         attributeParams);
-    std::shared_ptr<ngraph::pass::GraphRewrite> common = manager.register_pass<ngraph::pass::GraphRewrite>();
+    std::shared_ptr<ov::pass::GraphRewrite> common = manager.register_pass<ov::pass::GraphRewrite>();
     ADD_MATCHER(common, AddTransformation, params)
     ADD_MATCHER(common, AssignAndReadValueTransformation, f, params)
@@ -269,7 +269,7 @@ bool ngraph::pass::low_precision::LowPrecision::run_on_model(const std::shared_p
     ADD_MATCHER(common, UnsqueezeTransformation, params)
     ADD_MATCHER(common, VariadicSplitTransformation, params)
-    std::shared_ptr<ngraph::pass::GraphRewrite> cleanup = manager.register_pass<ngraph::pass::GraphRewrite>();
+    std::shared_ptr<ov::pass::GraphRewrite> cleanup = manager.register_pass<ov::pass::GraphRewrite>();
     ADD_MATCHER(cleanup, EliminateFakeQuantizeTransformation, params)
     ADD_MATCHER(cleanup, FoldConvertTransformation, params)
     ADD_MATCHER(cleanup, FuseConvertTransformation, params)
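
Taken together, these hunks retype only the manager and its GraphRewrite groups while the pass list stays as-is. Below is a condensed, self-contained sketch of the resulting shape; the function name is illustrative and the matcher calls are elided, so this is a sketch under those assumptions, not the real implementation.

// Minimal sketch of the migrated pipeline structure, assuming only the
// manager/GraphRewrite types change namespace, as in the hunks above.
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/manager.hpp"

bool run_lpt_like_pipeline(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;  // was ngraph::pass::Manager
    auto prerequisites = manager.register_pass<ov::pass::GraphRewrite>();  // was ngraph::pass::GraphRewrite
    auto common = manager.register_pass<ov::pass::GraphRewrite>();
    auto cleanup = manager.register_pass<ov::pass::GraphRewrite>();
    // ADD_MATCHER(...) calls would populate each GraphRewrite group, as in the diff.
    manager.run_passes(model);
    return true;
}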

@@ -21,7 +21,7 @@ bool ngraph::pass::low_precision::MarkupAvgPoolPrecisionPreserved::run_on_model(
     RUN_ON_FUNCTION_SCOPE(MarkupAvgPoolPrecisionPreserved);
     ov::pass::Manager manager;
     manager.set_per_pass_validation(false);
-    std::shared_ptr<ngraph::pass::GraphRewrite> markupAvgPoolPrecision = manager.register_pass<ngraph::pass::GraphRewrite>();
+    std::shared_ptr<ov::pass::GraphRewrite> markupAvgPoolPrecision = manager.register_pass<ov::pass::GraphRewrite>();
     markupAvgPoolPrecision->add_matcher<low_precision::CreatePrecisionsDependentAttribute<AvgPoolPrecisionPreservedAttribute, opset1::AvgPool>>();
     markupAvgPoolPrecision->add_matcher<low_precision::PropagateThroughPrecisionPreserved<AvgPoolPrecisionPreservedAttribute>>(defaultPrecisions);
     markupAvgPoolPrecision->add_matcher<low_precision::UpdateSharedPrecisionPreserved<AvgPoolPrecisionPreservedAttribute>>(defaultPrecisions);

@@ -23,7 +23,7 @@ bool ngraph::pass::low_precision::PropagatePrecisions::run_on_model(const std::s
     RUN_ON_FUNCTION_SCOPE(PropagatePrecisions);
     ov::pass::Manager manager;
     manager.set_per_pass_validation(false);
-    std::shared_ptr<ngraph::pass::GraphRewrite> precisionsPropagation = manager.register_pass<ngraph::pass::GraphRewrite>();
+    std::shared_ptr<ov::pass::GraphRewrite> precisionsPropagation = manager.register_pass<ov::pass::GraphRewrite>();
     precisionsPropagation->add_matcher<low_precision::CreateAttribute<PrecisionsAttribute, opset1::FakeQuantize>>(params, AttributeSource::OutputPort);
     precisionsPropagation->add_matcher<low_precision::PropagateThroughPrecisionPreserved<PrecisionsAttribute>>(params.defaultPrecisions);
     precisionsPropagation->add_matcher<low_precision::PropagateToInput<PrecisionsAttribute>>(params.defaultPrecisions);

@@ -12,7 +12,7 @@
 #include <transformations/utils/utils.hpp>
 #include <utility>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/add_function.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"

@@ -13,7 +13,7 @@
 #include <transformations/init_node_info.hpp>
 #include <transformations/utils/utils.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/align_concat_quantization_parameters_function.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"

@@ -11,7 +11,7 @@
 #include <transformations/init_node_info.hpp>
 #include <low_precision/assign_and_read_value.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"
 #include "lpt_ngraph_functions/assign_and_read_value_function.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -11,7 +11,7 @@
 #include <transformations/init_node_info.hpp>
 #include <transformations/utils/utils.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/avg_pool_function.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"

@@ -11,7 +11,7 @@
 #include <transformations/init_node_info.hpp>
 #include <transformations/utils/utils.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/avg_pool_function.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"

@@ -9,7 +9,7 @@
 #include <string>
 #include <transformations/init_node_info.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/clamp_function.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"

@@ -14,7 +14,7 @@
 #include <transformations/utils/utils.hpp>
 #include <low_precision/network_helper.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/compose_fake_quantize_function.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"

@@ -16,7 +16,7 @@
 #include <low_precision/fake_quantize_decomposition.hpp>
 #include <low_precision/max_pool.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -11,7 +11,7 @@
 #include <low_precision/concat.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/builders.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -15,7 +15,7 @@
 #include <low_precision/fake_quantize_decomposition.hpp>
 #include <low_precision/max_pool.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -17,7 +17,7 @@
 #include <sstream>
 #include <vector>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/common/builders.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"

@@ -17,7 +17,7 @@
 #include <low_precision/max_pool.hpp>
 #include <low_precision/fake_quantize_decomposition.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -15,7 +15,7 @@
 #include <low_precision/reshape.hpp>
 #include <low_precision/concat.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -16,7 +16,7 @@
 #include <low_precision/fake_quantize_decomposition.hpp>
 #include <low_precision/max_pool.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -17,7 +17,7 @@
 #include <low_precision/max_pool.hpp>
 #include <low_precision/interpolate.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -23,7 +23,7 @@
 #include <low_precision/convolution.hpp>
 #include <low_precision/fake_quantize_decomposition.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -24,7 +24,7 @@
 #include <low_precision/markup_can_be_quantized.hpp>
 #include <low_precision/markup_quantization_granularity.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/builders.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"

@@ -17,7 +17,7 @@
 #include <low_precision/max_pool.hpp>
 #include <low_precision/reshape.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -23,7 +23,7 @@
 #include <low_precision/markup_quantization_granularity.hpp>
 #include "low_precision/common/precisions_restriction.hpp"
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -17,7 +17,7 @@
 #include <low_precision/max_pool.hpp>
 #include <low_precision/strided_slice.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/concat_function.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -13,7 +13,7 @@
 #include <transformations/utils/utils.hpp>
 #include <low_precision/convert_subtract_constant.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp"

@@ -15,7 +15,7 @@
 #include <low_precision/convolution_backprop_data.hpp>
 #include <low_precision/network_helper.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/convolution_backprop_data_function.hpp"

@@ -14,7 +14,7 @@
 #include <transformations/init_node_info.hpp>
 #include <low_precision/convolution.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp"

@@ -14,7 +14,7 @@
 #include <transformations/init_node_info.hpp>
 #include <low_precision/convolution.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/convolution_function.hpp"

@@ -12,7 +12,7 @@
 #include <string>
 #include <transformations/init_node_info.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"

@@ -14,7 +14,7 @@
 #include <transformations/init_node_info.hpp>
 #include "low_precision/depth_to_space.hpp"
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/depth_to_space_function.hpp"

@@ -14,7 +14,7 @@
 #include <transformations/utils/utils.hpp>
 #include <transformations/init_node_info.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include <low_precision/add.hpp>

@@ -5,7 +5,7 @@
 #include <memory>
 #include <sstream>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "low_precision/eliminate_fake_quantize.hpp"
 #include "low_precision/fake_quantize.hpp"

@@ -14,7 +14,7 @@
 #include <low_precision/convolution.hpp>
 #include <low_precision/fake_quantize_decomposition.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp"
 #include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp"

@@ -15,7 +15,7 @@
 #include <low_precision/fold_fake_quantize.hpp>
 #include <openvino/pass/constant_folding.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/fake_quantize_on_weights_and_unsupported_child_function.hpp"

@@ -15,7 +15,7 @@
 #include <low_precision/fake_quantize_decomposition.hpp>
 #include <low_precision/max_pool.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp"
 #include "simple_low_precision_transformer.hpp"

@@ -13,7 +13,7 @@
 #include <string>
 #include <vector>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"
 #include "lpt_ngraph_functions/fake_quantize_function.hpp"

@@ -14,7 +14,7 @@
 #include <low_precision/convolution.hpp>
 #include <low_precision/fake_quantize_decomposition.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp"

@@ -12,7 +12,7 @@
 #include <gtest/gtest.h>
 #include <low_precision/fake_quantize.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 using namespace testing;

@@ -14,7 +14,7 @@
 #include <transformations/utils/utils.hpp>
 #include <transformations/init_node_info.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include <low_precision/fold_convert.hpp>

@@ -10,7 +10,7 @@
 #include <string>
 #include <vector>
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "layer_transformation.hpp"
 #include "low_precision/network_helper.hpp"
 #include "lpt_ngraph_functions/common/builders.hpp"

@@ -13,7 +13,7 @@
 #include <transformations/init_node_info.hpp>
 #include "low_precision/fuse_convert.hpp"
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/ov_test_utils.hpp"
 #include "lpt_ngraph_functions/common/dequantization_operations.hpp"
 #include "simple_low_precision_transformer.hpp"
 #include "lpt_ngraph_functions/fuse_convert_function.hpp"

Some files were not shown because too many files have changed in this diff.