Compare commits
60 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e662b1a330 | ||
|
|
0aa5a8f704 | ||
|
|
54f6f11186 | ||
|
|
ea482d8391 | ||
|
|
a93f320a48 | ||
|
|
26e9c69440 | ||
|
|
4727efdb3c | ||
|
|
b7415f5c3b | ||
|
|
0262662050 | ||
|
|
576b99fee9 | ||
|
|
4e790d7b46 | ||
|
|
b0394cc3e4 | ||
|
|
18cb7c94c1 | ||
|
|
064364eb5e | ||
|
|
5ded6fb699 | ||
|
|
eabf199c3a | ||
|
|
0e0d166746 | ||
|
|
a6351294e7 | ||
|
|
cac7e2e1c4 | ||
|
|
13e674b1f8 | ||
|
|
a55d1c21ee | ||
|
|
91a4f73971 | ||
|
|
84a3aab115 | ||
|
|
4ddeecc031 | ||
|
|
9c10e33fc7 | ||
|
|
c32b9a0cd5 | ||
|
|
c32eef361b | ||
|
|
8d54bdd4d5 | ||
|
|
64395f0d5e | ||
|
|
9562161f76 | ||
|
|
cb59f057a0 | ||
|
|
28948502a9 | ||
|
|
34748ae3b5 | ||
|
|
06eb4afd41 | ||
|
|
967d74ade6 | ||
|
|
5ae4e2bb2d | ||
|
|
22f6a3bcc0 | ||
|
|
e842453865 | ||
|
|
2abbec386f | ||
|
|
afb2ebcdd4 | ||
|
|
83e45c5ff3 | ||
|
|
bdb6a44942 | ||
|
|
17cd26077a | ||
|
|
247eb8a9b9 | ||
|
|
68b8748c9f | ||
|
|
852efa2269 | ||
|
|
303fb7a121 | ||
|
|
7f1c6c8ce1 | ||
|
|
55530b47c0 | ||
|
|
69a6097a30 | ||
|
|
1f759456d6 | ||
|
|
b05a7f2ed6 | ||
|
|
f4709ffe8b | ||
|
|
bb1e353e58 | ||
|
|
99c7bbc25e | ||
|
|
33cfcb26fb | ||
|
|
39c84e03f7 | ||
|
|
f59126dde0 | ||
|
|
209d506341 | ||
|
|
a710adf81a |
@@ -306,7 +306,7 @@ jobs:
|
||||
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.2116/linux/x64:$(LD_LIBRARY_PATH)
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph $(PYTHON_STATIC_ARGS) \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
--ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py \
|
||||
@@ -316,7 +316,7 @@ jobs:
|
||||
# Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time
|
||||
- script: |
|
||||
# For python imports to import pybind_mock_frontend
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.2116/linux/x64:$(LD_LIBRARY_PATH)
|
||||
export PYTHONPATH=$(INSTALL_TEST_DIR):$(INSTALL_DIR)/python/python3.8:$PYTHONPATH
|
||||
python3 -m pytest -sv $(INSTALL_TEST_DIR)/pyopenvino $(PYTHON_STATIC_ARGS) \
|
||||
--junitxml=$(INSTALL_TEST_DIR)/TEST-Pyngraph.xml \
|
||||
@@ -326,7 +326,7 @@ jobs:
|
||||
displayName: 'Python API 2.0 Tests'
|
||||
|
||||
- script: |
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.1906/linux/x64:$(LD_LIBRARY_PATH)
|
||||
export LD_LIBRARY_PATH=$(REPO_DIR)/temp/gna_03.05.00.2116/linux/x64:$(LD_LIBRARY_PATH)
|
||||
python3 -m pytest -s $(INSTALL_TEST_DIR)/mo/unit_tests --junitxml=$(INSTALL_TEST_DIR)/TEST-ModelOptimizer.xml
|
||||
displayName: 'Model Optimizer UT'
|
||||
|
||||
|
||||
@@ -327,8 +327,8 @@ if(ENABLE_INTEL_GNA)
|
||||
GNA_LIB_DIR
|
||||
libGNA_INCLUDE_DIRS
|
||||
libGNA_LIBRARIES_BASE_PATH)
|
||||
set(GNA_VERSION "03.05.00.1906")
|
||||
set(GNA_HASH "4a5be86d9c026b0e10afac2a57fc7c99d762b30e3d506abb3a3380fbcfe2726e")
|
||||
set(GNA_VERSION "03.05.00.2116")
|
||||
set(GNA_HASH "960350567702bda17276ac4c060d7524fb7ce7ced785004bd861c81ff2bfe2c5")
|
||||
|
||||
set(FILES_TO_EXTRACT_LIST gna_${GNA_VERSION}/include)
|
||||
if(WIN32)
|
||||
|
||||
@@ -93,7 +93,7 @@ macro(ov_cpack_settings)
|
||||
# - 2022.1.0 is the last public release with debian packages from Intel install team
|
||||
# - 2022.1.1, 2022.2 do not have debian packages enabled, distributed only as archives
|
||||
# - 2022.3 is the first release where Debian updated packages are introduced, others 2022.3.X are LTS
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0 2023.0.1
|
||||
)
|
||||
|
||||
#
|
||||
|
||||
@@ -79,7 +79,7 @@ macro(ov_cpack_settings)
|
||||
# - 2022.1.0 is the last public release with rpm packages from Intel install team
|
||||
# - 2022.1.1, 2022.2 do not have rpm packages enabled, distributed only as archives
|
||||
# - 2022.3 is the first release where RPM updated packages are introduced, others 2022.3.X are LTS
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0
|
||||
2022.3.0 2022.3.1 2022.3.2 2022.3.3 2022.3.4 2022.3.5 2023.0.0 2023.0.1
|
||||
)
|
||||
|
||||
find_host_program(rpmlint_PROGRAM NAMES rpmlint DOC "Path to rpmlint")
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
ade/0.1.2a
|
||||
onetbb/[>=2021.2.1]
|
||||
pugixml/[>=1.10]
|
||||
protobuf/[>=3.20.3]
|
||||
protobuf/3.21.9
|
||||
ittapi/[>=3.23.0]
|
||||
zlib/[>=1.2.8]
|
||||
opencl-icd-loader/[>=2022.09.30]
|
||||
@@ -19,7 +19,7 @@ flatbuffers/[>=22.9.24]
|
||||
[tool_requires]
|
||||
cmake/[>=3.15]
|
||||
patchelf/[>=0.12]
|
||||
protobuf/[>=3.20.3]
|
||||
protobuf/3.21.9
|
||||
flatbuffers/[>=22.9.24]
|
||||
|
||||
[options]
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
# Datumaro {#datumaro_documentation}
|
||||
|
||||
@sphinxdirective
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Start working with Datumaro, which offers functionalities for basic data
|
||||
import/export, validation, correction, filtration and transformations.
|
||||
|
||||
|
||||
Datumaro provides a suite of basic data import/export (IE) for more than 35 public vision data
|
||||
formats and manipulation functionalities such as validation, correction, filtration, and some
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Explore OpenCV Graph API and other media processing frameworks
|
||||
used for development of computer vision solutions.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
# Model Preparation {#openvino_docs_model_processing_introduction}
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Preparing models for OpenVINO Runtime. Learn how to convert and compile models from different frameworks or read them directly.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: OpenVINO™ is an ecosystem of utilities that have advanced capabilities, which help develop deep learning solutions.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
# OpenVINO™ Training Extensions {#ote_documentation}
|
||||
|
||||
@sphinxdirective
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: OpenVINO™ Training Extensions include advanced algorithms used
|
||||
to create, train and convert deep learning models with OpenVINO
|
||||
Toolkit for optimized inference.
|
||||
|
||||
|
||||
OpenVINO™ Training Extensions provide a suite of advanced algorithms to train
|
||||
Deep Learning models and convert them using the `OpenVINO™
|
||||
|
||||
@@ -3,6 +3,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: OpenVINO toolkit workflow usually involves preparation,
|
||||
optimization, and compression of models, running inference and
|
||||
deploying deep learning applications.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn the details of custom kernel support for the GPU device to
|
||||
enable operations not supported by OpenVINO.
|
||||
|
||||
|
||||
To enable operations not supported by OpenVINO™ out of the box, you may need an extension for OpenVINO operation set, and a custom kernel for the device you will target. This article describes custom kernel support for the GPU device.
|
||||
|
||||
The GPU codepath abstracts many details about OpenCL. You need to provide the kernel code in OpenCL C and an XML configuration file that connects the kernel and its parameters to the parameters of the operation.
|
||||
@@ -13,18 +18,20 @@ There are two options for using the custom operation configuration file:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/custom_kernels_api.cpp
|
||||
:language: cpp
|
||||
:fragment: [part0]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/custom_kernels_api.py
|
||||
:language: python
|
||||
:fragment: [part0]
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/gpu/custom_kernels_api.cpp
|
||||
:language: cpp
|
||||
:fragment: [part0]
|
||||
|
||||
|
||||
All OpenVINO samples, except the trivial ``hello_classification``, and most Open Model Zoo demos
|
||||
feature a dedicated command-line option ``-c`` to load custom kernels. For example, to load custom operations for the classification sample, run the command below:
|
||||
@@ -235,7 +242,8 @@ Example Configuration File
|
||||
The following code sample provides an example configuration file in XML
|
||||
format. For information on the configuration file structure, see the `Configuration File Format <#config-file-format>`__.
|
||||
|
||||
.. code-block:: cpp
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<CustomLayer name="ReLU" type="SimpleGPU" version="1">
|
||||
<Kernel entry="example_relu_kernel">
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Explore OpenVINO™ Extensibility API, which allows adding
|
||||
support for models with custom operations and their further implementation
|
||||
in applications.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
@@ -90,6 +95,13 @@ Extensions can be loaded from a code with the ``:ref:`ov::Core::add_extension <
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.py
|
||||
:language: python
|
||||
:fragment: [add_extension]
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
@@ -97,18 +109,18 @@ Extensions can be loaded from a code with the ``:ref:`ov::Core::add_extension <
|
||||
:language: cpp
|
||||
:fragment: [add_extension]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.py
|
||||
:language: python
|
||||
:fragment: [add_extension]
|
||||
|
||||
|
||||
The ``Identity`` is a custom operation class defined in :doc:`Custom Operation Guide <openvino_docs_Extensibility_UG_add_openvino_ops>`. This is sufficient to enable reading OpenVINO IR which uses the ``Identity`` extension operation emitted by Model Optimizer. In order to load original model directly to the runtime, add a mapping extension:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.py
|
||||
:language: python
|
||||
:fragment: [add_frontend_extension]
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
@@ -116,13 +128,6 @@ The ``Identity`` is a custom operation class defined in :doc:`Custom Operation G
|
||||
:language: cpp
|
||||
:fragment: [add_frontend_extension]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.py
|
||||
:language: python
|
||||
:fragment: [add_frontend_extension]
|
||||
|
||||
When Python API is used, there is no way to implement a custom OpenVINO operation. Even if custom OpenVINO operation is implemented in C++ and loaded into the runtime by a shared library, there is still no way to add a frontend mapping extension that refers to this custom operation. In this case, use C++ shared library approach to implement both operations semantics and framework mapping.
|
||||
|
||||
Python can still be used to map and decompose operations when only operations from the standard OpenVINO operation set are used.
|
||||
@@ -172,13 +177,6 @@ This CMake script finds OpenVINO, using the ``find_package`` CMake command.
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
:fragment: [add_extension_lib]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
@@ -186,6 +184,13 @@ This CMake script finds OpenVINO, using the ``find_package`` CMake command.
|
||||
:language: python
|
||||
:fragment: [add_extension_lib]
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
:fragment: [add_extension_lib]
|
||||
|
||||
|
||||
See Also
|
||||
########
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Explore OpenVINO™ Extension API which enables registering
|
||||
custom operations to support models with operations
|
||||
not supported by OpenVINO.
|
||||
|
||||
OpenVINO™ Extension API allows you to register custom operations to support models with operations which OpenVINO™ does not support out-of-the-box. This capability requires writing code in C++, so if you are using Python to develop your application you need to build a separate shared library implemented in C++ first and load it in Python using ``add_extension`` API. Please refer to :ref:`Create library with extensions <create_library_with_extensions>` for more details on library creation and usage. The remining part of this document describes how to implement an operation class.
|
||||
|
||||
Operation Class
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to use frontend extension classes to facilitate the mapping
|
||||
of custom operations from the framework model representation to the OpenVINO
|
||||
representation.
|
||||
|
||||
|
||||
The goal of this chapter is to explain how to use Frontend extension classes to facilitate
|
||||
mapping of custom operations from framework model representation to OpenVINO representation.
|
||||
Refer to :doc:`Introduction to OpenVINO Extension <openvino_docs_Extensibility_UG_Intro>` to
|
||||
@@ -88,13 +94,6 @@ In this case, you can directly say that 'MyRelu' -> ``Relu`` mapping should be u
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
:fragment: [frontend_extension_MyRelu]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
@@ -102,6 +101,13 @@ In this case, you can directly say that 'MyRelu' -> ``Relu`` mapping should be u
|
||||
:language: python
|
||||
:fragment: [py_frontend_extension_MyRelu]
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
:fragment: [frontend_extension_MyRelu]
|
||||
|
||||
|
||||
In the resulting converted OpenVINO model, “MyRelu” operation will be replaced by the standard operation
|
||||
``Relu`` from the latest available OpenVINO operation set. Notice that when standard operation is used,
|
||||
@@ -329,13 +335,6 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
:fragment: [frontend_extension_ThresholdedReLU_header]
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
|
||||
@@ -343,14 +342,14 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C
|
||||
:language: python
|
||||
:fragment: [py_frontend_extension_ThresholdedReLU_header]
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
:fragment: [frontend_extension_ThresholdedReLU]
|
||||
:fragment: [frontend_extension_ThresholdedReLU_header]
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: py
|
||||
@@ -359,6 +358,13 @@ from ONNX according to the formula: ``ThresholdedRelu(x, alpha) -> Multiply(x, C
|
||||
:language: python
|
||||
:fragment: [py_frontend_extension_ThresholdedReLU]
|
||||
|
||||
.. tab-item:: C++
|
||||
:sync: cpp
|
||||
|
||||
.. doxygensnippet:: docs/snippets/ov_extensions.cpp
|
||||
:language: cpp
|
||||
:fragment: [frontend_extension_ThresholdedReLU]
|
||||
|
||||
|
||||
The next example shows how to use ``ConversionExtension`` to convert PyTorch
|
||||
`aten::hardtanh <https://pytorch.org/docs/stable/generated/torch.nn.functional.hardtanh.html>`_
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Get to know how Graph Rewrite handles running multiple matcher passes on
|
||||
ov::Model in a single graph traversal.
|
||||
|
||||
|
||||
``:ref:`ov::pass::GraphRewrite <doxid-classov_1_1pass_1_1_graph_rewrite>``` serves for running multiple matcher passes on ``:ref:`ov::Model <doxid-classov_1_1_model>``` in a single graph traversal.
|
||||
Example:
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to create a pattern, implement a callback, register
|
||||
the pattern and Matcher to execute MatcherPass transformation
|
||||
on a model.
|
||||
|
||||
``:ref:`ov::pass::MatcherPass <doxid-classov_1_1pass_1_1_matcher_pass>``` is used for pattern-based transformations.
|
||||
|
||||
Template for MatcherPass transformation class
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to use Model Pass transformation class to take entire
|
||||
ov::Model as input and process it.
|
||||
|
||||
|
||||
``:ref:`ov::pass::ModelPass <doxid-classov_1_1pass_1_1_model_pass>``` is used for transformations that take entire ``:ref:`ov::Model <doxid-classov_1_1_model>``` as an input and process it.
|
||||
|
||||
Template for ModelPass transformation class
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to apply additional model optimizations or transform
|
||||
unsupported subgraphs and operations, using OpenVINO™ Transformations API.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:f7c8ab4f15874d235968471bcf876c89c795d601e69891208107b8b72aa58eb1
|
||||
size 70014
|
||||
@@ -1,3 +0,0 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:3d5ccf51fe1babb93d96d042494695a6a6e055d1f8ebf7eef5083d54d8987a23
|
||||
size 58789
|
||||
@@ -1,40 +0,0 @@
|
||||
# Copyright (C) 2018-2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#! [complex:transformation]
|
||||
|
||||
from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph
|
||||
from openvino.tools.mo.graph.graph import Graph
|
||||
|
||||
|
||||
class Complex(FrontReplacementSubgraph):
|
||||
enabled = True
|
||||
|
||||
def pattern(self):
|
||||
return dict(
|
||||
nodes=[
|
||||
('strided_slice_real', dict(op='StridedSlice')),
|
||||
('strided_slice_imag', dict(op='StridedSlice')),
|
||||
('complex', dict(op='Complex')),
|
||||
],
|
||||
edges=[
|
||||
('strided_slice_real', 'complex', {'in': 0}),
|
||||
('strided_slice_imag', 'complex', {'in': 1}),
|
||||
])
|
||||
|
||||
@staticmethod
|
||||
def replace_sub_graph(graph: Graph, match: dict):
|
||||
strided_slice_real = match['strided_slice_real']
|
||||
strided_slice_imag = match['strided_slice_imag']
|
||||
complex_node = match['complex']
|
||||
|
||||
# make sure that both strided slice operations get the same data as input
|
||||
assert strided_slice_real.in_port(0).get_source() == strided_slice_imag.in_port(0).get_source()
|
||||
|
||||
# identify the output port of the operation producing datat for strided slice nodes
|
||||
input_node_output_port = strided_slice_real.in_port(0).get_source()
|
||||
input_node_output_port.disconnect()
|
||||
|
||||
# change the connection so now all consumers of "complex_node" get data from input node of strided slice nodes
|
||||
complex_node.out_port(0).get_connection().set_source(input_node_output_port)
|
||||
#! [complex:transformation]
|
||||
@@ -1,27 +0,0 @@
|
||||
# Copyright (C) 2018-2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#! [complex_abs:transformation]
|
||||
import numpy as np
|
||||
|
||||
from openvino.tools.mo.ops.elementwise import Pow
|
||||
from openvino.tools.mo.ops.ReduceOps import ReduceSum
|
||||
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
|
||||
from openvino.tools.mo.graph.graph import Graph, Node
|
||||
from openvino.tools.mo.ops.const import Const
|
||||
|
||||
|
||||
class ComplexAbs(FrontReplacementOp):
|
||||
op = "ComplexAbs"
|
||||
enabled = True
|
||||
|
||||
def replace_op(self, graph: Graph, node: Node):
|
||||
pow_2 = Const(graph, {'value': np.float32(2.0)}).create_node()
|
||||
reduce_axis = Const(graph, {'value': np.int32(-1)}).create_node()
|
||||
pow_0_5 = Const(graph, {'value': np.float32(0.5)}).create_node()
|
||||
|
||||
sq = Pow(graph, dict(name=node.in_node(0).name + '/sq', power=2.0)).create_node([node.in_node(0), pow_2])
|
||||
sum = ReduceSum(graph, dict(name=sq.name + '/sum')).create_node([sq, reduce_axis])
|
||||
sqrt = Pow(graph, dict(name=sum.name + '/sqrt', power=0.5)).create_node([sum, pow_0_5])
|
||||
return [sqrt.id]
|
||||
#! [complex_abs:transformation]
|
||||
@@ -1,33 +0,0 @@
|
||||
# Copyright (C) 2018-2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# ! [fft_ext:extractor]
|
||||
from ...ops.FFT import FFT
|
||||
from openvino.tools.mo.front.extractor import FrontExtractorOp
|
||||
|
||||
|
||||
class FFT2DFrontExtractor(FrontExtractorOp):
|
||||
op = 'FFT2D'
|
||||
enabled = True
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node):
|
||||
attrs = {
|
||||
'inverse': 0
|
||||
}
|
||||
FFT.update_node_stat(node, attrs)
|
||||
return cls.enabled
|
||||
|
||||
|
||||
class IFFT2DFrontExtractor(FrontExtractorOp):
|
||||
op = 'IFFT2D'
|
||||
enabled = True
|
||||
|
||||
@classmethod
|
||||
def extract(cls, node):
|
||||
attrs = {
|
||||
'inverse': 1
|
||||
}
|
||||
FFT.update_node_stat(node, attrs)
|
||||
return cls.enabled
|
||||
# ! [fft_ext:extractor]
|
||||
@@ -1,27 +0,0 @@
|
||||
# Copyright (C) 2018-2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#! [fft:operation]
|
||||
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
|
||||
from openvino.tools.mo.graph.graph import Graph
|
||||
from openvino.tools.mo.ops.op import Op
|
||||
|
||||
|
||||
class FFT(Op):
|
||||
op = 'FFT'
|
||||
enabled = False
|
||||
|
||||
def __init__(self, graph: Graph, attrs: dict):
|
||||
super().__init__(graph, {
|
||||
'type': self.op,
|
||||
'op': self.op,
|
||||
'version': 'custom_opset',
|
||||
'inverse': None,
|
||||
'in_ports_count': 1,
|
||||
'out_ports_count': 1,
|
||||
'infer': copy_shape_infer
|
||||
}, attrs)
|
||||
|
||||
def backend_attrs(self):
|
||||
return ['inverse']
|
||||
#! [fft:operation]
|
||||
@@ -1,106 +0,0 @@
|
||||
# Copyright (C) 2018-2023 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#! [mri_demo:demo]
|
||||
import numpy as np
|
||||
import cv2 as cv
|
||||
import argparse
|
||||
import time
|
||||
from openvino.inference_engine import IECore
|
||||
|
||||
|
||||
def kspace_to_image(kspace):
|
||||
assert(len(kspace.shape) == 3 and kspace.shape[-1] == 2)
|
||||
fft = cv.idft(kspace, flags=cv.DFT_SCALE)
|
||||
img = cv.magnitude(fft[:,:,0], fft[:,:,1])
|
||||
return cv.normalize(img, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser(description='MRI reconstrution demo for network from https://github.com/rmsouza01/Hybrid-CS-Model-MRI (https://arxiv.org/abs/1810.12473)')
|
||||
parser.add_argument('-i', '--input', dest='input', help='Path to input .npy file with MRI scan data.')
|
||||
parser.add_argument('-p', '--pattern', dest='pattern', help='Path to sampling mask in .npy format.')
|
||||
parser.add_argument('-m', '--model', dest='model', help='Path to .xml file of OpenVINO IR.')
|
||||
parser.add_argument('-l', '--cpu_extension', dest='cpu_extension', help='Path to extensions library with FFT implementation.')
|
||||
parser.add_argument('-d', '--device', dest='device', default='CPU',
|
||||
help='Optional. Specify the target device to infer on; CPU, '
|
||||
'GPU, GNA is acceptable. For non-CPU targets, '
|
||||
'HETERO plugin is used with CPU fallbacks to FFT implementation. '
|
||||
'Default value is CPU')
|
||||
args = parser.parse_args()
|
||||
|
||||
xml_path = args.model
|
||||
assert(xml_path.endswith('.xml'))
|
||||
bin_path = xml_path[:xml_path.rfind('.xml')] + '.bin'
|
||||
|
||||
ie = IECore()
|
||||
ie.add_extension(args.cpu_extension, "CPU")
|
||||
|
||||
net = ie.read_network(xml_path, bin_path)
|
||||
|
||||
device = 'CPU' if args.device == 'CPU' else ('HETERO:' + args.device + ',CPU')
|
||||
exec_net = ie.load_network(net, device)
|
||||
|
||||
# Hybrid-CS-Model-MRI/Data/stats_fs_unet_norm_20.npy
|
||||
stats = np.array([2.20295299e-01, 1.11048916e+03, 4.16997984e+00, 4.71741395e+00], dtype=np.float32)
|
||||
# Hybrid-CS-Model-MRI/Data/sampling_mask_20perc.npy
|
||||
var_sampling_mask = np.load(args.pattern) # TODO: can we generate it in runtime?
|
||||
print('Sampling ratio:', 1.0 - var_sampling_mask.sum() / var_sampling_mask.size)
|
||||
|
||||
data = np.load(args.input)
|
||||
num_slices, height, width = data.shape[0], data.shape[1], data.shape[2]
|
||||
pred = np.zeros((num_slices, height, width), dtype=np.uint8)
|
||||
data /= np.sqrt(height * width)
|
||||
|
||||
print('Compute...')
|
||||
start = time.time()
|
||||
for slice_id, kspace in enumerate(data):
|
||||
kspace = kspace.copy()
|
||||
|
||||
# Apply sampling
|
||||
kspace[var_sampling_mask] = 0
|
||||
kspace = (kspace - stats[0]) / stats[1]
|
||||
|
||||
# Forward through network
|
||||
input = np.expand_dims(kspace.transpose(2, 0, 1), axis=0)
|
||||
outputs = exec_net.infer(inputs={'input_1': input})
|
||||
output = next(iter(outputs.values()))
|
||||
output = output.reshape(height, width)
|
||||
|
||||
# Save predictions
|
||||
pred[slice_id] = cv.normalize(output, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
|
||||
|
||||
print('Elapsed time: %.1f seconds' % (time.time() - start))
|
||||
|
||||
WIN_NAME = 'MRI reconstruction with OpenVINO'
|
||||
|
||||
slice_id = 0
|
||||
def callback(pos):
|
||||
global slice_id
|
||||
slice_id = pos
|
||||
|
||||
kspace = data[slice_id]
|
||||
img = kspace_to_image(kspace)
|
||||
|
||||
kspace[var_sampling_mask] = 0
|
||||
masked = kspace_to_image(kspace)
|
||||
|
||||
rec = pred[slice_id]
|
||||
|
||||
# Add a header
|
||||
border_size = 20
|
||||
render = cv.hconcat((img, masked, rec))
|
||||
render = cv.copyMakeBorder(render, border_size, 0, 0, 0, cv.BORDER_CONSTANT, value=255)
|
||||
cv.putText(render, 'Original', (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
|
||||
cv.putText(render, 'Sampled (PSNR %.1f)' % cv.PSNR(img, masked), (width, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
|
||||
cv.putText(render, 'Reconstructed (PSNR %.1f)' % cv.PSNR(img, rec), (width*2, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
|
||||
|
||||
cv.imshow(WIN_NAME, render)
|
||||
cv.waitKey(1)
|
||||
|
||||
cv.namedWindow(WIN_NAME, cv.WINDOW_NORMAL)
|
||||
print(num_slices)
|
||||
cv.createTrackbar('Slice', WIN_NAME, num_slices // 2, num_slices - 1, callback)
|
||||
callback(num_slices // 2) # Trigger initial visualization
|
||||
cv.waitKey()
|
||||
#! [mri_demo:demo]
|
||||
@@ -2,6 +2,9 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Use the base ov::IAsyncInferRequest class to implement a custom asynchronous inference request in OpenVINO.
|
||||
|
||||
Asynchronous Inference Request runs an inference pipeline asynchronously in one or several task executors depending on a device pipeline structure.
|
||||
OpenVINO Runtime Plugin API provides the base ov::IAsyncInferRequest class:
|
||||
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to build a plugin using CMake and OpenVINO Developer Package.
|
||||
|
||||
|
||||
OpenVINO build infrastructure provides the OpenVINO Developer Package for plugin development.
|
||||
|
||||
OpenVINO Developer Package
|
||||
@@ -9,7 +13,7 @@ OpenVINO Developer Package
|
||||
|
||||
To automatically generate the OpenVINO Developer Package, run the ``cmake`` tool during a OpenVINO build:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: sh
|
||||
|
||||
$ mkdir openvino-release-build
|
||||
$ cd openvino-release-build
|
||||
@@ -48,7 +52,7 @@ Build Plugin using OpenVINO Developer Package
|
||||
|
||||
To build a plugin source tree using the OpenVINO Developer Package, run the commands below:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: sh
|
||||
|
||||
$ mkdir template-plugin-release-build
|
||||
$ cd template-plugin-release-build
|
||||
@@ -72,7 +76,7 @@ To build a plugin and its tests, run the following CMake scripts:
|
||||
|
||||
The default values of the ``ENABLE_TESTS``, ``ENABLE_FUNCTIONAL_TESTS`` options are shared via the OpenVINO Developer Package and they are the same as for the main OpenVINO build tree. You can override them during plugin build using the command below:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: sh
|
||||
|
||||
$ cmake -DENABLE_FUNCTIONAL_TESTS=OFF -DOpenVINODeveloperPackage_DIR=../openvino-release-build ../template-plugin
|
||||
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Use the ov::CompiledModel class as the base class for a compiled
|
||||
model and to create an arbitrary number of ov::InferRequest objects.
|
||||
|
||||
ov::CompiledModel class functionality:
|
||||
|
||||
* Compile an ov::Model instance to a backend specific graph representation
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Use the ov::ISyncInferRequest interface as the base class to implement a synchronous inference request in OpenVINO.
|
||||
|
||||
|
||||
``InferRequest`` class functionality:
|
||||
|
||||
* Allocate input and output tensors needed for a backend-dependent network inference.
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Develop and implement independent inference solutions for
|
||||
different devices with the components of plugin architecture
|
||||
of OpenVINO.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Converting and Preparing Models
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Explore OpenVINO Plugin API, which includes functions and
|
||||
helper classes that simplify the development of new plugins.
|
||||
|
||||
|
||||
OpenVINO Plugin usually represents a wrapper around a backend. Backends can be:
|
||||
|
||||
* OpenCL-like backend (e.g. clDNN library) for GPU devices.
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Use the openvino::funcSharedTests library, which includes
|
||||
a predefined set of functional tests and utilities to verify a plugin.
|
||||
|
||||
|
||||
OpenVINO tests infrastructure provides a predefined set of functional tests and utilities. They are used to verify a plugin using the OpenVINO public API.
|
||||
All the tests are written in the `Google Test C++ framework <https://github.com/google/googletest>`__.
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Use the ov::Property class to define access rights and
|
||||
specific properties of an OpenVINO plugin.
|
||||
|
||||
|
||||
Plugin can provide own device-specific properties.
|
||||
|
||||
Property Class
|
||||
|
||||
@@ -3,6 +3,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about the support for quantized models with different
|
||||
precisions and the FakeQuantize operation used to express
|
||||
quantization rules.
|
||||
|
||||
One of the feature of OpenVINO is the support of quantized models with different precisions: INT8, INT4, etc.
|
||||
However, it is up to the plugin to define what exact precisions are supported by the particular HW.
|
||||
All quantized models which can be expressed in IR have a unified representation by means of *FakeQuantize* operation.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Use the ov::RemoteContext class as the base class for a plugin-specific remote context.
|
||||
|
||||
|
||||
ov::RemoteContext class functionality:
|
||||
|
||||
* Represents device-specific inference context.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Use the ov::IRemoteTensor interface as a base class for device-specific remote tensors.
|
||||
|
||||
|
||||
ov::RemoteTensor class functionality:
|
||||
|
||||
* Provides an interface to work with device-specific memory.
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn more about plugin development and specific features in
|
||||
OpenVINO: precision transformations and support for quantized
|
||||
models with different precisions.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about extra API references required for the development of
|
||||
plugins in OpenVINO.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
@@ -2,6 +2,9 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about AvgPoolPrecisionPreserved attribute used only during AvgPool operation.
|
||||
|
||||
:ref:`ngraph::AvgPoolPrecisionPreservedAttribute <doxid-classngraph_1_1_avg_pool_precision_preserved_attribute>` class represents the ``AvgPoolPrecisionPreserved`` attribute.
|
||||
|
||||
Utility attribute, which is used only during ``AvgPool`` operation, precision preserved property definition.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about IntervalsAlignment attribute, which describes a subgraph with the same quantization intervals alignment.
|
||||
|
||||
|
||||
:ref:`ngraph::IntervalsAlignmentAttribute <doxid-classngraph_1_1_intervals_alignment_attribute>` class represents the ``IntervalsAlignment`` attribute.
|
||||
|
||||
The attribute defines a subgraph with the same quantization intervals alignment. ``FakeQuantize`` operations are included. The attribute is used by quantization operations.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about PrecisionPreserved attribute, which describes a precision preserved operation.
|
||||
|
||||
|
||||
:ref:`ngraph::PrecisionPreservedAttribute <doxid-classngraph_1_1_precision_preserved_attribute>` class represents the ``PrecisionPreserved`` attribute.
|
||||
|
||||
The attribute defines a precision preserved operation. If the attribute is absent, then an operation is not precision preserved.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about Precisions attribute, which describes the precision required for an input/output port or an operation.
|
||||
|
||||
|
||||
:ref:`ngraph::PrecisionsAttribute <doxid-classngraph_1_1_precisions_attribute>` class represents the ``Precisions`` attribute.
|
||||
|
||||
The attribute defines precision which is required for input/output port or an operation.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about QuantizationAlignment attribute, which describes a subgraph with the same quantization alignment.
|
||||
|
||||
|
||||
:ref:`ngraph::QuantizationAlignmentAttribute <doxid-classngraph_1_1_quantization_alignment_attribute>` class represents the ``QuantizationAlignment`` attribute.
|
||||
|
||||
The attribute defines a subgraph with the same quantization alignment. ``FakeQuantize`` operations are not included. The attribute is used by quantization operations.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about QuantizationGranularity attribute, which describes quantization granularity of operation inputs.
|
||||
|
||||
|
||||
ngraph::QuantizationAttribute class represents the ``QuantizationGranularity`` attribute.
|
||||
|
||||
The attribute defines quantization granularity of operation inputs.
|
||||
|
||||
@@ -2,6 +2,9 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about low precision transformations used to infer a quantized model in low precision with the maximum performance on Intel CPU, GPU, and ARM platforms.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Low Precision Transformations
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Check the lists of attributes created or used by model transformations.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Attributes
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about optional Prerequisites transformations, that
|
||||
prepare a model before applying other low precision transformations.
|
||||
|
||||
Prerequisites transformations are optional. The transformations prepare a model before running other low precision transformations. The transformations do not operate with dequantization operations or update precisions. Prerequisites transformations include:
|
||||
|
||||
* :doc:`PullReshapeThroughDequantization <openvino_docs_OV_UG_lpt_PullReshapeThroughDequantization>`
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about markup transformations, which are used to create
|
||||
attributes for input and output ports and operations during runtime.
|
||||
|
||||
This step defines the optimal ``FakeQuantize`` decomposition precisions for the best inference performance via operations markup with runtime attribute instances. Attributes are created for input and output ports and operations. Transformations do not change the operation output port precisions. A model markup low precision logic is decomposed and implemented into the following common markup transformations. The order of transformations is important:
|
||||
|
||||
1. :doc:`MarkupBias <openvino_docs_OV_UG_lpt_MarkupBias>`
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about main transformations, which are mostly low
|
||||
precision transformations that handle decomposition and
|
||||
dequantization operations.
|
||||
|
||||
|
||||
Main transformations are the majority of low precision transformations. Transformations operate with dequantization operations. Main transformations include:
|
||||
|
||||
* :doc:`AddTransformation <openvino_docs_OV_UG_lpt_AddTransformation>`
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Check the list of transformations used to clean up the
|
||||
resulting model to avoid unhandled dequantization operations.
|
||||
|
||||
* :doc:`EliminateFakeQuantizeTransformation <openvino_docs_OV_UG_lpt_EliminateFakeQuantizeTransformation>`
|
||||
* :doc:`FoldConvertTransformation <openvino_docs_OV_UG_lpt_FoldConvertTransformation>`
|
||||
* :doc:`FoldFakeQuantizeTransformation <openvino_docs_OV_UG_lpt_FoldFakeQuantizeTransformation>`
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn about legal information and policies related to the use
|
||||
of Intel® Distribution of OpenVINO™ toolkit.
|
||||
|
||||
|
||||
Performance varies by use, configuration and other factors. Learn more at `www.intel.com/PerformanceIndex <https://www.intel.com/PerformanceIndex>`__.
|
||||
|
||||
|
||||
@@ -15,21 +15,27 @@
|
||||
openvino_docs_MO_DG_Python_API
|
||||
openvino_docs_MO_DG_prepare_model_Model_Optimizer_FAQ
|
||||
|
||||
.. meta::
|
||||
:description: Model conversion (MO) furthers the transition between training and
|
||||
deployment environments, it adjusts deep learning models for
|
||||
optimal execution on target devices.
|
||||
|
||||
|
||||
To convert a model to OpenVINO model format (``ov.Model``), you can use the following command:
|
||||
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model(INPUT_MODEL)
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -39,11 +45,11 @@ To convert a model to OpenVINO model format (``ov.Model``), you can use the foll
|
||||
If the out-of-the-box conversion (only the ``input_model`` parameter is specified) is not successful, use the parameters mentioned below to override input shapes and cut the model:
|
||||
|
||||
- model conversion API provides two parameters to override original input shapes for model conversion: ``input`` and ``input_shape``.
|
||||
For more information about these parameters, refer to the :doc:`Setting Input Shapes <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>` guide.
|
||||
For more information about these parameters, refer to the :doc:`Setting Input Shapes <openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model>` guide.
|
||||
|
||||
- To cut off unwanted parts of a model (such as unsupported operations and training sub-graphs),
|
||||
use the ``input`` and ``output`` parameters to define new inputs and outputs of the converted model.
|
||||
For a more detailed description, refer to the :doc:`Cutting Off Parts of a Model <openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model>` guide.
|
||||
use the ``input`` and ``output`` parameters to define new inputs and outputs of the converted model.
|
||||
For a more detailed description, refer to the :doc:`Cutting Off Parts of a Model <openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model>` guide.
|
||||
|
||||
You can also insert additional input pre-processing sub-graphs into the converted model by using
|
||||
the ``mean_values``, ``scales_values``, ``layout``, and other parameters described
|
||||
@@ -56,15 +62,16 @@ To get the full list of conversion parameters, run the following command:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model(help=True)
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -81,15 +88,16 @@ Below is a list of separate examples for different frameworks and model conversi
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("MobileNet.pb")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -101,15 +109,16 @@ Below is a list of separate examples for different frameworks and model conversi
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("BERT", input_shape=[[2,30],[2,30],[2,30]])
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -123,15 +132,16 @@ Below is a list of separate examples for different frameworks and model conversi
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("ocr.onnx", output="probabilities")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -149,15 +159,16 @@ Below is a list of separate examples for different frameworks and model conversi
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("unet.pdmodel", mean_values=[123,117,104], scale=255)
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn the essentials of representing deep learning models in OpenVINO
|
||||
IR format and the use of supported operation sets.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
@@ -9,7 +13,7 @@
|
||||
openvino_docs_ops_opset
|
||||
openvino_docs_operations_specifications
|
||||
openvino_docs_ops_broadcast_rules
|
||||
|
||||
|
||||
|
||||
This article provides essential information on the format used for representation of deep learning models in OpenVINO toolkit and supported operation sets.
|
||||
|
||||
|
||||
@@ -5,9 +5,9 @@
|
||||
Input data for inference can be different from the training dataset and requires
|
||||
additional preprocessing before inference. To accelerate the whole pipeline including
|
||||
preprocessing and inference, model conversion API provides special parameters such as ``mean_values``,
|
||||
``scale_values``, ``reverse_input_channels``, and ``layout``.
|
||||
|
||||
``scale_values``, ``reverse_input_channels``, and ``layout``. Based on these
|
||||
parameters, model conversion API generates OpenVINO IR with additionally inserted sub-graphs
|
||||
Based on these parameters, model conversion API generates OpenVINO IR with additionally inserted sub-graphs
|
||||
to perform the defined preprocessing. This preprocessing block can perform mean-scale
|
||||
normalization of input data, reverting data along channel dimension, and changing
|
||||
the data layout. See the following sections for details on the parameters, or the
|
||||
@@ -34,15 +34,16 @@ For example, the following command specifies the ``NHWC`` layout for a Tensorflo
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("tf_nasnet_large.onnx", layout="nhwc")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -59,15 +60,16 @@ having two dimensions: batch and size of the image expressed as the ``N?`` layou
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("yolov3-tiny.onnx", layout={"input_1": "nchw", "image_shape": "n?"})
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -87,9 +89,10 @@ the following commands to provide data in the ``NCHW`` layout:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("tf_nasnet_large.onnx", source_layout="nhwc", target_layout="nchw")
|
||||
@@ -97,7 +100,7 @@ the following commands to provide data in the ``NCHW`` layout:
|
||||
ov_model = convert_model("tf_nasnet_large.onnx", layout="nhwc->nchw")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -115,9 +118,10 @@ mentioned earlier, use the following commands:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("yolov3-tiny.onnx", source_layout={"input_1": "nchw", "image_shape": "n?"}, target_layout={"input_1": "nhwc"})
|
||||
@@ -125,7 +129,7 @@ mentioned earlier, use the following commands:
|
||||
ov_model = convert_model("yolov3-tiny.onnx", layout={"input_1": "nchw->nhwc", "image_shape": "n?"}
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -168,15 +172,16 @@ model and applies mean-scale normalization to the input data:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("unet.pdmodel", mean_values=[123,117,104], scale=255)
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -211,15 +216,16 @@ model and embeds the ``reverse_input_channel`` preprocessing block into OpenVINO
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("alexnet.pb", reverse_input_channels=True)
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
Optionally all relevant floating-point weights can be compressed to ``FP16`` data type during the model conversion.
|
||||
Optionally, all relevant floating-point weights can be compressed to ``FP16`` data type during model conversion.
|
||||
It results in creating a "compressed ``FP16`` model", which occupies about half of
|
||||
the original space in the file system. The compression may introduce a drop in accuracy.
|
||||
the original space in the file system. The compression may introduce a minor drop in accuracy,
|
||||
but it is negligible for most models.
|
||||
|
||||
To compress the model, use the ``compress_to_fp16=True`` option:
|
||||
@@ -12,19 +12,20 @@ To compress the model, use the ``compress_to_fp16=True`` option:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model(INPUT_MODEL, compress_to_fp16=False)
|
||||
ov_model = convert_model(INPUT_MODEL, compress_to_fp16=True)
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
mo --input_model INPUT_MODEL --compress_to_fp16=False
|
||||
mo --input_model INPUT_MODEL --compress_to_fp16=True
|
||||
|
||||
|
||||
For details on how plugins handle compressed ``FP16`` models, see
|
||||
@@ -39,8 +40,8 @@ For details on how plugins handle compressed ``FP16`` models, see
|
||||
|
||||
.. note::
|
||||
|
||||
Some large models (larger than a few GB) when compressed to ``FP16`` may consume enormous amount of RAM on the loading
|
||||
phase of the inference. In case if you are facing such problems, please try to convert them without compression:
|
||||
Some large models (larger than a few GB) when compressed to ``FP16`` may consume an overly large amount of RAM on the loading
|
||||
phase of the inference. If that is the case for your model, try to convert it without compression:
|
||||
``convert_model(INPUT_MODEL, compress_to_fp16=False)`` or ``convert_model(INPUT_MODEL)``
|
||||
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ To test performance of your model, make sure you :doc:`prepare the model for use
|
||||
For example, if you use :doc:`OpenVINO's automation tools <omz_tools_downloader>`, these two lines of code will download the
|
||||
resnet-50-tf and convert it to OpenVINO IR.
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: sh
|
||||
|
||||
omz_downloader --name resnet-50-tf
|
||||
omz_converter --name resnet-50-tf
|
||||
@@ -36,7 +36,7 @@ For a detailed description, see the dedicated articles:
|
||||
|
||||
The benchmark_app includes a lot of device-specific options, but the primary usage is as simple as:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: sh
|
||||
|
||||
benchmark_app -m <model> -d <device> -i <input>
|
||||
|
||||
@@ -47,7 +47,7 @@ performance settings that contain command-line equivalents in the Benchmark app.
|
||||
While these settings provide really low-level control for the optimal model performance on the *specific* device,
|
||||
it is recommended to always start performance evaluation with the :doc:`OpenVINO High-Level Performance Hints <openvino_docs_OV_UG_Performance_Hints>` first, like so:
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: sh
|
||||
|
||||
# for throughput prioritization
|
||||
benchmark_app -hint tput -m <model> -d <device>
|
||||
|
||||
@@ -15,7 +15,8 @@ Model conversion API is represented by ``convert_model()`` method in openvino.to
|
||||
|
||||
Example of converting a PyTorch model directly from memory:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import torchvision
|
||||
|
||||
@@ -31,7 +32,8 @@ The following types are supported as an input model for ``convert_model()``:
|
||||
|
||||
Example of using native Python classes to set ``input_shape``, ``mean_values`` and ``layout``:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.runtime import PartialShape, Layout
|
||||
|
||||
@@ -39,7 +41,8 @@ Example of using native Python classes to set ``input_shape``, ``mean_values`` a
|
||||
|
||||
Example of using strings for setting ``input_shape``, ``mean_values`` and ``layout``:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
ov_model = convert_model(model, input_shape="[1,3,100,100]", mean_values="[127,127,127]", layout="NCHW")
|
||||
|
||||
@@ -49,7 +52,8 @@ The shape can be a ``list`` or ``tuple`` of dimensions (``int`` or ``openvino.ru
|
||||
|
||||
Example of using a tuple in the ``input`` parameter to cut a model:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
ov_model = convert_model(model, input=("input_name", [3], np.float32))
|
||||
|
||||
@@ -66,7 +70,8 @@ Supported types for ``InputCutInfo``:
|
||||
|
||||
Example of using ``InputCutInfo`` to freeze an input with value:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model, InputCutInfo
|
||||
|
||||
@@ -85,7 +90,8 @@ Parameters supporting ``list``:
|
||||
|
||||
Example of using lists to set shapes, types and layout for multiple inputs:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
ov_model = convert_model(model, input=[("input1", [1,3,100,100], np.float32), ("input2", [1,3,100,100], np.float32)], layout=[Layout("NCHW"), LayoutMap("NCHW", "NHWC")])
|
||||
|
||||
@@ -93,7 +99,8 @@ Example of using lists to set shapes, types and layout for multiple inputs:
|
||||
|
||||
Example of using the ``Layout`` class to set the layout of a model input:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.runtime import Layout
|
||||
from openvino.tools.mo import convert_model
|
||||
@@ -106,7 +113,8 @@ To set both source and destination layouts in the ``layout`` parameter, use the
|
||||
|
||||
Example of using the ``LayoutMap`` class to change the layout of a model input:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model, LayoutMap
|
||||
|
||||
|
||||
@@ -772,7 +772,8 @@ The name should be the compilation of the layer name with the module name separa
|
||||
|
||||
For example, your topology contains this layer with type ``Python``:
|
||||
|
||||
.. code-block::
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
layer {
|
||||
name: 'proposal'
|
||||
@@ -788,7 +789,8 @@ For example, your topology contains this layer with type ``Python``:
|
||||
|
||||
The first step is to implement an extension for this layer in Model Optimizer as an ancestor of ``Op`` class:
|
||||
|
||||
.. code-block::
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
class ProposalPythonExampleOp(Op):
|
||||
op = 'Proposal'
|
||||
@@ -799,7 +801,8 @@ The first step is to implement an extension for this layer in Model Optimizer as
|
||||
|
||||
It is mandatory to call two functions right after the implementation of that class:
|
||||
|
||||
.. code-block::
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
class ProposalPythonExampleOp(Op):
|
||||
...
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from the
|
||||
Caffe format to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Note that OpenVINO support for Caffe is currently being deprecated and will be removed entirely in the future.
|
||||
|
||||
@@ -2,6 +2,10 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from the
|
||||
Kaldi format to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from the
|
||||
MXNet format to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future.
|
||||
|
||||
@@ -2,11 +2,18 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from the
|
||||
ONNX format to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
Introduction to ONNX
|
||||
####################
|
||||
|
||||
`ONNX <https://github.com/onnx/onnx>`__ is a representation format for deep learning models that allows AI developers to easily transfer models between different frameworks. It is hugely popular among deep learning tools, like PyTorch, Caffe2, Apache MXNet, Microsoft Cognitive Toolkit, and many others.
|
||||
|
||||
.. note:: ONNX models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example <openvino_docs_OV_UG_Integrate_OV_with_your_application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions.
|
||||
|
||||
Converting an ONNX Model
|
||||
########################
|
||||
|
||||
|
||||
@@ -2,6 +2,19 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from the
|
||||
PaddlePaddle format to the OpenVINO Intermediate Representation.
|
||||
|
||||
This page provides general instructions on how to convert a model from a PaddlePaddle format to the OpenVINO IR format using Model Optimizer. The instructions are different depending on PaddlePaddle model format.
|
||||
|
||||
.. note:: PaddlePaddle models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example <openvino_docs_OV_UG_Integrate_OV_with_your_application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions.
|
||||
|
||||
Converting PaddlePaddle Model Inference Format
|
||||
##############################################
|
||||
|
||||
PaddlePaddle inference model includes ``.pdmodel`` (storing model structure) and ``.pdiparams`` (storing model weight). For how to export PaddlePaddle inference model, please refer to the `Exporting PaddlePaddle Inference Model <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/beginner/model_save_load_cn.html>`__ Chinese guide.
|
||||
|
||||
To convert a PaddlePaddle model, use the ``mo`` script and specify the path to the input ``.pdmodel`` model file:
|
||||
|
||||
.. code-block:: sh
|
||||
@@ -29,24 +42,25 @@ Converting certain PaddlePaddle models may require setting ``example_input`` or
|
||||
|
||||
* Example of converting ``paddle.hapi.model.Model`` format model:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import paddle
|
||||
from openvino.tools.mo import convert_model
|
||||
|
||||
# create a paddle.hapi.model.Model format model
|
||||
resnet50 = paddle.vision.models.resnet50()
|
||||
x = paddle.static.InputSpec([1,3,224,224], 'float32', 'x')
|
||||
y = paddle.static.InputSpec([1,1000], 'float32', 'y')
|
||||
|
||||
model = paddle.Model(resnet50, x, y)
|
||||
|
||||
# convert to OpenVINO IR format
|
||||
ov_model = convert_model(model)
|
||||
|
||||
# optional: serialize OpenVINO IR to *.xml & *.bin
|
||||
from openvino.runtime import serialize
|
||||
serialize(ov_model, "ov_model.xml", "ov_model.bin")
|
||||
import paddle
|
||||
from openvino.tools.mo import convert_model
|
||||
|
||||
# create a paddle.hapi.model.Model format model
|
||||
resnet50 = paddle.vision.models.resnet50()
|
||||
x = paddle.static.InputSpec([1,3,224,224], 'float32', 'x')
|
||||
y = paddle.static.InputSpec([1,1000], 'float32', 'y')
|
||||
|
||||
model = paddle.Model(resnet50, x, y)
|
||||
|
||||
# convert to OpenVINO IR format
|
||||
ov_model = convert_model(model)
|
||||
|
||||
# optional: serialize OpenVINO IR to *.xml & *.bin
|
||||
from openvino.runtime import serialize
|
||||
serialize(ov_model, "ov_model.xml", "ov_model.bin")
|
||||
|
||||
* Example of converting ``paddle.fluid.dygraph.layers.Layer`` format model:
|
||||
|
||||
@@ -54,17 +68,18 @@ Converting certain PaddlePaddle models may require setting ``example_input`` or
|
||||
|
||||
``list`` with tensor(``paddle.Tensor``) or InputSpec(``paddle.static.input.InputSpec``)
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import paddle
|
||||
from openvino.tools.mo import convert_model
|
||||
|
||||
# create a paddle.fluid.dygraph.layers.Layer format model
|
||||
model = paddle.vision.models.resnet50()
|
||||
x = paddle.rand([1,3,224,224])
|
||||
|
||||
# convert to OpenVINO IR format
|
||||
ov_model = convert_model(model, example_input=[x])
|
||||
import paddle
|
||||
from openvino.tools.mo import convert_model
|
||||
|
||||
# create a paddle.fluid.dygraph.layers.Layer format model
|
||||
model = paddle.vision.models.resnet50()
|
||||
x = paddle.rand([1,3,224,224])
|
||||
|
||||
# convert to OpenVINO IR format
|
||||
ov_model = convert_model(model, example_input=[x])
|
||||
|
||||
* Example of converting ``paddle.fluid.executor.Executor`` format model:
|
||||
|
||||
@@ -72,25 +87,26 @@ Converting certain PaddlePaddle models may require setting ``example_input`` or
|
||||
|
||||
``list`` or ``tuple`` with variable(``paddle.static.data``)
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import paddle
|
||||
from openvino.tools.mo import convert_model
|
||||
|
||||
paddle.enable_static()
|
||||
|
||||
# create a paddle.fluid.executor.Executor format model
|
||||
x = paddle.static.data(name="x", shape=[1,3,224])
|
||||
y = paddle.static.data(name="y", shape=[1,3,224])
|
||||
relu = paddle.nn.ReLU()
|
||||
sigmoid = paddle.nn.Sigmoid()
|
||||
y = sigmoid(relu(x))
|
||||
|
||||
exe = paddle.static.Executor(paddle.CPUPlace())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
# convert to OpenVINO IR format
|
||||
ov_model = convert_model(exe, example_input=[x], example_output=[y])
|
||||
import paddle
|
||||
from openvino.tools.mo import convert_model
|
||||
|
||||
paddle.enable_static()
|
||||
|
||||
# create a paddle.fluid.executor.Executor format model
|
||||
x = paddle.static.data(name="x", shape=[1,3,224])
|
||||
y = paddle.static.data(name="y", shape=[1,3,224])
|
||||
relu = paddle.nn.ReLU()
|
||||
sigmoid = paddle.nn.Sigmoid()
|
||||
y = sigmoid(relu(x))
|
||||
|
||||
exe = paddle.static.Executor(paddle.CPUPlace())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
# convert to OpenVINO IR format
|
||||
ov_model = convert_model(exe, example_input=[x], example_output=[y])
|
||||
|
||||
Supported PaddlePaddle Layers
|
||||
#############################
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from the
|
||||
PyTorch format to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This page provides instructions on how to convert a model from the PyTorch format to the OpenVINO IR format using Model Optimizer.
|
||||
Model Optimizer Python API allows the conversion of PyTorch models using the ``convert_model()`` method.
|
||||
|
||||
@@ -10,7 +15,8 @@ Model Optimizer Python API allows the conversion of PyTorch models using the ``c
|
||||
|
||||
Example of PyTorch model conversion:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import torchvision
|
||||
import torch
|
||||
@@ -32,7 +38,8 @@ Converting certain PyTorch models may require model tracing, which needs ``input
|
||||
|
||||
Example of using ``example_input``:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import torchvision
|
||||
import torch
|
||||
@@ -65,6 +72,7 @@ For more information, refer to the `Exporting PyTorch models to ONNX format <htt
|
||||
To export a PyTorch model, you need to obtain the model as an instance of ``torch.nn.Module`` class and call the ``export`` function.
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
@@ -2,8 +2,15 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from a
|
||||
TensorFlow format to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This page provides general instructions on how to run model conversion from a TensorFlow format to the OpenVINO IR format. The instructions are different depending on whether your model was created with TensorFlow v1.X or TensorFlow v2.X.
|
||||
|
||||
.. note:: TensorFlow models are supported via :doc:`FrontEnd API <openvino_docs_MO_DG_TensorFlow_Frontend>`. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example <openvino_docs_OV_UG_Integrate_OV_with_your_application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions.
|
||||
|
||||
To use model conversion API, install OpenVINO Development Tools by following the :doc:`installation instructions <openvino_docs_install_guides_install_dev_tools>`.
|
||||
|
||||
Converting TensorFlow 1 Models
|
||||
@@ -69,7 +76,8 @@ When a network is defined in Python code, you have to create an inference graph
|
||||
that allows model training. That means all trainable parameters are represented as variables in the graph.
|
||||
To be able to use such graph with model conversion API, it should be frozen and dumped to a file with the following code:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.framework import graph_io
|
||||
@@ -114,7 +122,8 @@ pruning, find custom input nodes in the ``StatefulPartitionedCall/*`` subgraph.
|
||||
Since the 2023.0 release, direct pruning of models in SavedModel format is not supported.
|
||||
It is essential to freeze the model before pruning. Use the following code snippet for model freezing:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
|
||||
@@ -135,7 +144,8 @@ Keras H5
|
||||
If you have a model in the HDF5 format, load the model using TensorFlow 2 and serialize it in the
|
||||
SavedModel format. Here is an example of how to do it:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import tensorflow as tf
|
||||
model = tf.keras.models.load_model('model.h5')
|
||||
@@ -145,7 +155,8 @@ SavedModel format. Here is an example of how to do it:
|
||||
The Keras H5 model with a custom layer has specifics to be converted into SavedModel format.
|
||||
For example, the model with a custom layer ``CustomLayer`` from ``custom_layer.py`` is converted as follows:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import tensorflow as tf
|
||||
from custom_layer import CustomLayer
|
||||
@@ -189,7 +200,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.keras.Model``
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
model = tf.keras.applications.ResNet50(weights="imagenet")
|
||||
ov_model = convert_model(model)
|
||||
@@ -197,7 +209,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.keras.layers.Layer``. Requires setting the "input_shape".
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import tensorflow_hub as hub
|
||||
|
||||
@@ -206,7 +219,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.Module``. Requires setting the "input_shape".
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
class MyModule(tf.Module):
|
||||
def __init__(self, name=None):
|
||||
@@ -221,7 +235,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.compat.v1.Graph``
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
with tf.compat.v1.Session() as sess:
|
||||
inp1 = tf.compat.v1.placeholder(tf.float32, [100], 'Input1')
|
||||
@@ -234,7 +249,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.compat.v1.GraphDef``
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
with tf.compat.v1.Session() as sess:
|
||||
inp1 = tf.compat.v1.placeholder(tf.float32, [100], 'Input1')
|
||||
@@ -247,7 +263,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.function``
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
@tf.function(
|
||||
input_signature=[tf.TensorSpec(shape=[1, 2, 3], dtype=tf.float32),
|
||||
@@ -259,7 +276,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.compat.v1.session``
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
with tf.compat.v1.Session() as sess:
|
||||
inp1 = tf.compat.v1.placeholder(tf.float32, [100], 'Input1')
|
||||
@@ -271,7 +289,8 @@ Model conversion API supports passing TensorFlow/TensorFlow2 models directly fro
|
||||
|
||||
* ``tf.train.checkpoint``
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
model = tf.keras.Model(...)
|
||||
checkpoint = tf.train.Checkpoint(model)
|
||||
|
||||
@@ -2,13 +2,18 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a model from a
|
||||
TensorFlow Lite format to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
To convert a TensorFlow Lite model, use the ``mo`` script and specify the path to the input ``.tflite`` model file:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
mo --input_model <INPUT_MODEL>.tflite
|
||||
|
||||
.. note:: TensorFlow Lite models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API.
|
||||
.. note:: TensorFlow Lite models are supported via FrontEnd API. You may skip conversion to IR and read models directly by OpenVINO runtime API. Refer to the :doc:`inference example <openvino_docs_OV_UG_Integrate_OV_with_your_application>` for more details. Using ``convert_model`` is still necessary in more complex cases, such as new custom inputs/outputs in model pruning, adding pre-processing, or using Python conversion extensions.
|
||||
|
||||
Supported TensorFlow Lite Layers
|
||||
###################################
|
||||
|
||||
@@ -35,6 +35,8 @@
|
||||
openvino_docs_MO_DG_prepare_model_convert_model_mxnet_specific_Convert_Style_Transfer_From_MXNet
|
||||
openvino_docs_MO_DG_prepare_model_convert_model_kaldi_specific_Aspire_Tdnn_Model
|
||||
|
||||
.. meta::
|
||||
:description: Get to know conversion methods for specific TensorFlow, ONNX, PyTorch, MXNet, and Kaldi models.
|
||||
|
||||
|
||||
This section provides a set of tutorials that demonstrate conversion methods for specific
|
||||
|
||||
@@ -4,6 +4,10 @@ With model conversion API you can increase your model's efficiency by providing
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to increase the efficiency of a model with MO by providing an additional shape definition with the input_shape and static_shape parameters.
|
||||
|
||||
|
||||
.. _when_to_specify_input_shapes:
|
||||
|
||||
|
||||
@@ -27,15 +31,16 @@ and specify the input shape of ``[2,300,300,3]``:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("MobileNet.pb", input_shape=[2,300,300,3])
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -50,15 +55,16 @@ and specify shapes ``[3,150,200,1]`` and ``[3]`` for them:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("ocr.onnx", input=["data","seq_len"], input_shape=[[3,150,200,1],[3]])
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -70,15 +76,16 @@ Alternatively, specify input shapes, using the ``input`` parameter as follows:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("ocr.onnx", input=[("data",[3,150,200,1]),("seq_len",[3])])
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -93,15 +100,16 @@ For example, launch model conversion for the ONNX OCR model and specify dynamic
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("ocr.onnx", input=["data","seq_len"], input_shape=[[-1,150,200,1],[-1]]
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -115,16 +123,17 @@ For example, launch model conversion for the ONNX OCR model and specify a bounda
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
from openvino.runtime import Dimension
|
||||
ov_model = convert_model("ocr.onnx", input=["data","seq_len"], input_shape=[[Dimension(1,3),150,200,1],[Dimension(1,3)]]
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
|
||||
@@ -53,15 +53,16 @@ Convert this model to ``ov.Model``:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1)
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -72,6 +73,7 @@ Convert this model to ``ov.Model``:
|
||||
In IR, the structure of a model has the following layers:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<layer id="286" name="input" precision="FP32" type="Input">
|
||||
<output>
|
||||
@@ -92,6 +94,7 @@ The ``-b`` option is used here for conversion to override a possible undefined b
|
||||
The last layer in the model is ``InceptionV1/Logits/Predictions/Reshape_1``, which matches an output operation in the TensorFlow graph:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<layer id="389" name="InceptionV1/Logits/Predictions/Reshape_1" precision="FP32" type="Reshape">
|
||||
<data axis="0" dim="1,1001" num_axes="-1"/>
|
||||
@@ -115,9 +118,10 @@ Due to automatic identification of inputs and outputs, providing the ``input`` a
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1)
|
||||
@@ -125,7 +129,7 @@ Due to automatic identification of inputs and outputs, providing the ``input`` a
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, input="input", output="InceptionV1/Logits/Predictions/Reshape_1")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -154,15 +158,16 @@ If you want to cut your model at the end, you have the following options:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -172,6 +177,7 @@ If you want to cut your model at the end, you have the following options:
|
||||
The resulting Intermediate Representation has three layers:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<?xml version="1.0" ?>
|
||||
<net batch="1" name="model" version="2">
|
||||
@@ -217,15 +223,16 @@ If you want to cut your model at the end, you have the following options:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu:0")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -235,6 +242,7 @@ If you want to cut your model at the end, you have the following options:
|
||||
The resulting Intermediate Representation has three layers, which are the same as in the previous case:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<?xml version="1.0" ?>
|
||||
<net batch="1" name="model" version="2">
|
||||
@@ -280,15 +288,16 @@ If you want to cut your model at the end, you have the following options:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, output="0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -298,6 +307,7 @@ If you want to cut your model at the end, you have the following options:
|
||||
The resulting Intermediate Representation has two layers, which are the same as the first two layers in the previous case:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<?xml version="1.0" ?>
|
||||
<net batch="1" name="inception_v1" version="2">
|
||||
@@ -337,15 +347,16 @@ If you want to go further and cut the beginning of the model, leaving only the `
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu", input="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -355,6 +366,7 @@ If you want to go further and cut the beginning of the model, leaving only the `
|
||||
The resulting Intermediate Representation looks as follows:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<xml version="1.0">
|
||||
<net batch="1" name="model" version="2">
|
||||
@@ -388,15 +400,16 @@ If you want to go further and cut the beginning of the model, leaving only the `
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, input="0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu", output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -406,6 +419,7 @@ If you want to go further and cut the beginning of the model, leaving only the `
|
||||
The resulting Intermediate Representation looks as follows:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<xml version="1.0">
|
||||
<net batch="1" name="model" version="2">
|
||||
@@ -439,15 +453,16 @@ If you want to go further and cut the beginning of the model, leaving only the `
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, input="InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1:0", output="InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -457,6 +472,7 @@ If you want to go further and cut the beginning of the model, leaving only the `
|
||||
The resulting Intermediate Representation looks as follows:
|
||||
|
||||
.. code-block:: xml
|
||||
:force:
|
||||
|
||||
<xml version="1.0">
|
||||
<net batch="1" name="model" version="2">
|
||||
@@ -494,15 +510,16 @@ Following this behavior, ``convert_model()`` creates an ``Input`` layer for port
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", batch=1, input="InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution")
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -516,15 +533,16 @@ Different behavior occurs when ``input_shape`` is also used as an attempt to ove
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", input="InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution", input_shape=[1,224,224,3])
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
@@ -546,15 +564,16 @@ The correct command line is:
|
||||
.. tab-set::
|
||||
|
||||
.. tab-item:: Python
|
||||
:sync: mo-python-api
|
||||
:sync: py
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.tools.mo import convert_model
|
||||
ov_model = convert_model("inception_v1.pb", input="0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution", input_shape=[1,224,224,3])
|
||||
|
||||
.. tab-item:: CLI
|
||||
:sync: cli-tool
|
||||
:sync: cli
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to generate a Low Precision IR - Intermediate
|
||||
Representation suitable for INT8 low precision inference on CPU
|
||||
and GPU devices.
|
||||
|
||||
Introduction
|
||||
############
|
||||
|
||||
|
||||
@@ -2,11 +2,15 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert an ASpIRE Chain TDNN
|
||||
model from Kaldi to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Note that OpenVINO support for Kaldi is currently being deprecated and will be removed entirely in the future.
|
||||
|
||||
|
||||
At the beginning, you should `download a pre-trained model <https://kaldi-asr.org/models/1/0001_aspire_chain_model.tar.gz>`__
|
||||
for the ASpIRE Chain Time Delay Neural Network (TDNN) from the Kaldi project official website.
|
||||
|
||||
@@ -103,7 +107,8 @@ Prepare ivectors for the Speech Recognition sample:
|
||||
|
||||
5. For the Speech Recognition sample, the ``.ark`` file must contain an ivector for each frame. Copy the ivector ``frame_count`` times by running the below script in the Python command prompt:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import subprocess
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert GluonCV models
|
||||
from MXNet to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future.
|
||||
@@ -10,7 +15,8 @@ This article provides the instructions and examples on how to convert `GluonCV S
|
||||
|
||||
1. Choose the topology available from the `GluonCV Model Zoo <https://gluon-cv.mxnet.io/model_zoo/detection.html>`__ and export to the MXNet format using the GluonCV API. For example, for the ``ssd_512_mobilenet1.0`` topology:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from gluoncv import model_zoo, data, utils
|
||||
from gluoncv.utils import export_block
|
||||
|
||||
@@ -2,13 +2,15 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a Style Transfer
|
||||
model from MXNet to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
.. warning::
|
||||
|
||||
Note that OpenVINO support for Apache MXNet is currently being deprecated and will be removed entirely in the future.
|
||||
|
||||
|
||||
|
||||
This article provides instructions on how to generate a model for style transfer, using the public MXNet neural style transfer sample.
|
||||
|
||||
**Step 1**: Download or clone the repository `Zhaw's Neural Style Transfer repository <https://github.com/zhaw/neural_style>`__ with an MXNet neural style transfer sample.
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a Faster R-CNN model
|
||||
from ONNX to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
The instructions below are applicable **only** to the Faster R-CNN model converted to the ONNX file format from the `maskrcnn-benchmark model <https://github.com/facebookresearch/maskrcnn-benchmark>`__:
|
||||
|
||||
1. Download the pretrained model file from `onnx/models <https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/faster-rcnn>`__ (commit-SHA: 8883e49e68de7b43e263d56b9ed156dfa1e03117).
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a pre-trained GPT-2
|
||||
model from ONNX to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
`Public pre-trained GPT-2 model <https://github.com/onnx/models/tree/master/text/machine_comprehension/gpt-2>`__ is a large
|
||||
transformer-based language model with a simple objective: predict the next word, given all of the previous words within some text.
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a pre-trained Mask
|
||||
R-CNN model from ONNX to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
The instructions below are applicable **only** to the Mask R-CNN model converted to the ONNX file format from the `maskrcnn-benchmark model <https://github.com/facebookresearch/maskrcnn-benchmark>`__.
|
||||
|
||||
1. Download the pretrained model file from `onnx/models <https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/mask-rcnn>`__ (commit-SHA: 8883e49e68de7b43e263d56b9ed156dfa1e03117).
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a BERT-NER model
|
||||
from Pytorch to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
The goal of this article is to present a step-by-step guide on how to convert PyTorch BERT-NER model to OpenVINO IR. First, you need to download the model and convert it to ONNX.
|
||||
|
||||
|
||||
@@ -17,7 +22,8 @@ directory of the model repository. If you download the pretrained model, you nee
|
||||
to download `bert.py <https://github.com/kamalkraj/BERT-NER/blob/dev/bert.py>`__ to run the script.
|
||||
The instructions were tested with the commit-SHA: ``e5be564156f194f1becb0d82aeaf6e762d9eb9ed``.
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import torch
|
||||
|
||||
@@ -56,7 +62,7 @@ The script generates ONNX model file ``bert-ner.onnx``.
|
||||
Converting an ONNX BERT-NER model to IR
|
||||
#######################################
|
||||
|
||||
.. code-block:: bash
|
||||
.. code-block:: sh
|
||||
|
||||
mo --input_model bert-ner.onnx --input "input_mask[1,128],segment_ids[1,128],input_ids[1,128]"
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a Cascade RCNN R-101
|
||||
model from Pytorch to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
The goal of this article is to present a step-by-step guide on how to convert a PyTorch Cascade RCNN R-101 model to OpenVINO IR. First, you need to download the model and convert it to ONNX.
|
||||
|
||||
Downloading and Converting Model to ONNX
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a F3Net model
|
||||
from Pytorch to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
`F3Net <https://github.com/weijun88/F3Net>`__ : Fusion, Feedback and Focus for Salient Object Detection
|
||||
|
||||
Cloning the F3Net Repository
|
||||
@@ -20,7 +25,8 @@ Downloading and Converting the Model to ONNX
|
||||
To download the pretrained model or train the model yourself, refer to the
|
||||
`instructions <https://github.com/weijun88/F3Net/blob/master/README.md>`__ in the F3Net model repository. First, convert the model to ONNX format. Create and run the following Python script in the ``src`` directory of the model repository:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import torch
|
||||
from dataset import Config
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a QuartzNet model
|
||||
from Pytorch to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
`NeMo project <https://github.com/NVIDIA/NeMo>`__ provides the QuartzNet model.
|
||||
|
||||
Downloading the Pre-trained QuartzNet Model
|
||||
@@ -14,7 +19,8 @@ Here are the instructions on how to obtain QuartzNet in ONNX format.
|
||||
|
||||
2. Run the following code:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import nemo
|
||||
import nemo.collections.asr as nemo_asr
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a RCAN model
|
||||
from Pytorch to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
`RCAN <https://github.com/yulunzhang/RCAN>`__ : Image Super-Resolution Using Very Deep Residual Channel Attention Networks
|
||||
|
||||
Downloading and Converting the Model to ONNX
|
||||
@@ -10,7 +15,8 @@ Downloading and Converting the Model to ONNX
|
||||
To download the pre-trained model or train the model yourself, refer to the `instruction <https://github.com/yulunzhang/RCAN/blob/master/README.md>`__ in the RCAN model repository. First, convert the model to ONNX format. Create and run the script with the following content in the root
|
||||
directory of the model repository:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from argparse import Namespace
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a RNN-T model
|
||||
from Pytorch to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This guide covers conversion of RNN-T model from `MLCommons <https://github.com/mlcommons>`__ repository. Follow
|
||||
the instructions below to export a PyTorch model into ONNX, before converting it to IR:
|
||||
|
||||
@@ -51,7 +56,8 @@ if you were following the `guide <https://github.com/mlcommons/inference/tree/ma
|
||||
If you already have a full clone of MLCommons inference repository, you need
|
||||
to specify the ``mlcommons_inference_path`` variable.
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import toml
|
||||
import torch
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a YOLACT model
|
||||
from Pytorch to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
You Only Look At CoefficienTs (YOLACT) is a simple, fully convolutional model for real-time instance segmentation.
|
||||
The PyTorch implementation is publicly available in `this GitHub repository <https://github.com/dbolya/yolact>`__.
|
||||
The YOLACT++ model is not supported, because it uses deformable convolutional layers that cannot be represented in ONNX format.
|
||||
|
||||
@@ -16,6 +16,10 @@
|
||||
openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Kaldi
|
||||
openvino_docs_MO_DG_prepare_model_convert_model_tutorials
|
||||
|
||||
.. meta::
|
||||
:description: In OpenVINO, ONNX, PaddlePaddle, TensorFlow and TensorFlow Lite
|
||||
models do not require any prior conversion, while MxNet, Caffe and Kaldi do.
|
||||
|
||||
|
||||
**OpenVINO IR (Intermediate Representation)** - the proprietary format of OpenVINO™, benefiting from the full extent of its features.
|
||||
|
||||
@@ -23,7 +27,9 @@
|
||||
OpenVINO Runtime without any prior conversion. For a guide on how to run inference on ONNX, PaddlePaddle, or TensorFlow,
|
||||
see how to :doc:`Integrate OpenVINO™ with Your Application <openvino_docs_OV_UG_Integrate_OV_with_your_application>`.
|
||||
|
||||
**MXNet, Caffe, Kaldi** - formats supported indirectly, which means they need to be converted to OpenVINO IR before running inference. The conversion is done with Model Conversion API and in some cases may involve intermediate steps.
|
||||
**MXNet, Caffe, Kaldi** - legacy formats that need to be converted to OpenVINO IR before running inference.
|
||||
The model conversion in some cases may involve intermediate steps. OpenVINO is currently proceeding
|
||||
**to deprecate these formats** and **remove their support entirely in the future**.
|
||||
|
||||
Refer to the following articles for details on conversion for different formats and models:
|
||||
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert the Attention OCR
|
||||
model from the TensorFlow Attention OCR repository to the
|
||||
OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This tutorial explains how to convert the Attention OCR (AOCR) model from the `TensorFlow Attention OCR repository <https://github.com/emedvedev/attention-ocr>`__ to the Intermediate Representation (IR).
|
||||
|
||||
Extracting a Model from ``aocr`` Library
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a BERT model
|
||||
from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
Pretrained models for BERT (Bidirectional Encoder Representations from Transformers) are
|
||||
`publicly available <https://github.com/google-research/bert>`__.
|
||||
|
||||
@@ -95,14 +100,16 @@ Follow these steps to make a pretrained TensorFlow BERT model reshapable over ba
|
||||
|
||||
7. Open the file ``modeling.py`` in the text editor and delete lines 923-924. They should look like this:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
if not non_static_indexes:
|
||||
return shape
|
||||
|
||||
8. Open the file ``run_classifier.py`` and insert the following code after the line 645:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import os, sys
|
||||
import tensorflow as tf
|
||||
@@ -119,7 +126,8 @@ Follow these steps to make a pretrained TensorFlow BERT model reshapable over ba
|
||||
|
||||
Lines before the inserted code should look like this:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
(total_loss, per_example_loss, logits, probabilities) = create_model(
|
||||
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a CRNN model
|
||||
from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This tutorial explains how to convert a CRNN model to OpenVINO™ Intermediate Representation (IR).
|
||||
|
||||
There are several public versions of TensorFlow CRNN model implementation available on GitHub. This tutorial explains how to convert the model from
|
||||
@@ -49,7 +54,8 @@ If you have another implementation of CRNN model, it can be converted to OpenVIN
|
||||
|
||||
2. Edit the ``tools/demo_shadownet.py`` script. After ``saver.restore(sess=sess, save_path=weights_path)`` line, add the following code:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from tensorflow.python.framework import graph_io
|
||||
frozen = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['shadow/LSTMLayers/transpose_time_major'])
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a DeepSpeech model
|
||||
from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
`DeepSpeech project <https://github.com/mozilla/DeepSpeech>`__ provides an engine to train speech-to-text models.
|
||||
|
||||
Downloading the Pretrained DeepSpeech Model
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert an EfficientDet model
|
||||
from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This tutorial explains how to convert EfficientDet public object detection models to the Intermediate Representation (IR).
|
||||
|
||||
.. _efficientdet-to-ir:
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a FaceNet model
|
||||
from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
`Public pre-trained FaceNet models <https://github.com/davidsandberg/facenet#pre-trained-models>`__ contain both training
|
||||
and inference parts of the graph. Switching between these two states is managed with a placeholder value.
|
||||
Intermediate Representation (IR) models are intended for inference, which means that the training part is redundant.
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a GNMT model
|
||||
from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This tutorial explains how to convert Google Neural Machine Translation (GNMT) model to the Intermediate Representation (IR).
|
||||
|
||||
There are several public versions of TensorFlow GNMT model implementation available on GitHub. This tutorial explains how to convert the GNMT model from the `TensorFlow Neural Machine Translation (NMT) repository <https://github.com/tensorflow/nmt>`__ to the IR.
|
||||
@@ -203,6 +208,7 @@ Inference checkpoint ``inference_GNMT_graph`` and frozen inference graph ``froze
|
||||
To generate ``vocab.bpe.32000``, execute the ``nmt/scripts/wmt16_en_de.sh`` script. If you face an issue of a size mismatch between the checkpoint graph's embedding layer and vocabulary (both src and target), make sure you add the following code to the ``nmt.py`` file to the ``extend_hparams`` function after the line 508 (after initialization of the ``src_vocab_size`` and ``tgt_vocab_size`` variables):
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
src_vocab_size -= 1
|
||||
tgt_vocab_size -= 1
|
||||
@@ -273,6 +279,7 @@ Running GNMT IR
|
||||
Before running the example, insert a path to your GNMT ``.xml`` and ``.bin`` files into ``MODEL_PATH`` and ``WEIGHTS_PATH``, and fill ``input_data_tensor`` and ``seq_lengths`` tensors according to your input data.
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from openvino.inference_engine import IENetwork, IECore
|
||||
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a Neural Collaborative
|
||||
Filtering Model from TensorFlow to the OpenVINO Intermediate
|
||||
Representation.
|
||||
|
||||
|
||||
This tutorial explains how to convert Neural Collaborative Filtering (NCF) model to the OpenVINO Intermediate Representation.
|
||||
|
||||
`Public TensorFlow NCF model <https://github.com/tensorflow/models/tree/master/official/recommendation>`__ does not contain pre-trained weights. To convert this model to the IR:
|
||||
@@ -13,6 +19,7 @@ This tutorial explains how to convert Neural Collaborative Filtering (NCF) model
|
||||
Run the following commands:
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.framework import graph_io
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert Object Detection
|
||||
API Models from TensorFlow to the OpenVINO Intermediate
|
||||
Representation.
|
||||
|
||||
|
||||
* Starting with the 2022.1 release, model conversion API can convert the TensorFlow Object Detection API Faster and Mask RCNNs topologies differently. By default, model conversion adds operation "Proposal" to the generated IR. This operation needs an additional input to the model with name "image_info" which should be fed with several values describing the preprocessing applied to the input image (refer to the :doc:`Proposal <openvino_docs_ops_detection_Proposal_4>` operation specification for more information). However, this input is redundant for the models trained and inferred with equal size images. Model conversion API can generate IR for such models and insert operation :doc:`DetectionOutput <openvino_docs_ops_detection_DetectionOutput_1>` instead of ``Proposal``. The `DetectionOutput` operation does not require additional model input "image_info". Moreover, for some models the produced inference results are closer to the original TensorFlow model. In order to trigger new behavior, the attribute "operation_to_add" in the corresponding JSON transformation configuration file should be set to value "DetectionOutput" instead of default one "Proposal".
|
||||
* Starting with the 2021.1 release, model conversion API converts the TensorFlow Object Detection API SSDs, Faster and Mask RCNNs topologies keeping shape-calculating sub-graphs by default, so topologies can be re-shaped in the OpenVINO Runtime using dedicated reshape API. Refer to the :doc:`Using Shape Inference <openvino_docs_OV_UG_ShapeInference>` guide for more information on how to use this feature. It is possible to change the both spatial dimensions of the input image and batch size.
|
||||
* To generate IRs for TF 1 SSD topologies, model conversion API creates a number of ``PriorBoxClustered`` operations instead of a constant node with prior boxes calculated for the particular input image size. This change allows you to reshape the topology in the OpenVINO Runtime using dedicated API. The reshaping is supported for all SSD topologies except FPNs, which contain hardcoded shapes for some operations, preventing the topology input shape from being changed.
|
||||
@@ -136,12 +142,13 @@ Keeping Aspect Ratio Resizer Replacement
|
||||
* If the ``input_shape [1, H, W, 3]`` command line parameter is specified, model conversion API scales the specified input image height ``H`` and width ``W`` to satisfy the ``min_dimension`` and ``max_dimension`` constraints defined in the ``keep_aspect_ratio_resizer``. The following function calculates the input operation height and width:
|
||||
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
def calculate_shape_keeping_aspect_ratio(H: int, W: int, min_dimension: int, max_dimension: int):
|
||||
ratio_min = min_dimension / min(H, W)
|
||||
ratio_max = max_dimension / max(H, W)
|
||||
ratio = min(ratio_min, ratio_max)
|
||||
return int(round(H * ratio)), int(round(W * ratio))
|
||||
def calculate_shape_keeping_aspect_ratio(H: int, W: int, min_dimension: int, max_dimension: int):
|
||||
ratio_min = min_dimension / min(H, W)
|
||||
ratio_max = max_dimension / max(H, W)
|
||||
ratio = min(ratio_min, ratio_max)
|
||||
return int(round(H * ratio)), int(round(W * ratio))
|
||||
|
||||
The ``input_shape`` command line parameter should be specified only if the "pad_to_max_dimension" does not exist or is set to "false" in the ``keep_aspect_ratio_resizer``.
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a RetinaNet model
|
||||
from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This tutorial explains how to convert a RetinaNet model to the Intermediate Representation (IR).
|
||||
|
||||
`Public RetinaNet model <https://github.com/fizyr/keras-retinanet>`__ does not contain pretrained TensorFlow weights.
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a Slim Image
|
||||
Classification model from TensorFlow to the OpenVINO
|
||||
Intermediate Representation.
|
||||
|
||||
|
||||
`TensorFlow-Slim Image Classification Model Library <https://github.com/tensorflow/models/tree/master/research/slim/README.md>`__ is a library to define, train and evaluate classification models in TensorFlow. The library contains Python scripts defining the classification topologies together with checkpoint files for several pre-trained classification topologies. To convert a TensorFlow-Slim library model, complete the following steps:
|
||||
|
||||
1. Download the TensorFlow-Slim models `git repository <https://github.com/tensorflow/models>`__.
|
||||
@@ -83,7 +89,8 @@ The file `preprocessing_factory.py <https://github.com/tensorflow/models/blob/ma
|
||||
|
||||
The `inception_preprocessing.py <https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py>`__ file defines the pre-processing function for the Inception models. The ``preprocess_for_eval`` function contains the following code:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
...
|
||||
import tensorflow as tf
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert Wide and Deep Family
|
||||
models from TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
The Wide and Deep model is a combination of wide and deep parts for memorization and generalization of object features respectively.
|
||||
These models can contain different types of object features such as numerical, categorical, sparse and sequential features. These feature types are specified
|
||||
through the TensorFlow ``tf.feature_column`` API. The table below presents which feature types are supported by the OpenVINO toolkit.
|
||||
@@ -44,7 +49,8 @@ The Wide and Deep model is no longer in the master branch of the repository but
|
||||
As the OpenVINO™ toolkit does not support the categorical with hash and crossed features, such feature types must be switched off in the model
|
||||
by changing the ``build_model_columns()`` function in `census_dataset.py` as follows:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
def build_model_columns():
|
||||
"""Builds a set of wide and deep feature columns."""
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert an XLNet model from
|
||||
TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
Pretrained models for XLNet (a generalized autoregressive pretraining method for language understanding) are
|
||||
`publicly available <https://github.com/zihangdai/xlnet>`__.
|
||||
|
||||
@@ -43,7 +48,8 @@ To get pb-file from the archive contents, you need to do the following.
|
||||
|
||||
.. note:: The original model repository has been tested with TensorFlow 1.13.1 under Python2.
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
@@ -126,7 +132,8 @@ To get ``pb-file`` from the archive contents, follow the instructions below:
|
||||
|
||||
2. Save and run the following Python script in ``~/XLNet-Large/xlnet``:
|
||||
|
||||
.. code-block:: python
|
||||
.. code-block:: py
|
||||
:force:
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert YOLO models from
|
||||
TensorFlow to the OpenVINO Intermediate Representation.
|
||||
|
||||
|
||||
This document explains how to convert real-time object detection YOLOv1, YOLOv2, YOLOv3 and YOLOv4 public models to the Intermediate Representation (IR). All YOLO models are originally implemented in the DarkNet framework and consist of two files:
|
||||
|
||||
* The ``.cfg`` file with model configurations
|
||||
|
||||
@@ -2,6 +2,12 @@
|
||||
|
||||
@sphinxdirective
|
||||
|
||||
.. meta::
|
||||
:description: Learn how to convert a TensorFlow Language
|
||||
Model on One Billion Word Benchmark to the OpenVINO Intermediate
|
||||
Representation.
|
||||
|
||||
|
||||
Downloading a Pre-trained Language Model on One Billion Word Benchmark
|
||||
######################################################################
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user