fix errors in documentation (#9384)

* add sphinx log parsing

* fix

* fix log

* fixes

* fixes

* fixes

* fixes

* fixes

* fixes

* fixes

* fixes

* fixes

* fixes

* doxygen-xfail

* fixes

* fixes

* fixes

* fixes

* fixes

* fixes

* fix pot

* add pot check

* fixes

* fixes

* Fixed POT docs

* Fixed POT docs

* Fixes

* change heading markup

* fixes

Co-authored-by: azaytsev <andrey.zaytsev@intel.com>
Nikolay Tyukaev 2022-01-27 19:39:49 +03:00 committed by GitHub
parent a99ae43d61
commit 622027bee5
163 changed files with 1029 additions and 5628 deletions

View File

@ -47,7 +47,7 @@ assignees: ''
- [ ] I checked the problem with documentation, FAQ, open issues, Stack Overflow, etc and have not found solution
<!--
Places to check:
* OpenVINO documentation: https://docs.openvinotoolkit.org/
* OpenVINO documentation: https://docs.openvino.ai/
* OpenVINO forum: https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/bd-p/distribution-openvino-toolkit
* OpenVINO issue tracker: https://github.com/openvinotoolkit/openvino/issues?q=is%3Aissue
* Stack Overflow branch: https://stackoverflow.com/questions/tagged/openvino

View File

@ -47,7 +47,7 @@ jobs:
- name: Build doc
run: |
cmake --build . --target sphinx_docs
cmake --build . --target sphinx_docs -j8
working-directory: build
- name: Archive HTML
@ -58,9 +58,13 @@ jobs:
- name: Run Pytest
run: |
pytest --doxygen="./build/docs/doxygen.log" \
--include_pot \
--sphinx="./build/docs/sphinx.log" \
--suppress-warnings="./docs/suppress_warnings.txt" \
--confcutdir="./docs/scripts/tests/" \
--html="./build/docs/_artifacts/doc-generation.html" \
--doxygen-strip="$(pwd)" \
--sphinx-strip="$(pwd)/build/docs/rst" \
--doxygen-xfail="./docs/doxygen-xfail.txt" \
--self-contained-html ./docs/scripts/tests/test_docs.py
@ -70,6 +74,20 @@ jobs:
with:
name: openvino_doc_pytest
path: build/docs/_artifacts/
- name: 'Upload doxygen.log'
if: always()
uses: actions/upload-artifact@v2
with:
name: doxygen_log
path: build/docs/doxygen.log
- name: 'Upload sphinx.log'
if: always()
uses: actions/upload-artifact@v2
with:
name: sphinx_log
path: build/docs/sphinx.log
- name: 'Upload html'
if: github.event_name == 'push'

View File

@ -25,7 +25,7 @@ By contributing to the project, you agree to the license and copyright terms the
and release your contribution under these terms.
## Resources:
* Docs: https://docs.openvinotoolkit.org/
* Docs: https://docs.openvino.ai/
* Wiki: https://github.com/openvinotoolkit/openvino/wiki
* Issue tracking: https://github.com/openvinotoolkit/openvino/issues
* Storage: https://storage.openvinotoolkit.org/
@ -46,6 +46,6 @@ Please report questions, issues and suggestions using:
[Open Model Zoo]:https://github.com/openvinotoolkit/open_model_zoo
[Inference Engine]:https://software.intel.com/en-us/articles/OpenVINO-InferEngine
[Model Optimizer]:https://software.intel.com/en-us/articles/OpenVINO-ModelOptimizer
[nGraph]:https://docs.openvinotoolkit.org/latest/openvino_docs_nGraph_DG_DevGuide.html
[nGraph]:https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_DevGuide.html
[tag on StackOverflow]:https://stackoverflow.com/search?q=%23openvino

View File

@ -10,7 +10,7 @@ set(IE_VS_VER_COMPANY_NAME_STR "Intel Corporation")
set(IE_VS_VER_PRODUCTVERSION_STR "${CI_BUILD_NUMBER}")
set(IE_VS_VER_PRODUCTNAME_STR "OpenVINO toolkit")
set(IE_VS_VER_COPYRIGHT_STR "Copyright (C) 2018-2021, Intel Corporation")
set(IE_VS_VER_COMMENTS_STR "https://docs.openvinotoolkit.org/")
set(IE_VS_VER_COMMENTS_STR "https://docs.openvino.ai/")
#
# ie_add_vs_version_file(NAME <name>

View File

@ -50,7 +50,6 @@ set(LINKCHECKER_PY "" CACHE FILEPATH "Path to linkchecker.py for documentation c
set(ENABLE_OPENVINO_NOTEBOOKS OFF CACHE BOOL "Build with openvino notebooks")
set(OMZ_DOCS_DIR "" CACHE PATH "Path to open_model_zoo documentation dir.")
set(WORKBENCH_DOCS_DIR "" CACHE PATH "Path to workbench documentation dir.")
set(POT_DOCS_DIR "" CACHE PATH "Path to post-training-compression-tool documentation dir.")
set(OVMS_DOCS_DIR "" CACHE PATH "Path to model server documentation dir.")
set(GST_DOCS_DIR "" CACHE PATH "Path to gst-video-analytics documentation dir.")
set(GRAPH_CSV_DIR "" CACHE PATH "Path to the folder containing csv data for rendering graphs.")
@ -152,15 +151,6 @@ function(build_docs)
--output_dir=${DOCS_BUILD_DIR}/workbench)
endif()
# pot doc files
if(EXISTS "${POT_DOCS_DIR}")
get_filename_component(POT_DOCS_DIR "${POT_DOCS_DIR}" ABSOLUTE)
list(APPEND commands COMMAND ${PYTHON_EXECUTABLE} ${DOXY_MD_FILTER}
--input_dir=${POT_DOCS_DIR}
--output_dir=${DOCS_BUILD_DIR}/pot)
endif()
# ovms doc files
if(EXISTS "${OVMS_DOCS_DIR}")
get_filename_component(OVMS_DOCS_DIR "${OVMS_DOCS_DIR}" ABSOLUTE)
@ -216,7 +206,7 @@ function(build_docs)
add_custom_target(sphinx_docs
DEPENDS doxygen_xml
COMMAND sphinx-build -b html ${RST_OUTPUT} ${SPHINX_OUTPUT}
COMMAND sphinx-build -j auto -w ${DOCS_BUILD_DIR}/sphinx.log -b html ${RST_OUTPUT} ${SPHINX_OUTPUT}
WORKING_DIRECTORY ${RST_OUTPUT}
VERBATIM)

View File

@ -826,25 +826,50 @@ WARN_LOGFILE = "@DOCS_BUILD_DIR@/doxygen.log"
# Note: If this tag is empty the current directory is searched.
INPUT = "@MARKDOWN_INPUT@" \
"@OpenVINO_SOURCE_DIR@/src/frontends/common/include" \
"@OpenVINO_SOURCE_DIR@/src/common/conditional_compilation/include" \
"@OpenVINO_SOURCE_DIR@/src/common/itt/include" \
"@OpenVINO_SOURCE_DIR@/src/common/legacy/include" \
"@OpenVINO_SOURCE_DIR@/src/common/legacy/include" \
"@OpenVINO_SOURCE_DIR@/src/common/low_precision_transformations/include" \
"@OpenVINO_SOURCE_DIR@/src/common/low_precision_transformations/include" \
"@OpenVINO_SOURCE_DIR@/src/common/offline_transformations/include" \
"@OpenVINO_SOURCE_DIR@/src/common/preprocessing/include" \
"@OpenVINO_SOURCE_DIR@/src/common/snippets/include" \
"@OpenVINO_SOURCE_DIR@/src/common/transformations/include" \
"@OpenVINO_SOURCE_DIR@/src/common/util/include" \
"@OpenVINO_SOURCE_DIR@/src/core/include" \
"@OpenVINO_SOURCE_DIR@/src/core/builder/include" \
"@OpenVINO_SOURCE_DIR@/src/core/reference/include" \
"@OpenVINO_SOURCE_DIR@/src/core/shape_inference/include" \
"@OpenVINO_SOURCE_DIR@/src/frontends/common/include" \
"@OpenVINO_SOURCE_DIR@/src/inference/dev_api" \
"@OpenVINO_SOURCE_DIR@/src/inference/include"
"@OpenVINO_SOURCE_DIR@/src/frontends/common/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/conditional_compilation/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/itt/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/legacy/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/legacy/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/low_precision_transformations/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/low_precision_transformations/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/offline_transformations/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/preprocessing/" \
"@OpenVINO_SOURCE_DIR@/src/common/preprocessing/arm_neon/" \
"@OpenVINO_SOURCE_DIR@/src/common/preprocessing/cpu_x86_avx2/" \
"@OpenVINO_SOURCE_DIR@/src/common/preprocessing/cpu_x86_avx512/" \
"@OpenVINO_SOURCE_DIR@/src/common/preprocessing/cpu_x86_sse42/" \
"@OpenVINO_SOURCE_DIR@/src/common/snippets/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/transformations/include/" \
"@OpenVINO_SOURCE_DIR@/src/common/util/include/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/descriptor" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/op/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/op/util" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/opsets/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/pass/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/pattern/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/pattern/op/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/runtime/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/ngraph/type/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/core/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/core/descriptor/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/core/preprocess/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/core/type/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/op/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/op/util/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/opsets/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/pass/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/pass/pattern/" \
"@OpenVINO_SOURCE_DIR@/src/core/include/openvino/runtime/" \
"@OpenVINO_SOURCE_DIR@/src/core/builder/include/" \
"@OpenVINO_SOURCE_DIR@/src/core/reference/include/" \
"@OpenVINO_SOURCE_DIR@/src/core/shape_inference/include/" \
"@OpenVINO_SOURCE_DIR@/src/frontends/common/include/" \
"@OpenVINO_SOURCE_DIR@/src/inference/dev_api/" \
"@OpenVINO_SOURCE_DIR@/src/inference/include/"
# This tag can be used to specify the character encoding of the source files
@ -1017,15 +1042,25 @@ EXCLUDE_SYMBOLS = InferenceEngine::details \
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH = "@CMAKE_CURRENT_SOURCE_DIR@" \
"@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/src" \
"@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/include" \
"@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/src/CMakeLists.txt" \
"@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/tests/functional/CMakeLists.txt" \
"@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/tests/functional/transformations" \
"@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/tests/functional/shared_tests_instances/" \
"@CMAKE_CURRENT_SOURCE_DIR@/snippets"
"@IE_SOURCE_DIR@/tests/functional/plugin/shared/include"
EXAMPLE_PATH = "@OpenVINO_SOURCE_DIR@" \
"@OpenVINO_SOURCE_DIR@/docs/HOWTO/" \
"@OpenVINO_SOURCE_DIR@/docs/" \
"@OpenVINO_SOURCE_DIR@/docs/onnx_custom_op/" \
"@OpenVINO_SOURCE_DIR@/docs/template_extension/" \
"@OpenVINO_SOURCE_DIR@/docs/template_extension/old/" \
"@OpenVINO_SOURCE_DIR@/docs/template_extension/new/" \
"@OpenVINO_SOURCE_DIR@/docs/template_plugin/" \
"@OpenVINO_SOURCE_DIR@/docs/template_plugin/src/" \
"@OpenVINO_SOURCE_DIR@/docs/template_plugin/include/" \
"@OpenVINO_SOURCE_DIR@/docs/template_plugin/src/CMakeLists.txt" \
"@OpenVINO_SOURCE_DIR@/docs/template_plugin/tests/functional/CMakeLists.txt" \
"@OpenVINO_SOURCE_DIR@/docs/template_plugin/tests/functional/transformations/" \
"@OpenVINO_SOURCE_DIR@/docs/template_plugin/tests/functional/shared_tests_instances/" \
"@OpenVINO_SOURCE_DIR@/docs/snippets/" \
"@OpenVINO_SOURCE_DIR@/src/tests/functional/plugin/shared/include/" \
"@OpenVINO_SOURCE_DIR@/docs/HOWTO/mo_extensions/ops/" \
"@OpenVINO_SOURCE_DIR@/docs/HOWTO/mo_extensions/front/tf/" \
"@OpenVINO_SOURCE_DIR@/src/tests/functional/shared_test_classes/include/shared_test_classes/"
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
@ -2378,7 +2413,7 @@ SKIP_FUNCTION_MACROS = YES
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES = "@DOCS_BUILD_DIR@/ngraph_cpp_api.tag=ngraph_cpp_api"
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to

View File

@ -254,8 +254,6 @@ Now it is possible to convert the model using the following command line:
mo --input_model <PATH_TO_MODEL>/wnet_20.pb -b 1 --extensions mo_extensions/
```
@endsphinxdirective
The sub-graph corresponding to the originally non-supported one is depicted in the image below:
![Converted sub-graph](img/converted_subgraph.png)
@ -271,7 +269,7 @@ below is based on the template extension described in [Inference Engine Extensib
The first step is to create a CMake configuration file which builds the extension. The content of the "CMakeLists.txt"
file is the following:
@snippet ../template_extension/CMakeLists.txt cmake:extension
@snippet template_extension/old/CMakeLists.txt cmake:extension
The CPU FFT kernel implementation uses OpenCV to perform the FFT, which is why the extension library is linked with
`opencv_core`, which comes with OpenVINO.
@ -279,12 +277,12 @@ The CPU FFT kernel implementation uses OpenCV to perform the FFT that is why the
#### Custom nGraph Operation "FFT" Implementation
The next step is to create the nGraph operation FFT. The header file "fft_op.hpp" has the following content:
@snippet ../template_extension/fft_op.hpp fft_op:header
@snippet template_extension/old/fft_op.hpp fft_op:header
The operation has just one boolean attribute `inverse`. The implementation of the necessary nGraph operation functions is
in the `fft_op.cpp` file with the following content:
@snippet ../template_extension/fft_op.cpp fft_op:implementation
@snippet template_extension/old/fft_op.cpp fft_op:implementation
Refer to the [Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md) for more details.
@ -292,11 +290,11 @@ Refer to the [Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps
The operation implementation for the CPU plugin uses OpenCV to perform the FFT. The header file "fft_kernel.hpp" has the
following content:
@snippet ../template_extension/fft_kernel.hpp fft_kernel:header
@snippet template_extension/old/fft_kernel.hpp fft_kernel:header
The "fft_kernel.cpp" with the implementation of the CPU has the following content:
@snippet ../template_extension/fft_kernel.cpp fft_kernel:implementation
@snippet template_extension/old/fft_kernel.cpp fft_kernel:implementation
Refer to the [How to Implement Custom CPU Operations](../IE_DG/Extensibility_DG/CPU_Kernel.md) for more details.
@ -334,7 +332,7 @@ python3 mri_reconstruction_demo.py \
## Additional Resources
- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org)
- OpenVINO™ toolkit online documentation: [https://docs.openvino.ai](https://docs.openvino.ai)
- [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
- [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md)
- [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md)

View File

@ -554,7 +554,7 @@ The sections below contain detailed list of changes made to the Inference Engine
### Removed API
Removed all [Inference Engine API that was deprecated in 2019 R2](https://docs.openvinotoolkit.org/2019_R3/_docs_IE_DG_API_Changes.html#deprecated_api)
Removed all [Inference Engine API that was deprecated in 2019 R2](https://docs.openvino.ai/2019_R3/_docs_IE_DG_API_Changes.html#deprecated_api)
## 2019 R3

View File

@ -1,298 +0,0 @@
Cross Check Tool {#openvino_docs_IE_DG_Cross_Check_Tool}
================
Cross Check Tool is a console application that enables comparing accuracy and performance metrics for two successive
model inferences that are performed on two different supported Intel&reg; devices or with different precisions.
The Cross Check Tool can compare metrics per layer or over the whole model.
On Linux* OS, before running the Cross Check Tool binary, make sure your application can find the
Deep Learning Inference Engine libraries.
Navigate to the `<INSTALL_DIR>` folder and run the `setupvars.sh` script to
set all necessary environment variables:
```sh
source setupvars.sh
```
## Running the Cross Check Tool
Cross Check Tool is distributed as a binary file and there is no need to build it. To run the Cross Check Tool,
execute the tool's binary file with the necessary parameters. Note that the Inference Engine assumes that the weights
are in the same folder as the _.xml_ file.
You can get the list of all available options using the -h option:
```sh
$./cross_check_tool -h
InferenceEngine:
API version ............ 1.0
Build .................. ###
[ INFO ] Parsing input parameters
./cross_check_tool [OPTION]
Options:
-h Prints a usage message.
-i "<path>" Optional. Path to an input image file or multi-input file to infer. Generates input(s) from normal distribution if empty
-m "<path>" Required. Path to an .xml file that represents the first IR of the trained model to infer.
-l "<absolute_path>" Required for MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the kernels implementation.
Or
-c "<absolute_path>" Required for clDNN (GPU)-targeted custom kernels. Absolute path to the xml file with the kernels description.
-conf "<path>" Optional. Path to config file for -d device plugin
-ref_conf "<path>" Optional. Path to config file for -ref_d device plugin
-pp "<path>" Optional. Path to a plugin folder.
-d "<device>" Required. The first target device to infer the model specified with the -m option. CPU, GPU, HDDL or MYRIAD is acceptable.
-ref_m "<path>" Optional. Path to an .xml file that represents the second IR in different precision to compare the metrics.
-ref_d "<device>" Required. The second target device to infer the model and compare the metrics. CPU, GPU, HDDL or MYRIAD is acceptable.
-layers "<options>" Defines layers to check. Options: all, None - for output layers check, list of comma-separated layer names to check. Default value is None.
-eps "<float>" Optional. Threshold for filtering out those blob statistics that do not statify the condition: max_abs_diff < eps.
-dump Enables blobs statistics dumping
-load "<path>" Path to a file to load blobs from
```
### Examples
1. To check per-layer accuracy and performance of inference in FP32 precision on the CPU against the GPU, run:
```sh
./cross_check_tool -i <path_to_input_image_or_multi_input_file> \
-m <path_to_FP32_xml> \
-d CPU \
-ref_d GPU \
-layers all
```
The output looks as follows:
```
InferenceEngine:
API version ............ 1.0
Build .................. ###
[ INFO ] Parsing input parameters
The same IR on both devices: <path_to_IR>
[ INFO ] No extensions provided
API version ............ 1.0
Build .................. lnx_20180510
Description ....... ov_intel_cpu_plugin
API version ............ 0.1
Build .................. ci-main-03659
Description ....... clDNNPlugin
[ INFO ] Inputs detected: Placeholder
[ INFO ] Statistics will be dumped for X layers: <layer_1_name>, <layer_2_name>, ... , <layer_X_name>
[ INFO ] Layer <layer_1_name> statistics
Max absolute difference: 1.52588e-05
Min absolute difference: 0
Max relative difference: 0.000288028%
Min relative difference: 0%
Blob size: 1000
Devices: CPU_FP32 GPU_FP32
Status: EXECUTED EXECUTED
Layer type: Reshape Reshape
Real time, microsec: 20 154
Execution type: unknown GPU
Number of NAN: 0 0
Number of INF: 0 0
Number of ZERO: 0 0
...
<list_of_layer_statistics>
...
[ INFO ] Overall max absolute difference 2.81334e-05 was reached by <layer_name> layer
[ INFO ] Overall min absolute difference 0 was reached by <layer_name> layer
[ INFO ] Overall max relative difference 0.744893% was reached by <layer_name> layer
[ INFO ] Overall min relative difference -2.47948% was reached by <layer_name> layer
[ INFO ] Execution successful
```
2. To check the overall accuracy and performance of inference on the CPU in FP32 precision against the
Intel&reg; Movidius&trade; Myriad&trade; device in FP16 precision, run:
```sh
./cross_check_tool -i <path_to_input_image_or_multi_input_file> \
-m <path_to_FP16_xml> \
-ref_d CPU \
-ref_m <path_to_FP32_xml>\
-d MYRIAD \
```
The output looks as follows:
```
InferenceEngine:
API version ............ 1.0
Build .................. ###
[ INFO ] Parsing input parameters
[ INFO ] MYRIAD vs CPU
IR for MYRIAD : <path_to_FP16_xml>
IR for CPU : <path_to_FP32_xml>
[ INFO ] No extensions provided
[ INFO ] Loading plugins
API version ............ 0.1
Build .................. ###
Description ....... ov_intel_vpu_plugin
API version ............ 1.0
Build .................. ###
Description ....... ov_intel_cpu_plugin
[ INFO ] Inputs detected: <list_of_input_layers>
[ INFO ] Statistics will be dumped for 1 layers: <output_layer_name(s)>
[ INFO ] Layer <output_layer_name> statistics
Max absolute difference: 0.003889
Min absolute difference: 2.49778e-12
Max relative difference: 290.98%
Min relative difference: 0.0327804%
Devices: MYRIAD_FP16 CPU_FP32
Real time, microsec: 69213.978946 4149.904940
[ INFO ] Execution successful
```
3. To dump layer statistics from specific list of layers, run:
```sh
./cross_check_tool -i <path_to_input_image_or_multi_input_file> \
-m <path_to_FP16_xml> \
-d MYRIAD \
-dump \
-layers <comma_separated_list_of_layers>
```
The output looks as follows:
```
InferenceEngine:
API version ............ 1.0
Build .................. ###
[ INFO ] Blob and statistics dumping enabled
[ INFO ] No extensions provided
API version ............ 0.1
Build .................. custom_releases/cvsdk-2018-r2_e28ec0278fb749d6b999c688a8e90a8a25c0f2b5
Description ....... ov_intel_vpu_plugin
[ INFO ] Inputs detected: <list_of_input_layers>
[ INFO ] Statistics will be dumped for X layers: <comma_separated_list_of_layers>
[ INFO ] Dump path: <path_where_dump_will_be_saved>
[ INFO ] <layer_1_name> layer processing
...
[ INFO ] <layer_X_name> layer processing
[ INFO ] Execution successful
```
If you do not provide the `-i` key, the Cross Check Tool generates an input from normally distributed noise and saves
it in a multi-input file format with the filename `<path_to_xml>_input_layers_dump.txt` in the same folder as the IR.
4. To check the overall accuracy and performance of inference on the CPU in FP32 precision against dumped results, run:
```sh
./cross_check_tool -i <path_to_input_image_or_multi_input_file> \
-m <path_to_FP32_xml> \
-d CPU \
-load <path_to_dump> \
-layers all
```
The output looks as follows:
```
InferenceEngine:
API version ............ 1.0
Build .................. ###
[ INFO ] Blob and statistics loading enabled. File /localdisk/models/FP16/icv_squeezenet_v1.0_MYRIAD_FP16_dump.txt
The same IR on both devices: <path_to_FP32_xml>
[ INFO ] No extensions provided
API version ............ 0.1
Build .................. ###
Description ....... ov_intel_vpu_plugin
[ INFO ] Inputs detected: <list_of_input_layers>
[ INFO ] Statistics will be dumped for X layers: <layer_1_name>, <layer_2_name>, ... , <layer_X_name>
[ INFO ] <layer_1_name> layer processing
[ INFO ] Layer <layer_1_name> statistics
Max absolute difference: 0
Min absolute difference: 0
Max relative difference: 0%
Min relative difference: 0%
Blob size: 1000
Devices: MYRIAD_FP16 MYRIAD_FP16_loaded
Status: EXECUTED EXECUTED
Layer type: SoftMax SoftMax
Real time, microsec: 43 43
Execution type: SoftMax SoftMax
Number of NAN: 0 0
Number of INF: 0 0
Number of ZERO: 0 0
...
<list_of_layer_statistics>
...
[ INFO ] Overall max absolute difference 0
[ INFO ] Overall min absolute difference 0 was reached by <layer_1_name> layer
[ INFO ] Overall max relative difference 0%
[ INFO ] Overall min relative difference 0% was reached by <layer_1_name> layer
[ INFO ] Execution successful
```
### Multi-input and dump file experimental format
The text file contains a description of each layer in a structure like this:
* 1<sup>st</sup> line is the layer name (required)
* 2<sup>nd</sup> line is the shape, like "(1,224,224,3)" (required)
* 3<sup>rd</sup> line is the device and precision information, like "CPU_FP32" (optional for multi-input file)
* 4<sup>th</sup> line is the execution status. Options are: EXECUTED, OPTIMIZED_OUT (optional for multi-input file)
* 5<sup>th</sup> line is the layer type (optional for multi-input file)
* 6<sup>th</sup> line is the execution time in microseconds (optional for multi-input file)
* 7<sup>th</sup> line is the execution type (optional for multi-input file)
* 8<sup>th</sup> line is the word "CONTENT", which means that the following line or lines consist of blob elements
* The next line or lines contain the blob elements. They may be separated with one or several spaces, tabs, and new lines.
#### Multi-input file example
```
Input_1
(1,10)
CONTENT
0 0.000628471375 0.00185108185
0.000580787659
0.00137138367
0.000561237335 0.0040473938 0 0 0
Input_2
(1,8)
CONTENT
0 0 0.00194549561 0.0017490387 7.73072243e-05 0.000135779381 0.000186920166 0 7.52806664e-05
```
#### Dump file example
```
Softmax
(1,10)
MYRIAD_FP16
EXECUTED
SoftMax
43
SoftMax
CONTENT
7.44462013e-05
0
0.000810623169
0.000361680984
0
9.14335251e-05
0
0
8.15987587e-05
0
```
### Configuration file
You can pass a configuration file to a plugin by providing the
`-conf` and/or `-ref_conf` keys.
A configuration file is a text file containing key-value pairs.
Structure of the configuration file:
```sh
KEY VALUE
ANOTHER_KEY ANOTHER_VALUE,VALUE_1
```

View File

@ -6,10 +6,12 @@
:maxdepth: 1
:hidden:
openvino_2_0_transition_guide
openvino_docs_IE_DG_Integrate_with_customer_application_new_API
openvino_docs_deployment_optimization_guide_dldt_optimization_guide
openvino_docs_IE_DG_Device_Plugins
Direct ONNX Format Support <openvino_docs_IE_DG_ONNX_Support>
openvino_docs_IE_DG_Paddle_Support
openvino_docs_IE_DG_Int8Inference
openvino_docs_IE_DG_Bfloat16Inference
openvino_docs_IE_DG_DynamicBatching
@ -33,7 +35,7 @@ The scheme below illustrates the typical workflow for deploying a trained deep l
![](img/BASIC_FLOW_IE_C.svg)
\\* _nGraph_ is the internal graph representation in the OpenVINO™ toolkit. Use it to [build a model from source code](https://docs.openvinotoolkit.org/latest/openvino_docs_nGraph_DG_build_function.html).
\\* _nGraph_ is the internal graph representation in the OpenVINO™ toolkit. Use it to [build a model from source code](https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_build_function.html).
## Video

View File

@ -1,5 +1,4 @@
Glossary {#openvino_docs_IE_DG_Glossary}
=======
# Glossary {#openvino_docs_IE_DG_Glossary}
## Acronyms and Abbreviations

View File

@ -96,7 +96,7 @@ The only device that supports this method is [Multi-Device](supported_plugins/MU
<div id="switcher-python" class="switcher-anchor">Python</div>
@endsphinxdirective
This section provides a high-level description of the process of querying different device properties and configuration values. Refer to the [Hello Query Device Python Sample](../../inference-engine/ie_bridges/python/sample/hello_query_device/README.md) sources and the [Multi-Device Plugin documentation](supported_plugins/MULTI.md) for examples of using the Inference Engine Query API in user applications.
This section provides a high-level description of the process of querying different device properties and configuration values. Refer to the [Hello Query Device Python Sample](../../samples/python/hello_query_device/README.md) sources and the [Multi-Device Plugin documentation](supported_plugins/MULTI.md) for examples of using the Inference Engine Query API in user applications.
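As a quick illustration only (not part of the original samples), here is a minimal C++ sketch of querying device properties with `InferenceEngine::Core`; the `FULL_DEVICE_NAME` metric is just one example, and error handling is omitted:

```cpp
#include <inference_engine.hpp>
#include <iostream>
#include <string>

int main() {
    InferenceEngine::Core core;
    // Enumerate the devices visible to the Inference Engine runtime
    for (const std::string& device : core.GetAvailableDevices()) {
        // Query a metric, for example the human-readable device name
        auto fullName = core.GetMetric(device, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
        std::cout << device << ": " << fullName << std::endl;
    }
    return 0;
}
```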
### Using the Inference Engine Query API in Your Code

View File

@ -82,7 +82,7 @@ Optionally, configure input and output of the model using the steps below:
auto network = core.ReadNetwork("model.onnx");
You can find more information about the ONNX format support in the document `ONNX format support in the OpenVINO™ <https://docs.openvinotoolkit.org/latest/openvino_docs_IE_DG_ONNX_Support.html>`_
You can find more information about the ONNX format support in the document `ONNX format support in the OpenVINO™ <https://docs.openvino.ai/latest/openvino_docs_IE_DG_ONNX_Support.html>`_
.. tab:: nGraph
@ -90,7 +90,7 @@ Optionally, configure input and output of the model using the steps below:
std::shared_ptr<Function> createNetwork() {
// To construct a network, please follow
// https://docs.openvinotoolkit.org/latest/openvino_docs_nGraph_DG_build_function.html
// https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_build_function.html
}
auto network = CNNNetwork(createNetwork());
@ -192,7 +192,7 @@ Load the model to the device using `InferenceEngine::Core::LoadNetwork()`:
std::shared_ptr<Function> createNetwork() {
// To construct a network, please follow
// https://docs.openvinotoolkit.org/latest/openvino_docs_nGraph_DG_build_function.html
// https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_build_function.html
}
auto network = CNNNetwork(createNetwork());
executable_network = core.LoadNetwork(network, "CPU");

View File

@ -1,98 +0,0 @@
# Runtime Optimization Guide {#openvino_docs_IE_DG_Intro_to_Performance}
This section is a shorter version of the
[Optimization Guide](../optimization_guide/dldt_optimization_guide.md) for the Intel® Distribution of OpenVINO™ Toolkit.
## Precision
Inference precision directly affects the performance.
Model Optimizer can produce an IR with different precisions. For example, an FP16 IR initially targets VPU and GPU devices, while for the CPU an FP16 IR is typically up-scaled to regular FP32 automatically upon loading. Notice that further device-specific inference precision settings are available,
for example, [8-bit integer](Int8Inference.md) or [bfloat16](Bfloat16Inference.md) (the latter specific to CPU inference), described below.
Note that for the [MULTI device](supported_plugins/MULTI.md) plugin that supports automatic inference on multiple devices in parallel, you can use an FP16 IR (no need for FP32).
You can find more information, including preferred data types for specific devices, in the
[Supported Devices](supported_plugins/Supported_Devices.md) document.
## Automatic Lowering of the Inference Precision
By default, plugins enable the optimizations that allow lower precision if the acceptable range of accuracy is preserved.
For example, for the CPU that supports the AVX512_BF16 instructions, an FP16/FP32 model is converted to a [bfloat16](Bfloat16Inference.md) IR to accelerate inference.
To compare the associated speedup, run the example command below to disable this feature on the CPU device with the AVX512_BF16 support and get regular FP32 execution:
```
$ benchmark_app -m <model.xml> -enforcebf16=false
```
Notice that for quantized (e.g. INT8) models the bfloat16 calculations (of the layers that remain in FP32) are disabled by default.
Refer to the [CPU Plugin documentation](supported_plugins/CPU.md) for more details.
Similarly, the GPU device automatically executes FP16 for the layers that remain in FP16 in the quantized models (assuming that the FP16 model was quantized).
Refer to the ENABLE_FP16_FOR_QUANTIZED_MODELS key in the [GPU Plugin documentation](supported_plugins/GPU.md).
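As a rough API-level counterpart of the `benchmark_app -enforcebf16=false` switch above, the sketch below assumes the CPU plugin's `ENFORCE_BF16` configuration key; the model path is a placeholder:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder path

    // Keep regular FP32 math on an AVX512_BF16-capable CPU by switching
    // the automatic bfloat16 lowering off for this network
    auto exec_net = core.LoadNetwork(network, "CPU",
        {{CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO)}});
    (void)exec_net;
    return 0;
}
```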
## Latency vs. Throughput
One way to increase computational efficiency is batching, which combines many (potentially tens of)
input images to achieve optimal throughput. However, a high batch size also comes with a
latency penalty. So, for more real-time-oriented usages, lower batch sizes (as low as a single input) are used.
Refer to the [Benchmark App](../../samples/cpp/benchmark_app/README.md) sample, which allows measuring latency vs. throughput.
## Using Caching API for first inference latency optimization
Starting with the 2021.4 release, Inference Engine provides the ability to enable internal caching of loaded networks.
This can significantly reduce the network load latency for some devices at application startup.
Internally, caching uses the plugin's Export/ImportNetwork flow, as the [Compile tool](../../tools/compile_tool/README.md) does, while you keep using the regular ReadNetwork/LoadNetwork API.
Refer to the [Model Caching Overview](Model_caching_overview.md) for a more detailed explanation.
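A minimal sketch of enabling the cache from application code, assuming the `CACHE_DIR` configuration key described in the Model Caching Overview; the cache directory and model path are placeholders:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;

    // Point the runtime at a cache directory; devices that support network
    // import/export reuse previously compiled networks from it
    core.SetConfig({{CONFIG_KEY(CACHE_DIR), "model_cache"}});

    // The first run compiles and caches the network,
    // subsequent runs load it from the cache
    auto exec_net = core.LoadNetwork("model.xml", "CPU");
    (void)exec_net;
    return 0;
}
```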
## Using Async API
To gain better performance on accelerators, such as VPU, the Inference Engine uses the asynchronous approach (see
[Integrating Inference Engine in Your Application (current API)](Integrate_with_customer_application_new_API.md)).
The point is to amortize the costs of data transfers by pipelining; see [Async API explained](@ref omz_demos_object_detection_demo_cpp).
Since pipelining relies on the availability of parallel slack, running multiple inference requests in parallel is essential.
Refer to the [Benchmark App](../../samples/cpp/benchmark_app/README.md) sample, which enables running a number of inference requests in parallel. Specifying a different number of requests produces different throughput measurements.
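A minimal sketch of the asynchronous flow with two parallel requests; the model path and device are placeholders, and a real application would also fill the input blobs and read the results:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto exec_net = core.LoadNetwork(core.ReadNetwork("model.xml"), "CPU");

    // Two requests so the device can work on one while the host prepares the other
    InferenceEngine::InferRequest req0 = exec_net.CreateInferRequest();
    InferenceEngine::InferRequest req1 = exec_net.CreateInferRequest();

    req0.StartAsync();  // non-blocking submission
    req1.StartAsync();

    // Block until each request finishes; outputs can then be read from the blobs
    req0.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
    req1.Wait(InferenceEngine::InferRequest::WaitMode::RESULT_READY);
    return 0;
}
```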
## Best Latency on the Multi-Socket CPUs
Note that when latency is of concern, there are additional tips for multi-socket systems.
When the input is limited to a single image, the only way to achieve the best latency is to limit execution to a single socket.
The reason is that a single image is simply not enough
to saturate more than one socket. Also, NUMA overheads might dominate the execution time.
Below is an example command line that limits execution to a single socket using numactl for the best *latency* value
(assuming a machine with 28 physical cores per socket):
```
$ numactl -m 0 --physcpubind 0-27 benchmark_app -m <model.xml> -api sync -nthreads 28
```
Note that if you have more than one input, running as many inference streams as you have NUMA nodes (or sockets)
usually gives the same best latency as a single request on the single socket, but much higher throughput. Assuming a machine with two NUMA nodes:
```
$ benchmark_app -m <model.xml> -nstreams 2
```
The number of NUMA nodes on the machine can be queried via `lscpu`.
For more details on NUMA support, see the [Optimization Guide](../optimization_guide/dldt_optimization_guide.md).
## Throughput Mode for CPU
Unlike most accelerators, the CPU is perceived as an inherently latency-oriented device.
OpenVINO™ toolkit provides a "throughput" mode that allows running multiple inference requests on the CPU simultaneously, which greatly improves the throughput.
Internally, the execution resources are split/pinned into execution "streams".
Using this feature delivers much better performance for networks that do not scale well with the number of threads (for example, lightweight topologies). This is especially pronounced on many-core server machines.
Run the [Benchmark App](../../samples/cpp/benchmark_app/README.md) and play with the number of infer requests running in parallel, as described in the next section.
Try different values of the `-nstreams` argument from `1` to the number of CPU cores and find the one that provides the best performance.
The throughput mode relaxes the requirement to saturate the CPU by using a large batch: running multiple independent inference requests in parallel often gives much better performance than using a batch only.
This allows you to simplify the application logic, as you don't need to combine multiple inputs into a batch to achieve good CPU performance.
Instead, it is possible to keep a separate infer request per camera or other input source and process the requests in parallel using the Async API.
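A minimal sketch of requesting CPU streams from application code, assuming the CPU plugin's `CPU_THROUGHPUT_STREAMS` configuration key; the model path is a placeholder:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder path

    // Let the plugin pick the number of streams for this machine; replace
    // CPU_THROUGHPUT_AUTO with an explicit number such as "4" to experiment
    auto exec_net = core.LoadNetwork(network, "CPU",
        {{CONFIG_KEY(CPU_THROUGHPUT_STREAMS), CONFIG_VALUE(CPU_THROUGHPUT_AUTO)}});
    (void)exec_net;
    return 0;
}
```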
## Benchmark App
[Benchmark App](../../samples/cpp/benchmark_app/README.md) sample is the best performance reference.
It has a lot of device-specific knobs, but the primary usage is as simple as:
```bash
$ ./benchmark_app -d GPU -m <model> -i <input>
```
to measure the performance of the model on the GPU.
Or
```bash
$ ./benchmark_app -d CPU -m <model> -i <input>
```
to execute on the CPU instead.
For example, for the CPU throughput mode from the previous section, you can play with the number of streams (the `-nstreams` command-line parameter).
Try different values of the `-nstreams` argument from `1` to the number of CPU cores and find the one that provides the best performance. For example, on an 8-core CPU, compare `-nstreams 1` (which is a latency-oriented scenario) to `2`, `4` and `8` streams. Notice that `benchmark_app` automatically queries/creates/runs the number of requests required to saturate the given number of streams.
Finally, notice that when you don't specify the number of streams with `-nstreams`, the "AUTO" value for the streams is used, e.g. for the CPU this is [CPU_THROUGHPUT_AUTO](supported_plugins/CPU.md). You can spot the actual value behind "AUTO" for your machine in the application output.
Notice that the "AUTO" number is not necessarily the most optimal, so it is generally recommended to play either with benchmark_app's `-nstreams` as described above, or via the [Workbench tool](@ref workbench_docs_Workbench_DG_Introduction).

View File

@ -1,125 +0,0 @@
# Introduction to Intel® Deep Learning Deployment Toolkit {#openvino_docs_IE_DG_Introduction}
## Deployment Challenges
Deploying deep learning networks from the training environment to embedded platforms for inference
might be a complex task that introduces a number of technical challenges that must be addressed:
* There are a number of deep learning frameworks widely used in the industry, such as Caffe*, TensorFlow*, MXNet*, Kaldi* etc.
* Typically the training of the deep learning networks is performed in data centers or server farms while the inference
might take place on embedded platforms, optimized for performance and power consumption. Such platforms are typically
limited both from software perspective (programming languages, third party dependencies, memory consumption,
supported operating systems), and from hardware perspective (different data types, limited power envelope),
so usually it is not recommended (and sometimes just impossible) to use original training framework for inference.
An alternative solution would be to use dedicated inference APIs that are well optimized for specific hardware platforms.
* Additional complications of the deployment process include supporting various layer types and networks that are getting
more and more complex. Obviously, ensuring the accuracy of the transformed networks is not trivial.
## Deployment Workflow
The process assumes that you have a network model trained using one of the [supported frameworks](#SupportedFW).
The scheme below illustrates the typical workflow for deploying a trained deep learning model:
![scheme]
The steps are:
1. [Configure Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) for the specific framework (used to train your model).
2. Run [Model Optimizer](#MO) to produce an optimized [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md)
of the model based on the trained network topology, weights and biases values, and other optional parameters.
3. Test the model in the IR format using the [Inference Engine](#IE) in the target environment with provided
[Inference Engine sample applications](Samples_Overview.md).
4. [Integrate Inference Engine](Integrate_with_customer_application_new_API.md) in your application to deploy the model in the target environment.
## Model Optimizer <a name = "MO"></a>
Model Optimizer is a cross-platform command line tool that facilitates the transition between the training and
deployment environment, performs static model analysis and automatically adjusts deep learning
models for optimal execution on end-point target devices.
Model Optimizer is designed to support multiple deep learning [frameworks and formats](#SupportedFW).
When running Model Optimizer, you do not need to consider which target device you wish to use: the same Model Optimizer output can be used on all targets.
### Model Optimizer Workflow
The process assumes that you have a network model trained using one of the [supported frameworks](#SupportedFW).
The Model Optimizer workflow can be described as follows:
* [Configure Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) for the supported deep learning framework that was used to train the model.
* Provide as input a trained network that contains a certain network topology, and the adjusted weights and
biases (with some optional parameters).
* [Run Model Optimizer](../MO_DG/prepare_model/convert_model/Converting_Model.md) to perform specific model optimizations (for example, horizontal fusion of certain network layers). Exact optimizations
are framework-specific, refer to appropriate documentation pages: [Converting a Caffe Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md),
[Converting a TensorFlow Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md), [Converting a MXNet Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md), [Converting a Kaldi Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md),
[Converting an ONNX Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md).
* Model Optimizer produces as output an [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md) of the network which is used as an input for the Inference Engine on all targets.
### Supported Frameworks and Formats <a name = "SupportedFW"></a>
* Caffe* (most public branches)
* TensorFlow*
* MXNet*
* Kaldi*
* ONNX*
### Supported Models
For the list of supported models refer to the framework or format specific page:
* [Supported Caffe* models](../MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md)
* [Supported TensorFlow* models](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md)
* [Supported MXNet* models](../MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md)
* [Supported Kaldi* models](../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md)
* [Supported ONNX* models](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md)
## Intermediate Representation
The Intermediate Representation (IR) describing a deep learning model plays an important role in connecting the OpenVINO&trade; toolkit components.
The IR is a pair of files:
* `.xml`: The topology file - an XML file that describes the network topology
* `.bin`: The trained data file - a .bin file that contains the weights and biases binary data
Intermediate Representation (IR) files can be read, loaded and inferred with the [Inference Engine](#IE).
The Inference Engine offers a unified API across a number of [supported Intel® platforms](#SupportedTargets).
The IR is also consumed, modified and written by the Post-Training Optimization Tool, which provides quantization capabilities.
Refer to a dedicated description about [Intermediate Representation and Operation Sets](../MO_DG/IR_and_opsets.md) for further details.
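As an illustration of how an IR is consumed, here is a minimal sketch of reading and running it with the Inference Engine API; the model path and device are placeholders, and input/output handling is omitted:

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;

    // Read the IR pair; the .bin file with the weights is found automatically
    // when it has the same name and sits next to the .xml file
    InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");

    // Compile the network for a target device and run a single synchronous inference
    auto exec_net = core.LoadNetwork(network, "CPU");
    auto request = exec_net.CreateInferRequest();
    request.Infer();
    return 0;
}
```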
## nGraph Integration
The OpenVINO toolkit is powered by nGraph capabilities: the graph construction API, the graph transformation engine, and Reshape.
nGraph Function is used as the intermediate representation of a model in the runtime, underneath the CNNNetwork API.
The conventional CNNNetwork representation is still available on request, for backward compatibility, when some conventional API methods are used.
Refer to the [Overview of nGraph](../nGraph_DG/nGraph_dg.md) for the details of the nGraph representation.
## Inference Engine <a name = "IE"></a>
Inference Engine is a runtime that delivers a unified API to integrate the inference with application logic:
* Takes a model as an input. The model can be presented in [the native ONNX format](./ONNX_Support.md) or in the specific form of [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md)
produced by Model Optimizer.
* Optimizes inference execution for target hardware.
* Delivers inference solution with reduced footprint on embedded inference platforms.
The Inference Engine supports inference of multiple image classification networks,
including AlexNet, GoogLeNet, VGG and ResNet families of networks, fully convolutional networks like FCN8 used for image
segmentation, and object detection networks like Faster R-CNN.
For the full list of supported hardware, refer to the
[Supported Devices](supported_plugins/Supported_Devices.md) section.
For Intel® Distribution of OpenVINO™ toolkit, the Inference Engine package contains [headers](files.html), runtime libraries, and
[sample console applications](Samples_Overview.md) demonstrating how you can use
the Inference Engine in your applications.
The open source version is available in the [OpenVINO™ toolkit GitHub repository](https://github.com/openvinotoolkit/openvino) and can be built for supported platforms using the <a href="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode">Inference Engine Build Instructions</a>.
## See Also
- [Inference Engine Samples](Samples_Overview.md)
- [Intel&reg; Deep Learning Deployment Toolkit Web Page](https://software.intel.com/en-us/computer-vision-sdk)
[scheme]: img/workflow_steps.png

View File

@ -1,446 +0,0 @@
# Model Downloader {#openvino_docs_IE_DG_Tools_Model_Downloader}
This directory contains scripts that automate certain model-related tasks
based on configuration files in the models' directories.
* Model Downloader: `downloader.py` downloads model files from online sources
and, if necessary, patches them to make them more usable with Model
Optimizer;
* Model Converter: `converter.py` converts the models that are not in the
Inference Engine IR format into that format using Model Optimizer.
* Model Quantizer: `quantizer.py` quantizes full-precision models in the IR
format into low-precision versions using Post-Training Optimization Toolkit.
* Model Information Dumper: `info_dumper.py` prints information about the models
in a stable machine-readable format.
> **TIP**: You can get started quickly with the Model Downloader inside the OpenVINO™ Deep Learning Workbench (DL Workbench). DL Workbench is the OpenVINO™ toolkit UI that enables you to import a model, analyze its performance and accuracy, visualize the outputs, optimize and prepare the model for deployment on various Intel® platforms.
## Prerequisites
1. Install Python (version 3.6 or higher)
2. Install the tools' dependencies with the following command:
```sh
python3 -mpip install --user -r ./requirements.in
```
For the model converter, you will also need to install the OpenVINO&trade;
toolkit and the prerequisite libraries for Model Optimizer. See the
[OpenVINO toolkit documentation](https://docs.openvinotoolkit.org/) for details.
To convert models from certain frameworks, you will also need to install
additional dependencies.
@sphinxdirective
.. tab:: Caffe2
.. code-block:: python
python3 -mpip install --user -r ./requirements-caffe2.in
.. tab:: Pytorch
.. code-block:: python
python3 -mpip install --user -r ./requirements-pytorch.in
.. tab:: TensorFlow
.. code-block:: python
python3 -mpip install --user -r ./requirements-tensorflow.in
@endsphinxdirective
## Model Downloader
The basic usage is to run the script like this:
```sh
./downloader.py --all
```
This will download all models. The `--all` option can be replaced with
other filter options to download only a subset of models. See the "Shared options"
section.
### Model Downloader Starting Parameters
@sphinxdirective
+---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
| Parameter | Explanation | Example |
+===========================+==================================================================================================================================================================================================================================================================================================================================================================================================+=====================================================================================+
| ``-o``/``--output_dir`` | By default, the script will download models into a directory tree rooted in the current directory. Use this parameter to download into a different directory. | ``./downloader.py --all --output_dir my/download/directory`` |
+---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
| ``--precisions`` | Specify comma separated precisions of weights to be downloaded | ``./downloader.py --name face-detection-retail-0004 --precisions FP16,FP16-INT8`` |
+---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
| ``--num_attempts`` | By default, the script will attempt to download each file only once. Use this parameter to change that and increase the robustness of the download process | ``./downloader.py --all --num_attempts 5 # attempt each download five times`` |
+---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
| ``--cache_dir`` | Make the script use the specified directory as a cache. The script will place a copy of each downloaded file in the cache, or, if it is already there, retrieve it from the cache instead of downloading it again. The cache format is intended to remain compatible in future Open Model Zoo versions, so you can use a cache to avoid redownloading most files when updating Open Model Zoo. | ``./downloader.py --all --cache_dir my/cache/directory`` |
+---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
| ``-j``/``--jobs`` | The script downloads files for multiple models concurrently. | ``./downloader.py --all -j8 # download up to 8 models at a time`` |
+---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
| ``--progress_format`` | By default, the script outputs progress information as unstructured, human-readable text. Use this option, if you want to consume progress information programmatically. | ``./downloader.py --all --progress_format=json`` |
+---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
@endsphinxdirective
When this option is set to `json`, the script's standard output is replaced by
a machine-readable progress report, whose format is documented in the
"JSON progress report format" section. This option does not affect errors and
warnings, which will still be printed to the standard error stream in a
human-readable format.
You can also set this option to `text` to explicitly request the default text
format.
See the "Shared options" section for information on other options accepted by
the script.
### JSON progress report format
This section documents the format of the progress report produced by the script
when the `--progress_format=json` option is specified.
The report consists of a sequence of events, where each event is represented
by a line containing a JSON-encoded object. Each event has a member with the
name `$type` whose value determines the type of the event, as well as which
additional members it contains.
@sphinxdirective
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Event type | Additional members | Explanation |
+====================================+=========================================================================+================================================================================================================================================================================================================================================================================================================================================+
| ``model_download_begin`` | ``model`` (string), ``num_files`` (integer) | The script started downloading the model named by ``model``. ``num_files`` is the number of files that will be downloaded for this model. This event will always be followed by a corresponding ``model_download_end`` event. |
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``model_download_end`` | ``model`` (string), ``successful`` (boolean) | The script stopped downloading the model named by ``model``. ``successful`` is true if every file was downloaded successfully. |
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``model_file_download_begin`` | ``model`` (string), ``model_file`` (string), ``size`` (integer) | The script started downloading the file named by ``model_file`` of the model named by ``model``. ``size`` is the size of the file in bytes. This event will always occur between ``model_download_begin`` and ``model_download_end`` events for the model, and will always be followed by a corresponding ``model_file_download_end`` event. |
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``model_file_download_end`` | ``model`` (string), ``model_file`` (string), ``successful`` (boolean) | The script stopped downloading the file named by ``model_file`` of the model named by ``model``. ``successful`` is true if the file was downloaded successfully. |
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``model_file_download_progress`` | ``model`` (string), ``model_file`` (string), ``size`` (integer) | The script downloaded ``size`` bytes of the file named by ``model_file`` of the model named by ``model`` so far. Note that ``size`` can decrease in a subsequent event if the download is interrupted and retried. This event will always occur between ``model_file_download_begin`` and ``model_file_download_end`` events for the file. |
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``model_postprocessing_begin`` | ``model`` | The script started post-download processing on the model named by ``model``. This event will always be followed by a corresponding ``model_postprocessing_end`` event. |
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``model_postprocessing_end`` | ``model`` | The script stopped post-download processing on the model named by ``model``. |
+------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
@endsphinxdirective
Additional event types and members may be added in the future.
Tools parsing the machine-readable format should avoid relying on undocumented details.
In particular:
* Tools should not assume that any given event will occur for a given model/file
(unless specified otherwise above) or will only occur once.
* Tools should not assume that events will occur in a certain order beyond
the ordering constraints specified above. In particular, when the `--jobs` option
is set to a value greater than 1, event sequences for different files or models
may get interleaved.
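For illustration, here is a minimal sketch of consuming this event stream from Python. It assumes the downloader is invoked with the JSON progress format and that each event arrives as a single JSON object per line on standard output; the model name and the exact flag spelling are examples only.
```py
import json
import subprocess

# Hypothetical invocation: stream the downloader's machine-readable progress events
# and report per-file progress plus the final download status.
cmd = ["./downloader.py", "--name", "face-detection-retail-0004", "--progress_format=json"]
with subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) as proc:
    for line in proc.stdout:
        event = json.loads(line)
        if event["$type"] == "model_file_download_progress":
            print(f'{event["model_file"]}: {event["size"]} bytes downloaded so far')
        elif event["$type"] == "model_download_end":
            print(f'{event["model"]}: {"ok" if event["successful"] else "failed"}')
```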
## Model Converter
The basic usage is to run the script like this:
```sh
./converter.py --all
```
This will convert all models into the Inference Engine IR format. Models that
were originally in that format are ignored. Models in PyTorch and Caffe2 formats will be
converted to ONNX format first.
The `--all` option can be replaced with other filter options to convert only
a subset of models. See the "Shared options" section.
### Model Converter Starting Parameters
@sphinxdirective
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| Parameter | Explanation | Example |
+=============================+==================================================================================================================================================================================================================================================================+==================================================================================================+
| ``-d``/``--download_dir`` | The current directory must be the root of a download tree created by the model downloader. Use this parameter to specify a different download tree path. | ``./converter.py --all --download_dir my/download/directory`` |
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| ``-o``/``--output_dir``     | By default, the script will place converted models into a directory tree rooted in the current directory. Use this parameter to place them into a different directory. Note: models in intermediate format are placed in this directory too.                            | ``./converter.py --all --output_dir my/output/directory``                                        |
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| ``--precisions`` | By default, the script will produce models in every precision that is supported for conversion. Use this parameter to only produce models in a specific precision. If the specified precision is not supported for a model, that model will be skipped. | ``./converter.py --all --precisions=FP16`` |
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| ``--add_mo_arg`` | Add extra Model Optimizer arguments to the ones specified in the model configuration. The option can be repeated to add multiple arguments | ``./converter.py --name=caffenet --add_mo_arg=--reverse_input_channels --add_mo_arg=--silent`` |
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| ``-j``/``--jobs`` | Run multiple conversion commands concurrently. The argument to the option must be either a maximum number of concurrently executed commands, or "auto", in which case the number of CPUs in the system is used. By default, all commands are run sequentially. | ``./converter.py --all -j8 # run up to 8 commands at a time`` |
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| ``--dry_run``               | Print the conversion commands without actually running them.                                                                                                                                                                                                    | ``./converter.py --all --dry_run``                                                                 |
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
| ``-p``/``--python`` | By default, the script will run Model Optimizer using the same Python executable that was used to run the script itself. Apply this parameter to use a different Python executable. | ``./converter.py --all --python my/python`` |
+-----------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
@endsphinxdirective
The Python script will attempt to locate Model Optimizer using several methods:
1. If the `--mo` option was specified, then its value will be used as the path
to the script to run:
```sh
./converter.py --all --mo my/openvino/path/model_optimizer/mo.py
```
2. Otherwise, if the selected Python executable can import the `mo` package,
then that package will be used.
3. Otherwise, if the OpenVINO&trade; toolkit's `setupvars.sh`/`setupvars.bat`
script has been executed, the environment variables set by that script will
be used to locate Model Optimizer within the toolkit.
4. Otherwise, the script will fail.
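As a quick check for step 2 above, you can verify whether the Python executable you intend to use can import the `mo` package. This is only a diagnostic sketch, not part of the converter itself:
```py
import importlib.util

# Check whether the "mo" package is importable by this Python interpreter.
if importlib.util.find_spec("mo") is not None:
    print("The 'mo' package is importable; converter.py can use it to run Model Optimizer")
else:
    print("The 'mo' package is not importable; pass --mo explicitly or source setupvars first")
```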
See the "Shared options" section for information on other options accepted by
the script.
## Model Quantizer
Before you run the model quantizer, you must prepare a directory with
the datasets required for the quantization process. This directory will be
referred to as `<DATASET_DIR>` below. You can find more detailed information
about dataset preparation in the [Dataset Preparation Guide](../../data/datasets.md).
The basic usage is to run the script like this:
```sh
./quantizer.py --all --dataset_dir <DATASET_DIR>
```
This will quantize all models for which quantization is supported. Other models
are ignored.
The `--all` option can be replaced with other filter options to quantize only
a subset of models. See the "Shared options" section.
### Model Quantizer Starting Parameters
@sphinxdirective
+---------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+
| Parameter | Explanation | Example |
+===========================+=====================================================================================================================================================================================================================================================================================================================+=========================================================================================+
| ``--model_dir``           | The current directory must be the root of a tree of model files created by the model converter. Use this parameter to specify a different model tree path.                                                                                                                                                         | ``./quantizer.py --all --dataset_dir <DATASET_DIR> --model_dir my/model/directory``      |
+---------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+
| ``-o``/``--output_dir``   | By default, the script will place quantized models into a directory tree rooted in the current directory. Use this parameter to place them into a different directory. Note: models in intermediate format are placed in this directory too.                                                                       | ``./quantizer.py --all --dataset_dir <DATASET_DIR> --output_dir my/output/directory``    |
+---------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+
| ``--precisions`` | By default, the script will produce models in every precision that is supported as a quantization output. Use this parameter to only produce models in a specific precision. | ``./quantizer.py --all --dataset_dir <DATASET_DIR> --precisions=FP16-INT8`` |
+---------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+
| ``--target_device``       | It's possible to specify a target device for Post-Training Optimization Toolkit to optimize for. The supported values are those accepted by the "target\_device" option in Post-Training Optimization Toolkit's config files. If this option is unspecified, Post-Training Optimization Toolkit's default is used. | ``./quantizer.py --all --dataset_dir <DATASET_DIR> --target_device VPU``                 |
+---------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+
| ``--dry_run`` | The script can print the quantization commands without actually running them. With this option specified, the configuration file for Post-Training Optimization Toolkit will still be created, so that you can inspect it. | ``./quantizer.py --all --dataset_dir <DATASET_DIR> --dry_run`` |
+---------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+
| ``-p``/``--python``       | By default, the script will run Post-Training Optimization Toolkit using the same Python executable that was used to run the script itself. Apply this parameter to use a different Python executable.                                                                                             | ``./quantizer.py --all --dataset_dir <DATASET_DIR> --python my/python``                  |
+---------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------+
@endsphinxdirective
The script will attempt to locate Post-Training Optimization Toolkit using several methods:
1. If the `--pot` option was specified, then its value will be used as the path
to the script to run:
```sh
./quantizer.py --all --dataset_dir <DATASET_DIR> --pot my/openvino/path/post_training_optimization_toolkit/main.py
```
2. Otherwise, if the selected Python executable can import the `pot` package,
then that package will be used.
3. Otherwise, if the OpenVINO&trade; toolkit's `setupvars.sh`/`setupvars.bat`
script has been executed, the environment variables set by that script will
be used to locate Post-Training Optimization Toolkit within the OpenVINO toolkit.
4. Otherwise, the script will fail.
See the "Shared options" section for information on other options accepted by
the script.
## Model Information Dumper
The basic usage is to run the script like this:
```sh
./info_dumper.py --all
```
This will print to standard output information about all models.
The only options accepted by the script are those described in the "Shared options"
section.
The script's output is a JSON array, each element of which is a JSON object
describing a single model. Each such object has the following keys:
@sphinxdirective
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Parameter | Explanation |
+======================================+=====================================================================================================================================================================================================================================================================================+
| ``name`` | the identifier of the model, as accepted by the ``--name`` option. |
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``composite_model_name`` | the identifier of the composite model name, if the model is a part of composition of several models (e.g. encoder-decoder), otherwise - ``null`` |
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``description`` | text describing the model. Paragraphs are separated by line feed characters. |
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``framework`` | a string identifying the framework whose format the model is downloaded in. Current possible values are ``dldt`` (Inference Engine IR), ``caffe``, ``caffe2``, ``mxnet``, ``onnx``, ``pytorch`` and ``tf`` (TensorFlow). Additional possible values might be added in the future. |
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``license_url`` | a URL for the license that the model is distributed under. |
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``quantization_output_precisions`` | the list of precisions that the model can be quantized to by the model quantizer. Current possible values are ``FP16-INT8`` and ``FP32-INT8``; additional possible values might be added in the future. |
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| ``subdirectory`` | the subdirectory of the output tree into which the downloaded or converted files will be placed by the downloader or the converter, respectively. |
+--------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
@endsphinxdirective
* `precisions`: the list of precisions that the model has IR files for. For models downloaded
in a format other than the Inference Engine IR format, these are the precisions that the model
converter can produce IR files in. Current possible values are:
* `FP16`
* `FP16-INT1`
* `FP16-INT8`
* `FP32`
* `FP32-INT1`
* `FP32-INT8`
Additional possible values might be added in the future.
* `task_type`: a string identifying the type of task that the model performs. The current possible values are listed below:
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="Current possible values">
@endsphinxdirective
* `action_recognition`
* `classification`
* `colorization`
* `detection`
* `face_recognition`
* `feature_extraction`
* `head_pose_estimation`
* `human_pose_estimation`
* `image_inpainting`
* `image_processing`
* `image_translation`
* `instance_segmentation`
* `machine_translation`
* `monocular_depth_estimation`
* `named_entity_recognition`
* `noise_suppression`
* `object_attributes`
* `optical_character_recognition`
* `place_recognition`
* `question_answering`
* `salient_object_detection`
* `semantic_segmentation`
* `sound_classification`
* `speech_recognition`
* `style_transfer`
* `text_to_speech`
* `time_series`
* `token_recognition`
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
Additional possible values might be added in the future.
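Because the output is plain JSON, it is easy to post-process. Below is a minimal sketch that lists, per task type, the models that can be quantized to FP16-INT8; the script path, the precision filter, and the use of `subprocess` are illustrative only.
```py
import json
import subprocess

# Hypothetical usage: dump information about all models and filter it in Python.
raw = subprocess.run(["./info_dumper.py", "--all"],
                     capture_output=True, universal_newlines=True, check=True).stdout
for model in json.loads(raw):
    if "FP16-INT8" in model["quantization_output_precisions"]:
        print(f'{model["task_type"]}: {model["name"]} ({model["framework"]})')
```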
## Shared options
There are certain options that all tools accept.
`-h`/`--help` can be used to print a help message:
```sh
./TOOL.py --help
```
There are several mutually exclusive filter options that select the models the
tool will process:
@sphinxdirective
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| Parameter | Explanation | Example |
+==============+===================================================================================================================================================================================================================================================================================+===========================================+
| ``--all`` | Selects all models | ``./TOOL.py --all`` |
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
| ``--name`` | takes a comma-separated list of patterns and selects models that match at least one of these patterns. The patterns may contain shell-style wildcards. For composite models, the name of composite model is accepted, as well as the names of individual models it consists of. | ``./TOOL.py --name 'mtcnn,densenet-*'`` |
+--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------+
@endsphinxdirective
See https://docs.python.org/3/library/fnmatch.html for a full description of
the pattern syntax.
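Since the wildcards follow the `fnmatch` rules, you can preview what a pattern would select; the model names below are examples only.
```py
import fnmatch

# Preview which of these example model names a pattern would match.
names = ["densenet-121", "densenet-161", "mtcnn-o", "mtcnn-p", "face-detection-retail-0004"]
print(fnmatch.filter(names, "densenet-*"))  # ['densenet-121', 'densenet-161']
```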
`--list` takes a path to a file that must contain a list of patterns and
selects models that match at least one of those patterns.
For composite models, the name of the composite model is accepted, as well as the names
of the individual models it consists of.
```sh
./TOOL.py --list my.lst
```
The file must contain one pattern per line. The pattern syntax is the same
as for the `--name` option. Blank lines and comments starting with `#` are
ignored. For example:
```
mtcnn # get all three models: mtcnn-o, mtcnn-p, mtcnn-r
densenet-* # get all DenseNet variants
```
To see the available models, you can use the `--print_all` option. When this
option is specified, the tool will print all model names defined in the
configuration file and exit:
```
$ ./TOOL.py --print_all
action-recognition-0001-decoder
action-recognition-0001-encoder
age-gender-recognition-retail-0013
driver-action-recognition-adas-0002-decoder
driver-action-recognition-adas-0002-encoder
emotions-recognition-retail-0003
face-detection-adas-0001
face-detection-retail-0004
face-detection-retail-0005
[...]
```
Either `--print_all` or one of the filter options must be specified.

View File

@ -82,7 +82,7 @@ As described in Inference Engine Developer Guide, a common application flow cons
6. **Set input data**
7. **Execute the model** - Run inference
Step #5 can potentially perform several time-consuming device-specific optimizations and network compilations, and such delays can lead to bad user experience on application startup. To avoid this, some devices offer Import/Export network capability, and it is possible to either use the [Compile tool](../../inference-engine/tools/compile_tool/README.md) or enable model caching to export the compiled network automatically. Reusing cached networks can significantly reduce load network time.
Step #5 can potentially perform several time-consuming device-specific optimizations and network compilations, and such delays can lead to bad user experience on application startup. To avoid this, some devices offer Import/Export network capability, and it is possible to either use the [Compile tool](../../tools/compile_tool/README.md) or enable model caching to export the compiled network automatically. Reusing cached networks can significantly reduce load network time.
### Set the “CACHE_DIR” config option to enable model caching
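As a rough illustration of this flow from the Python API, the sketch below sets a cache directory before loading a network. The device name, cache path, and use of `IECore.set_config` are assumptions here; the device must actually support the Import/Export capability for caching to take effect.
```py
from openvino.inference_engine import IECore

ie = IECore()
# Point the device at a cache directory; compiled networks are stored there
# and reused on subsequent runs instead of being recompiled.
ie.set_config({"CACHE_DIR": "./model_cache"}, "GPU")
net = ie.read_network(model="model.xml")
exec_net = ie.load_network(network=net, device_name="GPU")
```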

View File

@ -38,6 +38,7 @@
openvino_docs_ops_arithmetic_Cosh_1
openvino_docs_ops_sequence_CTCLoss_4
openvino_docs_ops_arithmetic_CumSum_3
openvino_docs_ops_convolution_DeformableConvolution_1
openvino_docs_ops_convolution_DeformableConvolution_8
openvino_docs_ops_detection_DeformablePSROIPooling_1
openvino_docs_ops_movement_DepthToSpace_1
@ -84,7 +85,7 @@
openvino_docs_ops_image_I420toBGR_8
openvino_docs_ops_image_I420toRGB_8
openvino_docs_ops_signals_IDFT_7
openvino_docs_ops_condition_If_8
openvino_docs_ops_infrastructure_If_8
openvino_docs_ops_image_Interpolate_1
openvino_docs_ops_image_Interpolate_4
openvino_docs_ops_normalization_LRN_1
@ -102,8 +103,9 @@
openvino_docs_ops_normalization_MVN_1
openvino_docs_ops_normalization_MVN_6
openvino_docs_ops_matrix_MatMul_1
openvino_docs_ops_sort_MatrixNonMaxSuppression_8
openvino_docs_ops_sort_MatrixNms_8
openvino_docs_ops_pooling_MaxPool_1
openvino_docs_ops_pooling_MaxPool_8
openvino_docs_ops_arithmetic_Maximum_1
openvino_docs_ops_arithmetic_Minimum_1
openvino_docs_ops_activation_Mish_4
@ -169,6 +171,7 @@
openvino_docs_ops_arithmetic_Sign_1
openvino_docs_ops_arithmetic_Sin_1
openvino_docs_ops_arithmetic_Sinh_1
openvino_docs_ops_movement_Slice_8
openvino_docs_ops_activation_SoftMax_1
openvino_docs_ops_activation_SoftMax_8
openvino_docs_ops_activation_SoftPlus_4

View File

@ -1,5 +1,4 @@
OpenVINO™ Python* package {#openvino_docs_IE_DG_PythonPackage_Overview}
========================
# OpenVINO™ Python* Package
The OpenVINO™ Python\* package includes types used to measure models and calibrate them to low precision.
@ -12,4 +11,4 @@ The OpenVINO™ Python\* package includes the following sub-packages:
- `openvino.tools.benchmark` - Measure latency and throughput.
## See Also
* [Introduction to Inference Engine](inference_engine_intro.md)
* [Integrate with Customer Application New API](Integrate_with_customer_application_new_API.md)

View File

@ -19,13 +19,8 @@
openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README
openvino_inference_engine_samples_ngraph_function_creation_sample_README
openvino_inference_engine_ie_bridges_python_sample_ngraph_function_creation_sample_README
openvino_inference_engine_samples_object_detection_sample_ssd_README
openvino_inference_engine_ie_bridges_python_sample_object_detection_sample_ssd_README
openvino_inference_engine_ie_bridges_c_samples_object_detection_sample_ssd_README
openvino_inference_engine_samples_speech_sample_README
openvino_inference_engine_ie_bridges_python_sample_speech_sample_README
openvino_inference_engine_samples_style_transfer_sample_README
openvino_inference_engine_ie_bridges_python_sample_style_transfer_sample_README
openvino_inference_engine_samples_benchmark_app_README
openvino_inference_engine_tools_benchmark_tool_README

View File

@ -167,7 +167,7 @@ To feed input data of a shape that is different from the model input shape, resh
Once the input shape of IENetwork is set, call the `IECore.load_network` method to get an ExecutableNetwork object for inference with updated shapes.
There are other approaches to reshape the model during the stage of IR generation or [nGraph function](https://docs.openvinotoolkit.org/latest/openvino_docs_nGraph_DG_PythonAPI.html#create_an_ngraph_function_from_a_graph) creation.
There are other approaches to reshape the model during the stage of IR generation or [nGraph function](https://docs.openvino.ai/latest/openvino_docs_nGraph_DG_PythonAPI.html#create_an_ngraph_function_from_a_graph) creation.
Practically, some models are not ready to be reshaped. In this case, a new input shape cannot be set with the Model Optimizer or the `IENetwork.reshape` method.
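For models that do support reshaping, a minimal sketch of the reshape-then-load flow might look like this; the input name and target shape are illustrative.
```py
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml")
input_name = next(iter(net.input_info))
# Set the new input shape before loading the network to the device.
net.reshape({input_name: (1, 3, 544, 544)})
exec_net = ie.load_network(network=net, device_name="CPU")
```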

View File

@ -1,17 +0,0 @@
# OpenVINO™ Tools {#openvino_docs_IE_DG_Tools_Overview}
OpenVINO™ tools are C++ and Python\* console command-line applications that can be used for model downloading, accuracy measurement, calibration, and checking.
The OpenVINO™ toolkit installation includes the following tools:
|Tool | Location in the Installation Directory|
|-----------------------------------------------------------------------------|---------------------------------------|
|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `<INSTALL_DIR>/tools/accuracy_checker`|
|[Post-Training Optimization Tool](@ref pot_README) | `<INSTALL_DIR>/tools/post_training_optimization_toolkit`|
|[Model Downloader](@ref omz_tools_downloader) | `<INSTALL_DIR>/extras/open_model_zoo/tools/downloader`|
|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `<INSTALL_DIR>/tools/cross_check_tool`|
|[Compile Tool](../../tools/compile_tool/README.md) | `<INSTALL_DIR>/tools/compile_tool`|
## See Also
* [Introduction to Inference Engine](inference_engine_intro.md)

File diff suppressed because one or more lines are too long


View File

@ -1,123 +0,0 @@
# Introduction to Inference Engine {#openvino_docs_IE_DG_inference_engine_intro}
> **NOTE**: [Intel® System Studio](https://software.intel.com/en-us/system-studio) is an all-in-one, cross-platform tool suite, purpose-built to simplify system bring-up and improve system and IoT device application performance on Intel® platforms. If you are using the Intel® Distribution of OpenVINO™ with Intel® System Studio, go to [Get Started with Intel® System Studio](https://software.intel.com/en-us/articles/get-started-with-openvino-and-intel-system-studio-2019).
This Guide provides an overview of the Inference Engine describing the typical workflow for performing
inference of a pre-trained and optimized deep learning model and a set of sample applications.
> **NOTE**: Before you perform inference with the Inference Engine, your models should be converted to the Inference Engine format using the Model Optimizer or built directly in run-time using nGraph API. To learn about how to use Model Optimizer, refer to the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). To learn about the pre-trained and optimized models delivered with the OpenVINO™ toolkit, refer to [Pre-Trained Models](@ref omz_models_group_intel).
After you have used the Model Optimizer to create an Intermediate Representation (IR), use the Inference Engine to infer the result for a given input data.
Inference Engine is a set of C++ libraries providing a common API to deliver inference solutions on the platform of your choice: CPU, GPU, or VPU. Use the Inference Engine API to read the Intermediate Representation, set the input and output formats, and execute the model on devices. While the C++ libraries are the primary implementation, C libraries and Python bindings are also available.
For Intel® Distribution of OpenVINO™ toolkit, Inference Engine binaries are delivered within release packages.
The open source version is available in the [OpenVINO™ toolkit GitHub repository](https://github.com/openvinotoolkit/openvino) and can be built for supported platforms using the <a href="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode">Inference Engine Build Instructions</a>.
To learn about how to use the Inference Engine API for your application, see the [Integrating Inference Engine in Your Application](Integrate_with_customer_application_new_API.md) documentation.
For complete API Reference, see the [Inference Engine API References](./api_references.html) section.
Inference Engine uses a plugin architecture. Inference Engine plugin is a software component that contains complete implementation for inference on a certain Intel&reg; hardware device: CPU, GPU, VPU, etc. Each plugin implements the unified API and provides additional hardware-specific APIs.
Modules in the Inference Engine component
-----------------------------------------
### Core Inference Engine Libraries ###
Your application must link to the core Inference Engine libraries:
* Linux* OS:
- `libov_runtime.so`, which depends on `libtbb.so`, `libtbbmalloc.so`
* Windows* OS:
- `ov_runtime.dll`, which depends on `tbb.dll`, `tbbmalloc.dll`
* macOS*:
- `libov_runtime.dylib`, which depends on `libtbb.dylib`, `libtbbmalloc.dylib`
The required C++ header files are located in the `include` directory.
This library contains the classes to:
* Create Inference Engine Core object to work with devices and read network (InferenceEngine::Core)
* Manipulate network information (InferenceEngine::CNNNetwork)
* Execute and pass inputs and outputs (InferenceEngine::ExecutableNetwork and InferenceEngine::InferRequest)
### Plugin Libraries to read a network object ###
Starting from the 2022.1 release, OpenVINO Runtime introduced a concept of frontend plugins. Such plugins are loaded automatically by OpenVINO Runtime at run time, depending on the file format:
* Unix* OS:
- `libov_ir_frontend.so` to read a network from IR
- `libov_paddle_frontend.so` to read a network from PaddlePaddle model format
- `libov_onnx_frontend.so` to read a network from ONNX model format
* Windows* OS:
- `ov_ir_frontend.dll` to read a network from IR
- `ov_paddle_frontend.dll` to read a network from PaddlePaddle model format
- `ov_onnx_frontend.dll` to read a network from ONNX model format
### Device-specific Plugin Libraries ###
For each supported target device, Inference Engine provides a plugin — a DLL/shared library that contains complete implementation for inference on this particular device. The following plugins are available:
| Plugin | Device Type |
| ------- | ----------------------------- |
|CPU | Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE |
|GPU | Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics |
|MYRIAD | Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X |
|GNA | Intel&reg; Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel&reg; Pentium&reg; Silver J5005 Processor, Intel&reg; Pentium&reg; Silver N5000 Processor, Intel&reg; Celeron&reg; J4005 Processor, Intel&reg; Celeron&reg; J4105 Processor, Intel&reg; Celeron&reg; Processor N4100, Intel&reg; Celeron&reg; Processor N4000, Intel&reg; Core&trade; i3-8121U Processor, Intel&reg; Core&trade; i7-1065G7 Processor, Intel&reg; Core&trade; i7-1060G7 Processor, Intel&reg; Core&trade; i5-1035G4 Processor, Intel&reg; Core&trade; i5-1035G7 Processor, Intel&reg; Core&trade; i5-1035G1 Processor, Intel&reg; Core&trade; i5-1030G7 Processor, Intel&reg; Core&trade; i5-1030G4 Processor, Intel&reg; Core&trade; i3-1005G1 Processor, Intel&reg; Core&trade; i3-1000G1 Processor, Intel&reg; Core&trade; i3-1000G4 Processor |
|HETERO | Automatic splitting of a network inference between several devices (for example, if a device doesn't support certain layers)|
|MULTI | Simultaneous inference of the same network on several devices in parallel|
The table below shows the plugin libraries and additional dependencies for Linux, Windows and macOS platforms.
| Plugin | Library name for Linux | Dependency libraries for Linux | Library name for Windows | Dependency libraries for Windows | Library name for macOS | Dependency libraries for macOS |
|--------|-----------------------------|-------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------|------------------------------|---------------------------------------------|
| CPU | `libov_intel_cpu_plugin.so` | | `ov_intel_cpu_plugin.dll` | | `libov_intel_cpu_plugin.so` | |
| GPU | `libov_intel_gpu_plugin.so` | `libOpenCL.so` | `ov_intel_gpu_plugin.dll` | `OpenCL.dll` | Is not supported | - |
| MYRIAD | `libov_intel_vpu_plugin.so` | `libusb.so` | `ov_intel_vpu_plugin.dll`| `usb.dll` | `libov_intel_vpu_plugin.so` | `libusb.dylib` |
| HDDL | `libHDDLPlugin.so` | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so` | `HDDLPlugin.dll` | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll` | Is not supported | - |
| GNA | `libov_intel_gna_plugin.so` | `libgna.so`, | `ov_intel_gna_plugin.dll` | `gna.dll` | Is not supported | - |
| HETERO | `libov_hetero_plugin.so` | Same as for selected plugins | `ov_hetero_plugin.dll` | Same as for selected plugins | `libov_hetero_plugin.so` | Same as for selected plugins |
| MULTI | `libov_auto_plugin.so` | Same as for selected plugins | `ov_auto_plugin.dll` | Same as for selected plugins | `libov_auto_plugin.so` | Same as for selected plugins |
| AUTO | `libov_auto_plugin.so` | Same as for selected plugins | `ov_auto_plugin.dll` | Same as for selected plugins | `libov_auto_plugin.so` | Same as for selected plugins |
> **NOTE**: All plugin libraries also depend on core Inference Engine libraries.
Make sure those libraries are in your computer's path or in the place you pointed to in the plugin loader. Make sure each plugin's related dependencies are in the:
* Linux: `LD_LIBRARY_PATH`
* Windows: `PATH`
* macOS: `DYLD_LIBRARY_PATH`
On Linux and macOS, use the script `setupvars.sh` to set the environment variables.
On Windows, run the `setupvars.bat` batch file to set the environment variables.
To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter.
Common Workflow for Using the Inference Engine API
--------------------------------------------------
The common workflow contains the following steps:
1. **Create Inference Engine Core object** - Create an `InferenceEngine::Core` object to work with different devices, all device plugins are managed internally by the `Core` object. Register extensions with custom nGraph operations (`InferenceEngine::Core::AddExtension`).
2. **Read the Intermediate Representation** - Using the `InferenceEngine::Core` class, read an Intermediate Representation file into an object of the `InferenceEngine::CNNNetwork` class. This class represents the network in the host memory.
3. **Prepare inputs and outputs format** - After loading the network, specify the input and output precision and layout of the network. For this specification, use the `InferenceEngine::CNNNetwork::getInputsInfo()` and `InferenceEngine::CNNNetwork::getOutputsInfo()` methods.
4. **Pass per device loading configurations** specific to this device (`InferenceEngine::Core::SetConfig`) and register extensions to this device (`InferenceEngine::Core::AddExtension`).
5. **Compile and Load Network to device** - Use the `InferenceEngine::Core::LoadNetwork()` method with specific device (e.g. `CPU`, `GPU`, etc.) to compile and load the network on the device. Pass in the per-target load configuration for this compilation and load operation.
6. **Set input data** - With the network loaded, you have an `InferenceEngine::ExecutableNetwork` object. Use this object to create an `InferenceEngine::InferRequest` in which you signal the input buffers to use for input and output. Specify a device-allocated memory and copy it into the device memory directly, or tell the device to use your application memory to save a copy.
7. **Execute** - With the input and output memory now defined, choose your execution mode:
* Synchronously - `InferenceEngine::InferRequest::Infer()` method. Blocks until inference is completed.
* Asynchronously - `InferenceEngine::InferRequest::StartAsync()` method. Check status with the `InferenceEngine::InferRequest::Wait()` method (0 timeout), wait, or specify a completion callback.
8. **Get the output** - After inference is completed, get the output memory or read the memory you provided earlier. Do this with the `InferenceEngine::IInferRequest::GetBlob()` method.
Further Reading
---------------
For more details on the Inference Engine API, refer to the [Integrating Inference Engine in Your Application](Integrate_with_customer_application_new_API.md) documentation.

View File

@ -1,152 +0,0 @@
# Integrate Inference Engine with Your Python Application {#openvino_docs_IE_DG_integrate_with_customer_application_python}
This document explains how to integrate and use the Inference Engine API with your Python application.
The following diagram illustrates the typical Inference Engine Python API workflow:
![ie_api_flow_python]
Read the sections below to learn about each item.
## Link with Inference Engine Library
To make use of the Inference Engine functionality, import IECore to your application:
```py
from openvino.inference_engine import IECore
```
## Use Inference Engine API to Implement Inference Pipeline
This section provides step-by-step instructions to implement a typical inference pipeline with the Inference Engine Python API:
![ie_api_use_python]
### Step 1. Create Inference Engine Core
Use the following code to create Inference Engine Core to manage available devices and read network objects:
```py
ie = IECore()
```
### Step 2 (Optional). Read model. Configure Input and Output of the Model
@sphinxdirective
.. raw:: html
<div class="collapsible-section">
@endsphinxdirective
Optionally, configure input and output of the model using the steps below:
1. Read model
@sphinxdirective
.. tab:: IR
.. code-block:: python
net = ie.read_network(model="model.xml")
.. tab:: ONNX
.. code-block:: python
net = ie.read_network(model="model.onnx")
.. tab:: nGraph
.. code-block:: python
// TBD
@endsphinxdirective
2. Request input and output information using input_info, outputs
```py
input_name = next(iter(net.input_info))
output_name = next(iter(net.outputs))
```
Information for this input layer is stored in `input_info`. The next cell prints the input layout, precision and shape.
```py
print(f"input layout: {net.input_info[input_layer].layout}")
print(f"input precision: {net.input_info[input_layer].precision}")
print(f"input shape: {net.input_info[input_layer].tensor_desc.dims}")
```
This cell output tells us that the model expects inputs with a shape of [1,3,224,224], and that this is in NCHW layout. This means that the model expects input data with a batch size (N) of 1, 3 channels (C), and images of a height (H) and width (W) of 224. The input data is expected to be of FP32 (floating point) precision.
Getting the output layout, precision and shape is similar to getting the input layout, precision and shape.
```py
print(f"output layout: {net.outputs[output_layer].layout}")
print(f"output precision: {net.outputs[output_layer].precision}")
print(f"output shape: {net.outputs[output_layer].shape}")
```
This cell output shows that the model returns outputs with a shape of [1, 1001], where 1 is the batch size (N) and 1001 the number of classes (C). The output is returned as 32-bit floating point.
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### Step 3. Load model to the Device
Load the model to the device using `load_network()`:
@sphinxdirective
.. tab:: IR
.. code-block:: python
exec_net = ie.load_network(network= "model.xml", device_name="CPU")
.. tab:: ONNX
.. code-block:: python
exec_net = ie.load_network(network= "model.onnx", device_name="CPU")
.. tab:: nGraph
.. code-block:: python
// TBD
.. tab:: Model from Step 2
.. code-block:: python
exec_net = ie.load_network(network=net, device_name="CPU")
@endsphinxdirective
### Step 4. Prepare input
```py
import cv2
import numpy as np
image = cv2.imread("image.png")
# Resize the image with OpenCV if needed to match the network input shape
# res_image = cv2.resize(src=image, dsize=(W, H))
# Converting image to NCHW format with FP32 type
input_data = np.expand_dims(np.transpose(image, (2, 0, 1)), 0).astype(np.float32)
```
### Step 5. Start Inference
```py
input_name = next(iter(net.input_info))
result = exec_net.infer({input_name: input_data})
```
### Step 6. Process the Inference Results
```py
output_name = next(iter(net.outputs))
output = result[output_name]
```
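Since the example model above is a 1001-class classifier, a typical next step is to take the index of the highest score as the predicted class. A minimal sketch, reusing the `np` import from Step 4:
```py
predicted_class_id = int(np.argmax(output))
print(f"Predicted class id: {predicted_class_id}")
```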
## Run Application
[ie_api_flow_python]: img/ie_api_python.png
[ie_api_use_python]: img/ie_api_integration_python.png

View File

@ -52,7 +52,7 @@ should be called with `weights` passed as an empty `Blob`.
## Additional Resources
- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit)
- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org)
- OpenVINO™ toolkit online documentation: [https://docs.openvino.ai](https://docs.openvino.ai)
- Model Optimizer Developer Guide: [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
- Inference Engine Developer Guide: [Inference Engine Developer Guide](Deep_Learning_Inference_Engine_DevGuide.md)
- For more information on Sample Applications, see the [Inference Engine Samples Overview](Samples_Overview.md)

View File

@ -50,7 +50,7 @@ The Auto-device plugin supports query device optimization capabilities in metric
### Enumerating Devices and Selection Logic
The Inference Engine now features a dedicated API to enumerate devices and their capabilities.
See [Hello Query Device C++ Sample](../../../inference-engine/samples/hello_query_device/README.md).
See [Hello Query Device C++ Sample](../../../samples/cpp/hello_query_device/README.md).
This is the example output from the sample (truncated to device names only):
```sh
@ -163,9 +163,9 @@ From the application's point of view, this is just another device that handles a
With the 2021.4 release, Auto-device setup is done in three major steps:
1. Configure each device as usual (for example, via the conventional [IECore.set_config](https://docs.openvinotoolkit.org/latest/ie_python_api/classie__api_1_1IECore.html#a2c738cee90fca27146e629825c039a05) method).
1. Configure each device as usual (for example, via the conventional [IECore.set_config](https://docs.openvino.ai/latest/ie_python_api/classie__api_1_1IECore.html#a2c738cee90fca27146e629825c039a05) method).
2. Load a network to the Auto-device plugin. This is the only change needed in your application.
3. As with any other executable network resulting from [IECore.load_network](https://docs.openvinotoolkit.org/latest/ie_python_api/classie__api_1_1IECore.html#ac9a2e043d14ccfa9c6bbf626cfd69fcc), create as many requests as needed to saturate the devices.
3. As with any other executable network resulting from [IECore.load_network](https://docs.openvino.ai/latest/ie_python_api/classie__api_1_1IECore.html#ac9a2e043d14ccfa9c6bbf626cfd69fcc), create as many requests as needed to saturate the devices.
These steps are covered below in detail.

View File

@ -1,5 +1,5 @@
CPU Plugin {#openvino_docs_IE_DG_supported_plugins_CPU}
=======
# CPU Plugin {#openvino_docs_IE_DG_supported_plugins_CPU}
## Introducing the CPU Plugin
The CPU plugin was developed to achieve high performance of neural networks on CPU, using the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN).

View File

@ -1,5 +1,4 @@
GPU Plugin {#openvino_docs_IE_DG_supported_plugins_GPU}
=======
# GPU Plugin {#openvino_docs_IE_DG_supported_plugins_GPU}
@sphinxdirective

View File

@ -182,7 +182,7 @@ You can set the configuration directly as a string, or use the metric key `MULTI
### Enumerating Available Devices
The Inference Engine features a dedicated API to enumerate devices and their capabilities. See the [Hello Query Device Python Sample](../../../inference-engine/ie_bridges/python/sample/hello_query_device/README.md). This is example output from the sample (truncated to device names only):
The Inference Engine features a dedicated API to enumerate devices and their capabilities. See the [Hello Query Device Python Sample](../../../samples/python/hello_query_device/README.md). This is example output from the sample (truncated to device names only):
```sh
./hello_query_device
@ -268,7 +268,7 @@ Note that while the performance of accelerators works well with Multi-Device, th
### Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance
Every OpenVINO sample that supports the `-d` (which stands for "device") command-line option transparently accepts Multi-Device. The [Benchmark application](../../../inference-engine/tools/benchmark_tool/README.md) is the best reference for the optimal usage of Multi-Device. As discussed earlier, you do not need to set up the number of requests, CPU streams or threads because the application provides optimal performance out of the box. Below is an example command to evaluate CPU+GPU performance with the Benchmark application:
Every OpenVINO sample that supports the `-d` (which stands for "device") command-line option transparently accepts Multi-Device. The [Benchmark application](../../../tools/benchmark_tool/README.md) is the best reference for the optimal usage of Multi-Device. As discussed earlier, you do not need to set up the number of requests, CPU streams or threads because the application provides optimal performance out of the box. Below is an example command to evaluate CPU+GPU performance with the Benchmark application:
```sh
./benchmark_app.py -d MULTI:CPU,GPU -m <model>

View File

@ -19,6 +19,19 @@ The Inference Engine provides unique capabilities to infer deep learning models
Devices similar to the ones we have used for benchmarking can be accessed using [Intel® DevCloud for the Edge](https://devcloud.intel.com/edge/), a remote development environment with access to Intel® hardware and the latest versions of the Intel® Distribution of the OpenVINO™ Toolkit. [Learn more](https://devcloud.intel.com/edge/get_started/devcloud/) or [Register here](https://inteliot.force.com/DevcloudForEdge/s/).
The table below shows the plugin libraries and additional dependencies for Linux, Windows and macOS platforms.
| Plugin | Library name for Linux | Dependency libraries for Linux | Library name for Windows | Dependency libraries for Windows | Library name for macOS | Dependency libraries for macOS |
|--------|-----------------------------|-------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------|------------------------------|---------------------------------------------|
| CPU | `libov_intel_cpu_plugin.so` | | `ov_intel_cpu_plugin.dll` | | `libov_intel_cpu_plugin.so` | |
| GPU | `libov_intel_gpu_plugin.so` | `libOpenCL.so` | `ov_intel_gpu_plugin.dll` | `OpenCL.dll` | Is not supported | - |
| MYRIAD | `libov_intel_vpu_plugin.so` | `libusb.so` | `ov_intel_vpu_plugin.dll`| `usb.dll` | `libov_intel_vpu_plugin.so` | `libusb.dylib` |
| HDDL | `libHDDLPlugin.so` | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so` | `HDDLPlugin.dll` | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll` | Is not supported | - |
| GNA | `libov_intel_gna_plugin.so` | `libgna.so`, | `ov_intel_gna_plugin.dll` | `gna.dll` | Is not supported | - |
| HETERO | `libov_hetero_plugin.so` | Same as for selected plugins | `ov_hetero_plugin.dll` | Same as for selected plugins | `libov_hetero_plugin.so` | Same as for selected plugins |
| MULTI | `libov_auto_plugin.so` | Same as for selected plugins | `ov_auto_plugin.dll` | Same as for selected plugins | `libov_auto_plugin.so` | Same as for selected plugins |
| AUTO | `libov_auto_plugin.so` | Same as for selected plugins | `ov_auto_plugin.dll` | Same as for selected plugins | `libov_auto_plugin.so` | Same as for selected plugins |
## Supported Configurations
The Inference Engine can run inference on models in different formats with various input and output formats.

View File

@ -91,12 +91,12 @@ endif()
- `src/CMakeLists.txt` to build a plugin shared library from sources:
@snippet src/CMakeLists.txt cmake:plugin
@snippet template_plugin/src/CMakeLists.txt cmake:plugin
> **NOTE**: `IE::inference_engine` target is imported from the Inference Engine Developer Package.
- `tests/functional/CMakeLists.txt` to build a set of functional plugin tests:
@snippet tests/functional/CMakeLists.txt cmake:functional_tests
@snippet template_plugin/tests/functional/CMakeLists.txt cmake:functional_tests
> **NOTE**: The `IE::funcSharedTests` static library with common functional Inference Engine Plugin tests is imported via the Inference Engine Developer Package.

View File

@ -9,8 +9,11 @@
Implement Plugin Functionality <openvino_docs_ie_plugin_dg_plugin>
Implement Executable Network Functionality <openvino_docs_ie_plugin_dg_executable_network>
openvino_docs_ie_plugin_dg_quantized_networks
Implement Synchronous Inference Request <openvino_docs_ie_plugin_dg_infer_request>
Implement Asynchronous Inference Request <openvino_docs_ie_plugin_dg_async_infer_request>
openvino_docs_ie_plugin_dg_plugin_build
openvino_docs_ie_plugin_dg_plugin_testing
@endsphinxdirective

View File

@ -14,5 +14,5 @@ Starting from OpenVINO 2020.2 release all the quantized models are represented i
![quantized_model_example]
<div align="center">Figure 2. Example of compressed quantized model.</div>
[quantized_convolution]: ../images/quantized_convolution.png
[quantized_model_example]: ../images/quantized_model_example.png
[quantized_convolution]: images/quantized_convolution.png
[quantized_model_example]: images/quantized_model_example.png

View File

@ -24,7 +24,7 @@ Plugin Class
The Inference Engine Plugin API provides the helper InferenceEngine::IInferencePlugin class, which is recommended as a base class for a plugin.
Based on it, the declaration of a plugin class can look as follows:
@snippet src/template_plugin.hpp plugin:header
@snippet template_plugin/src/template_plugin.hpp plugin:header
#### Class Fields
@ -34,7 +34,7 @@ The provided plugin class also has several fields:
* `_waitExecutor` - a task executor that waits for a response from a device about device tasks completion.
* `_cfg` of type `Configuration`:
@snippet src/template_config.hpp configuration:header
@snippet template_plugin/src/template_config.hpp configuration:header
As an example, a plugin configuration has three value parameters:
@ -52,7 +52,7 @@ must be thrown from a plugin constructor.
A plugin must define the device name it exposes via the `_pluginName` field of the base class:
@snippet src/template_plugin.cpp plugin:ctor
@snippet template_plugin/src/template_plugin.cpp plugin:ctor
### `LoadExeNetworkImpl()`
@ -62,7 +62,7 @@ of the public InferenceEngine::IInferencePlugin::LoadNetwork method that calls p
This is the most important function of the `Plugin` class and creates an instance of compiled `ExecutableNetwork`,
which holds a backend-dependent compiled graph in an internal representation:
@snippet src/template_plugin.cpp plugin:load_exe_network_impl
@snippet template_plugin/src/template_plugin.cpp plugin:load_exe_network_impl
Before creating an `ExecutableNetwork` instance via its constructor, a plugin may check whether the provided
InferenceEngine::ICNNNetwork object is supported by the device. In the example above, the plugin checks precision information.
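From the application side, `LoadExeNetworkImpl()` is reached through `Core::LoadNetwork`. A minimal sketch of that call path with the Python API is shown below; it assumes the Template plugin is registered under the `"TEMPLATE"` device name and uses placeholder IR file names.

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")

# LoadNetwork dispatches to the plugin's LoadExeNetworkImpl(), which compiles
# the graph into a backend-specific ExecutableNetwork for the device.
exec_net = ie.load_network(network=net, device_name="TEMPLATE")
```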
@ -84,7 +84,7 @@ The function accepts a const shared pointer to `ngraph::Function` object and per
* [Intermediate Representation and Operation Sets](../_docs_MO_DG_IR_and_opsets.html)
* [Quantized networks](@ref openvino_docs_ie_plugin_dg_quantized_networks).
@snippet src/template_plugin.cpp plugin:transform_network
@snippet template_plugin/src/template_plugin.cpp plugin:transform_network
> **NOTE**: After all these transformations, a `ngraph::Function` object contains operations that can be directly mapped to backend kernels. For example, if the backend has a kernel that computes `A + B` in one step, the `TransformNetwork` function should contain a pass that fuses operations `A` and `B` into a single custom operation `A + B` matching the backend kernel set.
@ -100,20 +100,20 @@ operations via the InferenceEngine::QueryNetworkResult structure. The `QueryNetw
3. Construct `supported` and `unsupported` maps which contain the names of the original operations. Note that since the inference is performed using the ngraph reference backend, the decision whether an operation is supported or not depends on whether the latest OpenVINO opset contains that operation.
4. `QueryNetworkResult.supportedLayersMap` contains only operations which are fully supported by `_backend`.
@snippet src/template_plugin.cpp plugin:query_network
@snippet template_plugin/src/template_plugin.cpp plugin:query_network
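For reference, the same query is visible to applications. The sketch below (Python API, placeholder IR files, `"TEMPLATE"` assumed to be the registered device name) prints which original operations the plugin reports as supported.

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")

# QueryNetwork returns a map of supported operation names to the device that
# would execute them; anything missing from the map is unsupported.
supported = ie.query_network(network=net, device_name="TEMPLATE")
for layer_name, device in supported.items():
    print(f"{layer_name} -> {device}")
```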
### `AddExtension()`
Adds an extension of the InferenceEngine::IExtensionPtr type to a plugin. If a plugin does not
support extensions, the method must throw an exception:
@snippet src/template_plugin.cpp plugin:add_extension
@snippet template_plugin/src/template_plugin.cpp plugin:add_extension
### `SetConfig()`
Sets new values for plugin configuration keys:
@snippet src/template_plugin.cpp plugin:set_config
@snippet template_plugin/src/template_plugin.cpp plugin:set_config
In the snippet above, the `Configuration` class overrides previous configuration values with the new
ones. All these values are used during backend-specific graph compilation and execution of inference requests.
@ -124,7 +124,7 @@ ones. All these values are used during backend specific graph compilation and ex
Returns a current value for a specified configuration key:
@snippet src/template_plugin.cpp plugin:get_config
@snippet template_plugin/src/template_plugin.cpp plugin:get_config
The function is implemented with the `Configuration::Get` method, which wraps the actual configuration
key value into an InferenceEngine::Parameter and returns it.
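Both methods are ultimately exercised through `Core::SetConfig`/`Core::GetConfig`. A hedged application-side sketch with the Python API follows; the `"TEMPLATE"` device name is assumed to be registered, and whether it accepts the common `PERF_COUNT` key depends on the plugin's `Configuration` class.

```python
from openvino.inference_engine import IECore

ie = IECore()

# SetConfig: push a new value for a plugin configuration key before networks
# are compiled (the key itself is an illustrative assumption here).
ie.set_config(config={"PERF_COUNT": "YES"}, device_name="TEMPLATE")

# GetConfig: read the current value back; the plugin returns it wrapped
# into an InferenceEngine::Parameter.
print(ie.get_config(device_name="TEMPLATE", config_name="PERF_COUNT"))
```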
@ -155,7 +155,7 @@ demonstrates the definition of a new optimization capability value specific for
The snippet below provides an example of the implementation for `GetMetric`:
@snippet src/template_plugin.cpp plugin:get_metric
@snippet template_plugin/src/template_plugin.cpp plugin:get_metric
> **NOTE**: If an unsupported metric key is passed to the function, it must throw an exception.
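From an application, `GetMetric()` is what answers queries such as the ones below (Python API; `"TEMPLATE"` is assumed to be the registered device name):

```python
from openvino.inference_engine import IECore

ie = IECore()

# Each call lands in the plugin's GetMetric() implementation.
print(ie.get_metric(device_name="TEMPLATE", metric_name="SUPPORTED_METRICS"))
print(ie.get_metric(device_name="TEMPLATE", metric_name="OPTIMIZATION_CAPABILITIES"))
```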
@ -178,13 +178,13 @@ information must be stored and checked during the import.
- Compiled backend specific graph itself
- Information about precisions and shapes set by the user
@snippet src/template_plugin.cpp plugin:import_network_impl
@snippet template_plugin/src/template_plugin.cpp plugin:import_network
Create Instance of Plugin Class
------------------------
An Inference Engine plugin library must export only one function that creates a plugin instance, using the IE_DEFINE_PLUGIN_CREATE_FUNCTION macro:
@snippet src/template_plugin.cpp plugin:create_plugin_engine
@snippet template_plugin/src/template_plugin.cpp plugin:create_plugin_engine
The next step in the plugin library implementation is the [ExecutableNetwork](@ref openvino_docs_ie_plugin_dg_executable_network) class.

View File

@ -15,7 +15,7 @@ Engine concepts: plugin creation, multiple executable networks support, multiple
- From the declaration of the convolution test class, we can see that it is a parameterized GoogleTest-based class with the `convLayerTestParamsSet` tuple of parameters:
@snippet single_layer_tests/convolution.hpp test_convolution:definition
@snippet single_layer/convolution.hpp test_convolution:definition
- Based on that, define a set of parameters for `Template` plugin functional test instantiation:

View File

@ -46,4 +46,4 @@ Below we define these rules as follows:
- Non-unified quantization parameters for Eltwise and Concat operations.
- Non-quantized network output, i.e. there are no quantization parameters for it.
[qdq_propagation]: ../images/qdq_propagation.png
[qdq_propagation]: images/qdq_propagation.png

View File

@ -33,8 +33,8 @@ The IR is a pair of files describing the model:
* <code>.bin</code> - Contains the weights and biases binary data.
> **TIP**: You also can work with the Model Optimizer inside the OpenVINO™ [Deep Learning Workbench](https://docs.openvinotoolkit.org/latest/workbench_docs_Workbench_DG_Introduction.html) (DL Workbench).
> [DL Workbench](https://docs.openvinotoolkit.org/latest/workbench_docs_Workbench_DG_Introduction.html) is a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare performance of deep learning models.
> **TIP**: You also can work with the Model Optimizer inside the OpenVINO™ [Deep Learning Workbench](https://docs.openvino.ai/latest/workbench_docs_Workbench_DG_Introduction.html) (DL Workbench).
> [DL Workbench](https://docs.openvino.ai/latest/workbench_docs_Workbench_DG_Introduction.html) is a web-based graphical environment that enables you to optimize, fine-tune, analyze, visualize, and compare performance of deep learning models.
## Install Model Optimizer Pre-Requisites
@ -636,15 +636,15 @@ mo --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
You need to have write permissions for the output directory.
> **NOTE**: Some models require using additional arguments to specify conversion parameters, such as `--input_shape`, `--scale`, `--scale_values`, `--mean_values`, `--mean_file`. To learn about when you need to use these parameters, refer to [Converting a Model to Intermediate Representation (IR)](Converting_Model.md).
> **NOTE**: Some models require using additional arguments to specify conversion parameters, such as `--input_shape`, `--scale`, `--scale_values`, `--mean_values`, `--mean_file`. To learn about when you need to use these parameters, refer to [Converting a Model to Intermediate Representation (IR)](prepare_model/convert_model/Converting_Model.md).
To adjust the conversion process, you may use general parameters defined in the [Converting a Model to Intermediate Representation (IR)](Converting_Model.md) and
To adjust the conversion process, you may use general parameters defined in the [Converting a Model to Intermediate Representation (IR)](prepare_model/convert_model/Converting_Model.md) and
framework-specific parameters for:
* [Caffe](Convert_Model_From_Caffe.md)
* [TensorFlow](Convert_Model_From_TensorFlow.md)
* [MXNet](Convert_Model_From_MxNet.md)
* [ONNX](Convert_Model_From_ONNX.md)
* [Kaldi](Convert_Model_From_Kaldi.md)
* [Caffe](prepare_model/convert_model/Convert_Model_From_Caffe.md)
* [TensorFlow](prepare_model/convert_model/Convert_Model_From_TensorFlow.md)
* [MXNet](prepare_model/convert_model/Convert_Model_From_MxNet.md)
* [ONNX](prepare_model/convert_model/Convert_Model_From_ONNX.md)
* [Kaldi](prepare_model/convert_model/Convert_Model_From_Kaldi.md)
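The conversion command shown above can also be scripted together with a quick sanity check of the produced IR. This is only an illustrative sketch: it assumes `mo` is available on `PATH` and uses placeholder model file names.

```python
import subprocess

from openvino.inference_engine import IECore

# Run Model Optimizer exactly as on the command line above.
subprocess.run(
    ["mo", "--input_model", "model.onnx", "--output_dir", "ir_output"],
    check=True,
)

# Load the produced IR to confirm the conversion succeeded.
ie = IECore()
net = ie.read_network(model="ir_output/model.xml", weights="ir_output/model.bin")
print("Inputs:", list(net.input_info), "Outputs:", list(net.outputs))
```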
## Videos

View File

@ -9,7 +9,7 @@ Model Optimizer performs preprocessing to a model. It is possible to optimize th
If, for example, your network assumes RGB inputs, the Model Optimizer can swap the channels in the first convolution using the `--reverse_input_channels` command-line option, so you do not need to convert your inputs to RGB every time you get a BGR image, for example, from OpenCV*.
- **Larger batch size**<br>
Notice that the devices like GPU are doing better with larger batch size. While it is possible to set the batch size in the runtime using the Inference Engine [ShapeInference feature](../IE_DG/ShapeInference.md).
Notice that devices like GPU perform better with a larger batch size. It is also possible to set the batch size at runtime using the Inference Engine [ShapeInference feature](../../IE_DG/ShapeInference.md), as shown in the sketch after this list.
- **Resulting IR precision**<br>
The resulting IR precision, for instance, `FP16` or `FP32`, directly affects performance. As the CPU now supports `FP16` (while internally upscaling to `FP32` anyway) and because this is the best precision for a GPU target, you may want to always convert models to `FP16`. Notice that this is the only precision that Intel&reg; Movidius&trade; Myriad&trade; 2 and Intel&reg; Myriad&trade; X VPUs support.
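A sketch of the runtime batch change mentioned in the list above, using the Python API (placeholder IR files; the batch value and the `GPU` target are illustrative):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")

# Take the current input shape and change only the batch dimension.
input_name = next(iter(net.input_info))
n, c, h, w = net.input_info[input_name].input_data.shape
net.reshape({input_name: (8, c, h, w)})  # e.g. a batch of 8

exec_net = ie.load_network(network=net, device_name="GPU")
```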

View File

@ -1,606 +0,0 @@
# Installing Model Optimizer Pre-Requisites {#openvino_docs_MO_DG_prepare_model_Config_Model_Optimizer}
@sphinxdirective
.. tab:: Using configuration scripts
.. tab:: Linux
.. tab:: All frameworks
.. tab:: Install globally
.. code-block:: sh
cd <INSTALL_DIR>/deployment_tools/model_optimizer/install_prerequisites
./install_prerequisites.sh
.. tab:: Install to virtualenv
.. code-block:: sh
cd <INSTALL_DIR>/deployment_tools/model_optimizer/install_prerequisites
virtualenv --system-site-packages -p python3 ./venv
source ./venv/bin/activate # sh, bash, ksh, or zsh
./install_prerequisites.sh
.. tab:: Caffe
.. tab:: Install globally
.. code-block:: sh
cd <INSTALL_DIR>/deployment_tools/model_optimizer/install_prerequisites
install_prerequisites_caffe.sh
.. tab:: Install to virtualenv
.. code-block:: sh
cd <INSTALL_DIR>/deployment_tools/model_optimizer/install_prerequisites
install_prerequisites_caffe.sh
.. tab:: Tensorflow 1.x
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Tensorflow 2.x
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: MXNet
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: ONNX
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Kaldi
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Windows
.. tab:: All frameworks
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Caffe
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Tensorflow 1.x
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Tensorflow 2.x
.. tab:: Install globally
test
.. tab:: Install to virtualenv
tests
.. tab:: MXNet
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: ONNX
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Kaldi
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: macOS
.. tab:: All frameworks
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Caffe
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Tensorflow 1.x
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Tensorflow 2.x
.. tab:: Install globally
test
.. tab:: Install to virtualenv
tests
.. tab:: MXNet
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: ONNX
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Kaldi
.. tab:: Install globally
test
.. tab:: Install to virtualenv
test
.. tab:: Using manual configuration process
.. tab:: Linux
.. tab:: All frameworks
test
.. tab:: Caffe
test
.. tab:: Tensorflow 1.x
test
.. tab:: Tensorflow 2.x
test
.. tab:: MXNet
test
.. tab:: ONNX
test
.. tab:: Kaldi
test
.. tab:: Windows
.. tab:: All frameworks
test
.. tab:: Caffe
test
.. tab:: Tensorflow 1.x
test
.. tab:: Tensorflow 2.x
test
.. tab:: MXNet
test
.. tab:: ONNX
test
.. tab:: Kaldi
test
.. tab:: macOS
.. tab:: All frameworks
test
.. tab:: Caffe
test
.. tab:: Tensorflow 1.x
test
.. tab:: Tensorflow 2.x
test
.. tab:: MXNet
test
.. tab:: ONNX
test
.. tab:: Kaldi
test
@endsphinxdirective
Before running the Model Optimizer, you must install the Model Optimizer pre-requisites for the framework that was used to train the model. This section tells you how to install the pre-requisites either through scripts or by using a manual process.
## Using Configuration Scripts
You can either configure all three frameworks at the same time or install an
individual framework. The scripts delivered with the tool install all required
dependencies and provide the fastest and easiest way to configure the Model
Optimizer.
To configure all three frameworks, go to the
`<INSTALL_DIR>/tools/model_optimizer/install_prerequisites`
directory and run:
* For Linux\* OS:
```
install_prerequisites.sh
```
> **NOTE**: This command installs prerequisites globally. If you want to keep Model Optimizer in a separate sandbox, run the following commands instead:
```
virtualenv --system-site-packages -p python3 ./venv
```
```
source ./venv/bin/activate  # sh, bash, ksh, or zsh
```
```
./install_prerequisites.sh
```
* For Windows\* OS:
```
install_prerequisites.bat
```
To configure a specific framework, go to the
`<INSTALL_DIR>/tools/model_optimizer/install_prerequisites`
directory and run:
* For Caffe\* on Linux:
```
install_prerequisites_caffe.sh
```
* For Caffe on Windows:
```
install_prerequisites_caffe.bat
```
* For TensorFlow\* on Linux:
```
install_prerequisites_tf.sh
```
* For TensorFlow on Windows:
```
install_prerequisites_tf.bat
```
* For MXNet\* on Linux:
```
install_prerequisites_mxnet.sh
```
* For MXNet on Windows:
```
install_prerequisites_mxnet.bat
```
* For Kaldi\* on Linux:
```
install_prerequisites_kaldi.sh
```
* For Kaldi on Windows:
```
install_prerequisites_kaldi.bat
```
* For ONNX\* on Linux:
```
install_prerequisites_onnx.sh
```
* For ONNX on Windows:
```
install_prerequisites_onnx.bat
```
> **IMPORTANT**: **ONLY FOR CAFFE\*** By default, you do not need to install Caffe to create an
> Intermediate Representation for a Caffe model, unless you use Caffe for
> custom layer shape inference and do not write Model Optimizer extensions.
> To learn more about implementing Model Optimizer custom operations and the
> limitations of using Caffe for shape inference, see
> [Custom Layers in Model Optimizer](customize_model_optimizer/Customize_Model_Optimizer.md).
## Using Manual Configuration Process
If you prefer, you can manually configure the Model Optimizer for one
framework at a time.
1. Go to the Model Optimizer directory:
```shell
cd <INSTALL_DIR>/tools/model_optimizer/
```
2. **Strongly recommended for all global Model Optimizer dependency installations**:
Create and activate a virtual environment. While not required, this step is
strongly recommended since the virtual environment creates a Python\*
sandbox, and dependencies for the Model Optimizer do not influence the
global Python configuration, installed libraries, or other components.
In addition, a flag ensures that system-wide Python libraries are available
in this sandbox. Skip this step only if you do want to install all the Model
Optimizer dependencies globally:
* Create a virtual environment:
```shell
virtualenv -p /usr/bin/python3.6 .env3 --system-site-packages
```
* Activate the virtual environment:
```shell
virtualenv -p /usr/bin/python3.6 .env3/bin/activate
```
3. Install all dependencies or only the dependencies for a specific framework:
* To install dependencies for all frameworks except TensorFlow* 1.x:
```shell
pip3 install -r requirements.txt
```
> **NOTE**: Support of MO in TensorFlow 1.x environment is deprecated. Use TensorFlow 2.x environment to convert both TensorFlow 1.x and 2.x models. Use separate virtual environments if you want to install multiple TensorFlow versions.
* To install dependencies only for Caffe:
```shell
pip3 install -r requirements_caffe.txt
```
* To install dependencies only for TensorFlow:
```shell
pip3 install -r requirements_tf.txt
```
* To install dependencies only for MXNet:
```shell
pip3 install -r requirements_mxnet.txt
```
* To install dependencies only for Kaldi:
```shell
pip3 install -r requirements_kaldi.txt
```
* To install dependencies only for ONNX:
```shell
pip3 install -r requirements_onnx.txt
```
## Using the protobuf Library in the Model Optimizer for Caffe\*
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="Click to expand">
@endsphinxdirective
These procedures require:
* Access to GitHub and the ability to use git commands
* Microsoft Visual Studio\* 2013 for Win64\* (if using Windows\*)
* C/C++
Model Optimizer uses the protobuf library to load trained Caffe models.
By default, the library executes pure Python\* language implementation,
which is slow. These steps show how to use the faster C++ implementation
of the protobuf library on Windows OS or Linux OS.
#### Using the protobuf Library on Linux\* OS
To use the C++ implementation of the protobuf library on Linux, it is enough to
set up the environment variable:
```sh
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
```
#### <a name="protobuf-install-windows"></a>Using the protobuf Library on Windows\* OS
On Windows, pre-built protobuf packages for Python versions 3.4, 3.5, 3.6,
and 3.7 are provided with the installation package and can be found in
the
`<INSTALL_DIR>\tools\model_optimizer\install_prerequisites`
folder. Please note that they are not installed with the
`install_prerequisites.bat` installation script due to possible issues
with `pip`, and you can install them at your own discretion. Make sure
that you install the protobuf version that matches the Python version
you use:
- `protobuf-3.6.1-py3.4-win-amd64.egg` for Python 3.4
- `protobuf-3.6.1-py3.5-win-amd64.egg` for Python 3.5
- `protobuf-3.6.1-py3.6-win-amd64.egg` for Python 3.6
- `protobuf-3.6.1-py3.7-win-amd64.egg` for Python 3.7
To install the protobuf package:
1. Open the command prompt as administrator.
2. Go to the `install_prerequisites` folder of the OpenVINO toolkit installation directory:
```sh
cd <INSTALL_DIR>\tools\model_optimizer\install_prerequisites
```
3. Run the following command to install the protobuf for Python 3.6. If
you want to install the protobuf for Python 3.4, 3.5, or 3.7, replace
`protobuf-3.6.1-py3.6-win-amd64.egg` with the corresponding file
name from the list above.
```sh
python -m easy_install protobuf-3.6.1-py3.6-win-amd64.egg
```
If the Python version you use is lower than 3.4, you need to update
it or <a href="#build-protobuf">build the library manually</a>.
#### <a name="build-protobuf"></a>Building the protobuf Library on Windows\* OS
> **NOTE**: These steps are optional. If you use Python version 3.4, 3.5, 3.6, or 3.7,
> you can <a href="#protobuf-install-windows">install the protobuf library</a> using the pre-built packages.
To compile the protobuf library from sources on Windows OS, do the following:
1. Clone protobuf source files from GitHub:
```shell
git clone https://github.com/google/protobuf.git
cd protobuf
```
2. Create a Visual Studio solution file. Run these commands:
```shell
cd C:\Path\to\protobuf\cmake\build
mkdir solution
cd solution
cmake -G "Visual Studio 12 2013 Win64" ../..
```
3. Change the runtime library option for `libprotobuf` and `libprotobuf-lite`:
* Open the project's **Property Pages** dialog box
* Expand the **C/C++** tab
* Select the **Code Generation** property page
* Change the **Runtime Library** property to **Multi-thread DLL (/MD)**
4. Build the `libprotoc`, `protoc`, `libprotobuf`, and `libprotobuf-lite` projects in the **Release** configuration.
5. Add a path to the build directory to the `PATH` environment variable:
```shell
set PATH=%PATH%;C:\Path\to\protobuf\cmake\build\solution\Release
```
6. Go to the `python` directory:
```shell
cd C:\Path\to\protobuf\python
```
7. Use a text editor to open and change these `setup.py` options:
* Change from <code>libraries = ['protobuf']</code>
to <code>libraries = ['libprotobuf', 'libprotobuf-lite']</code>
* Change from <code>extra_objects = ['../src/.libs/libprotobuf.a', '../src/.libs/libprotobuf-lite.a']</code>
to <code>extra_objects = ['../cmake/build/solution/Release/libprotobuf.lib', '../cmake/build/solution/Release/libprotobuf-lite.lib']</code>
8. Build the Python package with the C++ implementation:
```shell
python setup.py build cpp_implementation
```
9. Install the Python package with the C++ implementation:
```shell
python3 -m easy_install dist/protobuf-3.6.1-py3.6-win-amd64.egg
```
10. Set an environment variable to boost the protobuf performance:
```shell
set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## See Also
* [Converting a Model to Intermediate Representation (IR)](convert_model/Converting_Model.md)

View File

@ -6,6 +6,6 @@ Model Optimizer not only converts a model to IR format, but also performs a numb
The picture above shows Caffe\* Resnet269\* topology. The left model is the original model, and the one on the right (after conversion) is the resulting model that the Model Optimizer produces, with BatchNorm and ScaleShift layers fused into the convolution weights rather than constituting separate layers.
If you still see these operations, inspect the Model Optimizer output carefully while searching for warnings, such as on the tool being unable to fuse. For example, non-linear operations (like activations) in between convolutions and linear operations might prevent the fusing. If performance is of concern, try to change (and potentially re-train) the topology. Refer to the [Model Optimizer Guide](../MO_DG/prepare_model/Model_Optimization_Techniques.md) for more optimizations.
If you still see these operations, inspect the Model Optimizer output carefully while searching for warnings, such as on the tool being unable to fuse. For example, non-linear operations (like activations) in between convolutions and linear operations might prevent the fusing. If performance is of concern, try to change (and potentially re-train) the topology. Refer to the [Model Optimizer Guide](Model_Optimization_Techniques.md) for more optimizations.
Notice that the activation (`_relu`) is not touched by the Model Optimizer, and while it could be merged into the convolution as well, this is rather a device-specific optimization, covered by the Inference Engine at model load time. You are encouraged to inspect the performance counters from plugins, which should indicate that these particular layers are not executed (“Optimized out”). For more information, refer to <a href="#performance-counters">Internal Inference Performance Counters</a>.

View File

@ -18,13 +18,13 @@ You need to build your performance conclusions on reproducible data. Do the perf
- If the warm-up run does not help or the execution time still varies, you can try running a large number of iterations and then average the results.
- For time values that vary too much, use the geometric mean.
Refer to the [Inference Engine Samples](../IE_DG/Samples_Overview.md) for code examples for the performance measurements. Almost every sample, except interactive demos, has a `-ni` option to specify the number of iterations.
Refer to the [Inference Engine Samples](../../IE_DG/Samples_Overview.md) for code examples for the performance measurements. Almost every sample, except interactive demos, has a `-ni` option to specify the number of iterations.
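A minimal timing sketch that follows the rules above (one warm-up run, many timed iterations, then an aggregate); it uses the Python API and placeholder IR files rather than a specific sample:

```python
import time

import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")
exec_net = ie.load_network(network=net, device_name="CPU")

input_name = next(iter(net.input_info))
data = np.zeros(net.input_info[input_name].input_data.shape, dtype=np.float32)

# Warm-up run: absorbs one-time costs so they do not skew the measurement.
exec_net.infer({input_name: data})

# Timed iterations.
latencies = []
for _ in range(100):
    start = time.perf_counter()
    exec_net.infer({input_name: data})
    latencies.append(time.perf_counter() - start)

print("mean, ms:   ", 1000 * np.mean(latencies))
# The geometric mean is more robust when individual times vary a lot.
print("geomean, ms:", 1000 * np.exp(np.mean(np.log(latencies))))
```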
## Getting performance numbers using OpenVINO tool
To get performance numbers, use the Benchmark App.
[Benchmark App](../../inference-engine/samples/benchmark_app/README.md) sample is the best performance reference.
[Benchmark App](../../../samples/cpp/benchmark_app/README.md) sample is the best performance reference.
It has a lot of device-specific knobs, but the primary usage is as simple as:
```bash
$ ./benchmark_app -d GPU -m <model> -i <input>
@ -39,7 +39,7 @@ to execute on the CPU instead.
For example, for the CPU throughput mode from the previous section, you can play with the number of streams (the `-nstreams` command-line parameter).
Try different values of the `-nstreams` argument from `1` to the number of CPU cores and find the one that provides the best performance. For example, on an 8-core CPU, compare `-nstreams 1` (which is a latency-oriented scenario) to `2`, `4`, and `8` streams. Notice that `benchmark_app` automatically queries, creates, and runs the number of requests required to saturate the given number of streams.
Finally, notice that when you don't specify number of streams with `-nstreams`, "AUTO" value for the streams is used, e.g. for the CPU this is [CPU_THROUGHPUT_AUTO](supported_plugins/CPU.md). You can spot the actual value behind "AUTO" for your machine in the application output.
Finally, notice that when you don't specify number of streams with `-nstreams`, "AUTO" value for the streams is used, e.g. for the CPU this is [CPU_THROUGHPUT_AUTO](../../IE_DG/supported_plugins/CPU.md). You can spot the actual value behind "AUTO" for your machine in the application output.
Notice that the "AUTO" number is not necessarily most optimal, so it is generally recommended to play either with the benchmark_app's "-nstreams" as described above, or via [new Workbench tool](@ref workbench_docs_Workbench_DG_Introduction).This allows you to simplify the app-logic, as you don't need to combine multiple inputs into a batch to achieve good CPU performance.
Instead, it is possible to keep a separate infer request per camera or another source of input and process the requests in parallel using Async API.
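A sketch of that approach with the Python API: several CPU throughput streams plus one infer request per input source, run asynchronously (placeholder IR files; the stream count is illustrative):

```python
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")

# Ask the CPU plugin for 4 throughput streams and create one request per stream.
ie.set_config(config={"CPU_THROUGHPUT_STREAMS": "4"}, device_name="CPU")
exec_net = ie.load_network(network=net, device_name="CPU", num_requests=4)

input_name = next(iter(net.input_info))
frame = np.zeros(net.input_info[input_name].input_data.shape, dtype=np.float32)

# Each request could be fed by its own camera or other input source.
for request in exec_net.requests:
    request.async_infer({input_name: frame})
for request in exec_net.requests:
    request.wait()                   # block until this request completes
    outputs = request.output_blobs   # per-request results
```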
@ -47,7 +47,7 @@ Instead, it is possible to keep a separate infer request per camera or another s
When comparing the Inference Engine performance with the framework or another reference code, make sure that both versions are as similar as possible:
- Wrap exactly the inference execution (refer to the [Inference Engine Samples](../IE_DG/Samples_Overview.md) for examples).
- Wrap exactly the inference execution (refer to the [Inference Engine Samples](../../IE_DG/Samples_Overview.md) for examples).
- Do not include model loading time.
- Ensure the inputs are identical for the Inference Engine and the framework. For example, Caffe\* allows auto-populating the input with random values. Notice that this might give different performance than real images.
- Similarly, for a correct performance comparison, make sure the access pattern, for example, input layouts, is optimal for the Inference Engine (currently, it is NCHW), as illustrated in the sketch after this list.
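For example, a typical HWC image (as returned by OpenCV) has to be transposed to NCHW before it is fed to the Inference Engine; a small sketch with a synthetic image (real code would read and resize an actual file):

```python
import numpy as np

# Synthetic 224x224 BGR image in HWC layout, as OpenCV would return it.
image_hwc = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)

# HWC -> CHW, then add the batch dimension: the result is NCHW,
# matching the default Inference Engine input layout.
image_nchw = np.expand_dims(image_hwc.transpose(2, 0, 1), 0).astype(np.float32)
print(image_nchw.shape)  # (1, 3, 224, 224)
```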
@ -64,7 +64,7 @@ Alternatively, you can gather the raw profiling data that samples report, the se
### Internal Inference Performance Counters <a name="performance-counters"></a>
Almost every sample (inspect command-line options for a specific sample with `-h`) supports a `-pc` command that outputs internal execution breakdown. Refer to the [samples code](../IE_DG/Samples_Overview.md) for the actual Inference Engine API behind that.
Almost every sample (inspect command-line options for a specific sample with `-h`) supports a `-pc` command that outputs internal execution breakdown. Refer to the [samples code](../../IE_DG/Samples_Overview.md) for the actual Inference Engine API behind that.
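The same per-layer breakdown is also available programmatically through `get_perf_counts()` in the Python API; a small sketch (placeholder IR files, performance counters enabled via the common `PERF_COUNT` key):

```python
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")

# Enable performance counters for this network, then run one inference.
exec_net = ie.load_network(network=net, device_name="CPU",
                           config={"PERF_COUNT": "YES"})
input_name = next(iter(net.input_info))
data = np.zeros(net.input_info[input_name].input_data.shape, dtype=np.float32)
exec_net.infer({input_name: data})

# Per-layer status ("EXECUTED", "OPTIMIZED_OUT", ...) and timings in microseconds.
for layer, stats in exec_net.requests[0].get_perf_counts().items():
    print(layer, stats["status"], stats["real_time"])
```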
Below is an example of CPU plugin output for a network (since the device is CPU, the layers' wall-clock `realTime` and the `cpu` time are the same):

View File

@ -178,7 +178,7 @@ Model Optimizer tried to infer a custom layer via the Caffe\* framework, but an
#### 14. What does the message "Cannot infer shape for node {} because there is no Caffe available. Please register python infer function for op or use Caffe for shape inference" mean? <a name="question-14"></a>
Your model contains a custom layer and you have correctly registered it with the `CustomLayersMapping.xml` file. These steps are required to offload shape inference of the custom layer with the help of the system Caffe\*. However, the Model Optimizer could not import a Caffe package. Make sure that you have built Caffe with a `pycaffe` target and added it into the `PYTHONPATH` environment variable. For more information, please refer to the [Configuring the Model Optimizer](customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md). At the same time, it is highly recommend to avoid dependency on Caffe and write your own Model Optimizer extension for your custom layer. For more information, refer to the FAQ [#45](#question-45).
Your model contains a custom layer and you have correctly registered it with the `CustomLayersMapping.xml` file. These steps are required to offload shape inference of the custom layer with the help of the system Caffe\*. However, the Model Optimizer could not import a Caffe package. Make sure that you have built Caffe with a `pycaffe` target and added it into the `PYTHONPATH` environment variable. At the same time, it is highly recommended to avoid a dependency on Caffe and write your own Model Optimizer extension for your custom layer. For more information, refer to the FAQ [#45](#question-45).
#### 15. What does the message "Framework name can not be deduced from the given options. Use --framework to choose one of Caffe, TensorFlow, MXNet" mean? <a name="question-15"></a>
@ -214,7 +214,7 @@ One of the layers in the specified topology might not have inputs or values. Ple
#### 24. What does the message "Part of the nodes was not translated to IE. Stopped" mean? <a name="question-24"></a>
Some of the layers are not supported by the Inference Engine and cannot be translated to an Intermediate Representation. You can extend the Model Optimizer by allowing generation of new types of layers and implement these layers in the dedicated Inference Engine plugins. For more information, refer to [Extending the Model Optimizer with New Primitives](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) page and [Inference Engine Extensibility Mechanism](../../IE_DG/Extensibility_DG/Intro.md)
Some of the layers are not supported by the Inference Engine and cannot be translated to an Intermediate Representation. You can extend the Model Optimizer by allowing generation of new types of layers and implement these layers in the dedicated Inference Engine plugins. For more information, refer to the [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md) and [Inference Engine Extensibility Mechanism](../../IE_DG/Extensibility_DG/Intro.md)
#### 25. What does the message "While creating an edge from .. to .. : node name is undefined in the graph. Check correctness of the input model" mean? <a name="question-25"></a>
@ -268,7 +268,7 @@ Model Optimizer tried to write an event file in the specified directory but fail
#### 37. What does the message "There is no registered 'infer' function for node with op = .. . Please implement this function in the extensions" mean? <a name="question-37"></a>
Most likely, you tried to extend Model Optimizer with a new primitive, but did not specify an infer function. For more information on extensions, see [Extending the Model Optimizer with New Primitives](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md).
Most likely, you tried to extend Model Optimizer with a new primitive, but did not specify an infer function. For more information on extensions, see [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md).
#### 38. What does the message "Stopped shape/value propagation at node" mean? <a name="question-38"></a>
@ -288,7 +288,7 @@ This error occurs when the `--input` command line option is used to cut a model
#### 42. What does the message "Module TensorFlow was not found. Please install TensorFlow 1.2 or higher" mean? <a name="question-42"></a>
To convert TensorFlow\* models with Model Optimizer, TensorFlow 1.2 or newer must be installed. For more information on prerequisites, see [Configuring the Model Optimizer](Config_Model_Optimizer.md).
To convert TensorFlow\* models with Model Optimizer, TensorFlow 1.2 or newer must be installed. For more information on prerequisites, see [Configuring the Model Optimizer](../Deep_Learning_Model_Optimizer_DevGuide.md).
#### 43. What does the message "Cannot read the model file: it is incorrect TensorFlow model file or missing" mean? <a name="question-43"></a>
@ -300,7 +300,7 @@ Most likely, there is a problem with the specified file for model. The file exis
#### 45. What does the message "Found custom layer. Model Optimizer does not support this layer. Please, register it in CustomLayersMapping.xml or implement extension" mean? <a name="question-45"></a>
This means that the layer `{layer_name}` is not supported in the Model Optimizer. You can find a list of all unsupported layers in the corresponding section. You should add this layer to `CustomLayersMapping.xml` ([Legacy Mode for Caffe* Custom Layers](customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md)) or implement the extensions for this layer ([Extending Model Optimizer with New Primitives](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md)).
This means that the layer `{layer_name}` is not supported in the Model Optimizer. You can find a list of all unsupported layers in the corresponding section. You should implement the extensions for this layer ([Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md)).
#### 46. What does the message "Custom replacement configuration file does not exist" mean? <a name="question-46"></a>
@ -308,7 +308,7 @@ Path to the custom replacement configuration file was provided with the `--trans
#### 47. What does the message "Extractors collection have case insensitive duplicates" mean? <a name="question-47"></a>
When extending Model Optimizer with new primitives keep in mind that their names are case insensitive. Most likely, another operation with the same name is already defined. For more information, see [Extending the Model Optimizer with New Primitives](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md).
When extending Model Optimizer with new primitives keep in mind that their names are case insensitive. Most likely, another operation with the same name is already defined. For more information, see [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md).
#### 48. What does the message "Input model name is not in an expected format, cannot extract iteration number" mean? <a name="question-48"></a>
@ -328,7 +328,7 @@ Model Optimizer tried to access a node that does not exist. This could happen if
#### 52. What does the message "Module mxnet was not found. Please install MXNet 1.0.0" mean? <a name="question-52"></a>
To convert MXNet\* models with Model Optimizer, MXNet 1.0.0 must be installed. For more information about prerequisites, see [Configuring the Model Optimizer](Config_Model_Optimizer.md).
To convert MXNet\* models with Model Optimizer, MXNet 1.0.0 must be installed. For more information about prerequisites, see [Configuring the Model Optimizer](../Deep_Learning_Model_Optimizer_DevGuide.md).
#### 53. What does the message "The following error happened while loading MXNet model .." mean? <a name="question-53"></a>
@ -340,7 +340,7 @@ Please, make sure that inputs are defined and have correct shapes. You can use `
#### 55. What does the message "Attempt to register of custom name for the second time as class. Note that custom names are case-insensitive" mean? <a name="question-55"></a>
When extending Model Optimizer with new primitives keep in mind that their names are case insensitive. Most likely, another operation with the same name is already defined. For more information, see [Extending the Model Optimizer with New Primitives](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) .
When extending Model Optimizer with new primitives keep in mind that their names are case insensitive. Most likely, another operation with the same name is already defined. For more information, see [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md).
#### 56. What does the message "Both --input_shape and --batch were provided. Please, provide only one of them" mean? <a name="question-56"></a>
@ -455,7 +455,7 @@ Your Caffe\* topology `.prototxt` file is intended for training. Model Optimizer
#### 80. What does the message "Warning: please expect that Model Optimizer conversion might be slow" mean? <a name="question-80"></a>
You are using an unsupported Python\* version. Use only versions 3.4 - 3.6 for the C++ `protobuf` implementation that is supplied with the OpenVINO Toolkit. You can still boost conversion speed by building protobuf library from sources. For complete instructions about building `protobuf` from sources, see the appropriate section in [Converting a Model to Intermediate Representation](Config_Model_Optimizer.md).
You are using an unsupported Python\* version. Use only versions 3.4 - 3.6 for the C++ `protobuf` implementation that is supplied with the OpenVINO Toolkit. You can still boost conversion speed by building protobuf library from sources. For complete instructions about building `protobuf` from sources, see the appropriate section in [Converting a Model to Intermediate Representation](../Deep_Learning_Model_Optimizer_DevGuide.md).
#### 81. What does the message "Arguments --nd_prefix_name, --pretrained_model_name and --input_symbol should be provided. Please provide all or do not use any." mean? <a name="question-81"></a>
@ -492,7 +492,7 @@ For more information, refer to [Converting a MXNet* Model](convert_model/Convert
Model Optimizer tried to load a model that contains some unsupported operations.
If you want to convert a model that contains unsupported operations, you need to prepare extensions for all such operations.
For more information, refer to [Extending Model Optimizer with New Primitives](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md).
For more information, refer to [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md).
#### 87. What does the message "Can not register Op ... Please, call function 'register_caffe_python_extractor' with parameter 'name'" mean? <a name="question-87"></a>
@ -538,7 +538,7 @@ Note that the first call <code>register_caffe_python_extractor(ProposalPythonExa
The second call prevents Model Optimizer from using this extension as if it were an extension for
a layer with type `Proposal`. Otherwise, this layer could be chosen as an implementation of the extension, which can lead to potential issues.
For more information, refer to the [Extending Model Optimizer with New Primitives](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md).
For more information, refer to the [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md).
#### 88. What does the message "Model Optimizer is unable to calculate output shape of Memory node .." mean? <a name="question-88"></a>
@ -573,7 +573,7 @@ This message means that if you have model with custom layers and its json file h
lower than 1.0.0, the Model Optimizer does not support such topologies. If you want to convert the model, you have to rebuild
MXNet with the unsupported layers or generate a new json file with MXNet version 1.0.0 or higher. You also need to implement an
Inference Engine extension for the custom layers used.
For more information, refer to the [appropriate section of Model Optimizer configuration](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md).
For more information, refer to the [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md).
#### 97. What does the message "Graph contains a cycle. Can not proceed .." mean? <a name="question-97"></a>
@ -586,7 +586,7 @@ For Tensorflow:
For all frameworks:
1. [Replace cycle containing Sub-graph in Model Optimizer](customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md)
2. [Extend Model Optimizer with New Primitives from first step](customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md)
2. [Custom Layers Guide](../../HOWTO/Custom_Layers_Guide.md)
or
* Edit network in original framework to exclude cycle.
@ -620,7 +620,7 @@ It means that you trying to convert the topology which contains '_contrib_box_nm
#### 103. What does the message "ModelOptimizer is not able to parse *.caffemodel" mean? <a name="question-103"></a>
If a '*.caffemodel' file exists and it is correct, the error possibly occured due to the use of Python protobuf implementation. In some cases, it shows error message during model parsing, for example: "'utf-8' codec can't decode byte 0xe0 in position 4: invalid continuation byte in field: mo_caffe.SpatialTransformerParameter.transform_type". You can either use Python 3.6/3.7 or build 'cpp' implementation of protobuf yourself for your version of Python. For the complete instructions about building `protobuf` from sources, see the appropriate section in [Converting a Model to Intermediate Representation](Config_Model_Optimizer.md).
If a '*.caffemodel' file exists and it is correct, the error possibly occurred due to the use of the Python protobuf implementation. In some cases, it shows an error message during model parsing, for example: "'utf-8' codec can't decode byte 0xe0 in position 4: invalid continuation byte in field: mo_caffe.SpatialTransformerParameter.transform_type". You can either use Python 3.6/3.7 or build the 'cpp' implementation of protobuf yourself for your version of Python. For the complete instructions about building `protobuf` from sources, see the appropriate section in [Converting a Model to Intermediate Representation](../Deep_Learning_Model_Optimizer_DevGuide.md).
#### 104. What does the message "SyntaxError: 'yield' inside list comprehension" during MxNet\* model conversion mean? <a name="question-104"></a>

View File

@ -2,7 +2,7 @@
A summary of the steps for optimizing and deploying a model that was trained with Caffe\*:
1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for Caffe\*.
1. [Configure the Model Optimizer](../../Deep_Learning_Model_Optimizer_DevGuide.md) for Caffe\*.
2. [Convert a Caffe\* Model](#Convert_From_Caffe) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values
3. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided Inference Engine [sample applications](../../../IE_DG/Samples_Overview.md)
4. [Integrate](../../../IE_DG/Samples_Overview.md) the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in your application to deploy the model in the target environment
@ -93,7 +93,7 @@ Caffe*-specific parameters:
```sh
mo --input_model bvlc_alexnet.caffemodel --input_proto bvlc_alexnet.prototxt --output_dir <OUTPUT_MODEL_DIR>
```
* Launching the Model Optimizer for the [bvlc_alexnet.caffemodel](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) with a specified `CustomLayersMapping` file. This is the legacy method of quickly enabling model conversion if your model has custom layers. This requires the Caffe\* system on the computer. To read more about this, see [Legacy Mode for Caffe* Custom Layers](../customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md).
* Launching the Model Optimizer for the [bvlc_alexnet.caffemodel](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) with a specified `CustomLayersMapping` file. This is the legacy method of quickly enabling model conversion if your model has custom layers. This requires the Caffe\* system on the computer.
Optional parameters without default values and not specified by the user in the `.prototxt` file are removed from the Intermediate Representation, and nested parameters are flattened:
```sh
mo --input_model bvlc_alexnet.caffemodel -k CustomLayersMapping.xml --disable_omitting_optional --enable_flattening_nested_params --output_dir <OUTPUT_MODEL_DIR>

View File

@ -12,7 +12,7 @@
A summary of the steps for optimizing and deploying a model that was trained with Kaldi\*:
1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for Kaldi\*.
1. [Configure the Model Optimizer](../../Deep_Learning_Model_Optimizer_DevGuide.md) for Kaldi\*.
2. [Convert a Kaldi\* Model](#Convert_From_Kaldi) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values.
3. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided Inference Engine [sample applications](../../../IE_DG/Samples_Overview.md).
4. [Integrate](../../../IE_DG/Samples_Overview.md) the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in your application to deploy the model in the target environment.

View File

@ -13,7 +13,7 @@
A summary of the steps for optimizing and deploying a model that was trained with the MXNet\* framework:
1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for MXNet* (MXNet was used to train your model)
1. [Configure the Model Optimizer](../../Deep_Learning_Model_Optimizer_DevGuide.md) for MXNet* (MXNet was used to train your model)
2. [Convert a MXNet model](#ConvertMxNet) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values
3. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided Inference Engine [sample applications](../../../IE_DG/Samples_Overview.md)
4. [Integrate](../../../IE_DG/Samples_Overview.md) the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in your application to deploy the model in the target environment

View File

@ -2,7 +2,7 @@
A summary of the steps for optimizing and deploying a model trained with Paddle\*:
1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for Paddle\*.
1. [Configure the Model Optimizer](../../Deep_Learning_Model_Optimizer_DevGuide.md) for Paddle\*.
2. [Convert a Paddle\* Model](#Convert_From_Paddle) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases.
3. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided Inference Engine [sample applications](../../../IE_DG/Samples_Overview.md).
4. [Integrate](../../../IE_DG/Samples_Overview.md) the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in your application to deploy the model in the target environment.
@ -41,7 +41,7 @@ To convert a Paddle\* model:
Parameters to convert your model:
* [Framework-agnostic parameters](Converting_Model_General.md): These parameters are used to convert a model trained with any supported framework.
* [Framework-agnostic parameters](Converting_Model.md): These parameters are used to convert a model trained with any supported framework.
> **NOTE:** `--scale`, `--scale_values`, `--mean_values` are not supported in the current version of mo_paddle.
### Example of Converting a Paddle* Model

View File

@ -10,6 +10,8 @@
openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_QuartzNet
openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_RNNT
openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_YOLACT
openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_Bert_ner
openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_RCAN
@endsphinxdirective
@ -43,7 +45,7 @@ Here is the list of models that are tested and guaranteed to be supported. Howev
PyTorch* framework is supported through export to ONNX\* format. A summary of the steps for optimizing and deploying a model that was trained with the PyTorch\* framework:
1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for ONNX\*.
1. [Configure the Model Optimizer](../../Deep_Learning_Model_Optimizer_DevGuide.md) for ONNX\*.
2. [Export PyTorch model to ONNX\*](#export-to-onnx).
3. [Convert an ONNX\* model](Convert_Model_From_ONNX.md) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values.
4. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided [sample applications](../../../IE_DG/Samples_Overview.md).

View File

@ -26,7 +26,7 @@
A summary of the steps for optimizing and deploying a model that was trained with the TensorFlow\* framework:
1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for TensorFlow\* (TensorFlow was used to train your model).
1. [Configure the Model Optimizer](../../Deep_Learning_Model_Optimizer_DevGuide.md) for TensorFlow\* (TensorFlow was used to train your model).
2. [Freeze the TensorFlow model](#freeze-the-tensorflow-model) if your model is not already frozen, or skip this step and use the [instructions](#loading-nonfrozen-models) to convert a non-frozen model.
3. [Convert a TensorFlow\* model](#Convert_From_TF) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values.
4. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided [sample applications](../../../IE_DG/Samples_Overview.md).
@ -352,7 +352,7 @@ TensorFlow*-specific parameters:
mo --input_model inception_v1.pb -b 1 --tensorboard_logdir /tmp/log_dir --output_dir <OUTPUT_MODEL_DIR>
```
* Launching the Model Optimizer for a model with custom TensorFlow operations (refer to the [TensorFlow* documentation](https://www.tensorflow.org/extend/adding_an_op)) implemented in C++ and compiled into the shared library `my_custom_op.so`. Model Optimizer falls back to TensorFlow to infer output shape of operations implemented in the library if a custom TensorFlow operation library is provided. If it is not provided, a custom operation with an inference function is needed. For more information about custom operations, refer to the [Extending the Model Optimizer with New Primitives](../customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md).
* Launching the Model Optimizer for a model with custom TensorFlow operations (refer to the [TensorFlow* documentation](https://www.tensorflow.org/extend/adding_an_op)) implemented in C++ and compiled into the shared library `my_custom_op.so`. Model Optimizer falls back to TensorFlow to infer output shape of operations implemented in the library if a custom TensorFlow operation library is provided. If it is not provided, a custom operation with an inference function is needed. For more information about custom operations, refer to the [Custom Layers Guide](../../../HOWTO/Custom_Layers_Guide.md).
```sh
mo --input_model custom_model.pb --tensorflow_custom_layer_libraries ./my_custom_op.so --output_dir <OUTPUT_MODEL_DIR>
```

View File

@ -11,6 +11,7 @@
openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_MxNet
openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Kaldi
openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_ONNX
openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Paddle
openvino_docs_MO_DG_prepare_model_Model_Optimization_Techniques
openvino_docs_MO_DG_prepare_model_convert_model_Cutting_Model
openvino_docs_MO_DG_prepare_model_Supported_Frameworks_Layers
@ -265,7 +266,7 @@ mo --input_model bvlc_alexnet.caffemodel --reverse_input_channels --mean_values
```
Launch the Model Optimizer for the Caffe bvlc_alexnet model with extensions listed in the specified directories and the specified mean_images binaryproto
file. For more information about extensions, please refer to [this](../customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) page.
file. For more information about extensions, please refer to the [Custom Layers Guide](../../../HOWTO/Custom_Layers_Guide.md).
```sh
mo --input_model bvlc_alexnet.caffemodel --extensions /home/,/some/other/path/ --mean_file /path/to/binaryproto --output_dir <OUTPUT_MODEL_DIR>
```
@ -288,7 +289,7 @@ mo --input_model FaceNet.pb --input "placeholder_layer_name->[0.1 1.2 2.3]" --ou
## See Also
* [Configuring the Model Optimizer](../Config_Model_Optimizer.md)
* [Configuring the Model Optimizer](../../Deep_Learning_Model_Optimizer_DevGuide.md)
* [IR Notation Reference](../../IR_and_opsets.md)
* [Model Optimizer Extensibility](../customize_model_optimizer/Customize_Model_Optimizer.md)
* [Model Cutting](Cutting_Model.md)

View File

@ -274,4 +274,4 @@ exec_net = ie.load_network(network=net, device_name="CPU")
result_ie = exec_net.infer(input_data)
```
For more information about Python API, refer to [Inference Engine Python API Overview](../../../../../src/bindings/python/docs/api_overview.md).
For more information about Python API, refer to [Inference Engine Python API](ie_python_api/api.html).

View File

@ -9,7 +9,7 @@
You can download TensorFlow\* Object Detection API models from the <a href="https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md">TensorFlow 1 Detection Model Zoo</a> or <a href="https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md">TensorFlow 2 Detection Model Zoo</a>.
<strong>NOTE</strong>: Before converting, make sure you have configured the Model Optimizer. For configuration steps, refer to [Configuring the Model Optimizer](../../Config_Model_Optimizer.md).
<strong>NOTE</strong>: Before converting, make sure you have configured the Model Optimizer. For configuration steps, refer to [Configuring the Model Optimizer](../../../Deep_Learning_Model_Optimizer_DevGuide.md).
To convert a TensorFlow\* Object Detection API model, go to the `<INSTALL_DIR>/tools/model_optimizer` directory and run the `mo` script with the following required parameters:

View File

@ -3,7 +3,7 @@
This tutorial explains how to convert RetinaNet model to the Intermediate Representation (IR).
[Public RetinaNet model](https://github.com/fizyr/keras-retinanet) does not contain pretrained TensorFlow\* weights.
To convert this model to the TensorFlow\* format, you can use [Reproduce Keras* to TensorFlow* Conversion tutorial](https://docs.openvinotoolkit.org/latest/omz_models_model_retinanet_tf.html).
To convert this model to the TensorFlow\* format, you can use [Reproduce Keras* to TensorFlow* Conversion tutorial](https://docs.openvino.ai/latest/omz_models_model_retinanet_tf.html).
After you convert the model to TensorFlow* format, run the Model Optimizer command below:
```sh

View File

@ -6,45 +6,10 @@
:maxdepth: 1
:hidden:
openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_with_New_Primitives
openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers
openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_with_New_Primitives
openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Legacy_Mode_for_Caffe_Custom_Layers
@endsphinxdirective
- <a href="#model-representation-in-memory">Model Representation in Memory</a>
- <a href="#model-conversion-pipeline">Model Conversion Pipeline</a>
- <a href="#model-loading">Model Loading</a>
- <a href="#operations-attributes-extracting">Operations Attributes Extracting</a>
- <a href="#front-phase">Front Phase</a>
- <a href="#partial-inference">Partial Inference</a>
- <a href="#middle-phase">Middle Phase</a>
- <a href="#layout-change">NHWC to NCHW Layout Change</a>
- <a href="#back-phase">Back Phase</a>
- <a href="#ir-emitting">Intermediate Representation Emitting</a>
- <a href="#graph-ports-and-conneсtions">Graph Traversal and Modification Using <code>Port</code>s and <code>Connection</code>s</a>
- <a href="#intro-ports">Ports</a>
- <a href="#intro-conneсtions">Connections</a>
- <a href="#extensions">Model Optimizer Extensions</a>
- <a href="#operation">Model Optimizer Operation</a>
- <a href="#extension-extractor">Operation Extractor</a>
- <a href="#graph-transformations">Graph Transformation Extensions</a>
- <a href="#front-phase-transformations">Front Phase Transformations</a>
- <a href="#pattern-defined-front-phase-transformations">Pattern-Defined Front Phase Transformations</a>
- <a href="#specific-operation-front-phase-transformations">Specific Operation Front Phase Transformations</a>
- <a href="#generic-front-phase-transformations">Generic Front Phase Transformations</a>
- <a href="#node-name-pattern-front-phase-transformations">Node Name Pattern Front Phase Transformations</a>
- <a href="#start-end-points-front-phase-transformations">Front Phase Transformations Using Start and End Points</a>
- <a href="#generic-transformations-config-front-phase-transformations">Generic Front Phase Transformations Enabled with Transformations Configuration File</a>
- <a href="#middle-phase-transformations">Middle Phase Transformations</a>
- <a href="#pattern-defined-middle-phase-transformations">Pattern-Defined Middle Phase Transformations</a>
- <a href="#generic-middle-phase-transformations">Generic Middle Phase Transformations</a>
- <a href="#back-phase-transformations">Back Phase Transformations</a>
- <a href="#pattern-defined-back-phase-transformations">Pattern-Defined Back Phase Transformations</a>
- <a href="#generic-back-phase-transformations">Generic Back Phase Transformations</a>
- <a href="#see-also">See Also</a>
<a name="model-optimizer-extensibility"></a>Model Optimizer extensibility mechanism enables support of new operations and custom transformations to generate the optimized intermediate representation (IR) as described in the
[Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../../IR_and_opsets.md). This
mechanism is a core part of the Model Optimizer. The Model Optimizer itself uses it extensively under the hood, so it also serves as a large set of examples of how to add custom logic to support your model.
@ -148,10 +113,6 @@ operation to trigger the extracting function for and the value is the function.
to extract attributes from. This is a legacy and non-extensible approach so it should be avoided. This mechanism will be
removed in future versions of the Model Optimizer.
3. Caffe specific extractor using the `CustomLayersMapping.xml` described in the
[Legacy Mode for Caffe\* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md). This approach is deprecated and will be
removed in future versions of the Model Optimizer.
The execution order of the extractors is as follows:
* `CustomLayersMapping.xml` (for Caffe models only).
* Model Optimizer extension.
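For orientation, the sketch below shows the general shape of such a Model Optimizer extension - an operation extractor derived from `FrontExtractorOp`. It is illustrative only: the `MyOp` class, its import path, and the attribute values are assumptions rather than part of the original text; see the MXNet-specific example later in this document for the concrete pattern used there.
```py
# Minimal sketch of a Model Optimizer operation extractor (illustrative only).
# `MyOp` is assumed to be an Op subclass you have already defined for the
# Model Optimizer; its import path below is hypothetical.
from openvino.tools.mo.front.extractor import FrontExtractorOp
from my_mo_extensions.ops.my_op import MyOp  # hypothetical location of your Op class


class MyOpFrontExtractor(FrontExtractorOp):
    op = 'MyOp'     # framework operation type that triggers this extractor
    enabled = True  # register and enable the extractor

    @classmethod
    def extract(cls, node):
        # Parse framework-specific attributes from `node` here; the value below
        # is a placeholder.
        attrs = {'my_attribute': 1.0}
        MyOp.update_node_stat(node, attrs)  # attach the normalized attributes to the node
        return cls.enabled
```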
@ -638,10 +599,6 @@ There are several types of Model Optimizer extractor extensions:
1. The generic one, which is described in this section.
2. The special extractor for Caffe\* models with Python layers. This kind of extractor is described in the
[Extending the Model Optimizer with Caffe* Python Layers](Extending_Model_Optimizer_with_Caffe_Python_Layers.md).
3. The special extractor for MXNet\* models with custom operations. This kind of extractor is described in the
[Extending the Model Optimizer for Custom MXNet* Operations](Extending_MXNet_Model_Optimizer_with_New_Primitives.md).
4. The special extractor and fallback to Caffe\* for shape inference is described in the
[Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md).
This chapter is focused on option #1, which provides a generic mechanism for the operation extractor applicable to
all frameworks. Model Optimizer provides the `mo.front.extractor.FrontExtractorOp` class as a base class to implement the
@ -1302,8 +1259,6 @@ Refer to the `extensions/back/GatherNormalizer.py` for the example of a such typ
## See Also <a name="see-also"></a>
* [Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../../IR_and_opsets.md)
* [Converting a Model to Intermediate Representation (IR)](../convert_model/Converting_Model.md)
* [nGraph Basic Concepts](@ref openvino_docs_nGraph_DG_basic_concepts)
* [nGraph Basic Concepts](../../../nGraph_DG/nGraph_basic_concepts.md)
* [Inference Engine Extensibility Mechanism](../../../IE_DG/Extensibility_DG/Intro.md)
* [Extending the Model Optimizer with Caffe* Python Layers](Extending_Model_Optimizer_with_Caffe_Python_Layers.md)
* [Extending the Model Optimizer for Custom MXNet* Operations](Extending_MXNet_Model_Optimizer_with_New_Primitives.md)
* [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md)

View File

@ -1,40 +0,0 @@
# Extending Model Optimizer for Custom MXNet* Operations {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_MXNet_Model_Optimizer_with_New_Primitives}
This section provides instructions on how to support a custom MXNet operation (in the MXNet documentation, called an *operator* or *layer*) that is not part of the MXNet operation set. Creating custom operations is described in
[this guide](https://mxnet.apache.org/versions/1.7.0/api/faq/new_op.html).
This section describes how to extract operator attributes in the Model Optimizer. The rest of the
operation-enabling pipeline, as well as documentation on how to support operations from the standard MXNet operation set, is
described in the main [Customize_Model_Optimizer](Customize_Model_Optimizer.md) document.
## Writing Extractor for Custom MXNet Operation
Custom MXNet operations have an attribute `op` (defining the type of the operation) equal to `Custom` and an attribute
`op_type`, which is an operation type defined by the user. To extract attributes for such operations, implement an
extractor class inherited from the `MXNetCustomFrontExtractorOp` class instead of the `FrontExtractorOp` class used for
standard framework operations. The `op` class attribute value should be set to the `op_type` value
so that the extractor is triggered for this kind of operation.
The following is an example of the extractor for a custom operation registered with type (`op_type` value) equal to
`MyCustomOp`, which has an attribute `my_attribute` of floating-point type with a default value of `5.6`. This sample
assumes that the `CustomOp` class (inherited from the `Op` class) has already been created for the Model Optimizer
operation corresponding to this MXNet custom operation, as described in [Customize_Model_Optimizer](Customize_Model_Optimizer.md).
```py
from openvino.tools.mo.ops.custom_op import CustomOp  # implementation of the MO operation class
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.front.extractor import MXNetCustomFrontExtractorOp


class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):  # inherit from the specific base class
    op = 'MyCustomOp'  # the value corresponding to the `op_type` value of the MXNet operation
    enabled = True     # the extractor is enabled

    @classmethod
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)  # parse the attributes to a dictionary with string values
        node_attrs = {
            'my_attribute': attrs.float('my_attribute', 5.6)
        }
        CustomOp.update_node_stat(node, node_attrs)  # update the attributes of the node
        return cls.enabled
```

View File

@ -84,5 +84,5 @@ class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp):
```
## See Also
* [Customize_Model_Optimizer](Customize_Model_Optimizer.md)
* [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md)

View File

@ -1,3 +0,0 @@
# [DEPRECATED] Extending Model Optimizer with New Primitives {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_with_New_Primitives}
This page is deprecated. Please refer to [Model Optimizer Extensibility](Customize_Model_Optimizer.md) page for more information.

View File

@ -1,75 +0,0 @@
# [DEPRECATED] Legacy Mode for Caffe* Custom Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Legacy_Mode_for_Caffe_Custom_Layers}
> **NOTE: This functionality is deprecated and will be removed in future releases.**
Model Optimizer can register custom layers in a way that the output shape is calculated by the Caffe\* framework
installed on your system. This approach has several limitations:
* If the output shape of your layer depends on dynamic parameters, input data, or the parameters of previous layers, the
output shape calculated via Caffe can be incorrect. For example, `SimplerNMS` filters out bounding boxes that do not
satisfy a condition. Internally, the Caffe fallback forwards the whole net without any meaningful data - just noise - so
it is natural to get only one bounding box (0,0,0,0) instead of the expected number (for example, 15). There is an
option to patch Caffe accordingly, but that makes successful Intermediate Representation generation dependent on the
patched Caffe build on that particular machine. To keep the solution independent of Caffe, we recommend using the
extensions mechanism for such layers, as described in [Model Optimizer Extensibility](Customize_Model_Optimizer.md).
* It is not possible to produce an Intermediate Representation on a machine that does not have Caffe installed.
> **NOTE**: The Caffe Python\* API has an issue when the layer name does not correspond to the name of its top. The fix was
> implemented in [BVLC Caffe\*](https://github.com/BVLC/caffe/commit/35a7b87ad87457291dfc79bf8a7e7cf7ef278cbb). The
> Caffe framework on your computer must contain this fix. Otherwise, the Caffe framework can unexpectedly fail during the
> fallback procedure.
> **NOTE**: The Caffe fallback feature was validated against [this GitHub revision](https://github.com/BVLC/caffe/tree/99466224dac86ddb86296b1e727794fb836bd80f). You may have issues with forks or later Caffe framework versions.
1. Create a file `CustomLayersMapping.xml`:
```shell
mv extensions/front/caffe/CustomLayersMapping.xml.example extensions/front/caffe/CustomLayersMapping.xml
```
2. Add (register) custom layers to `CustomLayersMapping.xml`:
```
<CustomLayer NativeType="${Type}" hasParam="${has_params}" protoParamName="${layer_param}"/>
```
Where:
* `${Type}` is the type of the layer in Caffe
* `${has_params}` is "true" if the layer has parameters, and is "false" otherwise
* `${layer_param}` is the name of the layer parameters in `caffe.proto`, if the layer has them
**Example**:
1. The `Proposal` layer has parameters, and they appear in the Intermediate Representation. The parameters are stored in
the `proposal_param` property of the layer:
```shell
\<CustomLayer NativeType="Proposal" hasParam ="true" protoParamName = "proposal_param"/\>
```
2. The `CustomLayer` layer has no parameters:
```shell
\<CustomLayer NativeType="CustomLayer" hasParam ="false"/\>
```
## Building Caffe\*
1. Build Caffe\* with Python\* 3.5:
```shell
export CAFFE_HOME=PATH_TO_CAFFE
cd $CAFFE_HOME
rm -rf ./build
mkdir ./build
cd ./build
cmake -DCPU_ONLY=ON -DOpenCV_DIR=<your opencv install dir> -DPYTHON_EXECUTABLE=/usr/bin/python3.5 ..
make all # also builds pycaffe
make install
make runtest # optional
```
2. Add the Caffe Python directory to `PYTHONPATH` so that the `caffe` module can be imported from a Python program:
```shell
export PYTHONPATH=$CAFFE_HOME/python:$PYTHONPATH
```
3. Check the Caffe installation:
```shell
python3 -c "import caffe"
```
If Caffe was installed correctly, the `caffe` module is imported without errors.

docs/_templates/layout.html
View File

@ -0,0 +1,18 @@
{%- extends "openvino_sphinx_theme/layout.html" %}
{% block css %}
{{ super() }}
<link rel="stylesheet" href="{{ pathto('_static/css/viewer.min.css', 1) }}" type="text/css" />
<link rel="stylesheet" href="{{ pathto('_static/css/custom.css', 1) }}" type="text/css" />
<script src="https://cdn.jsdelivr.net/npm/chart.js@2.9.3/dist/Chart.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-datalabels"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/chartjs-plugin-annotation/0.5.7/chartjs-plugin-annotation.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-barchart-background@1.3.0/build/Plugin.Barchart.Background.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/chartjs-plugin-deferred@1"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/PapaParse/5.3.1/papaparse.min.js"></script>
<script src="{{ pathto('_static/js/viewer.min.js', 1) }}"></script>
<script src="{{ pathto('_static/js/custom.js', 1) }}"></script>
<script src="{{ pathto('_static/js/graphs.js', 1) }}"></script>
<script src="/assets/versions_raw.js"></script>
{% endblock %}

View File

@ -1,4 +1,4 @@
.. _api_reference
.. _api_reference:
API Reference
-------------
@ -14,5 +14,4 @@ API references available:
:maxdepth: 2
../groupie_cpp_api
../groupie_c_api
ie_python_api/api

View File

@ -51,7 +51,7 @@ The image size used in the inference depends on the network being benchmarked. T
Intel partners with various vendors all over the world. Visit the [Intel® AI: In Production Partners & Solutions Catalog](https://www.intel.com/content/www/us/en/internet-of-things/ai-in-production/partners-solutions-catalog.html) for a list of Equipment Makers and the [Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md) documentation. You can also remotely test and run models before purchasing any hardware by using [Intel® DevCloud for the Edge](http://devcloud.intel.com/edge/).
#### 8. How can I optimize my models for better performance or accuracy?
We published a set of guidelines and recommendations to optimize your models available in an [introductory](../IE_DG/Intro_to_Performance.md) guide and an [advanced](../optimization_guide/dldt_optimization_guide.md) guide. For further support, please join the conversation in the [Community Forum](https://software.intel.com/en-us/forums/intel-distribution-of-openvino-toolkit).
We have published a set of guidelines and recommendations for optimizing your models in the [optimization guide](../optimization_guide/dldt_optimization_guide.md). For further support, please join the conversation in the [Community Forum](https://software.intel.com/en-us/forums/intel-distribution-of-openvino-toolkit).
#### 9. Why are INT8 optimized models used for benchmarking on CPUs with no VNNI support?
The benefit of low-precision optimization using the OpenVINO™ toolkit model optimizer extends beyond processors supporting VNNI through Intel® DL Boost. The reduced bit width of INT8 compared to FP32 allows Intel® CPU to process the data faster and thus offers better throughput on any converted model agnostic of the intrinsically supported low-precision optimizations within Intel® hardware. Please refer to [INT8 vs. FP32 Comparison on Select Networks and Platforms](performance_int8_vs_fp32.md) for comparison on boost factors for different network models and a selection of Intel® CPU architectures, including AVX-2 with Intel® Core™ i7-8700T, and AVX-512 (VNNI) with Intel® Xeon® 5218T and Intel® Xeon® 8270.

View File

@ -6,7 +6,7 @@
:hidden:
openvino_docs_performance_benchmarks_faq
Download Performance Data Spreadsheet in MS Excel* Format <https://docs.openvinotoolkit.org/downloads/benchmark_files/OV-2021.4-Download-Excel.xlsx>
Download Performance Data Spreadsheet in MS Excel* Format <https://docs.openvino.ai/downloads/benchmark_files/OV-2021.4-Download-Excel.xlsx>
openvino_docs_performance_int8_vs_fp32
@ -365,6 +365,6 @@ Testing by Intel done on: see test date for each HW platform below.
| BIOS Release | September 21, 2018 | September 21, 2018 |
| Test Date | June 18, 2021 | June 18, 2021 |
Please follow this link for more detailed configuration descriptions: [Configuration Details](https://docs.openvinotoolkit.org/resources/benchmark_files/system_configurations_2021.4.html)
Please follow this link for more detailed configuration descriptions: [Configuration Details](https://docs.openvino.ai/resources/benchmark_files/system_configurations_2021.4.html)
Results may vary. For workloads and configurations visit: [www.intel.com/PerformanceIndex](https://www.intel.com/PerformanceIndex) and [Legal Information](../Legal_Information.md).

View File

@ -131,9 +131,6 @@ def setup(app):
logger = logging.getLogger(__name__)
app.add_config_value('doxygen_mapping_file', doxygen_mapping_file, rebuild=True)
app.add_config_value('repositories', repositories, rebuild=True)
app.add_css_file('css/viewer.min.css')
app.add_css_file('css/custom.css')
app.add_js_file('js/viewer.min.js')
app.add_js_file('js/custom.js')
app.add_js_file('js/graphs.js')
try:

View File

@ -1,245 +1,61 @@
inference-engine/include/ie_api.h
inference-engine/ie_bridges/c/include/c_api/ie_c_api.h
doxygen_errors
inference-engine/include/ie_parallel.hpp
openvino/itt/include/openvino/itt.hpp
inference-engine/src/transformations/include/ngraph_ops/type_relaxed.hpp
openvino/docs/IE_DG/supported_plugins/GNA.md
inference-engine/src/plugin_api/ie_system_conf.h
openvino/docs/documentation.md
openvino/docs/gapi/face_beautification.md
openvino/docs/gapi/gapi_face_analytics_pipeline.md
openvino/docs/get_started/get_started_demos.md
openvino/docs/get_started/get_started_dl_workbench.md
openvino/docs/get_started/get_started_linux.md
openvino/docs/get_started/get_started_macos.md
openvino/docs/get_started/get_started_raspbian.md
openvino/docs/get_started/get_started_windows.md
openvino/docs/HOWTO/Custom_Layers_Guide.md
openvino/docs/IE_DG/inference_engine_intro.md
openvino/docs/IE_DG/Int8Inference.md
openvino/docs/IE_DG/Intro_to_Performance.md
openvino/docs/IE_DG/Model_Downloader.md
openvino/docs/IE_DG/protecting_model_guide.md
openvino/docs/IE_DG/Samples_Overview.md
openvino/docs/IE_DG/ShapeInference.md
openvino/docs/IE_DG/Tools_Overview.md
openvino/docs/IE_PLUGIN_DG/PluginTesting.md
openvino/docs/index.md
openvino/docs/install_guides/deployment-manager-tool.md
openvino/docs/install_guides/installing-openvino-apt.md
openvino/docs/install_guides/installing-openvino-docker-linux.md
openvino/docs/install_guides/installing-openvino-docker-windows.md
openvino/docs/install_guides/installing-openvino-images.md
openvino/docs/install_guides/installing-openvino-linux-ivad-vpu.md
openvino/docs/install_guides/installing-openvino-linux.md
openvino/docs/install_guides/installing-openvino-macos.md
openvino/docs/install_guides/installing-openvino-raspbian.md
openvino/docs/install_guides/installing-openvino-windows.md
openvino/docs/install_guides/installing-openvino-yum.md
openvino/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md
openvino/docs/MO_DG/prepare_model/Additional_Optimizations.md
openvino/docs/MO_DG/prepare_model/convert_model/IR_suitable_for_INT8_inference.md
openvino/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md
openvino/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md
openvino/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md
openvino/docs/MO_DG/prepare_model/Default_Model_Optimizer_Optimizations.md
openvino/docs/MO_DG/prepare_model/Getting_performance_numbers.md
openvino/docs/model_zoo.md
openvino/docs/optimization_guide/dldt_deployment_optimization_guide.md
openvino/docs/optimization_guide/dldt_deployment_optimization_guide_additional.md
openvino/docs/ovsa/ovsa_get_started.md
openvino/docs/resources/introduction.md
openvino/docs/security_guide/workbench.md
openvino/inference-engine/ie_bridges/c/docs/api_overview.md
openvino/inference-engine/ie_bridges/c/samples/hello_classification/README.md
openvino/inference-engine/ie_bridges/c/samples/hello_nv12_input_classification/README.md
openvino/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/README.md
openvino/inference-engine/ie_bridges/python/sample/classification_sample_async/README.md
openvino/inference-engine/ie_bridges/python/sample/hello_classification/README.md
openvino/inference-engine/ie_bridges/python/sample/hello_reshape_ssd/README.md
openvino/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md
openvino/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/README.md
openvino/inference-engine/ie_bridges/python/sample/speech_sample/README.md
openvino/inference-engine/ie_bridges/python/sample/style_transfer_sample/README.md
openvino/inference-engine/samples/benchmark_app/README.md
openvino/inference-engine/samples/classification_sample_async/README.md
openvino/inference-engine/samples/hello_classification/README.md
openvino/inference-engine/samples/hello_nv12_input_classification/README.md
openvino/inference-engine/samples/hello_reshape_ssd/README.md
openvino/inference-engine/samples/object_detection_sample_ssd/README.md
openvino/inference-engine/samples/speech_sample/README.md
openvino/inference-engine/samples/style_transfer_sample/README.md
openvino/inference-engine/tools/benchmark_tool/README.md
inference-engine/src/plugin_api/exec_graph_info.hpp
inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
inference-engine/src/plugin_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp
inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp
inference-engine/src/plugin_api/threading/ie_istreams_executor.hpp
inference-engine/include/gpu/gpu_context_api_ocl.hpp
inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp
inference-engine/src/transformations/include/ngraph_ops/convolution_ie.hpp
gst/docs/index.md
open_model_zoo/models/public/i3d-rgb-tf/README.md
open_model_zoo/models/public/index.md
open_model_zoo/tools/accuracy_checker/accuracy_checker/annotation_converters/README.md
open_model_zoo/tools/accuracy_checker/README.md
workbench/configs/README.md
workbench/docs/CLI.md
workbench/docs/FrequentlyAskedQuestions.md
workbench/docs/InstallationGuide.md
workbench/README.md
workbench/README_dev.md
pot/configs/README.md
pot/docs/CLI.md
pot/docs/FrequentlyAskedQuestions.md
pot/docs/InstallationGuide.md
pot/README.md
pot/README_dev.md
workbench/docs/Workbench_DG/Set_Accuracy_Config.md
workbench/docs/Workbench_DG/Int-8_Quantization.md
workbench/docs/Workbench_DG/Remote_Profiling.md
workbench/docs/Workbench_DG/Run_Baseline_Inference.md
workbench/docs/Workbench_DG/Run_Range_of_Inferences.md
workbench/docs/Workbench_DG/Run_Single_Inference.md
workbench/docs/Workbench_DG/Select_Models.md
workbench/docs/Workbench_DG/Visualize_Accuracy.md
openvino/docs/IE_DG/network_state_intro.md
workbench/docs/Workbench_DG/Compare_Performance_between_Two_Versions_of_Models.md
openvino/docs/benchmarks/performance_benchmarks_faq.md
openvino/docs/IE_DG/Bfloat16Inference.md
openvino/docs/IE_DG/InferenceEngine_QueryAPI.md
openvino/docs/IE_DG/Integrate_with_customer_application_new_API.md
openvino/docs/IE_DG/Model_caching_overview.md
openvino/docs/IE_DG/PythonPackage_Overview.md
src/common/legacy/include/legacy/ie_reader.hpp
src/core/builder/include/ngraph/builder/make_constant.hpp
src/core/include/ngraph/except.hpp
src/core/include/ngraph/factory_adapter.hpp
src/core/include/ngraph/opsets/opset1.hpp
src/core/include/ngraph/opsets/opset2.hpp
src/core/include/ngraph/opsets/opset3.hpp
src/core/include/ngraph/opsets/opset4.hpp
src/core/include/ngraph/opsets/opset5.hpp
src/core/include/ngraph/opsets/opset6.hpp
src/core/include/ngraph/opsets/opset7.hpp
src/core/include/ngraph/opsets/opset8.hpp
src/core/include/ngraph/pass/low_latency.hpp
src/core/include/ngraph/pass/pass.hpp
src/core/include/openvino/op/constant.hpp
src/core/include/openvino/opsets/opset1.hpp
src/core/include/openvino/opsets/opset2.hpp
src/core/include/openvino/opsets/opset3.hpp
src/core/include/openvino/opsets/opset4.hpp
src/core/include/openvino/opsets/opset5.hpp
src/core/include/openvino/opsets/opset6.hpp
src/core/include/openvino/opsets/opset7.hpp
src/core/include/openvino/opsets/opset8.hpp
src/core/reference/include/ngraph/coordinate_transform.hpp
src/inference/include/ie/ie_api.h
src/common/transformations/include/transformations/rt_info/old_api_map_order_attribute.hpp
src/core/include/openvino/core/any.hpp
src/core/include/openvino/core/preprocess/postprocess_steps.hpp
src/core/include/openvino/core/preprocess/preprocess_steps.hpp
src/common/low_precision_transformations/include/low_precision/common/ie_lpt_exception.hpp
src/core/include/openvino/core/attribute_adapter.hpp
src/inference/include/ie/ie_parallel.hpp
src/common/itt/include/openvino/itt.hpp
src/common/legacy/include/legacy/layer_transform.hpp
src/common/transformations/include/ngraph_ops/nms_static_shape_ie.hpp
src/common/transformations/include/ngraph_ops/type_relaxed.hpp
src/core/include/ngraph/util.hpp
src/core/include/ngraph/variant.hpp
src/core/include/openvino/core/type.hpp
src/inference/dev_api/ie_system_conf.h
src/core/include/openvino/core/except.hpp
src/core/include/openvino/core/extension.hpp
src/core/include/openvino/core/rtti.hpp
src/frontends/common/include/openvino/frontend/exception.hpp
openvino/docs/IE_DG/supported_plugins/AUTO.md
openvino/docs/IE_DG/supported_plugins/MULTI.md
openvino/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md
openvino/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_EfficientDet_Models.md
openvino/samples/c/hello_classification/README.md
openvino/samples/c/hello_nv12_input_classification/README.md
openvino/samples/cpp/benchmark_app/README.md
openvino/samples/cpp/classification_sample_async/README.md
openvino/samples/cpp/hello_classification/README.md
openvino/samples/cpp/hello_nv12_input_classification/README.md
openvino/samples/cpp/hello_reshape_ssd/README.md
openvino/samples/cpp/speech_sample/README.md
openvino/samples/python/classification_sample_async/README.md
openvino/samples/python/hello_classification/README.md
openvino/samples/python/hello_reshape_ssd/README.md
openvino/samples/python/ngraph_function_creation_sample/README.md
openvino/samples/python/speech_sample/README.md
openvino/src/bindings/c/docs/api_overview.md
openvino/tools/benchmark_tool/README.md
openvino/tools/pot/configs/README.md
openvino/tools/pot/docs/CLI.md
openvino/tools/pot/docs/E2eExample.md
openvino/tools/pot/docs/FrequentlyAskedQuestions.md
openvino/tools/pot/docs/InstallationGuide.md
openvino/tools/pot/docs/LowPrecisionOptimizationGuide.md
openvino/tools/pot/openvino/tools/pot/api/README.md
openvino/tools/pot/openvino/tools/pot/api/samples/3d_segmentation/README.md
openvino/tools/pot/openvino/tools/pot/api/samples/classification/README.md
openvino/tools/pot/openvino/tools/pot/api/samples/face_detection/README.md
openvino/tools/pot/openvino/tools/pot/api/samples/object_detection/README.md
openvino/tools/pot/openvino/tools/pot/api/samples/segmentation/README.md
openvino/tools/pot/README.md
src/inference/dev_api/exec_graph_info.hpp
src/common/legacy/include/legacy/ie_util_internal.hpp
src/common/legacy/include/legacy/graph_tools.hpp
src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
src/inference/dev_api/cpp_interfaces/impl/ie_infer_async_request_thread_safe_default.hpp
src/common/legacy/include/legacy/ie_layers.h
src/inference/dev_api/ie_icore.hpp
src/inference/dev_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp
src/inference/dev_api/threading/ie_istreams_executor.hpp
src/inference/dev_api/ie_performance_hints.hpp
src/common/legacy/include/legacy/net_pass.h
src/core/include/ngraph/validation_util.hpp
<NGRAPH_DEPRECATED>
src/core/builder/include/ngraph/builder/reshape.hpp
src/core/include/ngraph/file_util.hpp
src/common/legacy/include/legacy/ngraph_ops/convolution_ie.hpp
src/common/legacy/include/legacy/ngraph_ops/fully_connected.hpp
src/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp
src/common/snippets/include/snippets/emitter.hpp
src/common/snippets/include/snippets/generator.hpp
src/core/include/openvino/core/attribute_visitor.hpp
src/core/include/openvino/core/node.hpp
src/frontends/common/include/openvino/frontend/frontend.hpp
src/frontends/common/include/openvino/frontend/input_model.hpp
src/core/include/openvino/op/util/attr_types.hpp
src/core/include/openvino/op/util/rnn_cell_base.hpp
src/core/include/openvino/op/util/binary_elementwise_arithmetic.hpp
src/core/include/openvino/op/util/binary_elementwise_logical.hpp
src/core/include/openvino/op/util/deformable_convolution_base.hpp
src/core/include/openvino/op/util/embeddingbag_offsets_base.hpp
src/core/include/openvino/op/util/embeddingbag_packed_base.hpp
src/core/include/openvino/op/cum_sum.hpp
src/core/include/openvino/op/fake_quantize.hpp
src/core/include/openvino/op/lrn.hpp
src/core/include/openvino/op/deformable_psroi_pooling.hpp
src/core/include/openvino/op/gather.hpp
src/core/include/openvino/op/non_max_suppression.hpp
src/core/include/openvino/op/embeddingbag_offsets_sum.hpp
src/core/include/openvino/op/embeddingbag_packedsum.hpp
src/core/include/openvino/op/embedding_segments_sum.hpp
src/core/include/openvino/op/extractimagepatches.hpp
src/core/include/openvino/op/gru_cell.hpp
src/core/include/openvino/op/ctc_loss.hpp
src/core/include/openvino/op/hswish.hpp
src/core/include/openvino/op/mish.hpp
src/core/include/openvino/op/softplus.hpp
src/core/include/openvino/op/swish.hpp
src/core/include/openvino/op/hsigmoid.hpp
src/core/include/openvino/op/assign.hpp
src/core/include/openvino/op/read_value.hpp
src/core/include/openvino/op/if.hpp
src/core/include/openvino/pass/pattern/matcher.hpp
src/core/include/openvino/pass/pattern/op/any_output.hpp
src/core/include/openvino/op/interpolate.hpp
src/core/include/openvino/core/node_output.hpp
deprecated.rst
global.rst
exampleexample.rst
openvino_docs_ie_dg_lpt_addtransformation.rst
openvino_docs_ie_dg_lpt_alignquantizationintervals.rst
openvino_docs_ie_dg_lpt_alignquantizationparameters.rst
openvino_docs_ie_dg_lpt_avgpooltransformation.rst
openvino_docs_ie_dg_lpt_avgpooltransformation.rst
openvino_docs_ie_dg_lpt_clamptransformation.rst
openvino_docs_ie_dg_lpt_concattransformation.rst
openvino_docs_ie_dg_lpt_convertsubtractconstant.rst
openvino_docs_ie_dg_lpt_convolutionbackpropdatatransformation.rst
openvino_docs_ie_dg_lpt_convolutiontransformation.rst
openvino_docs_ie_dg_lpt_createattribute.rst
openvino_docs_ie_dg_lpt_createprecisionsdependentattribute.rst
openvino_docs_ie_dg_lpt_depthtospacetransformation.rst
openvino_docs_ie_dg_lpt_fakequantizedecompositiontransformation.rst
openvino_docs_ie_dg_lpt_fakequantizetransformation.rst
openvino_docs_ie_dg_lpt_foldconverttransformation.rst
openvino_docs_ie_dg_lpt_foldfakequantizetransformation.rst
openvino_docs_ie_dg_lpt_fuseconverttransformation.rst
openvino_docs_ie_dg_lpt_fusemultiplytofakequantizetransformation.rst
openvino_docs_ie_dg_lpt_fusesubtracttofakequantizetransformation.rst
openvino_docs_ie_dg_lpt_groupconvolutiontransformation.rst
openvino_docs_ie_dg_lpt_interpolatetransformation.rst
openvino_docs_ie_dg_lpt_linopsequencefusion.rst
openvino_docs_ie_dg_lpt_mvntransformation.rst
openvino_docs_ie_dg_lpt_markupavgpoolprecisionpreserved.rst
openvino_docs_ie_dg_lpt_markupcanbequantized.rst
openvino_docs_ie_dg_lpt_markuppertensorquantization.rst
openvino_docs_ie_dg_lpt_markupprecisions.rst
openvino_docs_ie_dg_lpt_matmultransformation.rst
openvino_docs_ie_dg_lpt_maxpooltransformation.rst
openvino_docs_ie_dg_lpt_multiplytogroupconvolutiontransformation.rst
openvino_docs_ie_dg_lpt_multiplytransformation.rst
openvino_docs_ie_dg_lpt_normalizel2transformation.rst
openvino_docs_ie_dg_lpt_prelutransformation.rst
openvino_docs_ie_dg_lpt_padtransformation.rst
openvino_docs_ie_dg_lpt_propagateprecisions.rst
openvino_docs_ie_dg_lpt_propagatesharedvalue.rst
openvino_docs_ie_dg_lpt_propagatethroughprecisionpreserved.rst
openvino_docs_ie_dg_lpt_propagatetoinput.rst
openvino_docs_ie_dg_lpt_pullreshapethroughdequantization.rst
openvino_docs_ie_dg_lpt_pulltransposethroughdequantization.rst
openvino_docs_ie_dg_lpt_reducemaxtransformation.rst
openvino_docs_ie_dg_lpt_reducemeantransformation.rst
openvino_docs_ie_dg_lpt_reducemintransformation.rst
openvino_docs_ie_dg_lpt_reducesumtransformation.rst
openvino_docs_ie_dg_lpt_relutransformation.rst
openvino_docs_ie_dg_lpt_reshapetransformation.rst
openvino_docs_ie_dg_lpt_shufflechannelstransformation.rst
openvino_docs_ie_dg_lpt_splittransformation.rst
openvino_docs_ie_dg_lpt_squeezetransformation.rst
openvino_docs_ie_dg_lpt_stridedslicetransformation.rst
openvino_docs_ie_dg_lpt_subtracttransformation.rst
openvino_docs_ie_dg_lpt_transposetransformation.rst
openvino_docs_ie_dg_lpt_unsqueezetransformation.rst
openvino_docs_ie_dg_lpt_updatesharedprecisionpreserved.rst
openvino_docs_ie_dg_lpt_variadicsplittransformation.rst
openvino_docs_ie_plugin_dg_lp_representation.rst
openvino_docs_ie_dg_lpt.rst

View File

@ -31,7 +31,7 @@ end
g_titleUnderlineCharMap =
{
"=", "~", "-", "+", "*", "^" -- 1 to 6
"=", '~', "-", "+", "*", "^" -- 1 to 6
}
function getTitle(title, level)

View File

@ -1,4 +1,17 @@
# Introduction to OpenCV Graph API (G-API) {#openvino_docs_gapi_gapi_intro}
@sphinxdirective
.. toctree::
:maxdepth: 1
:hidden:
openvino_docs_gapi_kernel_api
openvino_docs_gapi_face_beautification
openvino_docs_gapi_gapi_face_analytics_pipeline
@endsphinxdirective
OpenCV Graph API (G-API) is an OpenCV module targeted at making regular image and video processing fast and portable. G-API is a special module in OpenCV: in contrast with the majority of other main modules, it acts as a framework rather than as a specific CV algorithm.
G-API is positioned as a next-level optimization enabler for computer vision, focusing not on particular CV functions but on optimization of the whole algorithm.
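To make the framework idea above concrete, here is a minimal sketch that assumes the OpenCV Python bindings for G-API (`cv2.GMat`, `cv2.GComputation`, and operations under `cv2.gapi`), which are available in recent OpenCV builds; exact binding names and return conventions may vary between versions, so treat this as an illustration rather than reference code.
```py
# Minimal G-API sketch (assumes OpenCV was built with the Python G-API bindings).
import cv2 as cv
import numpy as np

# Describe the graph once: a single input image passed through a median blur.
g_in = cv.GMat()
g_out = cv.gapi.medianBlur(g_in, 5)
graph = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))

# Execute the graph on real data; G-API compiles and optimizes it under the hood.
img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
blurred = graph.apply(cv.gin(img))
```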

View File

@ -45,7 +45,6 @@
:hidden:
:caption: Get Started Guides
Get Started with One-Command Demo <openvino_docs_get_started_get_started_scripts>
Get Started with Step-by-step Demo <openvino_docs_get_started_get_started_demos>
Get Started with Tutorials <tutorials>
@ -56,18 +55,6 @@
openvino_docs_IE_DG_Samples_Overview
.. toctree::
:maxdepth: 1
:hidden:
:caption: Reference Implementations For Speech Recognition Apps
openvino_inference_engine_samples_speech_libs_and_demos_Speech_libs_and_demos
openvino_inference_engine_samples_speech_libs_and_demos_Speech_library
openvino_inference_engine_samples_speech_libs_and_demos_Offline_speech_recognition_demo
openvino_inference_engine_samples_speech_libs_and_demos_Live_speech_recognition_demo
openvino_inference_engine_samples_speech_libs_and_demos_Kaldi_SLM_conversion_tool
@endsphinxdirective
@ -86,10 +73,6 @@
<h3>Choose how you want to progress:</h3>
<div id="GSG_nextstepchoice">
<a href="openvino_docs_get_started_get_started_scripts.html" >
<h4>One-command demo </h4>
<p>Execute just one command and watch all the steps happening before your eyes. </p>
</a>
<a href="openvino_docs_get_started_get_started_demos.html" >
<h4>Step-by-step demo </h4>
<p>Follow the step-by-step instructions to execute simple tasks with OpenVINO. </p>

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6675f4b68df7eaa3d6188ecc8b5d53be572cf9c92f53abac3bc6416e6b428d0c
size 196146

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:539deb67a7d1c0e8b0c037f8e7488445be0895e8e717bed5cfec64131936870c
size 198207

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6038ccd7873a1a818d944139ea3144a115dae19f0d3094e590a8a0c2b7b3a46c
size 95228

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2925e58a71d684e23776e6ed55cc85d9085b3ba5e484720528aeac5fa59f9e3a
size 55404

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f4a52661c05977d878c614c4f8510935982ce8a0e120e05690307d7c95e4ab31
size 73999

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ddb0550f3f04c177ec116d6c41e6d3a2ac1fedea7121e10ad3836f84c86a5c78
size 35278

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f1e329304ff3d586bb2b8e2442333ede085593f40b1567bd5250508d33d3b9f9
size 32668

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:605515f25a746579d3622b7a274c7dece95e4fbfc6c1817f99431c1abf116070
size 55409

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ca48900ca8f6733c4a8ebc957517fbed80f3c080f53d251eeebb01f082c8f83
size 55646

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ba94c2c0e0cb98b9e43c876d060d8a7965182461b0d505167eb71134d4975b8f
size 58204

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75628b7d02f1fe5c25a233fa16ae1c6c3d5060bf3d15bc7b1e5b9ea71ce50b73
size 50227

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:72ab36115cecfee4b215e1b21911ebac3706e513b72eea7bb829932f7bdb3a19
size 70515

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90e5ff4285c9d1069647097157eccf7d8a3f545f4ba8b93930b55d8b62c17a1a
size 100677

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:70aee6f0fd30c8e2139950c6bc831dc11b2616ea8f04b991efc9b3f5b7b11ce6
size 88891

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c1e297da7f7dfd2af7a0ba47ba1e5c14376f21b15dfcde1fe6f5ad3412ad8feb
size 21296

View File

@ -460,24 +460,24 @@ As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes seve
@sphinxdirective
.. tab:: Linux
- ``/opt/intel/openvino_2021/deployment_tools/demo/car.png``
- ``/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp``
- `Sample images and video <https://storage.openvinotoolkit.org/data/test_data/>`_
- `Sample videos <https://github.com/intel-iot-devkit/sample-videos>`_
- ``/opt/intel/openvino_2021/deployment_tools/demo/car.png``
- ``/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp``
- `Sample images and video <https://storage.openvinotoolkit.org/data/test_data/>`_
- `Sample videos <https://github.com/intel-iot-devkit/sample-videos>`_
.. tab:: Windows
- ``<INSTALL_DIR>\deployment_tools\demo\car.png``
- ``<INSTALL_DIR>\deployment_tools\demo\car_1.bmp``
- `Sample images and video <https://storage.openvinotoolkit.org/data/test_data/>`_
- `Sample videos <https://github.com/intel-iot-devkit/sample-videos>`_
- ``<INSTALL_DIR>\deployment_tools\demo\car.png``
- ``<INSTALL_DIR>\deployment_tools\demo\car_1.bmp``
- `Sample images and video <https://storage.openvinotoolkit.org/data/test_data/>`_
- `Sample videos <https://github.com/intel-iot-devkit/sample-videos>`_
.. tab:: macOS
- ``/opt/intel/openvino_2021/deployment_tools/demo/car.png``
- ``/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp``
- `Sample images and video <https://storage.openvinotoolkit.org/data/test_data/>`_
- `Sample videos <https://github.com/intel-iot-devkit/sample-videos>`_
- ``/opt/intel/openvino_2021/deployment_tools/demo/car.png``
- ``/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp``
- `Sample images and video <https://storage.openvinotoolkit.org/data/test_data/>`_
- `Sample videos <https://github.com/intel-iot-devkit/sample-videos>`_
@endsphinxdirective
@ -587,7 +587,7 @@ The following commands run the Image Classification Code Sample using the `car.p
**GPU:**
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires
[additional hardware configuration steps](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps), as described earlier on this page. Running on GPU is not compatible with macOS*.
[additional hardware configuration steps](https://docs.openvino.ai/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps), as described earlier on this page. Running on GPU is not compatible with macOS*.
@sphinxdirective
.. tab:: Linux
@ -817,7 +817,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on
@endsphinxdirective
**GPU:**
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires [additional hardware configuration steps](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps), as described earlier on this page. Running on GPU is not compatible with macOS*.
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires [additional hardware configuration steps](https://docs.openvino.ai/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps), as described earlier on this page. Running on GPU is not compatible with macOS*.
@sphinxdirective
.. tab:: Linux
@ -841,7 +841,7 @@ To run the **Security Barrier Camera Demo Application** using an input image on
@endsphinxdirective
**MYRIAD:**
> **NOTE**: Running inference on VPU devices (Intel® Movidius™ Neural Compute Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires [additional hardware configuration steps](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html#additional-NCS-steps), as described earlier on this page.
> **NOTE**: Running inference on VPU devices (Intel® Movidius™ Neural Compute Stick or Intel® Neural Compute Stick 2) with the MYRIAD plugin requires [additional hardware configuration steps](https://docs.openvino.ai/latest/_docs_install_guides_installing_openvino_linux.html#additional-NCS-steps), as described earlier on this page.
@sphinxdirective
.. tab:: Linux

View File

@ -1,66 +0,0 @@
# Quick Start with OpenVINO™ Toolkit via Deep Learning Workbench {#openvino_docs_get_started_get_started_dl_workbench}
The OpenVINO™ toolkit is a comprehensive toolkit for optimizing pretrained deep learning models to achieve high performance and prepare them for deployment on Intel® platforms. Deep Learning Workbench (DL Workbench) is the OpenVINO™ toolkit UI designed to make the production of pretrained deep learning models significantly easier.
Start working with the OpenVINO™ toolkit right from your browser: import a model, analyze its performance and accuracy, visualize the outputs, optimize and prepare the model for deployment in a matter of minutes. DL Workbench will take you through the full OpenVINO™ workflow, providing the opportunity to learn about various toolkit components.
![](./dl_workbench_img/openvino_in_dl_wb.png)
## User Goals
* Learn what neural networks are, how they work, and how to examine their architectures with more than 200 deep learning models.
* Measure and interpret model performance right after the import.
* Tune the model for enhanced performance.
* Analyze the quality of your model and visualize output.
* Use preconfigured JupyterLab\* environment to learn OpenVINO™ workflow.
## Run DL Workbench
You can [run DL Workbench](@ref workbench_docs_Workbench_DG_Install) on your local system or in the Intel® DevCloud for the Edge. Ensure that you have met the [prerequisites](@ref workbench_docs_Workbench_DG_Prerequisites).
Run DL Workbench on your local system by using the installation form. Select your options and run the commands on the local machine:
@sphinxdirective
.. raw:: html
<iframe allowfullscreen mozallowfullscreen msallowfullscreen oallowfullscreen webkitallowfullscreen style="width: 100%; height: 620px;" src="https://openvinotoolkit.github.io/workbench_aux/" frameborder="0" allow="clipboard-write;"></iframe>
@endsphinxdirective
Once DL Workbench is set up, open the http://127.0.0.1:5665 link.
![](./dl_workbench_img/active_projects_page.png)
Watch the video to learn more detailed information on how to run DL Workbench:
@sphinxdirective
.. list-table::
* - .. raw:: html
<iframe allowfullscreen mozallowfullscreen msallowfullscreen oallowfullscreen webkitallowfullscreen width="560" height="100%" src="https://www.youtube.com/embed/JBDG2g5hsoM" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
* - **Inference Engine Concept**. Duration: 3:43
@endsphinxdirective
@sphinxdirective
.. list-table::
* - .. raw:: html
<iframe allowfullscreen mozallowfullscreen msallowfullscreen oallowfullscreen webkitallowfullscreen width="560" height="100%" src="https://www.youtube.com/embed/on8xSSTKCt8" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
* - **Inference Engine Concept**. Duration: 3:43
@endsphinxdirective
Congratulations, you have installed DL Workbench. Your next step is to [Get Started with DL Workbench](@ref workbench_docs_Workbench_DG_Work_with_Models_and_Sample_Datasets) and create your first project.
## See Also
* [Get Started with DL Workbench](@ref workbench_docs_Workbench_DG_Work_with_Models_and_Sample_Datasets)
* [DL Workbench Overview](@ref workbench_docs_Workbench_DG_Introduction)
* [DL Workbench Educational Resources](@ref workbench_docs_Workbench_DG_Additional_Resources)
* [OpenVINO™ Toolkit Overview](../index.md)

View File

@ -1,20 +0,0 @@
# Get Started with Instructions {#openvino_docs_get_started_get_started_instructions}
## Introduction
This guide assumes you completed all installation and configuration steps. If you have not yet installed and configured the toolkit:
@sphinxdirective
.. tab:: Linux
See :doc:`Install Intel® Distribution of OpenVINO™ toolkit for Linux* <openvino_docs_install_guides_installing_openvino_linux>`
.. tab:: Windows
See :doc:`Install Intel® Distribution of OpenVINO™ toolkit for Windows* <openvino_docs_install_guides_installing_openvino_windows>`
.. tab:: macOS
See :doc:`Install Intel® Distribution of OpenVINO™ toolkit for macOS* <openvino_docs_install_guides_installing_openvino_macos>`
@endsphinxdirective

View File

@ -1,461 +0,0 @@
# Get Started with OpenVINO™ Toolkit on Linux* {#openvino_docs_get_started_get_started_linux}
The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models on Intel® hardware. This guide helps you get started with the OpenVINO™ toolkit you installed on a Linux* operating system.
In this guide, you will:
* Learn the OpenVINO™ inference workflow.
* Run sample scripts that perform the steps for you. These sample scripts illustrate the workflow.
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application.
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
The toolkit consists of three primary components:
* **Inference Engine:** The software libraries that run inference against the Intermediate Representation (optimized model) to produce inference results.
* **Model Optimizer:** Optimizes models for Intel® architecture, converting models into a format compatible with the Inference Engine. This format is called an Intermediate Representation (IR).
* **Intermediate Representation (IR):** The Model Optimizer output. A model converted to a format that has been optimized for Intel® architecture and is usable by the Inference Engine.
In addition, sample scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
* **Sample Scripts** - Shell scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
* Utilize specific OpenVINO capabilities in an application
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
* **[Demo Applications](@ref omz_demos)** - Console applications that provide robust application templates to help you implement specific deep learning scenarios. These applications involve increasingly complex processing pipelines that gather analysis data from several models that run inference simultaneously, such as detecting a person in a video stream along with detecting the person's physical attributes, such as age, gender, and emotional state.
## <a name="openvino-installation"></a>Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure
This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Linux*](../install_guides/installing-openvino-linux.md).
By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as `<INSTALL_DIR>`:
* For root or administrator: `/opt/intel/openvino_<version>/`
* For regular users: `/home/<USER>/intel/openvino_<version>/`
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2022/`
If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home/<USER>/` with the directory in which you installed the software.
The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2022/tools` directory.
<details>
<summary><strong>Click for the Intel® Distribution of OpenVINO™ toolkit directory structure</strong></summary>
| Directory&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | Description |
|:----------------------------------------|:--------------------------------------------------------------------------------------|
| `demo/` | Sample scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Sample Scripts](#use-openvino-sample-scripts) section.|
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
| `~intel_models/` | Symbolic link to the `intel_models` subfolder of the `open_model_zoo` folder |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`include/` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`lib/` | Inference Engine binaries.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`samples/` | Inference Engine samples. Contains source code for C++ and Python* samples and build scripts. See the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`src/` | Source files for CPU extensions.|
| `model_optimizer/` | Model Optimizer directory. Contains configuration scripts, scripts to run the Model Optimizer and other files. See the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
| `open_model_zoo/` | Open Model Zoo directory. Includes the Model Downloader tool to download [pre-trained OpenVINO](@ref omz_models_group_intel) and public models, OpenVINO models documentation, demo applications and the Accuracy Checker tool to evaluate model accuracy.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`demos/` | Demo applications for inference scenarios. Also includes documentation and build scripts.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`intel_models/` | Pre-trained OpenVINO models and associated documentation. See the [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_group_intel).|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`tools/` | Model Downloader and Accuracy Checker tools. |
| `tools/` | Contains a symbolic link to the Model Downloader folder and auxiliary tools to work with your models: Calibration tool, Benchmark and Collect Statistics tools.|
</details>
## <a name="workflow-overview"></a>OpenVINO™ Workflow Overview
The simplified OpenVINO™ workflow is:
1. **Get a trained model** for your inference task. Example inference tasks: pedestrian detection, face detection, vehicle detection, license plate recognition, head pose.
2. **Run the trained model through the Model Optimizer** to convert the model to an Intermediate Representation, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
3. **Use the Inference Engine API in the application** to run inference against the Intermediate Representation (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
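A minimal sketch of step 3, based on the Inference Engine Python API used elsewhere in this documentation (`IECore`, `load_network`, `infer`); the file names, the single-input assumption, and the dummy input data below are placeholders rather than part of the original text.
```py
# Minimal inference sketch with the Inference Engine Python API (2021/2022.1-era).
# "model.xml"/"model.bin" stand in for an IR produced by the Model Optimizer.
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")
exec_net = ie.load_network(network=net, device_name="CPU")

input_name = next(iter(net.input_info))                # a single-input model is assumed
shape = net.input_info[input_name].input_data.shape
dummy = np.zeros(shape, dtype=np.float32)              # replace with real preprocessed data

results = exec_net.infer({input_name: dummy})          # dict: output name -> numpy array
```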
## Use the Sample Scripts to Learn the Workflow
The sample scripts in `/opt/intel/openvino_2022/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to:
* Compile several samples from the source files delivered as part of the OpenVINO toolkit.
* Download trained models.
* Perform pipeline steps and see the output on the console.
> **NOTE**: You must have Internet access to run the sample scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
The sample scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
```sh
./<script_name> -d [CPU, GPU, MYRIAD, HDDL]
```
Before running the sample or demo applications on Intel® Processor Graphics or on an Intel® Neural Compute Stick 2 device, you must complete the additional configuration steps. For details, see:
* Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md)
* Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md).
The following paragraphs describe each sample script.
### Image Classification Sample Script
The `run_sample_squeezenet` script illustrates the image classification pipeline.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to the IR.
3. Builds the Image Classification Sample Async application.
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
<details>
<summary><strong>Click for an example of running the Image Classification sample script</strong></summary>
To preview the image that the script will classify:
```sh
cd ${INTEL_OPENVINO_DIR}/samples/scripts
eog car.png
```
To run the script to perform inference on a CPU:
```sh
./run_sample_squeezenet.sh
```
When the script completes, you see the label and confidence for the top-10 categories:
```sh
Top 10 results:
Image /home/user/openvino/samples/cpp/sample_data/car.png
classid probability label
------- ----------- -----
817 0.8363345 sports car, sport car
511 0.0946488 convertible
479 0.0419131 car wheel
751 0.0091071 racer, race car, racing car
436 0.0068161 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
656 0.0037564 minivan
586 0.0025741 half track
717 0.0016069 pickup, pickup truck
864 0.0012027 tow truck, tow car, wrecker
581 0.0005882 grille, radiator grille
total inference time: 2.6642941
Average running time of one iteration: 2.6642941 ms
Throughput: 375.3339402 FPS
[ INFO ] Classification sample execution successful
```
</details>
### Benchmark Sample Script
The `run_sample_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to the IR.
3. Builds the Inference Engine Benchmark sample.
4. Runs the tool with the `car.png` image located in the `demo` directory.
<details>
<summary><strong>Click for an example of running the Benchmark sample script</strong></summary>
To run the script that performs inference (runs on CPU by default):
```sh
./run_sample_benchmark_app.sh
```
When the script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
</details>
## <a name="using-sample-application"></a>Use Code Samples and Demo Applications to Learn the Workflow
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
You will perform the following steps:
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
3. <a href="#download-media">Download media files to run inference on.</a>
4. <a href="#run-image-classification">Run inference on the Image Classification Code Sample and see the results</a>.
Each demo and code sample is a separate application, but they share the same behavior and components. The code samples and demo applications are:
* [Code Samples](../IE_DG/Samples_Overview.md) - Small console applications that show how to utilize specific OpenVINO capabilities within an application and execute specific tasks such as loading a model, running inference, querying specific device capabilities, and more.
* [Demo Applications](@ref omz_demos) - Console applications that provide robust application templates to support developers in implementing specific deep learning scenarios. They may also involve more complex processing pipelines that gather analysis from several models that run inference simultaneously, for example, concurrently detecting a person in a video stream while also detecting attributes such as age, gender, and emotions.
Inputs you'll need to specify:
- **A compiled OpenVINO™ code sample or demo application** that runs inference against a model converted by the Model Optimizer to an IR, using the other inputs you provide.
- **One or more models** in the Intermediate Representation format. Each model is trained for a specific task. Examples include pedestrian detection, face detection, vehicle detection, license plate recognition, head pose, and others. Different models are used for different applications. Models can be chained together to provide multiple features; for example vehicle + make/model + license plate recognition.
- **One or more media files**. The media is typically a video file, but can be a still photo.
- **One or more target devices** on which you run inference. The target device can be the CPU, GPU, or VPU accelerator.
### Build the Code Samples and Demo Applications
The Image Classification Sample was automatically compiled when you ran the Image Classification sample script. The binary file is in the `~/inference_engine_cpp_samples_build/intel64/Release` directory.
To run other sample code or demo applications, build them from the source files delivered as part of the OpenVINO toolkit. To learn how to build these, see the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
### <a name="download-models"></a> Step 1: Download the Models
You must have a model that is specific to your inference task. Example model types are:
- Classification (AlexNet, GoogleNet, SqueezeNet, others) - Detects one type of element in a frame.
- Object Detection (SSD, YOLO) - Draws bounding boxes around multiple types of objects.
- Custom (Often based on SSD)
Options to find a model suitable for the OpenVINO™ toolkit are:
- Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using the [Model Downloader tool](@ref omz_tools_downloader).
- Download from GitHub*, Caffe* Zoo, TensorFlow* Zoo, etc.
- Train your own model.
This guide uses the Model Downloader to get pre-trained models. You can use one of the following options to find a model:
* **List the models available in the downloader**:
```sh
cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/
```
```sh
python3 info_dumper.py --print_all
```
* **Use `grep` to list models that have a specific name pattern**:
```sh
python3 info_dumper.py --print_all | grep <model_name>
```
Use the Model Downloader to download the models to a models directory. This guide uses `<models_dir>` as the models directory and `<model_name>` as the model name:
```sh
sudo python3 ./downloader.py --name <model_name> --output_dir <models_dir>
```
> **NOTE**: Always run the downloader with `sudo`.
Download the following models if you want to run the Image Classification Sample:
|Model Name | Code Sample |
|-----------------------------------------------|-----------------------------------------------------|
|`squeezenet1.1` | Image Classification Sample |
<details>
<summary><strong>Click for an example of downloading the SqueezeNet Caffe* model</strong></summary>
To download the SqueezeNet 1.1 Caffe* model to the `~/models` folder:
```sh
sudo python3 ./downloader.py --name squeezenet1.1 --output_dir ~/models
```
Your screen looks similar to this after the download:
```
###############|| Downloading models ||###############
========= Downloading /home/username/models/public/squeezenet1.1/squeezenet1.1.prototxt
========= Downloading /home/username/models/public/squeezenet1.1/squeezenet1.1.caffemodel
... 100%, 4834 KB, 3157 KB/s, 1 seconds passed
###############|| Post processing ||###############
========= Replacing text in /home/username/models/public/squeezenet1.1/squeezenet1.1.prototxt =========
```
</details>
### <a name="convert-models-to-intermediate-representation"></a> Step 2: Convert the Models to the Intermediate Representation
In this step, your trained models are ready to run through the Model Optimizer to convert them to the Intermediate Representation (IR) format. This is required before using the Inference Engine with the model.
Models in the Intermediate Representation format always include a pair of `.xml` and `.bin` files. Make sure you have these files for the Inference Engine to find them.
- **REQUIRED:** `model_name.xml`
- **REQUIRED:** `model_name.bin`
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the example in the <a href="#download-models">Download Models</a> section to learn how to download this model.
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
1. Create an `<ir_dir>` directory to contain the model's Intermediate Representation (IR).
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
3. Run the Model Optimizer script:
```sh
cd /opt/intel/openvino_2022/tools/model_optimizer
```
```sh
mo --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
```
The produced IR files are in the `<ir_dir>` directory.
<details>
<summary><strong>Click for an example of converting the SqueezeNet Caffe* model</strong></summary>
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves it to the `~/models/public/squeezenet1.1/ir` output directory:
```sh
cd /opt/intel/openvino_2022/tools/model_optimizer
```
```sh
mo --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
```
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/samples/scripts/` directory to `<ir_dir>`. This file contains the classes that ImageNet uses, so the inference results show text labels instead of class numbers:
```sh
cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels <ir_dir>
```
</details>
### <a name="download-media"></a> Step 3: Download a Video or a Still Photo as Media
Many sources are available from which you can download video media to use the code samples and demo applications. Possibilities include:
- https://videos.pexels.com
- https://images.google.com
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
* `/opt/intel/openvino_2022/samples/scripts/car.png`
* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp`
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
> **NOTE**: The Image Classification code sample was automatically compiled when you ran the Image Classification sample script. If you want to compile it manually, see the *Build the Sample Applications on Linux* section in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md).
To run the **Image Classification** code sample with an input image on the IR:
1. Set up the OpenVINO environment variables:
```sh
source /opt/intel/openvino_2022/setupvars.sh
```
2. Go to the code samples build directory:
```sh
cd ~/inference_engine_cpp_samples_build/intel64/Release
```
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
```sh
./classification_sample_async -i <path_to_media> -m <path_to_model> -d <target_device>
```
<details>
<summary><strong>Click for examples of running the Image Classification code sample on different devices</strong></summary>
The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices:
**CPU:**
```sh
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU
```
**GPU:**
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md).
```sh
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU
```
**MYRIAD:**
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md).
```sh
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD
```
**HDDL:**
> **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md).
```sh
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL
```
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
```sh
Top 10 results:
Image /home/user/openvino/samples/cpp/sample_data/car.png
classid probability label
------- ----------- -----
817 0.8363345 sports car, sport car
511 0.0946488 convertible
479 0.0419131 car wheel
751 0.0091071 racer, race car, racing car
436 0.0068161 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
656 0.0037564 minivan
586 0.0025741 half track
717 0.0016069 pickup, pickup truck
864 0.0012027 tow truck, tow car, wrecker
581 0.0005882 grille, radiator grille
total inference time: 2.6642941
Average running time of one iteration: 2.6642941 ms
Throughput: 375.3339402 FPS
[ INFO ] Classification sample execution successful
```
</details>
## <a name="basic-guidelines-sample-application"></a>Basic Guidelines for Using Code Samples and Demo Applications
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
1. Before using the OpenVINO™ samples, always set up the environment:
```sh
source /opt/intel/openvino_2022/setupvars.sh
```
2. Have the directory path for the following:
- Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release`
- Demo Application binaries located in `~/inference_engine_demos_build/intel64/Release`
- Media: Video or image. See <a href="#download-media">Download Media</a>.
- Model: Neural Network topology converted with the Model Optimizer to the IR format (.bin and .xml files). See <a href="#download-models">Download Models</a> for more information.
## <a name="syntax-examples"></a> Typical Code Sample and Demo Application Syntax Examples
This section explains how to build and use the sample and demo applications provided with the toolkit. You will need CMake 3.10 or later installed. Build details are on the [Inference Engine Samples](../IE_DG/Samples_Overview.md) and [Demo Applications](@ref omz_demos) pages.
To build all the demos and samples:
```sh
cd $INTEL_OPENVINO_DIR/samples/cpp
# to compile C samples, go here also: cd <INSTALL_DIR>/samples/c
build_samples.sh
cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos
build_demos.sh
```
Depending on what you compiled, executables are in the directories below:
* `~/inference_engine_samples_build/intel64/Release`
* `~/inference_engine_cpp_samples_build/intel64/Release`
* `~/inference_engine_demos_build/intel64/Release`
Template to call sample code or a demo application:
```sh
<path_to_app> -i <path_to_media> -m <path_to_model> -d <target_device>
```
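With the placeholders filled in, the command might look like the following sketch. The video and model paths are hypothetical, and the exact options depend on the demo you run:
```sh
# Run the Object Detection demo from the demo build directory on a local video and IR model
cd ~/inference_engine_demos_build/intel64/Release
./object_detection_demo -i ~/Videos/catshow.mp4 -m ~/ir/fp32/mobilenet-ssd.xml -d CPU
```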
## <a name="advanced-samples"></a> Advanced Demo Use
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
For example, an SSD will detect a variety of objects in a frame, then age, gender, head pose, emotion recognition and similar models target the objects classified by the SSD to perform their functions.
In these cases, the use pattern in the last part of the template above is usually:
`-m_<acronym> … -d_<acronym> …`
For head pose:
`-m_hp <headpose model> -d_hp <headpose hardware target>`
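As an illustrative sketch of this pattern (the demo name and model paths below are placeholders, not a specific application shipped with the toolkit), a multi-model invocation could look like:
```sh
# Hypothetical multi-model demo call: primary detection model on CPU,
# head pose estimation model on GPU
./<demo_name> -i <path_to_media> \
    -m <detection_model>.xml -d CPU \
    -m_hp <headpose_model>.xml -d_hp GPU
```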
You can see all of a sample application's parameters by adding the `-h` or `--help` option at the command line.
## Additional Resources
Use these resources to learn more about the OpenVINO™ toolkit:
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
* [OpenVINO™ Toolkit Overview](../index.md)
* [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
* [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
* [Overview of OpenVINO™ Toolkit Pre-Trained Models](https://software.intel.com/en-us/openvino-toolkit/documentation/pretrained-models)
View File
@ -1,685 +0,0 @@
# Get Started with OpenVINO™ Toolkit on macOS* {#openvino_docs_get_started_get_started_macos}
The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models on Intel® hardware. This guide helps you get started with the OpenVINO™ toolkit you installed on macOS*.
In this guide, you will:
* Learn the OpenVINO™ inference workflow
* Run sample scripts that illustrate the workflow and perform the steps for you
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
The toolkit consists of three primary components:
* **Model Optimizer:** Optimizes models for Intel® architecture, converting models into a format compatible with the Inference Engine. This format is called an Intermediate Representation (IR).
* **Intermediate Representation:** The Model Optimizer output. A model converted to a format that has been optimized for Intel® architecture and is usable by the Inference Engine.
* **Inference Engine:** The software libraries that run inference against the IR (optimized model) to produce inference results.
In addition, sample scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
* **Sample Scripts** - Batch scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
* Utilize specific OpenVINO capabilities in an application.
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
* **[Demo Applications](@ref omz_demos)** - Console applications that provide robust application templates to help you implement specific deep learning scenarios. These applications involve increasingly complex processing pipelines that gather analysis data from several models that run inference simultaneously, such as detecting a person in a video stream along with detecting the person's physical attributes, such as age, gender, and emotional state.
## <a name="openvino-installation"></a>Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure
This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for macOS*](../install_guides/installing-openvino-macos.md).
By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as `<INSTALL_DIR>`:
* For root or administrator: `/opt/intel/openvino_<version>/`
* For regular users: `/home/<USER>/intel/openvino_<version>/`
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2022/`.
If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home/<USER>/` with the directory in which you installed the software.
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>/deployment_tools` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for the Intel® Distribution of OpenVINO™ toolkit directory structure</strong>">
@endsphinxdirective
| Directory&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | Description |
|:----------------------------------------|:--------------------------------------------------------------------------------------|
| `demo/` | Sample scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Sample Scripts](#use-openvino-sample-scripts) section.|
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`external/` | Third-party dependencies and drivers.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`include/` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`lib/` | Inference Engine static libraries.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`samples/` | Inference Engine samples. Contains source code for C++ and Python* samples and build scripts. See the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`share/` | CMake configuration files for linking with Inference Engine.|
| `~intel_models/` | Symbolic link to the `intel_models` subfolder of the `open_model_zoo` folder.|
| `model_optimizer/` | Model Optimizer directory. Contains configuration scripts, scripts to run the Model Optimizer and other files. See the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).|
| `ngraph/` | nGraph directory. Includes the nGraph header and library files. |
| `open_model_zoo/` | Open Model Zoo directory. Includes the Model Downloader tool to download [pre-trained OpenVINO](@ref omz_models_group_intel) and public models, OpenVINO models documentation, demo applications and the Accuracy Checker tool to evaluate model accuracy.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`demos/` | Sample applications for inference scenarios. Also includes documentation and build scripts.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`intel_models/` | Pre-trained OpenVINO models and associated documentation. See the [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_group_intel).|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`models` | Intel's trained and public models that can be obtained with Model Downloader.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`tools/` | Model Downloader and Accuracy Checker tools. |
| `tools/` | Contains a symbolic link to the Model Downloader folder and auxiliary tools to work with your models: Calibration tool, Benchmark and Collect Statistics tools.|
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## <a name="workflow-overview"></a>OpenVINO™ Workflow Overview
The simplified OpenVINO™ workflow is:
1. **Get a trained model** for your inference task. Example inference tasks: pedestrian detection, face detection, vehicle detection, license plate recognition, head pose.
2. **Run the trained model through the Model Optimizer** to convert the model to an IR, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
3. **Use the Inference Engine API in the application** to run inference against the IR (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
## Use the Sample Scripts to Learn the Workflow
The sample scripts in `<INSTALL_DIR>/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to:
* Compile several samples from the source files delivered as part of the OpenVINO toolkit
* Download trained models
* Perform pipeline steps and see the output on the console
> **NOTE**: You must have Internet access to run the sample scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
The sample scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
```sh
./<script_name> -d [CPU, MYRIAD]
```
Before running the sample or demo applications on Intel® Neural Compute Stick 2 device, you must complete additional configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
The following paragraphs describe each sample script.
### Image Classification Sample Script
The `run_sample_squeezenet` script illustrates the image classification pipeline.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to the IR.
3. Builds the Image Classification Sample Async application.
4. Runs the compiled sample with the `car.png` image located in the `samples/scripts` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of running the Image Classification demo script</strong>">
@endsphinxdirective
To run the script to view the sample image and perform inference on the CPU:
```sh
open car.png
./run_sample_squeezenet.sh
```
When the script completes, you see the label and confidence for the top-10 categories:
```sh
Top 10 results:
Image /opt/intel/openvino_2022/samples/scripts/car.png
classid probability label
------- ----------- -----
817 0.6853030 sports car, sport car
479 0.1835197 car wheel
511 0.0917197 convertible
436 0.0200694 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
751 0.0069604 racer, race car, racing car
656 0.0044177 minivan
717 0.0024739 pickup, pickup truck
581 0.0017788 grille, radiator grille
468 0.0013083 cab, hack, taxi, taxicab
661 0.0007443 Model T
[ INFO ] Classification sample execution successful
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### Inference Pipeline Demo Script
The `demo_security_barrier_camera` script uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute.
The script:
1. Downloads three pre-trained model IRs.
2. Builds the Security Barrier Camera Demo application.
3. Runs the application with the downloaded models and the `car_1.bmp` image from the `samples/scripts` directory to show an inference pipeline.
This application:
1. Identifies an object as a vehicle with the first model.
2. Uses the vehicle identification as input to the second model, which identifies specific vehicle attributes, including the license plate.
3. Uses the license plate as input to the third model, which recognizes specific characters in the license plate.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of Running the Pipeline demo script</strong>">
@endsphinxdirective
To run the script performing inference on a CPU:
```sh
./demo_security_barrier_camera.sh
```
When the verification script completes, you see an image that displays the resulting frame with detections rendered as bounding boxes, and text:
![](../img/inference_pipeline_script_mac.png)
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### Benchmark Sample Script
The `run_sample_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to the IR.
3. Builds the Inference Engine Benchmark tool.
4. Runs the tool with the `car.png` image located in the `samples/scripts` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of running the Benchmark demo script</strong>">
@endsphinxdirective
To run the script that performs inference on a CPU:
```sh
./run_sample_benchmark_app.sh
```
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## <a name="using-sample-application"></a>Use Code Samples and Demo Applications to Learn the Workflow
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
You will perform the following steps:
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
3. <a href="#download-media">Download media files to run inference on.</a>
4. <a href="#run-image-classification">Run inference on the Image Classification Code Sample and see the results.</a>
5. <a href="#run-security-barrier">Run inference on the Security Barrier Camera Demo application and see the results.</a>
Each demo and code sample is a separate application, but they share the same behavior and components.
Inputs you need to specify when using a code sample or demo application:
- **A compiled OpenVINO™ code sample or demo application** that runs inferencing against a model that has been run through the Model Optimizer, resulting in an IR, using the other inputs you provide.
- **One or more models** in the IR format. Each model is trained for a specific task. Examples include pedestrian detection, face detection, vehicle detection, license plate recognition, head pose, and others. Different models are used for different applications. Models can be chained together to provide multiple features; for example, vehicle + make/model + license plate recognition.
- **One or more media files**. The media is typically a video file, but can be a still photo.
- **One or more target devices** on which you run inference. The target device can be the CPU or a VPU accelerator.
### Build the Code Samples and Demo Applications
The Image Classification Sample was automatically compiled when you ran the Image Classification sample script. The binary file is in the `~/inference_engine_cpp_samples_build/intel64/Release` directory.
You can also build all available sample code and demo applications from the source files delivered with the OpenVINO toolkit. To learn how to do this, see the instructions in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
### <a name="download-models"></a> Step 1: Download the Models
You must have a model that is specific to your inference task. Example model types are:
- Classification (AlexNet, GoogleNet, SqueezeNet, others) - Detects one type of element in a frame.
- Object Detection (SSD, YOLO) - Draws bounding boxes around multiple types of objects.
- Custom (Often based on SSD)
Options to find a model suitable for the OpenVINO™ toolkit are:
- Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using the [Model Downloader tool](@ref omz_tools_downloader).
- Download from GitHub*, Caffe* Zoo, TensorFlow* Zoo, and other resources.
- Train your own model.
This guide uses the Model Downloader to get pre-trained models. You can use one of the following options to find a model:
* **List the models available in the downloader**:
```sh
cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/
```
```sh
python3 info_dumper.py --print_all
```
* **Use `grep` to list models that have a specific name pattern**:
```sh
python3 info_dumper.py --print_all | grep <model_name>
```
Use the Model Downloader to download the models to a models directory. This guide uses `<models_dir>` as the models directory and `<model_name>` as the model name:
```sh
sudo python3 ./downloader.py --name <model_name> --output_dir <models_dir>
```
> **NOTE**: Always run the downloader with `sudo`.
Download the following models if you want to run the Image Classification Sample:
|Model Name | Code Sample or Demo App |
|-----------------------------------------------|-----------------------------------------------------|
|`squeezenet1.1` | Image Classification Sample |
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of downloading the SqueezeNet Caffe* model</strong>">
@endsphinxdirective
To download the SqueezeNet 1.1 Caffe* model to the `~/models` folder:
```sh
sudo python3 ./downloader.py --name squeezenet1.1 --output_dir ~/models
```
Your screen looks similar to this after the download:
```
###############|| Downloading models ||###############
========= Downloading /Users/username/models/public/squeezenet1.1/squeezenet1.1.prototxt
... 100%, 9 KB, 44058 KB/s, 0 seconds passed
========= Downloading /Users/username/models/public/squeezenet1.1/squeezenet1.1.caffemodel
... 100%, 4834 KB, 4877 KB/s, 0 seconds passed
###############|| Post processing ||###############
========= Replacing text in /Users/username/models/public/squeezenet1.1/squeezenet1.1.prototxt =========
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of downloading models for the Security Barrier Camera Demo application</strong>">
@endsphinxdirective
To download all three pre-trained models in FP16 precision to the `~/models` folder:
```sh
./downloader.py --name vehicle-license-plate-detection-barrier-0106,vehicle-attributes-recognition-barrier-0039,license-plate-recognition-barrier-0001 --output_dir ~/models --precisions FP16
```
Your screen looks similar to this after the download:
```
################|| Downloading models ||################
========== Downloading /Users/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml
... 100%, 207 KB, 313926 KB/s, 0 seconds passed
========== Downloading /Users/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.bin
... 100%, 1256 KB, 2552 KB/s, 0 seconds passed
========== Downloading /Users/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml
... 100%, 32 KB, 172042 KB/s, 0 seconds passed
========== Downloading /Users/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.bin
... 100%, 1222 KB, 2712 KB/s, 0 seconds passed
========== Downloading /Users/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml
... 100%, 47 KB, 217130 KB/s, 0 seconds passed
========== Downloading /Users/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.bin
... 100%, 2378 KB, 4222 KB/s, 0 seconds passed
################|| Post-processing ||################
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### <a name="convert-models-to-intermediate-representation"></a> Step 2: Convert the Models to the Intermediate Representation
In this step, your trained models are ready to run through the Model Optimizer to convert them to the Intermediate Representation (IR) format. This is required before using the Inference Engine with the model.
Models in the Intermediate Representation format always include a pair of `.xml` and `.bin` files. Make sure you have these files for the Inference Engine to find them.
- **REQUIRED:** `model_name.xml`
- **REQUIRED:** `model_name.bin`
The conversion may also create a `model_name.mapping` file, but it is not needed for running inference.
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the example in the <a href="#download-models">Download Models</a> section to learn how to download this model.
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
1. Create an `<ir_dir>` directory to contain the model's IR.
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
3. Run the Model Optimizer script:
```sh
cd /opt/intel/openvino_2022/tools/model_optimizer
```
```sh
mo --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
```
The produced IR files are in the `<ir_dir>` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of converting the SqueezeNet Caffe* model</strong>">
@endsphinxdirective
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves it to the `~/models/public/squeezenet1.1/ir` output directory:
```sh
cd /opt/intel/openvino_2022/tools/model_optimizer
```
```sh
mo --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
```
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2022/samples/scripts/` directory to `<ir_dir>`. This file contains the classes that ImageNet uses, so the inference results show text labels instead of class numbers:
```sh
cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels <ir_dir>
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### <a name="download-media"></a> Step 3: Download a Video or a Still Photo as Media
Many sources are available from which you can download video media to use the code samples and demo applications. Possibilities include:
- https://videos.pexels.com
- https://images.google.com
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
* `/opt/intel/openvino_2022/samples/scripts/car.png`
* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp`
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
> **NOTE**: The Image Classification code sample was automatically compiled when you ran the Image Classification sample script. If you want to compile it manually, see the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) document.
To run the **Image Classification** code sample with an input image on the IR:
1. Set up the OpenVINO environment variables:
```sh
source /opt/intel/openvino_2022/setupvars.sh
```
2. Go to the code samples build directory:
```sh
cd ~/inference_engine_cpp_samples_build/intel64/Release
```
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
```sh
./classification_sample_async -i <path_to_media> -m <path_to_model> -d <target_device>
```
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for examples of running the Image Classification code sample on different devices</strong>">
@endsphinxdirective
The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices:
**CPU:**
```sh
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU
```
**MYRIAD:**
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
```sh
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD
```
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
```sh
Top 10 results:
Image /opt/intel/openvino_2022/samples/scripts/car.png
classid probability label
------- ----------- -----
817 0.8364177 sports car, sport car
511 0.0945683 convertible
479 0.0419195 car wheel
751 0.0091233 racer, race car, racing car
436 0.0068038 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
656 0.0037315 minivan
586 0.0025940 half track
717 0.0016044 pickup, pickup truck
864 0.0012045 tow truck, tow car, wrecker
581 0.0005833 grille, radiator grille
[ INFO ] Execution successful
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### <a name="run-security-barrier"></a>Step 5: Run the Security Barrier Camera Demo Application
> **NOTE**: The Security Barrier Camera Demo Application was automatically compiled when you ran the Inference Pipeline demo script. If you want to build it manually, see the instructions in the [Demo Applications Overview](@ref omz_demos) section.
To run the **Security Barrier Camera Demo Application** using an input image on the prepared IRs:
1. Set up the OpenVINO environment variables:
```sh
source /opt/intel/openvino_2022/setupvars.sh
```
2. Go to the demo application build directory:
```sh
cd ~/inference_engine_demos_build/intel64/Release
```
3. Run the demo executable, specifying the input media file, list of model IRs, and a target device on which to perform inference:
```sh
./security_barrier_camera_demo -i <path_to_media> -m <path_to_vehicle-license-plate-detection_model_xml> -m_va <path_to_vehicle_attributes_model_xml> -m_lpr <path_to_license_plate_recognition_model_xml> -d <target_device>
```
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for examples of running the Security Barrier Camera demo application on different devices</strong>">
@endsphinxdirective
**CPU:**
```sh
./security_barrier_camera_demo -i /opt/intel/openvino_2022/samples/scripts/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU
```
**MYRIAD:**
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
```sh
./security_barrier_camera_demo -i /opt/intel/openvino_2022/samples/scripts/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d MYRIAD
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## <a name="basic-guidelines-sample-application"></a>Basic Guidelines for Using Code Samples and Demo Applications
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
1. Before using the OpenVINO™ samples, always set up the environment:
```sh
source /opt/intel/openvino_2022/setupvars.sh
```
2. Have the directory path for the following:
- Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release`
- Demo Application binaries located in `~/inference_engine_demos_build/intel64/Release`
- Media: Video or image. See <a href="#download-media">Download Media</a>.
- Model: Neural Network topology converted with the Model Optimizer to the IR format (.bin and .xml files). See <a href="#download-models">Download Models</a> for more information.
## <a name="syntax-examples"></a> Typical Code Sample and Demo Application Syntax Examples
This section explains how to build and use the sample and demo applications provided with the toolkit. You will need CMake 3.13 or later installed. Build details are on the [Inference Engine Samples](../IE_DG/Samples_Overview.md) and [Demo Applications](@ref omz_demos) pages.
To build all the demos and samples:
```sh
cd $INTEL_OPENVINO_DIR/samples/cpp
# to compile C samples, go here also: cd <INSTALL_DIR>/samples/c
build_samples.sh
cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos
build_demos.sh
```
Depending on what you compiled, executables are in the directories below:
* `~/inference_engine_samples_build/intel64/Release`
* `~/inference_engine_cpp_samples_build/intel64/Release`
* `~/inference_engine_demos_build/intel64/Release`
Template to call sample code or a demo application:
```sh
<path_to_app> -i <path_to_media> -m <path_to_model> -d <target_device>
```
With the sample information specified, the command might look like this:
```sh
cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos/object_detection_demo
./object_detection_demo -i ~/Videos/catshow.mp4 -m ~/ir/fp32/mobilenet-ssd.xml -d CPU
```
## <a name="advanced-samples"></a> Advanced Demo Use
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
For example, an SSD will detect a variety of objects in a frame, then age, gender, head pose, emotion recognition and similar models target the objects classified by the SSD to perform their functions.
In these cases, the use pattern in the last part of the template above is usually:
`-m_<acronym> … -d_<acronym> …`
For head pose:
`-m_hp <headpose model> -d_hp <headpose hardware target>`
You can see all of a sample application's parameters by adding the `-h` or `--help` option at the command line.
## Additional Resources
Use these resources to learn more about the OpenVINO™ toolkit:
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
* [OpenVINO™ Toolkit Overview](../index.md)
* [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
* [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
* [Overview of OpenVINO™ Toolkit Pre-Trained Models](https://software.intel.com/en-us/openvino-toolkit/documentation/pretrained-models)
View File
@ -1,119 +0,0 @@
# Get Started with OpenVINO™ Toolkit on Raspbian* OS {#openvino_docs_get_started_get_started_raspbian}
The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models on Intel® hardware. This guide helps you get started with the OpenVINO™ toolkit you installed on Raspbian* OS.
In this guide, you will:
* Learn the OpenVINO™ inference workflow.
* Build and run sample code using detailed instructions.
## <a name="openvino-components"></a>OpenVINO™ Toolkit Components
On Raspbian* OS, the OpenVINO™ toolkit consists of the following components:
* **Inference Engine:** The software libraries that run inference against the Intermediate Representation (optimized model) to produce inference results.
* **MYRIAD Plugin:** The plugin developed for inference of neural networks on Intel® Neural Compute Stick 2.
> **NOTE**:
> * The OpenVINO™ package for Raspberry* does not include the [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). To convert models to Intermediate Representation (IR), you need to install it separately to your host machine.
> * The package does not include the Open Model Zoo demo applications. You can download them separately from the [Open Model Zoo repository](https://github.com/openvinotoolkit/open_model_zoo).
In addition, [code samples](../IE_DG/Samples_Overview.md) are provided to help you get up and running with the toolkit.
## <a name="openvino-installation"></a>Intel® Distribution of OpenVINO™ Toolkit Directory Structure
This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Raspbian*](../install_guides/installing-openvino-raspbian.md).
The OpenVINO toolkit for Raspbian* OS is distributed without an installer. This document refers to the directory to which you unpacked the toolkit package as `<INSTALL_DIR>`.
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>/deployment_tools` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for the <code>deployment_tools</code> directory structure</strong>">
@endsphinxdirective
| Directory&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | Description |
|:----------------------------------------|:--------------------------------------------------------------------------------------|
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`external/` | Third-party dependencies and drivers.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`include/` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`lib/` | Inference Engine libraries.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`samples/` | Inference Engine samples. Contains source code for C++ and Python* samples and build scripts. See the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`share/` | CMake configuration files for linking with Inference Engine.|
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## <a name="workflow-overview"></a>OpenVINO™ Workflow Overview
The OpenVINO™ workflow on Raspbian* OS is as follows:
1. **Get a pre-trained model** for your inference task. To use your own model for inference, you must convert it to the `.bin` and `.xml` Intermediate Representation (IR) files, which are used as input by the Inference Engine. On Raspberry Pi, the OpenVINO™ toolkit includes only the Inference Engine module; the Model Optimizer is not supported on this platform. To get the optimized models, you can use one of the following options:
* Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using [Model Downloader tool](@ref omz_tools_downloader).
<br> For more information on pre-trained models, see [Pre-Trained Models Documentation](@ref omz_models_group_intel)
* Convert a model using the Model Optimizer from a full installation of Intel® Distribution of OpenVINO™ toolkit on one of the supported platforms. Installation instructions are available:
* [Installation Guide for macOS*](../install_guides/installing-openvino-macos.md)
* [Installation Guide for Windows*](../install_guides/installing-openvino-windows.md)
* [Installation Guide for Linux*](../install_guides/installing-openvino-linux.md)
2. **Use the Inference Engine API in the application** to run inference against the Intermediate Representation (optimized model) and output inference results. The application can be an OpenVINO™ sample or your own application.
## <a name="using-sample"></a>Build and Run Code Samples
Follow the steps below to run the pre-trained SqueezeNet image classification network using Inference Engine samples from the OpenVINO toolkit.
1. Create a samples build directory. This example uses a directory named `build`:
```sh
mkdir build && cd build
```
2. Build the Hello Classification Sample with the following command:
```sh
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2022/samples/cpp
make -j2 hello_classification
```
3. Download the pre-trained SqueezeNet image classification model with the [Model Downloader tool](@ref omz_tools_downloader):
```sh
git clone --depth 1 https://github.com/openvinotoolkit/open_model_zoo
cd open_model_zoo/tools/downloader
python3 -m pip install -r requirements.in
python3 downloader.py --name squeezenet1.1
```
4. Run the sample, specifying the model and path to the input image:
```sh
./armv7l/Release/hello_classification <path_to_model>/squeezenet1.1.xml <path_to_image> MYRIAD
```
The application outputs the top 10 classification results to the console window.
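Note that the downloader fetches `squeezenet1.1` in its original Caffe format, and the Model Optimizer does not run on Raspbian, so you convert the model to IR on a separate host machine and copy the resulting files to the board. Below is a minimal sketch, assuming a host with the Model Optimizer installed; the paths and the Raspberry Pi address are placeholders:
```sh
# On the host machine (not the Raspberry Pi): convert the downloaded Caffe model to a FP16 IR
mo --input_model public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ir/

# Copy the IR files to the Raspberry Pi (address and target directory are placeholders)
scp ir/squeezenet1.1.xml ir/squeezenet1.1.bin pi@<raspberry_pi_address>:~/models/
```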
## <a name="basic-guidelines-sample-application"></a>Basic Guidelines for Using Code Samples
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples:
1. Before using the OpenVINO™ samples, always set up the environment:
```sh
source <INSTALL_DIR>/setupvars.sh
```
2. Have the directory path for the following:
- Code Sample binaries
- Media: Video or image. Many sources are available from which you can download video media to use the code samples and demo applications, like https://videos.pexels.com and https://images.google.com.
- Model in the IR format (.bin and .xml files).
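Putting these guidelines together, a typical session on the board might look like the following sketch, using placeholder paths based on the build steps above:
```sh
# Set up the OpenVINO environment
source <INSTALL_DIR>/setupvars.sh

# Go to the directory where the samples were built and run the classification sample
cd <path_to_build_directory>
./armv7l/Release/hello_classification <path_to_model>/squeezenet1.1.xml <path_to_image> MYRIAD
```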
## Additional Resources
Use these resources to learn more about the OpenVINO™ toolkit:
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
* [OpenVINO™ Toolkit Overview](../index.md)
* [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
* [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
* [Overview of OpenVINO™ Toolkit Pre-Trained Models](https://software.intel.com/en-us/openvino-toolkit/documentation/pretrained-models)
View File
@ -1,357 +0,0 @@
# Getting Started with Demo Scripts {#openvino_docs_get_started_get_started_scripts}
## Introduction
A set of demo scripts in the `openvino_2021/deployment_tools/demo` directory gives you a starting point for learning the OpenVINO™ workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to:
* Compile several samples from the source files delivered as part of the OpenVINO™ toolkit.
* Download trained models.
* Convert the models to IR (Intermediate Representation format used by OpenVINO™) with Model Optimizer.
* Perform pipeline steps and see the output on the console.
This guide assumes you completed all installation and configuration steps. If you have not yet installed and configured the toolkit:
@sphinxdirective
.. tab:: Linux
See :doc:`Install Intel® Distribution of OpenVINO™ toolkit for Linux* <openvino_docs_install_guides_installing_openvino_linux>`
.. tab:: Windows
See :doc:`Install Intel® Distribution of OpenVINO™ toolkit for Windows* <openvino_docs_install_guides_installing_openvino_windows>`
.. tab:: macOS
See :doc:`Install Intel® Distribution of OpenVINO™ toolkit for macOS* <openvino_docs_install_guides_installing_openvino_macos>`
@endsphinxdirective
The demo scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device (i.e., processor) is the CPU, you can add the `-d` parameter to specify a different inference device. The general command to run a demo script is as follows:
@sphinxdirective
.. tab:: Linux
.. code-block:: sh
#If you installed in a location other than /opt/intel, substitute that path.
cd /opt/intel/openvino_2021/deployment_tools/demo/
./<script_name> -d [CPU, GPU, MYRIAD, HDDL]
.. tab:: Windows
.. code-block:: sh
rem If you installed in a location other than the default, substitute that path.
cd "C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo"
.\<script_name> -d [CPU, GPU, MYRIAD, HDDL]
.. tab:: macOS
.. code-block:: sh
#If you installed in a location other than /opt/intel, substitute that path.
cd /opt/intel/openvino_2021/deployment_tools/demo/
./<script_name> -d [CPU, MYRIAD]
@endsphinxdirective
Before running the demo applications on Intel® Processor Graphics or on an Intel® Neural Compute Stick 2 device, you must complete additional configuration steps.
@sphinxdirective
.. tab:: Linux
For details, see the following sections in the :doc:`installation instructions <openvino_docs_install_guides_installing_openvino_linux>`:
* Steps for Intel® Processor Graphics (GPU)
* Steps for Intel® Neural Compute Stick 2
.. tab:: Windows
For details, see the following sections in the :doc:`installation instructions <openvino_docs_install_guides_installing_openvino_windows>`:
* Additional Installation Steps for Intel® Processor Graphics (GPU)
* Additional Installation Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
.. tab:: macOS
For details, see the following sections in the :doc:`installation instructions <openvino_docs_install_guides_installing_openvino_macos>`:
* Steps for Intel® Neural Compute Stick 2
@endsphinxdirective
The following sections describe each demo script.
## Image Classification Demo Script
The `demo_squeezenet_download_convert_run` script illustrates the image classification pipeline.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to the IR format used by OpenVINO™.
3. Builds the Image Classification Sample Async application.
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
### Example of Running the Image Classification Demo Script
@sphinxdirective
.. raw:: html
<div class="collapsible-section">
@endsphinxdirective
**Click for an example of running the Image Classification demo script**
To preview the image that the script will classify:
@sphinxdirective
.. tab:: Linux
.. code-block:: sh
cd /opt/intel/openvino_2021/deployment_tools/demo
eog car.png
.. tab:: Windows
.. code-block:: sh
car.png
.. tab:: macOS
.. code-block:: sh
cd /opt/intel/openvino_2021/deployment_tools/demo
open car.png
@endsphinxdirective
To run the script and perform inference on the CPU:
@sphinxdirective
.. tab:: Linux
.. code-block:: sh
./demo_squeezenet_download_convert_run.sh
.. tab:: Windows
.. code-block:: bat
.\demo_squeezenet_download_convert_run.bat
.. tab:: macOS
.. code-block:: sh
./demo_squeezenet_download_convert_run.sh
@endsphinxdirective
When the script completes, you see the label and confidence for the top 10 categories:
@sphinxdirective
.. tab:: Linux
.. code-block:: sh
Top 10 results:
Image /opt/intel/openvino_2021/deployment_tools/demo/car.png
classid probability label
------- ----------- -----
817 0.8363345 sports car, sport car
511 0.0946488 convertible
479 0.0419131 car wheel
751 0.0091071 racer, race car, racing car
436 0.0068161 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
656 0.0037564 minivan
586 0.0025741 half track
717 0.0016069 pickup, pickup truck
864 0.0012027 tow truck, tow car, wrecker
581 0.0005882 grille, radiator grille
[ INFO ] Execution successful
[ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
.. tab:: Windows
.. code-block:: bat
Top 10 results:
Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png
classid probability label
------- ----------- -----
817 0.8363345 sports car, sport car
511 0.0946488 convertible
479 0.0419131 car wheel
751 0.0091071 racer, race car, racing car
436        0.0068161 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
656 0.0037564 minivan
586 0.0025741 half track
717 0.0016069 pickup, pickup truck
864 0.0012027 tow truck, tow car, wrecker
581 0.0005882 grille, radiator grille
[ INFO ] Execution successful
[ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
.. tab:: macOS
.. code-block:: sh
Top 10 results:
Image /Users/colin/intel/openvino_2021/deployment_tools/demo/car.png
classid probability label
------- ----------- -----
817 0.8363345 sports car, sport car
511 0.0946488 convertible
479 0.0419131 car wheel
751 0.0091071 racer, race car, racing car
436        0.0068161 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
656 0.0037564 minivan
586 0.0025741 half track
717 0.0016069 pickup, pickup truck
864 0.0012027 tow truck, tow car, wrecker
581 0.0005882 grille, radiator grille
[ INFO ] Execution successful
[ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
@endsphinxdirective
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## Inference Pipeline Demo Script
The `demo_security_barrier_camera` script demonstrates vehicle recognition, in which vehicle attributes build on each other to narrow in on a specific attribute.
The script:
1. Downloads three pre-trained models, already converted to IR format.
2. Builds the Security Barrier Camera Demo application.
3. Runs the application with the three models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline.
This application:
1. Gets the boundaries of an object identified as a vehicle with the first model.
2. Uses the vehicle identification as input to the second model, which identifies specific vehicle attributes, including the license plate.
3. Uses the license plate as input to the third model, which recognizes specific characters in the license plate.
### Example of Running the Pipeline Demo Script
@sphinxdirective
.. raw:: html
<div class="collapsible-section">
@endsphinxdirective
**Click for an example of Running the Pipeline demo script**
To run the script performing inference on Intel® Processor Graphics:
@sphinxdirective
.. tab:: Linux
.. code-block:: sh
./demo_security_barrier_camera.sh -d GPU
.. tab:: Windows
.. code-block:: bat
.\demo_security_barrier_camera.bat -d GPU
@endsphinxdirective
When the verification script is complete, you see an image that displays the resulting frame with detections rendered as bounding boxes and overlaid text:
@sphinxdirective
.. tab:: Linux
.. image:: ../img/inference_pipeline_script_lnx.png
.. tab:: Windows
.. image:: ../img/inference_pipeline_script_win.png
.. tab:: macOS
.. image:: ../img/inference_pipeline_script_mac.png
@endsphinxdirective
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## Benchmark Demo Script
The `demo_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to IR format.
3. Builds the Inference Engine Benchmark tool.
4. Runs the tool with the `car.png` image located in the `demo` directory.
### Example of Running the Benchmark Demo Script
@sphinxdirective
.. raw:: html
<div class="collapsible-section">
@endsphinxdirective
**Click for an example of running the Benchmark demo script**
To run the script that measures inference performance:
@sphinxdirective
.. tab:: Linux
.. code-block:: sh
./demo_benchmark_app.sh
.. tab:: Windows
.. code-block:: bat
.\demo_benchmark_app.bat
.. tab:: macOS
.. code-block:: sh
./demo_benchmark_app.sh
@endsphinxdirective
When the verification script is complete, you see the performance counters, resulting latency, and throughput values displayed on the screen.
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## Other Get Started Documents
For more get started documents, visit the pages below:
[Get Started with Sample and Demo Applications](get_started_demos.md)
[Get Started with Instructions](get_started_instructions.md)
@ -1,682 +0,0 @@
# Get Started with OpenVINO™ Toolkit on Windows* {#openvino_docs_get_started_get_started_windows}
The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models on Intel® hardware. This guide helps you get started with the OpenVINO™ toolkit you installed on Windows* OS.
In this guide, you will:
* Learn the OpenVINO™ inference workflow
* Run sample scripts that illustrate the workflow and perform the steps for you
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
The toolkit consists of three primary components:
* **Model Optimizer:** Optimizes models for Intel® architecture, converting models into a format compatible with the Inference Engine. This format is called an Intermediate Representation (IR).
* **Intermediate Representation:** The Model Optimizer output. A model converted to a format that has been optimized for Intel® architecture and is usable by the Inference Engine.
* **Inference Engine:** The software libraries that run inference against the IR (optimized model) to produce inference results.
In addition, sample scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
* **Sample Scripts** - Batch scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
* Utilize specific OpenVINO capabilities in an application.
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
* **[Demo Applications](@ref omz_demos)** - Console applications that provide robust application templates to help you implement specific deep learning scenarios. These applications involve increasingly complex processing pipelines that gather analysis data from several models that run inference simultaneously, such as detecting a person in a video stream along with detecting the person's physical attributes, such as age, gender, and emotional state.
## <a name="openvino-installation"></a>Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure
This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Windows*](../install_guides/installing-openvino-windows.md).
By default, the installation directory is `C:\Program Files (x86)\Intel\openvino_<version>`, referred to as `<INSTALL_DIR>`. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2022`.
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>\deployment_tools` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for the <code>deployment_tools</code> directory structure</strong>">
@endsphinxdirective
| Directory&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; | Description |
|:----------------------------------------|:--------------------------------------------------------------------------------------|
| `demo\` | Sample scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Demo Scripts](#use-openvino-demo-scripts) section.|
| `inference_engine\` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`bin\` | Inference Engine binaries.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`external\` | Third-party dependencies and drivers.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`include\` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`lib\` | Inference Engine static libraries.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`samples\` | Inference Engine samples. Contains source code for C++ and Python* samples and build scripts. See the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md). |
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`share\` | CMake configuration files for linking with Inference Engine.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`src\` | Source files for CPU extensions.|
| `~intel_models\` | Symbolic link to the `intel_models` subfolder of the `open_model_zoo` folder. |
| `model_optimizer\` | Model Optimizer directory. Contains configuration scripts, scripts to run the Model Optimizer and other files. See the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md). |
| `ngraph\` | nGraph directory. Includes the nGraph header and library files. |
| `open_model_zoo\` | Open Model Zoo directory. Includes the Model Downloader tool to download [pre-trained OpenVINO](@ref omz_models_group_intel) and public models, OpenVINO models documentation, demo applications and the Accuracy Checker tool to evaluate model accuracy.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`demos\` | Demo applications for inference scenarios. Also includes documentation and build scripts.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`intel_models\` | Pre-trained OpenVINO models and associated documentation. See the [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_group_intel).|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`models` | Intel's trained and public models that can be obtained with Model Downloader.|
| &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`tools\` | Model Downloader and Accuracy Checker tools. |
| `tools\` | Contains a symbolic link to the Model Downloader folder and auxiliary tools to work with your models: Calibration tool, Benchmark and Collect Statistics tools.|
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## <a name="workflow-overview"></a>OpenVINO™ Workflow Overview
The simplified OpenVINO™ workflow is:
1. **Get a trained model** for your inference task. Example inference tasks: pedestrian detection, face detection, vehicle detection, license plate recognition, head pose.
2. **Run the trained model through the Model Optimizer** to convert the model to an IR, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
3. **Use the Inference Engine API in the application** to run inference against the IR (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
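To make these steps concrete, the sketch below previews the kind of commands involved. The file names and directories are placeholders, and the verified, step-by-step commands appear later in this guide.
```bat
rem Step 2: convert a trained model to an IR with the Model Optimizer (placeholder paths).
mo --input_model C:\models\public\squeezenet1.1\squeezenet1.1.caffemodel --output_dir C:\models\ir

rem Step 3: run a compiled sample application against the produced IR on a chosen device.
classification_sample_async.exe -i car.png -m C:\models\ir\squeezenet1.1.xml -d CPU
```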
## Use the Sample Scripts to Learn the Workflow
The sample scripts in `<INSTALL_DIR>\samples\scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The scripts demonstrate how to:
* Compile several samples from the source files delivered as part of the OpenVINO toolkit
* Download trained models
* Perform pipeline steps and see the output on the console
> **REQUIRED**: You must have Internet access to run the sample scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
The sample scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
```bat
.\<script_name> -d [CPU, GPU, MYRIAD, HDDL]
```
Before running the sample or demo applications on Intel® Processor Graphics or Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, you must complete additional hardware configuration steps. For details, see the following sections in the [installation instructions](../install_guides/installing-openvino-windows.md):
* Additional Installation Steps for Intel® Processor Graphics (GPU)
* Additional Installation Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
The following paragraphs describe each sample script.
### Image Classification Sample Script
The `run_sample_squeezenet` script illustrates the image classification pipeline.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to the IR.
3. Builds the Image Classification Sample Async application.
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of running the Image Classification demo script</strong>">
@endsphinxdirective
To preview the image that the script will classify, open the `car.png` file in any image viewer.
To run the script to perform inference on a CPU:
```bat
.\run_sample_squeezenet.bat
```
When the script completes, you see the label and confidence for the top-10 categories:
```bat
Top 10 results:
Image C:\Program Files (x86)\Intel\openvino_2022\samples\scripts\car.png
classid probability label
------- ----------- -----
817 0.6853030 sports car, sport car
479 0.1835197 car wheel
511 0.0917197 convertible
436 0.0200694 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
751 0.0069604 racer, race car, racing car
656 0.0044177 minivan
717 0.0024739 pickup, pickup truck
581 0.0017788 grille, radiator grille
468 0.0013083 cab, hack, taxi, taxicab
661 0.0007443 Model T
[ INFO ] Classification sample execution successful
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### Inference Pipeline Demo Script
The `demo_security_barrier_camera` script demonstrates vehicle recognition, in which vehicle attributes build on each other to narrow in on a specific attribute.
The script:
1. Downloads three pre-trained model IRs.
2. Builds the Security Barrier Camera Demo application.
3. Runs the application with the downloaded models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline.
This application:
1. Gets the boundaries of an object identified as a vehicle with the first model.
2. Uses the vehicle identification as input to the second model, which identifies specific vehicle attributes, including the license plate.
3. Uses the license plate as input to the third model, which recognizes specific characters in the license plate.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of Running the Pipeline demo script</strong>">
@endsphinxdirective
To run the script performing inference on Intel® Processor Graphics:
```bat
.\demo_security_barrier_camera.bat -d GPU
```
When the verification script completes, you see an image that displays the resulting frame with detections rendered as bounding boxes and overlaid text:
![](../img/inference_pipeline_script_win.png)
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### Benchmark Sample Script
The `run_sample_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
The script:
1. Downloads a SqueezeNet model.
2. Runs the Model Optimizer to convert the model to the IR.
3. Builds the Inference Engine Benchmark tool.
4. Runs the tool with the `car.png` image located in the `demo` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of running the Benchmark demo script</strong>">
@endsphinxdirective
To run the script that performs inference (runs on CPU by default):
```bat
.\run_sample_benchmark_app.bat
```
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## <a name="using-sample-application"></a>Use Code Samples and Demo Applications to Learn the Workflow
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
You will perform the following steps:
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
3. <a href="#download-media">Download media files to run inference on.</a>
4. <a href="#run-image-classification">Run inference on the Image Classification Code Sample and see the results.</a>
5. <a href="#run-security-barrier">Run inference on the Security Barrier Camera Demo application and see the results.</a>
Each demo and code sample is a separate application, but they share the same basic behavior and components.
Inputs you need to specify when using a code sample or demo application:
- **A compiled OpenVINO™ code sample or demo application** that runs inference against a model converted to the IR format, using the other inputs you provide.
- **One or more models** in the IR format. Each model is trained for a specific task. Examples include pedestrian detection, face detection, vehicle detection, license plate recognition, head pose, and others. Different models are used for different applications. Models can be chained together to provide multiple features; for example, vehicle + make/model + license plate recognition.
- **One or more media files**. The media is typically a video file, but can be a still photo.
- **One or more target devices** on which you run inference. The target device can be the CPU, GPU, or VPU accelerator.
### Build the Code Samples and Demo Applications
The Image Classification Sample was automatically compiled when you ran the Image Classification sample script. The binary file is in the `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release` directory.
You can also build all available sample code and demo applications from the source files delivered with the OpenVINO™ toolkit. To learn how to do this, see the instruction in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
### <a name="download-models"></a> Step 1: Download the Models
You must have a model that is specific to your inference task. Example model types are:
- Classification (AlexNet, GoogleNet, SqueezeNet, others) - Detects one type of element in a frame.
- Object Detection (SSD, YOLO) - Draws bounding boxes around multiple types of objects.
- Custom (Often based on SSD)
Options to find a model suitable for the OpenVINO™ toolkit are:
- Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using the [Model Downloader tool](@ref omz_tools_downloader).
- Download from GitHub*, Caffe* Zoo, TensorFlow* Zoo, and other resources.
- Train your own model.
This guide uses the Model Downloader to get pre-trained models. You can use one of the following options to find a model:
* **List the models available in the downloader**:
```bat
cd <INSTALL_DIR>\tools\model_downloader\
```
```bat
python info_dumper.py --print_all
```
* **Use `findstr` (or `grep`, if available) to list models that match a specific name pattern**:
```bat
python info_dumper.py --print_all | findstr <model_name>
```
Use the Model Downloader to download the models to a models directory. This guide uses `<models_dir>` as the models directory and `<model_name>` as the model name:
```bat
python .\downloader.py --name <model_name> --output_dir <models_dir>
```
Download the following models if you want to run the Image Classification Sample and Security Barrier Camera Demo application:
|Model Name | Code Sample or Demo App |
|-----------------------------------------------|-----------------------------------------------------|
|`squeezenet1.1` | Image Classification Sample |
|`vehicle-license-plate-detection-barrier-0106` | Security Barrier Camera Demo |
|`vehicle-attributes-recognition-barrier-0039` | Security Barrier Camera Demo |
|`license-plate-recognition-barrier-0001` | Security Barrier Camera Demo |
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of downloading the SqueezeNet Caffe* model</strong>">
@endsphinxdirective
To download the SqueezeNet 1.1 Caffe* model to the `C:\Users\<USER_ID>\Documents\models` folder:
```bat
python .\downloader.py --name squeezenet1.1 --output_dir C:\Users\username\Documents\models
```
Your screen looks similar to this after the download:
```
################|| Downloading models ||################
========== Downloading C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.prototxt
... 100%, 9 KB, ? KB/s, 0 seconds passed
========== Downloading C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel
... 100%, 4834 KB, 571 KB/s, 8 seconds passed
################|| Post-processing ||################
========== Replacing text in C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.prototxt
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of downloading models for the Security Barrier Camera Demo application</strong>">
@endsphinxdirective
To download all three pre-trained models in FP16 precision to the `C:\Users\<USER_ID>\Documents\models` folder:
```bat
python .\downloader.py --name vehicle-license-plate-detection-barrier-0106,vehicle-attributes-recognition-barrier-0039,license-plate-recognition-barrier-0001 --output_dir C:\Users\username\Documents\models --precisions FP16
```
Your screen looks similar to this after the download:
```
################|| Downloading models ||################
========== Downloading C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.xml
... 100%, 207 KB, 13810 KB/s, 0 seconds passed
========== Downloading C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.bin
... 100%, 1256 KB, 70 KB/s, 17 seconds passed
========== Downloading C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.xml
... 100%, 32 KB, ? KB/s, 0 seconds passed
========== Downloading C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.bin
... 100%, 1222 KB, 277 KB/s, 4 seconds passed
========== Downloading C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.xml
... 100%, 47 KB, ? KB/s, 0 seconds passed
========== Downloading C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.bin
... 100%, 2378 KB, 120 KB/s, 19 seconds passed
################|| Post-processing ||################
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### <a name="convert-models-to-intermediate-representation"></a> Step 2: Convert the Models to the Intermediate Representation
In this step, your trained models are ready to run through the Model Optimizer to convert them to the Intermediate Representation (IR) format. This is required before using the Inference Engine with the model.
Models in the Intermediate Representation format always include a pair of `.xml` and `.bin` files. Make sure you have these files for the Inference Engine to find them.
- **REQUIRED:** `model_name.xml`
- **REQUIRED:** `model_name.bin`
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the <a href="#download-models">Download Models</a> section for an example of how to download this model.
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
1. Create an `<ir_dir>` directory to contain the model's IR.
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
3. Run the Model Optimizer script:
```bat
cd <INSTALL_DIR>\tools\model_optimizer
```
```bat
mo --input_model <model_dir>\<model_file> --data_type <model_precision> --output_dir <ir_dir>
```
The produced IR files are in the `<ir_dir>` directory.
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for an example of converting the SqueezeNet Caffe* model</strong>">
@endsphinxdirective
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` output directory:
```bat
mo --input_model C:\Users\<username>\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir C:\Users\username\Documents\models\public\squeezenet1.1\ir
```
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` directory.
Copy the `squeezenet1.1.labels` file from `<INSTALL_DIR>\samples\scripts\` to `<ir_dir>`. This file contains the class names used by ImageNet, so the inference results show text labels instead of class numbers:
```bat
copy <INSTALL_DIR>\samples\scripts\squeezenet1.1.labels <ir_dir>
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### <a name="download-media"></a> Step 3: Download a Video or a Still Photo as Media
Many sources are available from which you can download video media to use the code samples and demo applications. Possibilities include:
- https://videos.pexels.com
- https://images.google.com
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
* `<INSTALL_DIR>\samples\scripts\car.png`
* `<INSTALL_DIR>\samples\scripts\car_1.bmp`
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
> **NOTE**: The Image Classification code sample is automatically compiled when you run the Image Classification sample script. If you want to compile it manually, see the Build the Sample Applications on Microsoft Windows* OS section in [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md).
To run the **Image Classification** code sample with an input image on the IR:
1. Set up the OpenVINO environment variables:
```bat
<INSTALL_DIR>\setupvars.bat
```
2. Go to the code samples build directory:
```bat
cd C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release
```
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
```bat
classification_sample_async.exe -i <path_to_media> -m <path_to_model> -d <target_device>
```
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for examples of running the Image Classification code sample on different devices</strong>">
@endsphinxdirective
The following commands run the Image Classification Code Sample using the `car.png` file from the `<INSTALL_DIR>\samples\scripts` directory as an input image, the IR of your model from `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` and on different hardware devices:
**CPU:**
```bat
.\classification_sample_async -i <INSTALL_DIR>\samples\scripts\car.png -m C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d CPU
```
**GPU:**
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md).
```bat
.\classification_sample_async -i <INSTALL_DIR>\samples\scripts\car.png -m C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d GPU
```
**MYRIAD:**
```bat
.\classification_sample_async -i <INSTALL_DIR>\samples\scripts\car.png -m C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d MYRIAD
```
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
```bat
Top 10 results:
Image C:\Program Files (x86)\Intel\openvino_2022\samples\scripts\car.png
classid probability label
------- ----------- -----
817 0.8364177 sports car, sport car
511 0.0945683 convertible
479 0.0419195 car wheel
751 0.0091233 racer, race car, racing car
436 0.0068038 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
656 0.0037315 minivan
586 0.0025940 half track
717 0.0016044 pickup, pickup truck
864 0.0012045 tow truck, tow car, wrecker
581 0.0005833 grille, radiator grille
[ INFO ] Execution successful
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
### <a name="run-security-barrier"></a>Step 5: Run the Security Barrier Camera Demo Application
> **NOTE**: The Security Barrier Camera Demo Application is automatically compiled when you run the Inference Pipeline demo scripts. If you want to build it manually, see the instructions in the [Demo Applications Overview](@ref omz_demos) section.
To run the **Security Barrier Camera Demo Application** using an input image on the prepared IRs:
1. Set up the OpenVINO environment variables:
```bat
<INSTALL_DIR>\setupvars.bat
```
2. Go to the demo application build directory:
```bat
cd C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_demos_build\intel64\Release
```
3. Run the demo executable, specifying the input media file, list of model IRs, and a target device on which to perform inference:
```bat
.\security_barrier_camera_demo -i <path_to_media> -m <path_to_vehicle-license-plate-detection_model_xml> -m_va <path_to_vehicle_attributes_model_xml> -m_lpr <path_to_license_plate_recognition_model_xml> -d <target_device>
```
@sphinxdirective
.. raw:: html
<div class="collapsible-section" data-title="<strong>Click for examples of running the Security Barrier Camera demo application on different devices</strong>">
@endsphinxdirective
**CPU:**
```bat
.\security_barrier_camera_demo -i <INSTALL_DIR>\samples\scripts\car_1.bmp -m C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.xml -m_va C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.xml -m_lpr C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.xml -d CPU
```
**GPU:**
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md).
```bat
.\security_barrier_camera_demo -i <INSTALL_DIR>\samples\scripts\car_1.bmp -m <path_to_model>\vehicle-license-plate-detection-barrier-0106.xml -m_va <path_to_model>\vehicle-attributes-recognition-barrier-0039.xml -m_lpr <path_to_model>\license-plate-recognition-barrier-0001.xml -d GPU
```
**MYRIAD:**
```bat
.\security_barrier_camera_demo -i <INSTALL_DIR>\samples\scripts\car_1.bmp -m <path_to_model>\vehicle-license-plate-detection-barrier-0106.xml -m_va <path_to_model>\vehicle-attributes-recognition-barrier-0039.xml -m_lpr <path_to_model>\license-plate-recognition-barrier-0001.xml -d MYRIAD
```
@sphinxdirective
.. raw:: html
</div>
@endsphinxdirective
## <a name="basic-guidelines-sample-application"></a>Basic Guidelines for Using Code Samples and Demo Applications
Below you can find basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
1. Before using the OpenVINO™ samples, always set up the environment:
```bat
<INSTALL_DIR>\setupvars.bat
```
2. Make sure you have the directory paths for the following:
- Code Sample binaries located in `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release`
- Demo Application binaries located in `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_demos_build\intel64\Release`
- Media: Video or image. See <a href="#download-media">Download Media</a>.
- Model: Neural Network topology converted with the Model Optimizer to the IR format (.bin and .xml files). See <a href="#download-models">Download Models</a> for more information.
## <a name="syntax-examples"></a> Typical Code Sample and Demo Application Syntax Examples
This section explains how to build and use the sample and demo applications provided with the toolkit. You will need CMake 3.10 or later and Microsoft Visual Studio 2017 or 2019 installed. Build details are on the [Inference Engine Samples](../IE_DG/Samples_Overview.md) and [Demo Applications](@ref omz_demos) pages.
To build all the demos and samples:
```bat
cd %INTEL_OPENVINO_DIR%\inference_engine_samples\cpp
rem To compile the C samples, go here also: cd <INSTALL_DIR>\inference_engine\samples\c
build_samples_msvc.bat
cd %INTEL_OPENVINO_DIR%\deployment_tools\open_model_zoo\demos
build_demos_msvc.bat
```
Depending on what you compiled, executables are in the directories below:
* `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_c_samples_build\intel64\Release`
* `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release`
* `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\omz_demos_build\intel64\Release`
Template to call sample code or a demo application:
```bat
<path_to_app> -i <path_to_media> -m <path_to_model> -d <target_device>
```
## <a name="advanced-samples"></a> Advanced Demo Use
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
For example, an SSD detects a variety of objects in a frame, then age, gender, head pose, emotion recognition and similar models target the objects classified by the SSD to perform their functions.
In these cases, the use pattern in the last part of the template above is usually:
`-m_<acronym> … -d_<acronym> …`
For head pose:
`-m_hp <headpose model> -d_hp <headpose hardware target>`
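Combining the template above with these per-model options, a hypothetical multi-model invocation might look like the following. The demo name, model files, and devices are placeholders, so check the options a particular demo actually accepts with `-h`.
```bat
rem Hypothetical call: detection model on GPU, head pose model on CPU (all names are placeholders).
<path_to_demo> -i <path_to_video> -m <detection_model>.xml -d GPU -m_hp <headpose_model>.xml -d_hp CPU
```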
You can see all of a sample application's parameters by adding the `-h` or `--help` option at the command line.
## Additional Resources
Use these resources to learn more about the OpenVINO™ toolkit:
* [OpenVINO™ Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes)
* [OpenVINO™ Toolkit Overview](../index.md)
* [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
* [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md)
* [Overview of OpenVINO™ Toolkit Pre-Trained Models](https://software.intel.com/en-us/openvino-toolkit/documentation/pretrained-models)
@ -3,7 +3,7 @@
This tutorial describes the example from the following YouTube* video:
///
To learn more about how to run the MonoDepth Python* demo application, refer to the [documentation](https://docs.openvinotoolkit.org/latest/omz_demos_monodepth_demo_python.html).
To learn more about how to run the MonoDepth Python* demo application, refer to the [documentation](https://docs.openvino.ai/latest/omz_demos_monodepth_demo_python.html).
Tested on OpenVINO™ 2021, Ubuntu 18.04.
@ -133,8 +133,8 @@ Run Accuracy Checker on the original model:
accuracy_check -c mobilenet-v2.yml
```
Install the Benchmark Tool first. To learn more about Benchmark Tool refer to [Benchmark C++ Tool](https://docs.openvinotoolkit.org/latest/openvino_inference_engine_samples_benchmark_app_README.html)
or [Benchmark Python* Tool](https://docs.openvinotoolkit.org/latest/openvino_inference_engine_tools_benchmark_tool_README.html).
Install the Benchmark Tool first. To learn more about Benchmark Tool refer to [Benchmark C++ Tool](https://docs.openvino.ai/latest/openvino_inference_engine_samples_benchmark_app_README.html)
or [Benchmark Python* Tool](https://docs.openvino.ai/latest/openvino_inference_engine_tools_benchmark_tool_README.html).
Run performance benchmark:
```
@ -1,4 +1,4 @@
# "Hot Topic" How-To Links {#openvino_docs_how_tos_how_to_links}
# "Hot Topic" How-To Links
## Blogs & Articles
@ -1,112 +0,0 @@
# OpenVINO™ Toolkit Overview {#openvino_overview}
## Introduction
OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that solve a variety of tasks including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and many others. Based on the latest generations of artificial neural networks, including Convolutional Neural Networks (CNNs), recurrent and attention-based networks, the toolkit extends computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance, AI and deep learning inference deployed from edge to cloud.
OpenVINO™ toolkit:
- Enables CNN-based deep learning inference on the edge
- Supports heterogeneous execution across an Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2 and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels
- Includes optimized calls for computer vision standards, including OpenCV\* and OpenCL™
## OpenVINO™ Toolkit Workflow
The following diagram illustrates the typical OpenVINO™ workflow (click to see the full-size image):
![](img/OpenVINO-diagram.png)
### Model Preparation, Conversion and Optimization
You can use your framework of choice to prepare and train a deep learning model or just download a pre-trained model from the Open Model Zoo. The Open Model Zoo includes deep learning solutions to a variety of vision problems, including object recognition, face recognition, pose estimation, text detection, and action recognition, at a range of measured complexities.
Several of these pre-trained models are also used in the [code samples](IE_DG/Samples_Overview.md) and [application demos](@ref omz_demos). To download models from the Open Model Zoo, use the [Model Downloader](@ref omz_tools_downloader) tool.
One of the core components of the OpenVINO™ toolkit is the [Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md), a cross-platform command-line tool that converts a trained neural network from its source framework to an open-source, nGraph-compatible [Intermediate Representation (IR)](MO_DG/IR_and_opsets.md) for use in inference operations. The Model Optimizer imports models trained in popular frameworks such as Caffe*, TensorFlow*, MXNet*, Kaldi*, and ONNX* and performs a few optimizations to remove excess layers and group operations when possible into simpler, faster graphs.
![](img/OV-diagram-step2.png)
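As a simple illustration, converting a downloaded model could look like the following sketch; the model file name and output directory are placeholders, and the exact options depend on the source framework (see the Model Optimizer Developer Guide).
```bat
rem Convert a trained Caffe* model to an IR pair (.xml and .bin); paths are placeholders.
mo --input_model squeezenet1.1.caffemodel --output_dir ir
```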
If your neural network model contains layers that are not in the list of known layers for supported frameworks, you can adjust the conversion and optimization process through use of [Custom Layers](HOWTO/Custom_Layers_Guide.md).
Run the [Accuracy Checker utility](@ref omz_tools_accuracy_checker) either against source topologies or against the output representation to evaluate the accuracy of inference. The Accuracy Checker is also part of the [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction), an integrated web-based performance analysis studio.
Use the [Post-training Optimization Tool](@ref pot_README) to accelerate the inference of a deep learning model by quantizing it to INT8.
Useful documents for model optimization:
* [Model Optimizer Developer Guide](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md)
* [Intermediate Representation and Opsets](MO_DG/IR_and_opsets.md)
* [Custom Layers Guide](HOWTO/Custom_Layers_Guide.md)
* [Accuracy Checker utility](@ref omz_tools_accuracy_checker)
* [Post-training Optimization Tool](@ref pot_README)
* [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction)
* [Model Downloader](@ref omz_tools_downloader) utility
* [Intel's Pretrained Models (Open Model Zoo)](@ref omz_models_group_intel)
* [Public Pretrained Models (Open Model Zoo)](@ref omz_models_group_public)
### Running and Tuning Inference
The other core component of OpenVINO™ is the [Inference Engine](IE_DG/Deep_Learning_Inference_Engine_DevGuide.md), which manages the loading and compiling of the optimized neural network model, runs inference operations on input data, and outputs the results. Inference Engine can execute synchronously or asynchronously, and its plugin architecture manages the appropriate compilations for execution on multiple Intel® devices, including both workhorse CPUs and specialized graphics and video processing platforms (see below, Packaging and Deployment).
You can use OpenVINO™ Tuning Utilities with the Inference Engine to trial and test inference on your model. The Benchmark utility uses an input model to run iterative tests for throughput or latency measures, and the [Cross Check Utility](../tools/cross_check_tool/README.md) compares performance of differently configured inferences.
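For instance, a quick throughput and latency check with the Benchmark tool might look like this; the IR path and target device are placeholders.
```bat
rem Benchmark an IR on the CPU; substitute your own model path and target device.
benchmark_app -m ir\squeezenet1.1.xml -d CPU
```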
For a full browser-based studio integrating these other key tuning utilities, try the [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction).
![](img/OV-diagram-step3.png)
OpenVINO™ toolkit includes a set of [inference code samples](IE_DG/Samples_Overview.md) and [application demos](@ref omz_demos) showing how inference is run and output processed for use in retail environments, classrooms, smart camera applications, and other solutions.
OpenVINO also makes use of open-source and Intel™ tools for traditional graphics processing and performance management. Intel® Media SDK supports accelerated rich-media processing, including transcoding. OpenVINO™ optimizes calls to the rich OpenCV and OpenVX libraries for processing computer vision workloads. And the new DL Streamer integration further accelerates video pipelining and performance.
Useful documents for inference tuning:
* [Inference Engine Developer Guide](IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
* [Inference Engine API References](./api_references.html)
* [Inference Code Samples](IE_DG/Samples_Overview.md)
* [Application Demos](@ref omz_demos)
* [Low Precision Optimization Guide](@ref pot_docs_LowPrecisionOptimizationGuide)
* [Deep Learning Workbench Guide](@ref workbench_docs_Workbench_DG_Introduction)
* [Intel Media SDK](https://github.com/Intel-Media-SDK/MediaSDK)
* [DL Streamer Samples](@ref gst_samples_README)
* [OpenCV](https://docs.opencv.org/master/)
* [OpenVX](https://software.intel.com/en-us/openvino-ovx-guide)
### Packaging and Deployment
The Intel Distribution of OpenVINO™ toolkit outputs optimized inference runtimes for the following devices:
* Intel® CPUs
* Intel® Processor Graphics
* Intel® Neural Compute Stick 2
* Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
The Inference Engine's plug-in architecture can be extended to meet other specialized needs. [Deployment Manager](./install_guides/deployment-manager-tool.md) is a Python* command-line tool that assembles the tuned model, IR files, your application, and required dependencies into a runtime package for your target device. It outputs packages for CPU, GPU, and VPU on Linux* and Windows*, and Neural Compute Stick-optimized packages with Linux.
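As an example, packaging a CPU-only runtime with the Deployment Manager could resemble the sketch below. The option names reflect one recent release and the paths are placeholders, so treat this as an assumption and check the Deployment Manager Guide for the exact syntax.
```bat
rem Assemble a deployable CPU runtime package together with your application files (placeholder paths).
python deployment_manager.py --targets cpu --user_data C:\my_app --output_dir C:\deployment_package --archive_name my_cpu_package
```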
* [Inference Engine Integration Workflow](IE_DG/Integrate_with_customer_application_new_API.md)
* [Inference Engine API References](./api_references.html)
* [Inference Engine Plug-in Developer Guide](./ie_plugin_api/index.html)
* [Deployment Manager Guide](./install_guides/deployment-manager-tool.md)
## OpenVINO™ Toolkit Components
Intel® Distribution of OpenVINO™ toolkit includes the following components:
- [Deep Learning Model Optimizer](MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md): A cross-platform command-line tool for importing models and preparing them for optimal execution with the Inference Engine. The Model Optimizer imports, converts, and optimizes models, which were trained in popular frameworks, such as Caffe*, TensorFlow*, MXNet*, Kaldi*, and ONNX*.
- [Deep Learning Inference Engine](IE_DG/Deep_Learning_Inference_Engine_DevGuide.md): A unified API to allow high performance inference on many hardware types including Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, Intel® Vision Accelerator Design with Intel® Movidius™ vision processing unit (VPU).
- [Inference Engine Samples](IE_DG/Samples_Overview.md): A set of simple console applications demonstrating how to use the Inference Engine in your applications.
- [Deep Learning Workbench](@ref workbench_docs_Workbench_DG_Introduction): A web-based graphical environment that allows you to easily use various sophisticated OpenVINO™ toolkit components.
- [Post-training Optimization Tool](@ref pot_README): A tool to calibrate a model and then execute it in the INT8 precision.
- Additional Tools: A set of tools to work with your models including [Benchmark App](../tools/benchmark_tool/README.md), [Cross Check Tool](../tools/cross_check_tool/README.md), [Compile tool](../tools/compile_tool/README.md).
- [Open Model Zoo](@ref omz_models_group_intel)
- [Demos](@ref omz_demos): Console applications that provide robust application templates to help you implement specific deep learning scenarios.
- Additional Tools: A set of tools to work with your models including [Accuracy Checker Utility](@ref omz_tools_accuracy_checker) and [Model Downloader](@ref omz_tools_downloader).
- [Documentation for Pretrained Models](@ref omz_models_group_intel): Documentation for pre-trained models that are available in the [Open Model Zoo repository](https://github.com/openvinotoolkit/open_model_zoo).
- Deep Learning Streamer (DL Streamer): Streaming analytics framework, based on GStreamer, for constructing graphs of media analytics components. DL Streamer can be installed by the Intel® Distribution of OpenVINO™ toolkit installer. Its open-source version is available on [GitHub](https://github.com/openvinotoolkit/dlstreamer_gst). For the DL Streamer documentation, see:
- [DL Streamer Samples](@ref gst_samples_README)
- [API Reference](https://openvinotoolkit.github.io/dlstreamer_gst/)
- [Elements](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Elements)
- [Tutorial](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/DL-Streamer-Tutorial)
- [OpenCV](https://docs.opencv.org/master/): OpenCV* community version compiled for Intel® hardware
- [Intel® Media SDK](https://software.intel.com/en-us/media-sdk) (in Intel® Distribution of OpenVINO™ toolkit for Linux only)
The OpenVINO™ toolkit open-source version is available on [GitHub](https://github.com/openvinotoolkit/openvino). To build the Inference Engine from the source code, see the <a href="https://github.com/openvinotoolkit/openvino/wiki/BuildingCode">build instructions</a>.
## OpenVINO™ API 2.0
The new OpenVINO™ API 2.0 was introduced to make the OpenVINO™ interface more user-friendly and to align OpenVINO™ with other frameworks.
The [migration guide](@ref ov_2_0_transition_guide) helps you simplify the process of migrating applications from the old API to OpenVINO™ API 2.0.
@ -156,7 +156,7 @@ sudo apt autoremove intel-openvino-<PACKAGE_TYPE>-ubuntu<OS_VERSION>-<VERSION>.<
**Additional Resources**
- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit).
- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org).
- OpenVINO™ toolkit online documentation: [https://docs.openvino.ai](https://docs.openvino.ai).
- [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
- [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md).
- For more information on Sample Applications, see the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md).
@ -79,7 +79,7 @@ Now you can start developing your application.
## Additional Resources
- Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit).
- OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org).
- OpenVINO™ toolkit online documentation: [https://docs.openvino.ai](https://docs.openvino.ai).
- [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
- [Inference Engine Developer Guide](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md).
- For more information on Sample Applications, see the [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md).
@ -95,7 +95,7 @@ GPU Acceleration in Windows containers feature requires to meet Windows host, Op
- The container base image must be `mcr.microsoft.com/windows:1809` or higher. Windows Server Core and Nano Server container images are not currently supported.
- The container host must be running Docker Engine 19.03 or higher.
- The container host must have GPU running display drivers of version WDDM 2.5 or higher.
- [OpenVINO™ GPU requirement](https://docs.openvinotoolkit.org/latest/openvino_docs_install_guides_installing_openvino_windows.html#Install-GPU):
- [OpenVINO™ GPU requirement](https://docs.openvino.ai/latest/openvino_docs_install_guides_installing_openvino_windows.html#Install-GPU):
- Intel Graphics Driver for Windows of version 15.65 or higher.
- [Docker isolation mode requirement](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container):
- Windows host and container version tags must match.
@ -4,7 +4,7 @@
## Introduction
By default, the [OpenVINO™ Toolkit](https://docs.openvinotoolkit.org/latest/index.html) installation on this page installs the following components:
By default, the [OpenVINO™ Toolkit](https://docs.openvino.ai/latest/index.html) installation on this page installs the following components:
| Component | Description |
|-----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@ -293,7 +293,7 @@ Proceed to the <a href="#get-started">Start Using the Toolkit</a> section to lea
## <a name="get-started"></a>Step 6: Start Using the Toolkit
Now you are ready to try out the toolkit. To continue, see the [Get Started Guide](../get_started/get_started.md) section to learn the basic OpenVINO™ toolkit workflow and run code samples and demo applications with pre-trained models on different inference devices.
Now you are ready to try out the toolkit. To continue, see the [Get Started Guide](../get_started.md) section to learn the basic OpenVINO™ toolkit workflow and run code samples and demo applications with pre-trained models on different inference devices.
## Troubleshooting
@ -4,7 +4,7 @@
## Introduction
By default, the [OpenVINO™ Toolkit](https://docs.openvinotoolkit.org/latest/index.html) installation on this page installs the following components:
By default, the [OpenVINO™ Toolkit](https://docs.openvino.ai/latest/index.html) installation on this page installs the following components:
| Component | Description |
|-----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@ -187,11 +187,11 @@ Proceed to the <a href="#get-started">Start Using the Toolkit</a> section to lea
## <a name="get-started"></a>Step 5: Start Using the Toolkit
Now you are ready to try out the toolkit. To continue, see the [Get Started Guide](../get_started/get_started.md) section to learn the basic OpenVINO™ toolkit workflow and run code samples and demo applications with pre-trained models on different inference devices.
Now you are ready to try out the toolkit. To continue, see the [Get Started Guide](../get_started.md) section to learn the basic OpenVINO™ toolkit workflow and run code samples and demo applications with pre-trained models on different inference devices.
## <a name="uninstall"></a>Uninstall the Intel® Distribution of OpenVINO™ Toolkit
To uninstall, follow the steps on the [Uninstalling page](uninstalling_openvino.md).
To uninstall, follow the steps on the [Uninstalling page](uninstalling-openvino.md).
## Additional Resources