Merge remote-tracking branch 'upstream/master' into sy/test/ConvolutionLayerTest_dynamic_shape_case
commit a8890950d3
@@ -33,7 +33,7 @@ jobs:
 INSTALL_DIR: $(WORK_DIR)/install_pkg
 INSTALL_TEST_DIR: $(INSTALL_DIR)/tests
 LAYER_TESTS_DIR: $(INSTALL_TEST_DIR)/layer_tests
-SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh
+SETUPVARS: $(INSTALL_DIR)/setupvars.sh

 steps:
 - script: |

@@ -171,11 +171,11 @@ jobs:
 - script: ls -alR $(INSTALL_DIR)
 displayName: 'List install files'

-- script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/cpp/build_samples.sh
+- script: $(INSTALL_DIR)/samples/cpp/build_samples.sh
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build cpp samples'

-- script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/c/build_samples.sh
+- script: $(INSTALL_DIR)/samples/c/build_samples.sh
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build c samples'

@@ -189,8 +189,8 @@ jobs:
 continueOnError: false

 - script: |
-export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer
+export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer
-. $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/deployment_tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml
+. $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml
 displayName: 'Model Optimizer UT'
 continueOnError: false

@@ -258,7 +258,7 @@ jobs:
 - script: |
 . $(SETUPVARS)
 python3 -m pip install -r requirements.txt
-export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer
+export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer
 export PYTHONPATH=$(LAYER_TESTS_DIR):$PYTHONPATH
 python3 -m pytest tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=TEST-tf_Roll.xmlTEST
 workingDirectory: $(LAYER_TESTS_DIR)

@@ -17,7 +17,7 @@ jobs:
 WORK_DIR: $(Pipeline.Workspace)/_w
 BUILD_DIR: $(WORK_DIR)/build
 INSTALL_DIR: $(WORK_DIR)/install_pkg
-SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh
+SETUPVARS: $(INSTALL_DIR)/setupvars.sh

 steps:
 - script: |
@@ -110,44 +110,44 @@ jobs:
 displayName: 'Install'

 - script: |
-source $(INSTALL_DIR)/bin/setupvars.sh
+source $(INSTALL_DIR)/setupvars.sh
 CXXFLAGS="-Wno-error=deprecated-declarations" ./build.sh --config RelWithDebInfo --use_openvino CPU_FP32 --build_shared_lib --parallel --skip_tests --build_dir $(ONNXRUNTIME_BUILD_DIR)
 workingDirectory: $(ONNXRUNTIME_REPO_DIR)
 displayName: 'Build Lin ONNX Runtime'

 - script: |
-source $(INSTALL_DIR)/bin/setupvars.sh
+source $(INSTALL_DIR)/setupvars.sh
 skip_tests=`tr -s '\n ' ':' < $(ONNXRUNTIME_UTILS)/skip_tests`
 ./onnxruntime_test_all --gtest_filter=-$skip_tests
 workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo
 displayName: 'Run onnxruntime_test_all'

 - script: |
-source $(INSTALL_DIR)/bin/setupvars.sh
+source $(INSTALL_DIR)/setupvars.sh
 ./onnxruntime_shared_lib_test
 workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo
 displayName: 'Run onnxruntime_shared_lib_test'

 - script: |
-source $(INSTALL_DIR)/bin/setupvars.sh
+source $(INSTALL_DIR)/setupvars.sh
 ./onnxruntime_global_thread_pools_test
 workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo
 displayName: 'Run onnxruntime_global_thread_pools_test'

 - script: |
-source $(INSTALL_DIR)/bin/setupvars.sh
+source $(INSTALL_DIR)/setupvars.sh
 ./onnxruntime_api_tests_without_env
 workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo
 displayName: 'Run onnxruntime_api_tests_without_env'

 - script: |
-source $(INSTALL_DIR)/bin/setupvars.sh
+source $(INSTALL_DIR)/setupvars.sh
 ./onnx_test_runner "$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-converted"
 workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo
 displayName: 'Run pytorch-converted tests'

 - script: |
-source $(INSTALL_DIR)/bin/setupvars.sh
+source $(INSTALL_DIR)/setupvars.sh
 ./onnx_test_runner "$(ONNXRUNTIME_REPO_DIR)/cmake/external/onnx/onnx/backend/test/data/pytorch-operator"
 workingDirectory: $(ONNXRUNTIME_BUILD_DIR)/RelWithDebInfo
 displayName: 'Run pytorch-operator tests'
@@ -30,7 +30,7 @@ jobs:
 BUILD_DIR: $(WORK_DIR)/build
 INSTALL_DIR: $(WORK_DIR)/install_pkg
 INSTALL_TEST_DIR: $(INSTALL_DIR)/tests
-SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh
+SETUPVARS: $(INSTALL_DIR)/setupvars.sh

 steps:
 - script: |

@@ -33,7 +33,7 @@ jobs:
 MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
 INSTALL_DIR: $(WORK_DIR)\install_pkg
 INSTALL_TEST_DIR: $(INSTALL_DIR)\tests
-SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat
+SETUPVARS: $(INSTALL_DIR)\setupvars.bat
 IB_DIR: C:\Program Files (x86)\IncrediBuild
 IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe

@@ -132,11 +132,11 @@ jobs:
 - script: dir $(INSTALL_DIR) /s
 displayName: 'List install files'

-- script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\cpp\build_samples_msvc.bat
+- script: $(INSTALL_DIR)\samples\cpp\build_samples_msvc.bat
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build cpp samples'

-- script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\c\build_samples_msvc.bat
+- script: $(INSTALL_DIR)\samples\c\build_samples_msvc.bat
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build c samples'

@@ -19,7 +19,7 @@ jobs:
 MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
 MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
 INSTALL_DIR: $(WORK_DIR)\install_pkg
-SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat
+SETUPVARS: $(INSTALL_DIR)\setupvars.bat

 steps:
 - script: |
@@ -75,7 +75,7 @@ RUN make -j $(nproc) install

 # Run tests via tox
 WORKDIR /openvino/runtime/bindings/python
-ENV OpenVINO_DIR=/openvino/dist/deployment_tools/inference_engine/share
+ENV OpenVINO_DIR=/openvino/dist/runtime/cmake
-ENV LD_LIBRARY_PATH=/openvino/dist/deployment_tools/ngraph/lib
+ENV LD_LIBRARY_PATH=/openvino/dist/runtime/lib:/openvino/dist/runtime/3rdparty/tbb/lib
 ENV PYTHONPATH=/openvino/bin/intel64/${BUILD_TYPE}/lib/python_api/python3.8:${PYTHONPATH}
 CMD tox
.github/workflows/build_doc.yml (vendored, 1 changed line)

@@ -14,6 +14,7 @@ jobs:

 - name: Install dependencies
 run: |
+sudo apt update
 sudo apt --assume-yes install libusb-1.0-0-dev graphviz texlive
 python3 -m pip install lxml
 # install doxygen
.github/workflows/code_style.yml (vendored, 13 changed lines)

@@ -10,10 +10,13 @@ jobs:
 submodules: recursive

 - name: Install clang-format-9
-run: sudo apt --assume-yes install clang-format-9
+run: |
+sudo apt update
+sudo apt --assume-yes install clang-format-9

 - name: Install dependencies
 run: |
+sudo apt update
 sudo apt --assume-yes install libusb-1.0-0-dev
 python3 -m pip install --upgrade pip
 python3 -m pip install -r ./inference-engine/ie_bridges/python/requirements.txt

@@ -52,7 +55,9 @@ jobs:
 submodules: recursive

 - name: Install ShellCheck
-run: sudo apt --assume-yes install shellcheck
+run: |
+sudo apt update
+sudo apt --assume-yes install shellcheck

 - name: Install dependencies
 run: python3 -m pip install -r ./inference-engine/ie_bridges/python/requirements.txt

@@ -75,7 +80,9 @@ jobs:
 submodules: recursive

 - name: Install Clang dependency
-run: sudo apt --assume-yes install libclang-9-dev
+run: |
+sudo apt update
+sudo apt --assume-yes install libclang-9-dev

 - name: Install Python-based dependencies
 run: python3 -m pip install -r cmake/developer_package/ncc_naming_style/requirements_dev.txt
.github/workflows/mo.yml (vendored, 1 changed line)

@@ -41,6 +41,7 @@ jobs:
 pip install -r requirements.txt
 pip install -r requirements_dev.txt
 # requrements for CMake
+sudo apt update
 sudo apt --assume-yes install libusb-1.0-0-dev
 working-directory: model-optimizer
@@ -85,7 +85,6 @@ include(cmake/test_model_zoo.cmake)
 add_subdirectory(thirdparty)
 add_subdirectory(openvino)
 add_subdirectory(ngraph)
-
 add_subdirectory(runtime)
 add_subdirectory(inference-engine)

@@ -89,7 +89,7 @@ if(THREADING STREQUAL "OMP")
 ie_cpack_add_component(omp REQUIRED)
 file(GLOB_RECURSE source_list "${OMP}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*")
 install(FILES ${source_list}
-DESTINATION "deployment_tools/inference_engine/external/omp/lib"
+DESTINATION "runtime/3rdparty/omp/lib"
 COMPONENT omp)
 endif()
@@ -10,16 +10,15 @@ include(CPackComponent)
 #
 # Set library directory for cpack
 #
-set(IE_CPACK_IE_DIR deployment_tools/inference_engine)
 function(ie_cpack_set_library_dir)
 if(WIN32)
-set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
+set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
-set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
+set(IE_CPACK_RUNTIME_PATH runtime/bin/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
-set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
+set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER}/$<CONFIG> PARENT_SCOPE)
 else()
-set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE)
+set(IE_CPACK_LIBRARY_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
-set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE)
+set(IE_CPACK_RUNTIME_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
-set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE)
+set(IE_CPACK_ARCHIVE_PATH runtime/lib/${ARCH_FOLDER} PARENT_SCOPE)
 endif()
 endfunction()
@@ -313,7 +313,7 @@ operation for the CPU plugin. The code of the library is described in the [Exte
 To build the extension, run the following:<br>
 ```bash
 mkdir build && cd build
-source /opt/intel/openvino_2021/bin/setupvars.sh
+source /opt/intel/openvino_2022/setupvars.sh
 cmake .. -DCMAKE_BUILD_TYPE=Release
 make --jobs=$(nproc)
 ```

@@ -8,11 +8,11 @@ The Cross Check Tool can compare metrics per layer or all over the model.

 On Linux* OS, before running the Cross Check Tool binary, make sure your application can find the
 Deep Learning Inference Engine libraries.
-Navigate to the `<INSTALL_DIR>/deployment_tools/inference_engine/bin` folder and run the `setvars.sh` script to
+Navigate to the `<INSTALL_DIR>` folder and run the `setupvars.sh` script to
 set all necessary environment variables:

 ```sh
-source setvars.sh
+source setupvars.sh
 ```

 ## Running the Cross Check Tool

@@ -79,9 +79,9 @@ Make sure those libraries are in your computer's path or in the place you pointe
 * Windows: `PATH`
 * macOS: `DYLD_LIBRARY_PATH`

-On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables.
+On Linux and macOS, use the script `setupvars.sh` to set the environment variables.

-On Windows, run the `bin\setupvars.bat` batch file to set the environment variables.
+On Windows, run the `setupvars.bat` batch file to set the environment variables.

 To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter.
@@ -4,7 +4,7 @@ The GPU codepath abstracts many details about OpenCL\*. You need to provide the

 There are two options of using the custom operation configuration file:

-* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `<INSTALL_DIR>/deployment_tools/inference_engine/bin/intel64/{Debug/Release}` folder
+* Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `<INSTALL_DIR>/runtime/bin` folder
 * Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom operations to the plugin:

 @snippet snippets/GPU_Kernel.cpp part0
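(The `@snippet` placeholder above is not expanded in this view. Purely as an illustration, and not part of the commit, a `SetConfig()` call of the kind the bullet describes might look like the sketch below; the file names `custom_kernels.xml` and `model.xml` are assumed placeholders.)

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    // Pass the custom kernel configuration file to the GPU plugin
    // before the network that uses the custom operations is loaded.
    core.SetConfig({{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE,
                     "custom_kernels.xml"}},   // placeholder file name
                   "GPU");
    auto network    = core.ReadNetwork("model.xml");  // placeholder model
    auto executable = core.LoadNetwork(network, "GPU");
    return 0;
}
```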
@@ -15,18 +15,18 @@ To customize your topology with an OpenCL layer, follow the steps below:
 > **NOTE:** OpenCL compiler, targeting Intel® Neural Compute Stick 2 for the SHAVE* processor only, is redistributed with OpenVINO.
 OpenCL support is provided by ComputeAorta*, and is distributed under a license agreement between Intel® and Codeplay* Software Ltd.

-The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline compilation only, so first compile OpenCL C code using the standalone `clc` compiler. You can find the compiler binary at `<INSTALL_DIR>/deployment_tools/tools/cl_compiler`.
+The OpenCL toolchain for the Intel® Neural Compute Stick 2 supports offline compilation only, so first compile OpenCL C code using the standalone `clc` compiler. You can find the compiler binary at `<INSTALL_DIR>/tools/cl_compiler`.

 > **NOTE:** By design, custom OpenCL layers support any OpenCL kernels written with 1.2 version assumed. It also supports half float extension and is optimized for this type, because it is a native type for Intel® Movidius™ VPUs.

 1. Prior to running a compilation, make sure that the following variables are set:
-* `SHAVE_MA2X8XLIBS_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/lib/`
+* `SHAVE_MA2X8XLIBS_DIR=<INSTALL_DIR>/tools/cl_compiler/lib/`
-* `SHAVE_LDSCRIPT_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/ldscripts/`
+* `SHAVE_LDSCRIPT_DIR=<INSTALL_DIR>/tools/cl_compiler/ldscripts/`
-* `SHAVE_MYRIAD_LD_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/bin/`
+* `SHAVE_MYRIAD_LD_DIR=<INSTALL_DIR>/tools/cl_compiler/bin/`
-* `SHAVE_MOVIASM_DIR=<INSTALL_DIR>/deployment_tools/tools/cl_compiler/bin/`
+* `SHAVE_MOVIASM_DIR=<INSTALL_DIR>/tools/cl_compiler/bin/`
 2. Run the compilation with the command below. You should use `--strip-binary-header` to make an OpenCL runtime-agnostic binary runnable with the Inference Engine.
 ```bash
-cd <INSTALL_DIR>/deployment_tools/tools/cl_compiler/bin
+cd <INSTALL_DIR>/tools/cl_compiler/bin
 ./clc --strip-binary-header custom_layer.cl -o custom_layer.bin
 ```

@@ -19,7 +19,6 @@ Glossary {#openvino_docs_IE_DG_Glossary}
 | ELU | Exponential Linear rectification Unit |
 | FCN | Fully Convolutional Network |
 | FP | Floating Point |
-| FPGA | Field-Programmable Gate Array |
 | GCC | GNU Compiler Collection |
 | GPU | Graphics Processing Unit |
 | HD | High Definition |

@@ -29,8 +29,6 @@ The function returns list of available devices, for example:
 ```
 MYRIAD.1.2-ma2480
 MYRIAD.1.4-ma2480
-FPGA.0
-FPGA.1
 CPU
 GPU.0
 GPU.1
@@ -173,7 +173,7 @@ Note that casting `Blob` to `TBlob` via `std::dynamic_pointer_cast` is not the r
 ## Build Your Application

 For details about building your application, refer to the CMake files for the sample applications.
-All samples source code is located in the `<INSTALL_DIR>/openvino/inference_engine/samples` directory, where `INSTALL_DIR` is the OpenVINO™ installation directory.
+All samples source code is located in the `<INSTALL_DIR>/samples` directory, where `INSTALL_DIR` is the OpenVINO™ installation directory.

 ### CMake project creation

@@ -199,7 +199,7 @@ add_executable(${PROJECT_NAME} src/main.cpp)
 target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime ${OpenCV_LIBS})
 ```
 3. **To build your project** using CMake with the default build tools currently available on your machine, execute the following commands:
-> **NOTE**: Make sure you set environment variables first by running `<INSTALL_DIR>/bin/setupvars.sh` (or setupvars.bat for Windows)`. Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls.
+> **NOTE**: Make sure you set environment variables first by running `<INSTALL_DIR>/setupvars.sh` (or setupvars.bat for Windows)`. Otherwise the `InferenceEngine_DIR` and `OpenCV_DIR` variables won't be configured properly to pass `find_package` calls.
 ```sh
 cd build/
 cmake ../project

@@ -3,9 +3,9 @@
 The Inference Engine sample applications are simple console applications that show how to utilize specific Inference Engine capabilities within an application, assist developers in executing specific tasks such as loading a model, running inference, querying specific device capabilities and etc.

 After installation of Intel® Distribution of OpenVINO™ toolkit, С, C++ and Python* sample applications are available in the following directories, respectively:
-* `<INSTALL_DIR>/inference_engine/samples/c`
+* `<INSTALL_DIR>/samples/c`
-* `<INSTALL_DIR>/inference_engine/samples/cpp`
+* `<INSTALL_DIR>/samples/cpp`
-* `<INSTALL_DIR>/inference_engine/samples/python`
+* `<INSTALL_DIR>/samples/python`

 Inference Engine sample applications include the following:
@@ -64,7 +64,7 @@ The officially supported Linux* build environment is the following:

 > **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode).

-To build the C or C++ sample applications for Linux, go to the `<INSTALL_DIR>/inference_engine/samples/c` or `<INSTALL_DIR>/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script:
+To build the C or C++ sample applications for Linux, go to the `<INSTALL_DIR>/samples/c` or `<INSTALL_DIR>/samples/cpp` directory, respectively, and run the `build_samples.sh` script:
 ```sh
 build_samples.sh
 ```

@@ -91,11 +91,11 @@ cd build
 3. Run CMake to generate the Make files for release or debug configuration. For example, for C++ samples:
 - For release configuration:
 ```sh
-cmake -DCMAKE_BUILD_TYPE=Release <INSTALL_DIR>/inference_engine/samples/cpp
+cmake -DCMAKE_BUILD_TYPE=Release <INSTALL_DIR>/samples/cpp
 ```
 - For debug configuration:
 ```sh
-cmake -DCMAKE_BUILD_TYPE=Debug <INSTALL_DIR>/inference_engine/samples/cpp
+cmake -DCMAKE_BUILD_TYPE=Debug <INSTALL_DIR>/samples/cpp
 ```
 4. Run `make` to build the samples:
 ```sh

@@ -114,7 +114,7 @@ The recommended Windows* build environment is the following:

 > **NOTE**: If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14.

-To build the C or C++ sample applications on Windows, go to the `<INSTALL_DIR>\inference_engine\samples\c` or `<INSTALL_DIR>\inference_engine\samples\cpp` directory, respectively, and run the `build_samples_msvc.bat` batch file:
+To build the C or C++ sample applications on Windows, go to the `<INSTALL_DIR>\samples\c` or `<INSTALL_DIR>\samples\cpp` directory, respectively, and run the `build_samples_msvc.bat` batch file:
 ```sh
 build_samples_msvc.bat
 ```

@@ -123,7 +123,7 @@ By default, the script automatically detects the highest Microsoft Visual Studio
 a solution for a sample code. Optionally, you can also specify the preferred Microsoft Visual Studio version to be used by the script. Supported
 versions are `VS2017` and `VS2019`. For example, to build the C++ samples using the Microsoft Visual Studio 2017, use the following command:
 ```sh
-<INSTALL_DIR>\inference_engine\samples\cpp\build_samples_msvc.bat VS2017
+<INSTALL_DIR>\samples\cpp\build_samples_msvc.bat VS2017
 ```

 Once the build is completed, you can find sample binaries in the following folders:

@@ -144,7 +144,7 @@ The officially supported macOS* build environment is the following:

 > **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode).

-To build the C or C++ sample applications for macOS, go to the `<INSTALL_DIR>/inference_engine/samples/c` or `<INSTALL_DIR>/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script:
+To build the C or C++ sample applications for macOS, go to the `<INSTALL_DIR>/samples/c` or `<INSTALL_DIR>/samples/cpp` directory, respectively, and run the `build_samples.sh` script:
 ```sh
 build_samples.sh
 ```

@@ -177,11 +177,11 @@ cd build
 3. Run CMake to generate the Make files for release or debug configuration. For example, for C++ samples:
 - For release configuration:
 ```sh
-cmake -DCMAKE_BUILD_TYPE=Release <INSTALL_DIR>/inference_engine/samples/cpp
+cmake -DCMAKE_BUILD_TYPE=Release <INSTALL_DIR>/samples/cpp
 ```
 - For debug configuration:
 ```sh
-cmake -DCMAKE_BUILD_TYPE=Debug <INSTALL_DIR>/inference_engine/samples/cpp
+cmake -DCMAKE_BUILD_TYPE=Debug <INSTALL_DIR>/samples/cpp
 ```
 4. Run `make` to build the samples:
 ```sh
@@ -199,7 +199,7 @@ Before running compiled binary files, make sure your application can find the
 Inference Engine and OpenCV libraries.
 Run the `setupvars` script to set all necessary environment variables:
 ```sh
-source <INSTALL_DIR>/bin/setupvars.sh
+source <INSTALL_DIR>/setupvars.sh
 ```

 **(Optional)**: The OpenVINO environment variables are removed when you close the

@@ -212,7 +212,7 @@ vi <user_home_directory>/.bashrc

 2. Add this line to the end of the file:
 ```sh
-source /opt/intel/openvino_2021/bin/setupvars.sh
+source /opt/intel/openvino_2022/setupvars.sh
 ```

 3. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key.

@@ -228,7 +228,7 @@ Before running compiled binary files, make sure your application can find the
 Inference Engine and OpenCV libraries.
 Use the `setupvars` script, which sets all necessary environment variables:
 ```sh
-<INSTALL_DIR>\bin\setupvars.bat
+<INSTALL_DIR>\setupvars.bat
 ```

 To debug or run the samples on Windows in Microsoft Visual Studio, make sure you

@@ -240,7 +240,7 @@ For example, for the **Debug** configuration, go to the project's
 variable in the **Environment** field to the following:

 ```sh
-PATH=<INSTALL_DIR>\deployment_tools\inference_engine\bin\intel64\Debug;<INSTALL_DIR>\opencv\bin;%PATH%
+PATH=<INSTALL_DIR>\runtime\bin;<INSTALL_DIR>\opencv\bin;%PATH%
 ```
 where `<INSTALL_DIR>` is the directory in which the OpenVINO toolkit is installed.
@@ -6,11 +6,11 @@ The OpenVINO™ toolkit installation includes the following tools:

 |Tool | Location in the Installation Directory|
 |-----------------------------------------------------------------------------|---------------------------------------|
-|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `<INSTALL_DIR>/deployment_tools/tools/open_model_zoo/tools/accuracy_checker`|
+|[Accuracy Checker Tool](@ref omz_tools_accuracy_checker) | `<INSTALL_DIR>/tools/accuracy_checker`|
-|[Post-Training Optimization Tool](@ref pot_README) | `<INSTALL_DIR>/deployment_tools/tools/post_training_optimization_toolkit`|
+|[Post-Training Optimization Tool](@ref pot_README) | `<INSTALL_DIR>/tools/post_training_optimization_toolkit`|
-|[Model Downloader](@ref omz_tools_downloader) | `<INSTALL_DIR>/deployment_tools/tools/model_downloader`|
+|[Model Downloader](@ref omz_tools_downloader) | `<INSTALL_DIR>/extras/open_model_zoo/tools/downloader`|
-|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `<INSTALL_DIR>/deployment_tools/tools/cross_check_tool`|
+|[Cross Check Tool](../../tools/cross_check_tool/README.md) | `<INSTALL_DIR>/tools/cross_check_tool`|
-|[Compile Tool](../../tools/compile_tool/README.md) | `<INSTALL_DIR>/deployment_tools/inference_engine/lib/intel64/`|
+|[Compile Tool](../../tools/compile_tool/README.md) | `<INSTALL_DIR>/tools/compile_tool`|


 ## See Also

@@ -84,9 +84,9 @@ Make sure those libraries are in your computer's path or in the place you pointe
 * Windows: `PATH`
 * macOS: `DYLD_LIBRARY_PATH`

-On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables.
+On Linux and macOS, use the script `setupvars.sh` to set the environment variables.

-On Windows, run the `bin\setupvars.bat` batch file to set the environment variables.
+On Windows, run the `setupvars.bat` batch file to set the environment variables.

 To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter.
@@ -23,7 +23,7 @@ If transmitting data from one part of a network to another part in heterogeneou
 In this case, you can define heaviest part manually and set the affinity to avoid sending data back and forth many times during one inference.

 ## Annotation of Layers per Device and Default Fallback Policy
-Default fallback policy decides which layer goes to which device automatically according to the support in dedicated plugins (FPGA, GPU, CPU, MYRIAD).
+Default fallback policy decides which layer goes to which device automatically according to the support in dedicated plugins (GPU, CPU, MYRIAD).

 Another way to annotate a network is to set affinity manually using <code>ngraph::Node::get_rt_info</code> with key `"affinity"`:
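(The documentation snippet that normally follows the sentence above is not included in this diff. As a rough sketch only, assuming the ngraph Variant API of this OpenVINO generation and a `function` variable that holds the `ngraph::Function`, manual affinity annotation might look like the following.)

```cpp
#include <memory>
#include <string>
#include <ngraph/ngraph.hpp>
#include <ngraph/variant.hpp>

// Assumes `function` is a std::shared_ptr<ngraph::Function> obtained elsewhere.
void set_affinity_to_cpu(const std::shared_ptr<ngraph::Function>& function) {
    for (const auto& node : function->get_ops()) {
        // "affinity" tells the HETERO plugin which device should run this node.
        node->get_rt_info()["affinity"] =
            std::make_shared<ngraph::VariantWrapper<std::string>>("CPU");
    }
}
```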
@@ -46,25 +46,16 @@ If you rely on the default affinity distribution, you can avoid calling <code>In
 During loading of the network to heterogeneous plugin, network is divided to separate parts and loaded to dedicated plugins.
 Intermediate blobs between these sub graphs are allocated automatically in the most efficient way.

-## Execution Precision
-Precision for inference in heterogeneous plugin is defined by
-* Precision of IR.
-* Ability of final plugins to execute in precision defined in IR
-
-Examples:
-* If you want to execute GPU with CPU fallback with FP16 on GPU, you need to use only FP16 IR.
-* If you want to execute on FPGA with CPU fallback, you can use any precision for IR. The execution on FPGA is defined by bitstream, the execution on CPU happens in FP32.
-
 Samples can be used with the following command:

 ```sh
-./object_detection_sample_ssd -m <path_to_model>/ModelSSD.xml -i <path_to_pictures>/picture.jpg -d HETERO:FPGA,CPU
+./object_detection_sample_ssd -m <path_to_model>/ModelSSD.xml -i <path_to_pictures>/picture.jpg -d HETERO:GPU,CPU
 ```
 where:
 - `HETERO` stands for heterogeneous plugin
-- `FPGA,CPU` points to fallback policy with priority on FPGA and fallback to CPU
+- `GPU,CPU` points to fallback policy with priority on GPU and fallback to CPU

-You can point more than two devices: `-d HETERO:FPGA,GPU,CPU`
+You can point more than two devices: `-d HETERO:GPU,GPU,CPU`

 ## Analyzing Heterogeneous Execution
 After enabling of <code>KEY_HETERO_DUMP_GRAPH_DOT</code> config key, you can dump GraphViz* `.dot` files with annotations of devices per layer.
@@ -10,7 +10,7 @@ dependencies and provide the fastest and easiest way to configure the Model
 Optimizer.

 To configure all three frameworks, go to the
-`<INSTALL_DIR>/deployment_tools/model_optimizer/install_prerequisites`
+`<INSTALL_DIR>/tools/model_optimizer/install_prerequisites`
 directory and run:

 * For Linux\* OS:

@@ -35,7 +35,7 @@ install_prerequisites.bat
 ```

 To configure a specific framework, go to the
-`<INSTALL_DIR>/deployment_tools/model_optimizer/install_prerequisites`
+`<INSTALL_DIR>/tools/model_optimizer/install_prerequisites`
 directory and run:

 * For Caffe\* on Linux:

@@ -101,7 +101,7 @@ framework at a time.

 1. Go to the Model Optimizer directory:
 ```shell
-cd <INSTALL_DIR>/deployment_tools/model_optimizer/
+cd <INSTALL_DIR>/tools/model_optimizer/
 ```
 2. **Strongly recommended for all global Model Optimizer dependency installations**:
 Create and activate a virtual environment. While not required, this step is

@@ -181,7 +181,7 @@ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
 On Windows, pre-built protobuf packages for Python versions 3.4, 3.5, 3.6,
 and 3.7 are provided with the installation package and can be found in
 the
-`<INSTALL_DIR>\deployment_tools\model_optimizer\install_prerequisites`
+`<INSTALL_DIR>\tools\model_optimizer\install_prerequisites`
 folder. Please note that they are not installed with the
 `install_prerequisites.bat` installation script due to possible issues
 with `pip`, and you can install them at your own discretion. Make sure

@@ -198,7 +198,7 @@ To install the protobuf package:
 1. Open the command prompt as administrator.
 2. Go to the `install_prerequisites` folder of the OpenVINO toolkit installation directory:
 ```sh
-cd <INSTALL_DIR>\deployment_tools\model_optimizer\install_prerequisites
+cd <INSTALL_DIR>\tools\model_optimizer\install_prerequisites
 ```

 3. Run the following command to install the protobuf for Python 3.6. If
@@ -28,7 +28,7 @@ For example, to add the description of the `CustomReshape` layer, which is an ar

 2. Generate a new parser:
 ```shell
-cd <INSTALL_DIR>/deployment_tools/model_optimizer/mo/front/caffe/proto
+cd <INSTALL_DIR>/tools/model_optimizer/mo/front/caffe/proto
 python3 generate_caffe_pb2.py --input_proto <PATH_TO_CUSTOM_CAFFE>/src/caffe/proto/caffe.proto
 ```
 where `PATH_TO_CUSTOM_CAFFE` is the path to the root directory of custom Caffe\*.

@@ -66,7 +66,7 @@ The mean file that you provide for the Model Optimizer must be in a `.binaryprot

 #### 7. What does the message "Invalid proto file: there is neither 'layer' nor 'layers' top-level messages" mean? <a name="question-7"></a>

-The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `<INSTALL_DIR>/deployment_tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
+The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `<INSTALL_DIR>/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
 ```
 message NetParameter {
 // ... some other parameters

@@ -81,7 +81,7 @@ This means that any topology should contain layers as top-level structures in `p

 #### 8. What does the message "Old-style inputs (via 'input_dims') are not supported. Please specify inputs via 'input_shape'" mean? <a name="question-8"></a>

-The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `<INSTALL_DIR>/deployment_tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
+The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `<INSTALL_DIR>/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
 ```sh
 message NetParameter {
@@ -67,6 +67,7 @@ Standard MXNet\* symbols:
 | _minus_scalar | No |
 | _mul_scalar | No |
 | _plus_scalar | No |
+| _random_uniform | Operation provides sequence from uniform distribution, but exact values won't match. |
 | _rnn_param_concat | No |
 | _arange | No |
 | _contrib_AdaptiveAvgPooling2D | Converted to the Average Pooling with fixed paddings |

@@ -272,6 +273,8 @@ Standard TensorFlow\* operations:
 | PlaceholderWithDefault | No |
 | Prod | No |
 | QueueDequeueUpToV2 | Supported only when it is part of a sub-graph of the special form |
+| RandomUniform | No |
+| RandomUniformInt | No |
 | Range | No |
 | Rank | No |
 | RealDiv | No |

@@ -568,6 +571,7 @@ Standard ONNX\* operators:
 | RNN | No |
 | ROIAlign | No |
 | Range | No |
+| RandomUniform | Operation provides sequence from uniform distribution, but exact values won't match. |
 | Reciprocal | No |
 | ReduceL1 | No |
 | ReduceL2 | No |
@@ -38,7 +38,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit

 To convert a Caffe\* model:

-1. Go to the `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer` directory.
+1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory.
 2. Use the `mo.py` script to simply convert a model, specifying the path to the input model `.caffemodel` file and the path to an output directory with write permissions:
 ```sh
 python3 mo.py --input_model <INPUT_MODEL>.caffemodel --output_dir <OUTPUT_MODEL_DIR>

@@ -33,7 +33,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit

 To convert a Kaldi\* model:

-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory.
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
 2. Use the `mo.py` script to simply convert a model with the path to the input model `.nnet` or `.mdl` file and to an output directory where you have write permissions:
 ```sh
 python3 mo.py --input_model <INPUT_MODEL>.nnet --output_dir <OUTPUT_MODEL_DIR>

@@ -43,7 +43,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit

 To convert an MXNet\* model:

-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory.
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
 2. To convert an MXNet\* model contained in a `model-file-symbol.json` and `model-file-0000.params`, run the Model Optimizer launch script `mo.py`, specifying a path to the input model file and a path to an output directory with write permissions:
 ```sh
 python3 mo_mxnet.py --input_model model-file-0000.params --output_dir <OUTPUT_MODEL_DIR>

@@ -59,7 +59,7 @@ The Model Optimizer process assumes you have an ONNX model that was directly dow

 To convert an ONNX\* model:

-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory.
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
 2. Use the `mo.py` script to simply convert a model with the path to the input model `.nnet` file and an output directory where you have write permissions:
 ```sh
 python3 mo.py --input_model <INPUT_MODEL>.onnx --output_dir <OUTPUT_MODEL_DIR>

@@ -29,7 +29,7 @@ A summary of the steps for optimizing and deploying a model that was trained wit

 To convert a Paddle\* model:

-1. Go to the `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer` directory.
+1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory.
 2. Use the `mo.py` script to simply convert a model, specifying the framework, the path to the input model `.pdmodel` file and the path to an output directory with write permissions:
 ```sh
 python3 mo.py --input_model <INPUT_MODEL>.pdmodel --output_dir <OUTPUT_MODEL_DIR> --framework=paddle
@@ -178,7 +178,7 @@ There are three ways to store non-frozen TensorFlow models and load them to the

 To convert such a TensorFlow model:

-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
 2. Run the `mo_tf.py` script with the path to the checkpoint file to convert a model and an output directory where you have write permissions:

 * If input model is in `.pb` format:<br>

@@ -200,7 +200,7 @@ python3 mo_tf.py --input_model <INFERENCE_GRAPH>.pbtxt --input_checkpoint <INPUT

 To convert such TensorFlow model:

-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
 2. Run the `mo_tf.py` script with a path to the MetaGraph `.meta` file and a writable output directory to convert a model:<br>
 ```sh
 python3 mo_tf.py --input_meta_graph <INPUT_META_GRAPH>.meta --output_dir <OUTPUT_MODEL_DIR>

@@ -212,7 +212,7 @@ python3 mo_tf.py --input_meta_graph <INPUT_META_GRAPH>.meta --output_dir <OUTPUT

 To convert such TensorFlow model:

-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory to convert a model:<br>
 ```sh
 python3 mo_tf.py --saved_model_dir <SAVED_MODEL_DIRECTORY> --output_dir <OUTPUT_MODEL_DIR>

@@ -251,7 +251,7 @@ Where:

 To convert a TensorFlow model:

-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
 2. Use the `mo_tf.py` script to simply convert a model with the path to the input model `.pb` file and a writable output directory:
 ```sh
 python3 mo_tf.py --input_model <INPUT_MODEL>.pb --output_dir <OUTPUT_MODEL_DIR>

@@ -342,7 +342,7 @@ Below are the instructions on how to convert each of them.

 A model in the SavedModel format consists of a directory with a `saved_model.pb` file and two subfolders: `variables` and `assets`.
 To convert such a model:
-1. Go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory.
+1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory:
 ```sh
 python3 mo_tf.py --saved_model_dir <SAVED_MODEL_DIRECTORY> --output_dir <OUTPUT_MODEL_DIR>
@ -1,6 +1,6 @@
|
|||||||
# Converting a Model to Intermediate Representation (IR) {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model}
|
# Converting a Model to Intermediate Representation (IR) {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model}
|
||||||
|
|
||||||
Use the <code>mo.py</code> script from the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR):
|
Use the <code>mo.py</code> script from the `<INSTALL_DIR>/tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR):
|
||||||
```sh
|
```sh
|
||||||
python3 mo.py --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
|
python3 mo.py --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
|
||||||
```
|
```
|
||||||
|
@ -5,7 +5,7 @@ To simply convert a model trained by any supported framework, run the Model Opti
|
|||||||
python3 mo.py --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
|
python3 mo.py --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
|
||||||
```
|
```
|
||||||
|
|
||||||
The script is in `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/`. You must have write permissions for the output directory: either run `mo.py` from a directory where you can write, or specify an output path with the `--output_dir` option.
|
The script is in `$INTEL_OPENVINO_DIR/tools/model_optimizer/`. You must have write permissions for the output directory: either run `mo.py` from a directory where you can write, or specify an output path with the `--output_dir` option.
|
||||||
|
|
||||||
> **NOTE:** The color channel order (RGB or BGR) of the input data should match the channel order of the model training dataset. If they are different, perform the `RGB<->BGR` conversion by specifying the command-line parameter `--reverse_input_channels`. Otherwise, inference results may be incorrect. For details, refer to [When to Reverse Input Channels](#when_to_reverse_input_channels).
|
> **NOTE:** The color channel order (RGB or BGR) of the input data should match the channel order of the model training dataset. If they are different, perform the `RGB<->BGR` conversion by specifying the command-line parameter `--reverse_input_channels`. Otherwise, inference results may be incorrect. For details, refer to [When to Reverse Input Channels](#when_to_reverse_input_channels).
|
||||||
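For illustration only, a minimal conversion command that applies this flag might look like the sketch below; `<INPUT_MODEL>` and `<OUTPUT_MODEL_DIR>` are placeholders:
```sh
# Sketch: convert a model whose training data used the opposite channel order
# to the images you will feed at inference time.
python3 mo.py --input_model <INPUT_MODEL> --reverse_input_channels --output_dir <OUTPUT_MODEL_DIR>
```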
|
|
||||||
|
@ -39,7 +39,7 @@ In the TensorBoard, it looks the following way together with some predecessors:
|
|||||||
|
|
||||||
Convert this model and put the results in a writable output directory:
|
Convert this model and put the results in a writable output directory:
|
||||||
```sh
|
```sh
|
||||||
cd ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer
|
cd ${INTEL_OPENVINO_DIR}/tools/model_optimizer
|
||||||
python3 mo.py --input_model inception_v1.pb -b 1 --output_dir <OUTPUT_MODEL_DIR>
|
python3 mo.py --input_model inception_v1.pb -b 1 --output_dir <OUTPUT_MODEL_DIR>
|
||||||
```
|
```
|
||||||
(The other examples on this page assume that you first cd to the `model_optimizer` directory and add the `--output_dir` argument with a directory where you have write permissions.)
|
(The other examples on this page assume that you first cd to the `model_optimizer` directory and add the `--output_dir` argument with a directory where you have write permissions.)
|
||||||
|
@ -47,9 +47,9 @@ As a result the frozen model file `savedmodeldir/efficientdet-d4_frozen.pb` will
|
|||||||
|
|
||||||
To generate the IR of the EfficientDet TensorFlow model, run:<br>
|
To generate the IR of the EfficientDet TensorFlow model, run:<br>
|
||||||
```sh
|
```sh
|
||||||
python3 $INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/mo.py \
|
python3 $INTEL_OPENVINO_DIR/tools/model_optimizer/mo.py \
|
||||||
--input_model savedmodeldir/efficientdet-d4_frozen.pb \
|
--input_model savedmodeldir/efficientdet-d4_frozen.pb \
|
||||||
--transformations_config $INTEL_OPENVINO_DIR/deployment_tools/model_optimizer/extensions/front/tf/automl_efficientdet.json \
|
--transformations_config $INTEL_OPENVINO_DIR/tools/model_optimizer/extensions/front/tf/automl_efficientdet.json \
|
||||||
--input_shape [1,$IMAGE_SIZE,$IMAGE_SIZE,3] \
|
--input_shape [1,$IMAGE_SIZE,$IMAGE_SIZE,3] \
|
||||||
--reverse_input_channels
|
--reverse_input_channels
|
||||||
```
|
```
|
||||||
|
@ -11,10 +11,10 @@ You can download TensorFlow\* Object Detection API models from the <a href="http
|
|||||||
|
|
||||||
<strong>NOTE</strong>: Before converting, make sure you have configured the Model Optimizer. For configuration steps, refer to [Configuring the Model Optimizer](../../Config_Model_Optimizer.md).
|
<strong>NOTE</strong>: Before converting, make sure you have configured the Model Optimizer. For configuration steps, refer to [Configuring the Model Optimizer](../../Config_Model_Optimizer.md).
|
||||||
|
|
||||||
To convert a TensorFlow\* Object Detection API model, go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory and run the `mo_tf.py` script with the following required parameters:
|
To convert a TensorFlow\* Object Detection API model, go to the `<INSTALL_DIR>/tools/model_optimizer` directory and run the `mo_tf.py` script with the following required parameters:
|
||||||
|
|
||||||
* `--input_model <path_to_frozen.pb>` --- File with a pre-trained model (binary or text .pb file after freezing) OR `--saved_model_dir <path_to_saved_model>` for the TensorFlow\* 2 models
|
* `--input_model <path_to_frozen.pb>` --- File with a pre-trained model (binary or text .pb file after freezing) OR `--saved_model_dir <path_to_saved_model>` for the TensorFlow\* 2 models
|
||||||
* `--transformations_config <path_to_subgraph_replacement_configuration_file.json>` --- A subgraph replacement configuration file with transformations description. For the models downloaded from the TensorFlow\* Object Detection API zoo, you can find the configuration files in the `<INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf` directory. Use:
|
* `--transformations_config <path_to_subgraph_replacement_configuration_file.json>` --- A subgraph replacement configuration file with transformations description. For the models downloaded from the TensorFlow\* Object Detection API zoo, you can find the configuration files in the `<INSTALL_DIR>/tools/model_optimizer/extensions/front/tf` directory. Use:
|
||||||
* `ssd_v2_support.json` --- for frozen SSD topologies from the models zoo version up to 1.13.X inclusively
|
* `ssd_v2_support.json` --- for frozen SSD topologies from the models zoo version up to 1.13.X inclusively
|
||||||
* `ssd_support_api_v.1.14.json` --- for SSD topologies trained using the TensorFlow\* Object Detection API version 1.14 up to 1.14.X inclusively
|
* `ssd_support_api_v.1.14.json` --- for SSD topologies trained using the TensorFlow\* Object Detection API version 1.14 up to 1.14.X inclusively
|
||||||
* `ssd_support_api_v.1.15.json` --- for SSD topologies trained using the TensorFlow\* Object Detection API version 1.15 up to 2.0
|
* `ssd_support_api_v.1.15.json` --- for SSD topologies trained using the TensorFlow\* Object Detection API version 1.15 up to 2.0
|
||||||
@ -52,7 +52,7 @@ Additionally to the mandatory parameters listed above you can use optional conve
|
|||||||
For example, if you downloaded the [pre-trained SSD InceptionV2 topology](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz) and extracted the archive to the directory `/tmp/ssd_inception_v2_coco_2018_01_28`, the sample command line to convert the model looks as follows:
|
For example, if you downloaded the [pre-trained SSD InceptionV2 topology](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz) and extracted the archive to the directory `/tmp/ssd_inception_v2_coco_2018_01_28`, the sample command line to convert the model looks as follows:
|
||||||
|
|
||||||
```
|
```
|
||||||
<INSTALL_DIR>/deployment_tools/model_optimizer/mo_tf.py --input_model=/tmp/ssd_inception_v2_coco_2018_01_28/frozen_inference_graph.pb --transformations_config <INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf/ssd_v2_support.json --tensorflow_object_detection_api_pipeline_config /tmp/ssd_inception_v2_coco_2018_01_28/pipeline.config --reverse_input_channels
|
<INSTALL_DIR>/tools/model_optimizer/mo_tf.py --input_model=/tmp/ssd_inception_v2_coco_2018_01_28/frozen_inference_graph.pb --transformations_config <INSTALL_DIR>/tools/model_optimizer/extensions/front/tf/ssd_v2_support.json --tensorflow_object_detection_api_pipeline_config /tmp/ssd_inception_v2_coco_2018_01_28/pipeline.config --reverse_input_channels
|
||||||
```
|
```
|
||||||
|
|
||||||
## Important Notes About Feeding Input Images to the Samples
|
## Important Notes About Feeding Input Images to the Samples
|
||||||
@ -128,4 +128,4 @@ It is also important to open the model in the [TensorBoard](https://www.tensorfl
|
|||||||
* `--input_model <path_to_frozen.pb>` --- Path to the frozen model
|
* `--input_model <path_to_frozen.pb>` --- Path to the frozen model
|
||||||
* `--tensorboard_logdir` --- Path to the directory where TensorBoard looks for the event files.
|
* `--tensorboard_logdir` --- Path to the directory where TensorBoard looks for the event files.
|
||||||
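For illustration only (paths are placeholders), the option is typically combined with the usual conversion flags, for example:
```sh
# Sketch: convert the frozen graph while telling the Model Optimizer where the
# TensorBoard event files are located.
python3 mo_tf.py --input_model <path_to_frozen.pb> --tensorboard_logdir <path_to_event_files> --output_dir <OUTPUT_MODEL_DIR>
```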
|
|
||||||
Implementation of the transformations for Object Detection API models is located in the file `<INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf/ObjectDetectionAPI.py`. Refer to the code in this file to understand the details of the conversion process.
|
Implementation of the transformations for Object Detection API models is located in the file `<INSTALL_DIR>/tools/model_optimizer/extensions/front/tf/ObjectDetectionAPI.py`. Refer to the code in this file to understand the details of the conversion process.
|
||||||
|
@ -92,7 +92,7 @@ python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weig
|
|||||||
|
|
||||||
### Convert YOLOv3 TensorFlow Model to IR
|
### Convert YOLOv3 TensorFlow Model to IR
|
||||||
|
|
||||||
To solve the problems explained in the <a href="#yolov3-overview">YOLOv3 architecture overview</a> section, use the `yolo_v3.json` or `yolo_v3_tiny.json` (depending on the model) configuration file with custom operations located in the `<OPENVINO_INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf` directory.
|
To solve the problems explained in the <a href="#yolov3-overview">YOLOv3 architecture overview</a> section, use the `yolo_v3.json` or `yolo_v3_tiny.json` (depending on the model) configuration file with custom operations located in the `<OPENVINO_INSTALL_DIR>/tools/model_optimizer/extensions/front/tf` directory.
|
||||||
|
|
||||||
It consists of several attributes:<br>
|
It consists of several attributes:<br>
|
||||||
```sh
|
```sh
|
||||||
@ -206,7 +206,7 @@ Converted TensorFlow YOLO model is missing `Region` layer and its parameters. Or
|
|||||||
file under the `[region]` title.
|
file under the `[region]` title.
|
||||||
|
|
||||||
To recreate the original model structure, use the corresponding yolo `.json` configuration file with custom operations and `Region` layer
|
To recreate the original model structure, use the corresponding yolo `.json` configuration file with custom operations and `Region` layer
|
||||||
parameters when converting the model to the IR. This file is located in the `<OPENVINO_INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf` directory.
|
parameters when converting the model to the IR. This file is located in the `<OPENVINO_INSTALL_DIR>/tools/model_optimizer/extensions/front/tf` directory.
|
||||||
|
|
||||||
If the chosen model has specific values of these parameters,
|
If the chosen model has specific values of these parameters,
|
||||||
create another configuration file with custom operations and use it for conversion.
|
create another configuration file with custom operations and use it for conversion.
|
||||||
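One possible workflow, sketched below with placeholder paths and the hypothetical file name `my_yolo_v3.json`, is to copy the stock configuration file, edit its attribute values to match your model, and pass the edited file to the Model Optimizer:
```sh
# Sketch: start from the shipped YOLOv3 configuration, adjust the attributes
# in the copy to match your model, then convert with it.
cp <OPENVINO_INSTALL_DIR>/tools/model_optimizer/extensions/front/tf/yolo_v3.json my_yolo_v3.json
# ... edit my_yolo_v3.json ...
python3 ./mo_tf.py --input_model <path_to_model>/<model_name>.pb --batch 1 --scale 255 --transformations_config my_yolo_v3.json
```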
@ -217,7 +217,7 @@ python3 ./mo_tf.py
|
|||||||
--input_model <path_to_model>/<model_name>.pb \
|
--input_model <path_to_model>/<model_name>.pb \
|
||||||
--batch 1 \
|
--batch 1 \
|
||||||
--scale 255 \
|
--scale 255 \
|
||||||
--transformations_config <OPENVINO_INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf/<yolo_config>.json
|
--transformations_config <OPENVINO_INSTALL_DIR>/tools/model_optimizer/extensions/front/tf/<yolo_config>.json
|
||||||
```
|
```
|
||||||
where:
|
where:
|
||||||
|
|
||||||
|
@ -35,7 +35,7 @@ operation.
|
|||||||
|
|
||||||
Here is a simplified example of the extractor for the custom operation Proposal from the Faster R-CNN model mentioned above.
|
Here is a simplified example of the extractor for the custom operation Proposal from the Faster R-CNN model mentioned above.
|
||||||
The full code with additional checks is provided in the
|
The full code with additional checks is provided in the
|
||||||
`<INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py`. The sample code uses
|
`<INSTALL_DIR>/tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py`. The sample code uses
|
||||||
operation `ProposalOp` which corresponds to `Proposal` operation described in the [Available Operations Sets](../../../ops/opset.md)
|
operation `ProposalOp` which corresponds to `Proposal` operation described in the [Available Operations Sets](../../../ops/opset.md)
|
||||||
document. Refer to the source code below for a detailed explanation of the extractor.
|
document. Refer to the source code below for a detailed explanation of the extractor.
|
||||||
|
|
||||||
|
@ -27,11 +27,9 @@ limitations under the License.
|
|||||||
<tab type="usergroup" title="Installation Guides" url=""><!--automatically generated-->
|
<tab type="usergroup" title="Installation Guides" url=""><!--automatically generated-->
|
||||||
<tab type="usergroup" title="Linux" url="@ref openvino_docs_install_guides_installing_openvino_linux">
|
<tab type="usergroup" title="Linux" url="@ref openvino_docs_install_guides_installing_openvino_linux">
|
||||||
<tab type="user" title="Install Intel® Distribution of OpenVINO™ toolkit for Linux* OS" url="@ref openvino_docs_install_guides_installing_openvino_linux"/>
|
<tab type="user" title="Install Intel® Distribution of OpenVINO™ toolkit for Linux* OS" url="@ref openvino_docs_install_guides_installing_openvino_linux"/>
|
||||||
<tab type="user" title="[DEPRECATED] Install Intel® Distribution of OpenVINO™ toolkit for Linux with FPGA Support" url="@ref openvino_docs_install_guides_installing_openvino_linux_fpga"/>
|
|
||||||
</tab>
|
</tab>
|
||||||
<tab type="usergroup" title="Windows" url="@ref openvino_docs_install_guides_installing_openvino_windows">
|
<tab type="usergroup" title="Windows" url="@ref openvino_docs_install_guides_installing_openvino_windows">
|
||||||
<tab type="user" title="Install Intel® Distribution of OpenVINO™ toolkit for Windows* 10" url="@ref openvino_docs_install_guides_installing_openvino_windows"/>
|
<tab type="user" title="Install Intel® Distribution of OpenVINO™ toolkit for Windows* 10" url="@ref openvino_docs_install_guides_installing_openvino_windows"/>
|
||||||
<tab type="user" title="[DEPRECATED] Install Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA support" url="@ref openvino_docs_install_guides_installing_openvino_windows_fpga"/>
|
|
||||||
</tab>
|
</tab>
|
||||||
<tab type="user" title="macOS" url="@ref openvino_docs_install_guides_installing_openvino_macos"/>
|
<tab type="user" title="macOS" url="@ref openvino_docs_install_guides_installing_openvino_macos"/>
|
||||||
<tab type="user" title="Raspbian OS" url="@ref openvino_docs_install_guides_installing_openvino_raspbian"/>
|
<tab type="user" title="Raspbian OS" url="@ref openvino_docs_install_guides_installing_openvino_raspbian"/>
|
||||||
|
@ -4,7 +4,7 @@ The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models o
|
|||||||
|
|
||||||
In this guide, you will:
|
In this guide, you will:
|
||||||
* Learn the OpenVINO™ inference workflow.
|
* Learn the OpenVINO™ inference workflow.
|
||||||
* Run demo scripts that perform the steps for you. These demo scripts illustrate the workflow.
|
* Run sample scripts that perform the steps for you. These sample scripts illustrate the workflow.
|
||||||
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application.
|
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application.
|
||||||
|
|
||||||
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
|
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
|
||||||
@ -13,8 +13,8 @@ The toolkit consists of three primary components:
|
|||||||
* **Model Optimizer:** Optimizes models for Intel® architecture, converting models into a format compatible with the Inference Engine. This format is called an Intermediate Representation (IR).
|
* **Model Optimizer:** Optimizes models for Intel® architecture, converting models into a format compatible with the Inference Engine. This format is called an Intermediate Representation (IR).
|
||||||
* **Intermediate Representation (IR):** The Model Optimizer output. A model converted to a format that has been optimized for Intel® architecture and is usable by the Inference Engine.
|
* **Intermediate Representation (IR):** The Model Optimizer output. A model converted to a format that has been optimized for Intel® architecture and is usable by the Inference Engine.
|
||||||
|
|
||||||
In addition, demo scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
|
In addition, sample scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
|
||||||
* **Demo Scripts** - Shell scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
|
* **Sample Scripts** - Shell scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
|
||||||
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
|
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
|
||||||
* Utilize specific OpenVINO capabilities in an application
|
* Utilize specific OpenVINO capabilities in an application
|
||||||
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
|
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
|
||||||
@ -27,18 +27,18 @@ By default, the Intel® Distribution of OpenVINO™ is installed to the followin
|
|||||||
* For root or administrator: `/opt/intel/openvino_<version>/`
|
* For root or administrator: `/opt/intel/openvino_<version>/`
|
||||||
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
||||||
|
|
||||||
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2021/`
|
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2022/`
|
||||||
|
|
||||||
If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home/<USER>/` with the directory in which you installed the software.
|
If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home/<USER>/` with the directory in which you installed the software.
|
||||||
|
|
||||||
The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2021/deployment_tools` directory.
|
The primary tools for deploying your models and applications are installed to the `/opt/intel/openvino_2022/tools` directory.
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for the Intel® Distribution of OpenVINO™ toolkit directory structure</strong></summary>
|
<summary><strong>Click for the Intel® Distribution of OpenVINO™ toolkit directory structure</strong></summary>
|
||||||
|
|
||||||
|
|
||||||
| Directory | Description |
|
| Directory | Description |
|
||||||
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
||||||
| `demo/` | Demo scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Demo Scripts](#use-openvino-demo-scripts) section.|
|
| `demo/` | Sample scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Sample Scripts](#use-openvino-sample-scripts) section.|
|
||||||
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
||||||
| `~intel_models/` | Symbolic link to the `intel_models` subfolder of the `open_model_zoo` folder |
|
| `~intel_models/` | Symbolic link to the `intel_models` subfolder of the `open_model_zoo` folder |
|
||||||
| `include/` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
|
| `include/` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
|
||||||
@ -61,29 +61,29 @@ The simplified OpenVINO™ workflow is:
|
|||||||
2. **Run the trained model through the Model Optimizer** to convert the model to an Intermediate Representation, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
|
2. **Run the trained model through the Model Optimizer** to convert the model to an Intermediate Representation, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
|
||||||
3. **Use the Inference Engine API in the application** to run inference against the Intermediate Representation (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
|
3. **Use the Inference Engine API in the application** to run inference against the Intermediate Representation (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
|
||||||
|
|
||||||
## Use the Demo Scripts to Learn the Workflow
|
## Use the Sample Scripts to Learn the Workflow
|
||||||
|
|
||||||
The demo scripts in `/opt/intel/openvino_2021/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to:
|
The sample scripts in `/opt/intel/openvino_2022/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to:
|
||||||
* Compile several samples from the source files delivered as part of the OpenVINO toolkit.
|
* Compile several samples from the source files delivered as part of the OpenVINO toolkit.
|
||||||
* Download trained models.
|
* Download trained models.
|
||||||
* Perform pipeline steps and see the output on the console.
|
* Perform pipeline steps and see the output on the console.
|
||||||
|
|
||||||
> **NOTE**: You must have Internet access to run the demo scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
|
> **NOTE**: You must have Internet access to run the sample scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
|
||||||
|
|
||||||
The demo scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
|
The sample scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./<script_name> -d [CPU, GPU, MYRIAD, HDDL]
|
./<script_name> -d [CPU, GPU, MYRIAD, HDDL]
|
||||||
```
|
```
|
||||||
|
|
||||||
Before running the demo applications on Intel® Processor Graphics or on an Intel® Neural Compute Stick 2 device, you must complete the additional configuration steps. For details, see:
|
Before running the sample or demo applications on Intel® Processor Graphics or on an Intel® Neural Compute Stick 2 device, you must complete the additional configuration steps. For details, see:
|
||||||
* Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md)
|
* Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md)
|
||||||
* Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
* Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
||||||
|
|
||||||
The following paragraphs describe each demo script.
|
The following paragraphs describe each sample script.
|
||||||
|
|
||||||
### Image Classification Demo Script
|
### Image Classification Sample Script
|
||||||
The `demo_squeezenet_download_convert_run` script illustrates the image classification pipeline.
|
The `run_sample_squeezenet` script illustrates the image classification pipeline.
|
||||||
|
|
||||||
The script:
|
The script:
|
||||||
1. Downloads a SqueezeNet model.
|
1. Downloads a SqueezeNet model.
|
||||||
@ -92,19 +92,19 @@ The script:
|
|||||||
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
|
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of running the Image Classification demo script</strong></summary>
|
<summary><strong>Click for an example of running the Image Classification sample script</strong></summary>
|
||||||
|
|
||||||
To preview the image that the script will classify:
|
To preview the image that the script will classify:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd ${INTEL_OPENVINO_DIR}/deployment_tools/demo
|
cd ${INTEL_OPENVINO_DIR}/samples/scripts
|
||||||
eog car.png
|
eog car.png
|
||||||
```
|
```
|
||||||
|
|
||||||
To run the script to perform inference on a CPU:
|
To run the script to perform inference on a CPU:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./demo_squeezenet_download_convert_run.sh
|
./run_sample_squeezenet.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
When the script completes, you see the label and confidence for the top-10 categories:
|
When the script completes, you see the label and confidence for the top-10 categories:
|
||||||
@ -134,56 +134,27 @@ Average running time of one iteration: 2.6642941 ms
|
|||||||
|
|
||||||
Throughput: 375.3339402 FPS
|
Throughput: 375.3339402 FPS
|
||||||
|
|
||||||
[ INFO ] Execution successful
|
[ INFO ] Classification sample execution successful
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### Inference Pipeline Demo Script
|
### Benchmark Sample Script
|
||||||
The `demo_security_barrier_camera` uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute.
|
The `run_sample_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
|
||||||
|
|
||||||
The script:
|
The script:
|
||||||
1. Downloads three pre-trained model IRs.
|
|
||||||
2. Builds the Security Barrier Camera Demo application.
|
|
||||||
3. Runs the application with the downloaded models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline.
|
|
||||||
|
|
||||||
This application:
|
|
||||||
|
|
||||||
1. Identifies an object as a vehicle.
|
|
||||||
2. Uses the vehicle identification as input to the second model, which identifies specific vehicle attributes, including the license plate.
|
|
||||||
3. Uses the license plate as input to the third model, which recognizes specific characters in the license plate.
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for an example of Running the Pipeline demo script</strong></summary>
|
|
||||||
|
|
||||||
To run the script performing inference on Intel® Processor Graphics:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./demo_security_barrier_camera.sh -d GPU
|
|
||||||
```
|
|
||||||
|
|
||||||
When the verification script completes, you see an image that displays the resulting frame with detections rendered as bounding boxes, and text:
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### Benchmark Demo Script
|
|
||||||
The `demo_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
|
|
||||||
|
|
||||||
The script:
|
|
||||||
1. Downloads a SqueezeNet model.
|
1. Downloads a SqueezeNet model.
|
||||||
2. Runs the Model Optimizer to convert the model to the IR.
|
2. Runs the Model Optimizer to convert the model to the IR.
|
||||||
3. Builds the Inference Engine Benchmark tool.
|
3. Builds the Inference Engine Benchmark sample.
|
||||||
4. Runs the tool with the `car.png` image located in the `demo` directory.
|
4. Runs the tool with the `car.png` image located in the `demo` directory.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of running the Benchmark demo script</strong></summary>
|
<summary><strong>Click for an example of running the Benchmark sample script</strong></summary>
|
||||||
|
|
||||||
To run the script that performs inference (runs on CPU by default):
|
To run the script that performs inference (runs on CPU by default):
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./demo_benchmark_app.sh
|
./run_sample_benchmark_app.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
|
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
|
||||||
@ -191,9 +162,9 @@ When the verification script completes, you see the performance counters, result
|
|||||||
|
|
||||||
## <a name="using-sample-application"></a>Use Code Samples and Demo Applications to Learn the Workflow
|
## <a name="using-sample-application"></a>Use Code Samples and Demo Applications to Learn the Workflow
|
||||||
|
|
||||||
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
|
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
|
||||||
|
|
||||||
You will perform the following steps:
|
You will perform the following steps:
|
||||||
|
|
||||||
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
|
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
|
||||||
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
|
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
|
||||||
@ -215,7 +186,7 @@ Inputs you'll need to specify:
|
|||||||
|
|
||||||
### Build the Code Samples and Demo Applications
|
### Build the Code Samples and Demo Applications
|
||||||
|
|
||||||
To perform sample inference, run the Image Classification code sample and Security Barrier Camera demo application that were automatically compiled when you ran the Image Classification and Inference Pipeline demo scripts. The binary files are in the `~/inference_engine_cpp_samples_build/intel64/Release` and `~/inference_engine_demos_build/intel64/Release` directories, respectively.
|
The Image Classification Sample was automatically compiled when you ran the Image Classification sample script. The binary file is in the `~/inference_engine_cpp_samples_build/intel64/Release` directory.
|
||||||
|
|
||||||
To run other sample code or demo applications, build them from the source files delivered as part of the OpenVINO toolkit. To learn how to build these, see the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
|
To run other sample code or demo applications, build them from the source files delivered as part of the OpenVINO toolkit. To learn how to build these, see the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
|
||||||
|
|
||||||
@ -235,7 +206,7 @@ This guide uses the Model Downloader to get pre-trained models. You can use one
|
|||||||
|
|
||||||
* **List the models available in the downloader**:
|
* **List the models available in the downloader**:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/
|
cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
python3 info_dumper.py --print_all
|
python3 info_dumper.py --print_all
|
||||||
@ -252,14 +223,11 @@ sudo python3 ./downloader.py --name <model_name> --output_dir <models_dir>
|
|||||||
```
|
```
|
||||||
> **NOTE:** Always run the downloader with `sudo`.
|
> **NOTE:** Always run the downloader with `sudo`.
|
||||||
|
|
||||||
Download the following models if you want to run the Image Classification Sample and Security Barrier Camera Demo application:
|
Download the following models if you want to run the Image Classification Sample:
|
||||||
|
|
||||||
|Model Name | Code Sample or Demo App |
|
|Model Name | Code Sample |
|
||||||
|-----------------------------------------------|-----------------------------------------------------|
|
|-----------------------------------------------|-----------------------------------------------------|
|
||||||
|`squeezenet1.1` | Image Classification Sample |
|
|`squeezenet1.1` | Image Classification Sample |
|
||||||
|`vehicle-license-plate-detection-barrier-0106` | Security Barrier Camera Demo application |
|
|
||||||
|`vehicle-attributes-recognition-barrier-0039` | Security Barrier Camera Demo application |
|
|
||||||
|`license-plate-recognition-barrier-0001` | Security Barrier Camera Demo application |
|
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of downloading the SqueezeNet Caffe* model</strong></summary>
|
<summary><strong>Click for an example of downloading the SqueezeNet Caffe* model</strong></summary>
|
||||||
@ -285,36 +253,6 @@ Your screen looks similar to this after the download:
|
|||||||
```
|
```
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for an example of downloading models for the Security Barrier Camera Demo application</strong></summary>
|
|
||||||
|
|
||||||
To download all three pre-trained models in FP16 precision to the `~/models` folder:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./downloader.py --name vehicle-license-plate-detection-barrier-0106,vehicle-attributes-recognition-barrier-0039,license-plate-recognition-barrier-0001 --output_dir ~/models --precisions FP16
|
|
||||||
```
|
|
||||||
Your screen looks similar to this after the download:
|
|
||||||
```
|
|
||||||
################|| Downloading models ||################
|
|
||||||
|
|
||||||
========== Downloading /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml
|
|
||||||
... 100%, 204 KB, 183949 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.bin
|
|
||||||
... 100%, 1256 KB, 3948 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml
|
|
||||||
... 100%, 32 KB, 133398 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.bin
|
|
||||||
... 100%, 1222 KB, 3167 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml
|
|
||||||
... 100%, 47 KB, 85357 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.bin
|
|
||||||
... 100%, 2378 KB, 5333 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
################|| Post-processing ||################
|
################|| Post-processing ||################
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -330,16 +268,15 @@ Models in the Intermediate Representation format always include a pair of `.xml`
|
|||||||
|
|
||||||
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the <a href="#download-models">Download Models</a> section for an example of how to download this model.
|
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the <a href="#download-models">Download Models</a> section for an example of how to download this model.
|
||||||
|
|
||||||
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
|
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
|
||||||
The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognition-barrier-0039`, `license-plate-recognition-barrier-0001` models are downloaded in the Intermediate Representation format. You don't need to use the Model Optimizer to convert these models.
|
|
||||||
|
|
||||||
1. Create an `<ir_dir>` directory to contain the model's Intermediate Representation (IR).
|
1. Create an `<ir_dir>` directory to contain the model's Intermediate Representation (IR).
|
||||||
|
|
||||||
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
|
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
|
||||||
|
|
||||||
3. Run the Model Optimizer script:
|
3. Run the Model Optimizer script:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer
|
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
python3 ./mo.py --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
python3 ./mo.py --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||||
@ -352,7 +289,7 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit
|
|||||||
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves it to the `~/models/public/squeezenet1.1/ir` output directory:
|
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves it to the `~/models/public/squeezenet1.1/ir` output directory:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer
|
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
||||||
@ -360,9 +297,9 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP
|
|||||||
|
|
||||||
After the Model Optimizer script completes, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
|
After the Model Optimizer script completes, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
|
||||||
|
|
||||||
Copy the `squeezenet1.1.labels` file from `/opt/intel/openvino_2021/deployment_tools/demo/` to `<ir_dir>`. This file contains the class names that ImageNet uses, so the inference results show text labels instead of classification numbers:
|
Copy the `squeezenet1.1.labels` file from `/opt/intel/openvino_2022/samples/scripts/` to `<ir_dir>`. This file contains the class names that ImageNet uses, so the inference results show text labels instead of classification numbers:
|
||||||
```sh
|
```sh
|
||||||
cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels <ir_dir>
|
cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels <ir_dir>
|
||||||
```
|
```
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@ -373,22 +310,22 @@ Many sources are available from which you can download video media to use the co
|
|||||||
- https://images.google.com
|
- https://images.google.com
|
||||||
|
|
||||||
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
|
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
|
||||||
* `/opt/intel/openvino_2021/deployment_tools/demo/car.png`
|
* `/opt/intel/openvino_2022/samples/scripts/car.png`
|
||||||
* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp`
|
* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp`
|
||||||
|
|
||||||
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
|
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
|
||||||
|
|
||||||
> **NOTE**: The Image Classification code sample was automatically compiled when you ran the Image Classification demo script. If you want to compile it manually, see the *Build the Sample Applications on Linux* section in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md).
|
> **NOTE**: The Image Classification code sample was automatically compiled when you ran the Image Classification sample script. If you want to compile it manually, see the *Build the Sample Applications on Linux* section in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md).
|
||||||
|
|
||||||
To run the **Image Classification** code sample with an input image on the IR:
|
To run the **Image Classification** code sample with an input image on the IR:
|
||||||
|
|
||||||
1. Set up the OpenVINO environment variables:
|
1. Set up the OpenVINO environment variables:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
2. Go to the code samples build directory:
|
2. Go to the code samples build directory:
|
||||||
```sh
|
```sh
|
||||||
cd ~/inference_engine_samples_build/intel64/Release
|
cd ~/inference_engine_cpp_samples_build/intel64/Release
|
||||||
```
|
```
|
||||||
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
|
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
|
||||||
```sh
|
```sh
|
||||||
@ -397,35 +334,35 @@ To run the **Image Classification** code sample with an input image on the IR:
|
|||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for examples of running the Image Classification code sample on different devices</strong></summary>
|
<summary><strong>Click for examples of running the Image Classification code sample on different devices</strong></summary>
|
||||||
|
|
||||||
The following commands run the Image Classification Code Sample on different hardware devices, using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image and the IR of your model from `~/models/public/squeezenet1.1/ir`:
|
The following commands run the Image Classification Code Sample on different hardware devices, using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image and the IR of your model from `~/models/public/squeezenet1.1/ir`:
|
||||||
|
|
||||||
**CPU:**
|
**CPU:**
|
||||||
```sh
|
```sh
|
||||||
./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU
|
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU
|
||||||
```
|
```
|
||||||
|
|
||||||
**GPU:**
|
**GPU:**
|
||||||
|
|
||||||
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
||||||
```sh
|
```sh
|
||||||
./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU
|
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d GPU
|
||||||
```
|
```
|
||||||
|
|
||||||
**MYRIAD:**
|
**MYRIAD:**
|
||||||
|
|
||||||
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
||||||
```sh
|
```sh
|
||||||
./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD
|
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD
|
||||||
```
|
```
|
||||||
|
|
||||||
**HDDL:**
|
**HDDL:**
|
||||||
|
|
||||||
> **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
> **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
||||||
```sh
|
```sh
|
||||||
./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL
|
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d HDDL
|
||||||
```
|
```
|
||||||
|
|
||||||
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
|
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
|
||||||
```sh
|
```sh
|
||||||
Top 10 results:
|
Top 10 results:
|
||||||
|
|
||||||
@ -450,58 +387,7 @@ Average running time of one iteration: 2.6642941 ms
|
|||||||
|
|
||||||
Throughput: 375.3339402 FPS
|
Throughput: 375.3339402 FPS
|
||||||
|
|
||||||
[ INFO ] Execution successful
|
[ INFO ] Classification sample execution successful
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### <a name="run-security-barrier"></a>Step 5: Run the Security Barrier Camera Demo Application
|
|
||||||
|
|
||||||
> **NOTE**: The Security Barrier Camera Demo Application is automatically compiled when you ran the Inference Pipeline demo scripts. If you want to build it manually, see the [Demo Applications Overview](@ref omz_demos) section.
|
|
||||||
|
|
||||||
To run the **Security Barrier Camera Demo Application** using an input image on the prepared IRs:
|
|
||||||
|
|
||||||
1. Set up the OpenVINO environment variables:
|
|
||||||
```sh
|
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
|
||||||
```
|
|
||||||
2. Go to the demo application build directory:
|
|
||||||
```sh
|
|
||||||
cd ~/inference_engine_demos_build/intel64/Release
|
|
||||||
```
|
|
||||||
3. Run the demo executable, specifying the input media file, list of model IRs, and a target device on which to perform inference:
|
|
||||||
```sh
|
|
||||||
./security_barrier_camera_demo -i <path_to_media> -m <path_to_vehicle-license-plate-detection_model_xml> -m_va <path_to_vehicle_attributes_model_xml> -m_lpr <path_to_license_plate_recognition_model_xml> -d <target_device>
|
|
||||||
```
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for examples of running the Security Barrier Camera demo application on different devices</strong></summary>
|
|
||||||
|
|
||||||
**CPU:**
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m /home/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va /home/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr /home/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU
|
|
||||||
```
|
|
||||||
|
|
||||||
**GPU:**
|
|
||||||
|
|
||||||
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
|
||||||
```sh
|
|
||||||
./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m <path_to_model>/vehicle-license-plate-detection-barrier-0106.xml -m_va <path_to_model>/vehicle-attributes-recognition-barrier-0039.xml -m_lpr <path_to_model>/license-plate-recognition-barrier-0001.xml -d GPU
|
|
||||||
```
|
|
||||||
|
|
||||||
**MYRIAD:**
|
|
||||||
|
|
||||||
> **NOTE**: Running inference on the Intel® Neural Compute Stick 2 device with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
|
||||||
```sh
|
|
||||||
./classification_sample_async -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
|
|
||||||
```
|
|
||||||
|
|
||||||
**HDDL:**
|
|
||||||
|
|
||||||
> **NOTE**: Running inference on the Intel® Vision Accelerator Design with Intel® Movidius™ VPUs device with the HDDL plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs section in the [installation instructions](../install_guides/installing-openvino-linux.md).
|
|
||||||
```sh
|
|
||||||
./classification_sample_async -i <DLDT_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d HDDL
|
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
@ -510,10 +396,10 @@ To run the **Security Barrier Camera Demo Application** using an input image on
|
|||||||
|
|
||||||
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
|
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
|
||||||
|
|
||||||
1. Before using the OpenVINO™ samples, always set up the environment:
|
1. Before using the OpenVINO™ samples, always set up the environment:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
2. Have the directory path for the following:
|
2. Have the directory path for the following:
|
||||||
- Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release`
|
- Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release`
|
||||||
- Demo Application binaries located in `~/inference_engine_demos_build/intel64/Release`
|
- Demo Application binaries located in `~/inference_engine_demos_build/intel64/Release`
|
||||||
@ -527,10 +413,10 @@ This section explains how to build and use the sample and demo applications prov
|
|||||||
To build all the demos and samples:
|
To build all the demos and samples:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp
|
cd $INTEL_OPENVINO_DIR/samples/cpp
|
||||||
# to compile C samples, go here also: cd <INSTALL_DIR>/inference_engine/samples/c
|
# to compile C samples, go here also: cd <INSTALL_DIR>/samples/c
|
||||||
build_samples.sh
|
build_samples.sh
|
||||||
cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos
|
cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos
|
||||||
build_demos.sh
|
build_demos.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -553,7 +439,7 @@ With the sample information specified, the command might look like this:
|
|||||||
-m ~/ir/fp32/mobilenet-ssd.xml -d CPU
|
-m ~/ir/fp32/mobilenet-ssd.xml -d CPU
|
||||||
```
|
```
|
||||||
|
|
||||||
## <a name="advanced-samples"></a> Advanced Demo Use
|
## <a name="advanced-samples"></a> Advanced Demo Use
|
||||||
|
|
||||||
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
|
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
|
||||||
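As an illustration only, such a pipeline is usually launched by passing one option per model, so that the detector output feeds the downstream models; a hypothetical sketch patterned on the Security Barrier Camera demo shown earlier (all paths are placeholders):
```sh
# Placeholder sketch: the detection model (-m) feeds the attribute (-m_va) and recognition (-m_lpr) models
./security_barrier_camera_demo -i <path_to_media> \
    -m <path_to_detection_model_xml> \
    -m_va <path_to_vehicle_attributes_model_xml> \
    -m_lpr <path_to_license_plate_recognition_model_xml> \
    -d <target_device>
```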
|
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
# Get Started with OpenVINO™ Toolkit on macOS* {#openvino_docs_get_started_get_started_macos}
|
# Get Started with OpenVINO™ Toolkit on macOS* {#openvino_docs_get_started_get_started_macos}
|
||||||
|
|
||||||
The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models on Intel® hardware. This guide helps you get started with the OpenVINO™ toolkit you installed on macOS*.
|
The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models on Intel® hardware. This guide helps you get started with the OpenVINO™ toolkit you installed on macOS*.
|
||||||
|
|
||||||
In this guide, you will:
|
In this guide, you will:
|
||||||
* Learn the OpenVINO™ inference workflow
|
* Learn the OpenVINO™ inference workflow
|
||||||
* Run demo scripts that illustrate the workflow and perform the steps for you
|
* Run sample scripts that illustrate the workflow and perform the steps for you
|
||||||
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application
|
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application
|
||||||
|
|
||||||
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
|
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
|
||||||
@ -13,8 +13,8 @@ The toolkit consists of three primary components:
|
|||||||
* **Intermediate Representation:** The Model Optimizer output. A model converted to a format that has been optimized for Intel® architecture and is usable by the Inference Engine.
|
* **Intermediate Representation:** The Model Optimizer output. A model converted to a format that has been optimized for Intel® architecture and is usable by the Inference Engine.
|
||||||
* **Inference Engine:** The software libraries that run inference against the IR (optimized model) to produce inference results.
|
* **Inference Engine:** The software libraries that run inference against the IR (optimized model) to produce inference results.
|
||||||
|
|
||||||
In addition, demo scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
|
In addition, sample scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
|
||||||
* **Demo Scripts** - Batch scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
|
* **Sample Scripts** - Batch scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
|
||||||
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
|
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
|
||||||
* Utilize specific OpenVINO capabilities in an application.
|
* Utilize specific OpenVINO capabilities in an application.
|
||||||
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
|
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
|
||||||
@ -27,18 +27,18 @@ By default, the Intel® Distribution of OpenVINO™ is installed to the followin
|
|||||||
* For root or administrator: `/opt/intel/openvino_<version>/`
|
* For root or administrator: `/opt/intel/openvino_<version>/`
|
||||||
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
||||||
|
|
||||||
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2021/`.
|
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2022/`.
|
||||||
|
|
||||||
If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home/<USER>/` with the directory in which you installed the software.
|
If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `/opt/intel` or `/home/<USER>/` with the directory in which you installed the software.
|
||||||
|
|
||||||
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>/deployment_tools` directory.
|
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>/tools` directory.
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for the Intel® Distribution of OpenVINO™ toolkit directory structure</strong></summary>
|
<summary><strong>Click for the Intel® Distribution of OpenVINO™ toolkit directory structure</strong></summary>
|
||||||
|
|
||||||
|
|
||||||
| Directory | Description |
|
| Directory | Description |
|
||||||
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
||||||
| `demo/` | Demo scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Demo Scripts](#use-openvino-demo-scripts) section.|
|
| `demo/` | Sample scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Sample Scripts](#use-openvino-sample-scripts) section.|
|
||||||
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
||||||
| `external/` | Third-party dependencies and drivers.|
|
| `external/` | Third-party dependencies and drivers.|
|
||||||
| `include/` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
|
| `include/` | Inference Engine header files. For API documentation, see the [Inference Engine API Reference](./annotated.html). |
|
||||||
@ -49,7 +49,7 @@ The primary tools for deploying your models and applications are installed to th
|
|||||||
| `model_optimizer/` | Model Optimizer directory. Contains configuration scripts, scripts to run the Model Optimizer and other files. See the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).|
|
| `model_optimizer/` | Model Optimizer directory. Contains configuration scripts, scripts to run the Model Optimizer and other files. See the [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).|
|
||||||
| `ngraph/` | nGraph directory. Includes the nGraph header and library files. |
|
| `ngraph/` | nGraph directory. Includes the nGraph header and library files. |
|
||||||
| `open_model_zoo/` | Open Model Zoo directory. Includes the Model Downloader tool to download [pre-trained OpenVINO](@ref omz_models_group_intel) and public models, OpenVINO models documentation, demo applications and the Accuracy Checker tool to evaluate model accuracy.|
|
| `open_model_zoo/` | Open Model Zoo directory. Includes the Model Downloader tool to download [pre-trained OpenVINO](@ref omz_models_group_intel) and public models, OpenVINO models documentation, demo applications and the Accuracy Checker tool to evaluate model accuracy.|
|
||||||
| `demos/` | Demo applications for inference scenarios. Also includes documentation and build scripts.|
|
| `demos/` | Sample applications for inference scenarios. Also includes documentation and build scripts.|
|
||||||
| `intel_models/` | Pre-trained OpenVINO models and associated documentation. See the [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_group_intel).|
|
| `intel_models/` | Pre-trained OpenVINO models and associated documentation. See the [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_group_intel).|
|
||||||
| `models` | Intel's trained and public models that can be obtained with Model Downloader.|
|
| `models` | Intel's trained and public models that can be obtained with Model Downloader.|
|
||||||
| `tools/` | Model Downloader and Accuracy Checker tools. |
|
| `tools/` | Model Downloader and Accuracy Checker tools. |
|
||||||
@ -64,42 +64,42 @@ The simplified OpenVINO™ workflow is:
|
|||||||
2. **Run the trained model through the Model Optimizer** to convert the model to an IR, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
|
2. **Run the trained model through the Model Optimizer** to convert the model to an IR, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
|
||||||
3. **Use the Inference Engine API in the application** to run inference against the IR (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
|
3. **Use the Inference Engine API in the application** to run inference against the IR (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
|
||||||
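The sections below walk through each of these steps in detail; as a condensed sketch (assuming the default macOS install paths shown later and the SqueezeNet example used throughout this guide), the flow looks like this:
```sh
# 0. Set up the environment
source /opt/intel/openvino_2022/setupvars.sh

# 1. Get a pre-trained model with the Model Downloader
cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader
sudo python3 ./downloader.py --name squeezenet1.1 --output_dir ~/models

# 2. Convert the model to the Intermediate Representation with the Model Optimizer
cd /opt/intel/openvino_2022/tools/model_optimizer
python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir

# 3. Run inference on the IR with a sample application
~/inference_engine_cpp_samples_build/intel64/Release/classification_sample_async \
    -i /opt/intel/openvino_2022/samples/scripts/car.png \
    -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU
```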
|
|
||||||
## Use the Demo Scripts to Learn the Workflow
|
## Use the Sample Scripts to Learn the Workflow
|
||||||
|
|
||||||
The demo scripts in `<INSTALL_DIR>/deployment_tools/demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps let you see how to:
|
The sample scripts in `<INSTALL_DIR>/samples/scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The sample steps let you see how to:
|
||||||
* Compile several samples from the source files delivered as part of the OpenVINO toolkit
|
* Compile several samples from the source files delivered as part of the OpenVINO toolkit
|
||||||
* Download trained models
|
* Download trained models
|
||||||
* Perform pipeline steps and see the output on the console
|
* Perform pipeline steps and see the output on the console
|
||||||
|
|
||||||
> **NOTE**: You must have Internet access to run the demo scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
|
> **NOTE**: You must have Internet access to run the sample scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
|
||||||
|
|
||||||
The demo scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
|
The sample scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./<script_name> -d [CPU, MYRIAD]
|
./<script_name> -d [CPU, MYRIAD]
|
||||||
```
|
```
|
||||||
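For example, to run the image classification sample script described below on an Intel® Neural Compute Stick 2 instead of the CPU (a minimal sketch, assuming you are in the scripts directory):
```sh
./run_sample_squeezenet.sh -d MYRIAD
```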
|
|
||||||
Before running the demo applications on Intel® Neural Compute Stick 2 device, you must complete additional configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
|
Before running the sample or demo applications on the Intel® Neural Compute Stick 2 device, you must complete additional configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
|
||||||
|
|
||||||
The following paragraphs describe each demo script.
|
The following paragraphs describe each sample script.
|
||||||
|
|
||||||
### Image Classification Demo Script
|
### Image Classification Sample Script
|
||||||
The `demo_squeezenet_download_convert_run` script illustrates the image classification pipeline.
|
The `run_sample_squeezenet` script illustrates the image classification pipeline.
|
||||||
|
|
||||||
The script:
|
The script:
|
||||||
1. Downloads a SqueezeNet model.
|
1. Downloads a SqueezeNet model.
|
||||||
2. Runs the Model Optimizer to convert the model to the IR.
|
2. Runs the Model Optimizer to convert the model to the IR.
|
||||||
3. Builds the Image Classification Sample Async application.
|
3. Builds the Image Classification Sample Async application.
|
||||||
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
|
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of running the Image Classification demo script</strong></summary>
|
<summary><strong>Click for an example of running the Image Classification sample script</strong></summary>
|
||||||
|
|
||||||
To run the script to view the sample image and perform inference on the CPU:
|
To run the script to view the sample image and perform inference on the CPU:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
open car.png
|
open car.png
|
||||||
./demo_squeezenet_download_convert_run.sh
|
./run_sample_squeezenet.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
When the script completes, you see the label and confidence for the top-10 categories:
|
When the script completes, you see the label and confidence for the top-10 categories:
|
||||||
@ -108,7 +108,7 @@ When the script completes, you see the label and confidence for the top-10 categ
|
|||||||
|
|
||||||
Top 10 results:
|
Top 10 results:
|
||||||
|
|
||||||
Image /opt/intel/openvino_2021/deployment_tools/demo/car.png
|
Image /opt/intel/openvino_2022/samples/scripts/car.png
|
||||||
|
|
||||||
classid probability label
|
classid probability label
|
||||||
------- ----------- -----
|
------- ----------- -----
|
||||||
@ -123,65 +123,36 @@ classid probability label
|
|||||||
468 0.0013083 cab, hack, taxi, taxicab
|
468 0.0013083 cab, hack, taxi, taxicab
|
||||||
661 0.0007443 Model T
|
661 0.0007443 Model T
|
||||||
|
|
||||||
[ INFO ] Execution successful
|
[ INFO ] Classification sample execution successful
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### Inference Pipeline Demo Script
|
### Benchmark Sample Script
|
||||||
The `demo_security_barrier_camera` uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute.
|
The `run_sample_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
|
||||||
|
|
||||||
The script:
|
The script:
|
||||||
1. Downloads three pre-trained model IRs.
|
|
||||||
2. Builds the Security Barrier Camera Demo application.
|
|
||||||
3. Runs the application with the downloaded models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline.
|
|
||||||
|
|
||||||
This application:
|
|
||||||
|
|
||||||
1. Identifies an object identified as a vehicle.
|
|
||||||
2. Uses the vehicle identification as input to the second model, which identifies specific vehicle attributes, including the license plate.
|
|
||||||
3. Uses the the license plate as input to the third model, which recognizes specific characters in the license plate.
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for an example of Running the Pipeline demo script</strong></summary>
|
|
||||||
|
|
||||||
To run the script performing inference on a CPU:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./demo_security_barrier_camera.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
When the verification script completes, you see an image that displays the resulting frame with detections rendered as bounding boxes, and text:
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### Benchmark Demo Script
|
|
||||||
The `demo_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
|
|
||||||
|
|
||||||
The script:
|
|
||||||
1. Downloads a SqueezeNet model.
|
1. Downloads a SqueezeNet model.
|
||||||
2. Runs the Model Optimizer to convert the model to the IR.
|
2. Runs the Model Optimizer to convert the model to the IR.
|
||||||
3. Builds the Inference Engine Benchmark tool.
|
3. Builds the Inference Engine Benchmark tool.
|
||||||
4. Runs the tool with the `car.png` image located in the `demo` directory.
|
4. Runs the tool with the `car.png` image located in the `demo` directory.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of running the Benchmark demo script</strong></summary>
|
<summary><strong>Click for an example of running the Benchmark sample script</strong></summary>
|
||||||
|
|
||||||
To run the script that performs inference on a CPU:
|
To run the script that performs inference on a CPU:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./demo_benchmark_app.sh
|
./run_sample_benchmark_app.sh
|
||||||
```
|
```
|
||||||
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
|
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
|
||||||
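Once the script has built the benchmark tool and produced the SqueezeNet IR, you can also invoke the tool directly with your own settings; a minimal sketch, assuming the default build and model locations used in this guide (run the tool with `-h` to confirm the available options for your release):
```sh
cd ~/inference_engine_cpp_samples_build/intel64/Release
./benchmark_app -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml \
    -i /opt/intel/openvino_2022/samples/scripts/car.png -d CPU -niter 100
```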
</details>
|
</details>
|
||||||
|
|
||||||
## <a name="using-sample-application"></a>Use Code Samples and Demo Applications to Learn the Workflow
|
## <a name="using-sample-application"></a>Use Code Samples and Demo Applications to Learn the Workflow
|
||||||
|
|
||||||
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
|
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
|
||||||
|
|
||||||
You will perform the following steps:
|
You will perform the following steps:
|
||||||
|
|
||||||
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
|
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
|
||||||
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
|
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
|
||||||
@ -197,9 +168,9 @@ Inputs you need to specify when using a code sample or demo application:
|
|||||||
- **One or more media files**. The media is typically a video file, but can be a still photo.
|
- **One or more media files**. The media is typically a video file, but can be a still photo.
|
||||||
- **One or more target device** on which you run inference. The target device can be the CPU, or VPU accelerator.
|
- **One or more target device** on which you run inference. The target device can be the CPU, or VPU accelerator.
|
||||||
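Combined, these inputs give most code samples and demo applications the same general command-line shape (a generic sketch; individual samples add options of their own):
```sh
./<sample_or_demo_name> -i <path_to_media> -m <path_to_model_ir_xml> -d <target_device>
```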
|
|
||||||
### Build the Code Samples and Demo Applications
|
### Build the Code Samples and Demo Applications
|
||||||
|
|
||||||
To perform sample inference, run the Image Classification code sample and Security Barrier Camera demo application that are automatically compiled when you run the Image Classification and Inference Pipeline demo scripts. The binary files are in the `~/inference_engine_samples_build/intel64/Release` and `~/inference_engine_demos_build/intel64/Release` directories, respectively.
|
The Image Classification Sample was automatically compiled when you ran the Image Classification sample script. The binary file is in the `~/inference_engine_cpp_samples_build/intel64/Release` directory.
|
||||||
|
|
||||||
You can also build all available sample code and demo applications from the source files delivered with the OpenVINO toolkit. To learn how to do this, see the instructions in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
|
You can also build all available sample code and demo applications from the source files delivered with the OpenVINO toolkit. To learn how to do this, see the instructions in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
|
||||||
|
|
||||||
@ -211,21 +182,21 @@ You must have a model that is specific for you inference task. Example model typ
|
|||||||
- Custom (Often based on SSD)
|
- Custom (Often based on SSD)
|
||||||
|
|
||||||
Options to find a model suitable for the OpenVINO™ toolkit are:
|
Options to find a model suitable for the OpenVINO™ toolkit are:
|
||||||
- Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using the [Model Downloader tool](@ref omz_tools_downloader).
|
- Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using the [Model Downloader tool](@ref omz_tools_downloader).
|
||||||
- Download from GitHub*, Caffe* Zoo, TensorFlow* Zoo, and other resources.
|
- Download from GitHub*, Caffe* Zoo, TensorFlow* Zoo, and other resources.
|
||||||
- Train your own model.
|
- Train your own model.
|
||||||
|
|
||||||
This guide uses the Model Downloader to get pre-trained models. You can use one of the following options to find a model:
|
This guide uses the Model Downloader to get pre-trained models. You can use one of the following options to find a model:
|
||||||
|
|
||||||
* **List the models available in the downloader**:
|
* **List the models available in the downloader**:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/tools/model_downloader/
|
cd /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
python3 info_dumper.py --print_all
|
python3 info_dumper.py --print_all
|
||||||
```
|
```
|
||||||
|
|
||||||
* **Use `grep` to list models that have a specific name pattern**:
|
* **Use `grep` to list models that have a specific name pattern**:
|
||||||
```sh
|
```sh
|
||||||
python3 info_dumper.py --print_all | grep <model_name>
|
python3 info_dumper.py --print_all | grep <model_name>
|
||||||
```
|
```
|
||||||
@ -236,14 +207,11 @@ sudo python3 ./downloader.py --name <model_name> --output_dir <models_dir>
|
|||||||
```
|
```
|
||||||
> **NOTE:** Always run the downloader with `sudo`.
|
> **NOTE:** Always run the downloader with `sudo`.
|
||||||
|
|
||||||
Download the following models if you want to run the Image Classification Sample and Security Barrier Camera Demo application:
|
Download the following models if you want to run the Image Classification Sample:
|
||||||
|
|
||||||
|Model Name | Code Sample or Demo App |
|
|Model Name | Code Sample or Demo App |
|
||||||
|-----------------------------------------------|-----------------------------------------------------|
|
|-----------------------------------------------|-----------------------------------------------------|
|
||||||
|`squeezenet1.1` | Image Classification Sample |
|
|`squeezenet1.1` | Image Classification Sample |
|
||||||
|`vehicle-license-plate-detection-barrier-0106` | Security Barrier Camera Demo application |
|
|
||||||
|`vehicle-attributes-recognition-barrier-0039` | Security Barrier Camera Demo application |
|
|
||||||
|`license-plate-recognition-barrier-0001` | Security Barrier Camera Demo application |
|
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of downloading the SqueezeNet Caffe* model</strong></summary>
|
<summary><strong>Click for an example of downloading the SqueezeNet Caffe* model</strong></summary>
|
||||||
@ -270,41 +238,6 @@ Your screen looks similar to this after the download:
|
|||||||
```
|
```
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for an example of downloading models for the Security Barrier Camera Demo application</strong></summary>
|
|
||||||
|
|
||||||
To download all three pre-trained models in FP16 precision to the `~/models` folder:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./downloader.py --name vehicle-license-plate-detection-barrier-0106,vehicle-attributes-recognition-barrier-0039,license-plate-recognition-barrier-0001 --output_dir ~/models --precisions FP16
|
|
||||||
```
|
|
||||||
Your screen looks similar to this after the download:
|
|
||||||
```
|
|
||||||
################|| Downloading models ||################
|
|
||||||
|
|
||||||
========== Downloading /Users/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml
|
|
||||||
... 100%, 207 KB, 313926 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /Users/username/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.bin
|
|
||||||
... 100%, 1256 KB, 2552 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /Users/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml
|
|
||||||
... 100%, 32 KB, 172042 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /Users/username/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.bin
|
|
||||||
... 100%, 1222 KB, 2712 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /Users/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml
|
|
||||||
... 100%, 47 KB, 217130 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading /Users/username/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.bin
|
|
||||||
... 100%, 2378 KB, 4222 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
################|| Post-processing ||################
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### <a name="convert-models-to-intermediate-representation"></a> Step 2: Convert the Models to the Intermediate Representation
|
### <a name="convert-models-to-intermediate-representation"></a> Step 2: Convert the Models to the Intermediate Representation
|
||||||
|
|
||||||
In this step, your trained models are ready to run through the Model Optimizer to convert them to the Intermediate Representation (IR) format. This is required before using the Inference Engine with the model.
|
In this step, your trained models are ready to run through the Model Optimizer to convert them to the Intermediate Representation (IR) format. This is required before using the Inference Engine with the model.
|
||||||
@ -317,16 +250,15 @@ The conversion may also create a `model_name.mapping` file, but it is not needed
|
|||||||
|
|
||||||
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the example to download a model in the <a href="#download-models">Download Models</a> section to learn how to download this model.
|
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the example to download a model in the <a href="#download-models">Download Models</a> section to learn how to download this model.
|
||||||
|
|
||||||
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
|
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
|
||||||
The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognition-barrier-0039`, `license-plate-recognition-barrier-0001` models are downloaded in the Intermediate Representation format. You don't need to use the Model Optimizer to convert these models.
|
|
||||||
|
|
||||||
1. Create an `<ir_dir>` directory to contain the model's IR.
|
1. Create an `<ir_dir>` directory to contain the model's IR.
|
||||||
|
|
||||||
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
|
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
|
||||||
|
|
||||||
3. Run the Model Optimizer script:
|
3. Run the Model Optimizer script:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer
|
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
python3 ./mo.py --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
python3 ./mo.py --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||||
@ -339,17 +271,17 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit
|
|||||||
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory:
|
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `~/models/public/squeezenet1.1/ir` output directory:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer
|
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
||||||
```
|
```
|
||||||
|
|
||||||
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
|
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
|
||||||
|
|
||||||
Copy the `squeezenet1.1.labels` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` to `<ir_dir>`. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers:
|
Copy the `squeezenet1.1.labels` file from `/opt/intel/openvino_2022/samples/scripts/` to `<ir_dir>`. This file contains the classes that ImageNet uses, so the inference results show text labels instead of classification numbers:
|
||||||
```sh
|
```sh
|
||||||
cp /opt/intel/openvino_2021/deployment_tools/demo/squeezenet1.1.labels <ir_dir>
|
cp /opt/intel/openvino_2022/samples/scripts/squeezenet1.1.labels <ir_dir>
|
||||||
```
|
```
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
@ -360,22 +292,22 @@ Many sources are available from which you can download video media to use the co
|
|||||||
- https://images.google.com
|
- https://images.google.com
|
||||||
|
|
||||||
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
|
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
|
||||||
* `/opt/intel/openvino_2021/deployment_tools/demo/car.png`
|
* `/opt/intel/openvino_2022/samples/scripts/car.png`
|
||||||
* `/opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp`
|
* `/opt/intel/openvino_2022/samples/scripts/car_1.bmp`
|
||||||
|
|
||||||
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
|
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
|
||||||
|
|
||||||
> **NOTE**: The Image Classification code sample is automatically compiled when you ran the Image Classification demo script. If you want to compile it manually, see the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) document.
|
> **NOTE**: The Image Classification code sample was automatically compiled when you ran the Image Classification sample script. If you want to compile it manually, see the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) document.
|
||||||
|
|
||||||
To run the **Image Classification** code sample with an input image on the IR:
|
To run the **Image Classification** code sample with an input image on the IR:
|
||||||
|
|
||||||
1. Set up the OpenVINO environment variables:
|
1. Set up the OpenVINO environment variables:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
2. Go to the code samples build directory:
|
2. Go to the code samples build directory:
|
||||||
```sh
|
```sh
|
||||||
cd ~/inference_engine_samples_build/intel64/Release
|
cd ~/inference_engine_cpp_samples_build/intel64/Release
|
||||||
```
|
```
|
||||||
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
|
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
|
||||||
```sh
|
```sh
|
||||||
@ -384,26 +316,26 @@ To run the **Image Classification** code sample with an input image on the IR:
|
|||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for examples of running the Image Classification code sample on different devices</strong></summary>
|
<summary><strong>Click for examples of running the Image Classification code sample on different devices</strong></summary>
|
||||||
|
|
||||||
The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2021/deployment_tools/demo/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices:
|
The following commands run the Image Classification Code Sample using the `car.png` file from the `/opt/intel/openvino_2022/samples/scripts/` directory as an input image, the IR of your model from `~/models/public/squeezenet1.1/ir` and on different hardware devices:
|
||||||
|
|
||||||
**CPU:**
|
**CPU:**
|
||||||
```sh
|
```sh
|
||||||
./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU
|
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d CPU
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
**MYRIAD:**
|
**MYRIAD:**
|
||||||
|
|
||||||
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
|
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
|
||||||
```sh
|
```sh
|
||||||
./classification_sample_async -i /opt/intel/openvino_2021/deployment_tools/demo/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD
|
./classification_sample_async -i /opt/intel/openvino_2022/samples/scripts/car.png -m ~/models/public/squeezenet1.1/ir/squeezenet1.1.xml -d MYRIAD
|
||||||
```
|
```
|
||||||
|
|
||||||
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
|
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
|
||||||
```sh
|
```sh
|
||||||
Top 10 results:
|
Top 10 results:
|
||||||
|
|
||||||
Image /opt/intel/openvino_2021/deployment_tools/demo/car.png
|
Image /opt/intel/openvino_2022/samples/scripts/car.png
|
||||||
|
|
||||||
classid probability label
|
classid probability label
|
||||||
------- ----------- -----
|
------- ----------- -----
|
||||||
@ -418,44 +350,7 @@ classid probability label
|
|||||||
864 0.0012045 tow truck, tow car, wrecker
|
864 0.0012045 tow truck, tow car, wrecker
|
||||||
581 0.0005833 grille, radiator grille
|
581 0.0005833 grille, radiator grille
|
||||||
|
|
||||||
[ INFO ] Execution successful
|
[ INFO ] Classification sample execution successful
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### <a name="run-security-barrier"></a>Step 5: Run the Security Barrier Camera Demo Application
|
|
||||||
|
|
||||||
> **NOTE**: The Security Barrier Camera Demo Application is automatically compiled when you run the Inference Pipeline demo scripts. If you want to build it manually, see the instructions in the [Demo Applications Overview](@ref omz_demos) section.
|
|
||||||
|
|
||||||
To run the **Security Barrier Camera Demo Application** using an input image on the prepared IRs:
|
|
||||||
|
|
||||||
1. Set up the OpenVINO environment variables:
|
|
||||||
```sh
|
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
|
||||||
```
|
|
||||||
2. Go to the demo application build directory:
|
|
||||||
```sh
|
|
||||||
cd ~/inference_engine_demos_build/intel64/Release
|
|
||||||
```
|
|
||||||
3. Run the demo executable, specifying the input media file, list of model IRs, and a target device on which to perform inference:
|
|
||||||
```sh
|
|
||||||
./security_barrier_camera_demo -i <path_to_media> -m <path_to_vehicle-license-plate-detection_model_xml> -m_va <path_to_vehicle_attributes_model_xml> -m_lpr <path_to_license_plate_recognition_model_xml> -d <target_device>
|
|
||||||
```
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for examples of running the Security Barrier Camera demo application on different devices</strong></summary>
|
|
||||||
|
|
||||||
**CPU:**
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./security_barrier_camera_demo -i /opt/intel/openvino_2021/deployment_tools/demo/car_1.bmp -m ~/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106.xml -m_va ~/models/intel/vehicle-attributes-recognition-barrier-0039/FP16/vehicle-attributes-recognition-barrier-0039.xml -m_lpr ~/models/intel/license-plate-recognition-barrier-0001/FP16/license-plate-recognition-barrier-0001.xml -d CPU
|
|
||||||
```
|
|
||||||
|
|
||||||
**MYRIAD:**
|
|
||||||
|
|
||||||
> **NOTE**: Running inference on VPU devices (Intel® Neural Compute Stick 2) with the MYRIAD plugin requires additional hardware configuration steps. For details, see the Steps for Intel® Neural Compute Stick 2 section in the [installation instructions](../install_guides/installing-openvino-macos.md).
|
|
||||||
```sh
|
|
||||||
./classification_sample_async -i <INSTALL_DIR>/inference-engine/samples/sample_data/car.png -m <ir_dir>/squeezenet1.1.xml -d MYRIAD
|
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
@ -464,10 +359,10 @@ To run the **Security Barrier Camera Demo Application** using an input image on
|
|||||||
|
|
||||||
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
|
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
|
||||||
|
|
||||||
1. Before using the OpenVINO™ samples, always set up the environment:
|
1. Before using the OpenVINO™ samples, always set up the environment:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
2. Have the directory path for the following:
|
2. Have the directory path for the following:
|
||||||
- Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release`
|
- Code Sample binaries located in `~/inference_engine_cpp_samples_build/intel64/Release`
|
||||||
- Demo Application binaries located in `~/inference_engine_demos_build/intel64/Release`
|
- Demo Application binaries located in `~/inference_engine_demos_build/intel64/Release`
|
||||||
@ -481,10 +376,10 @@ This section explains how to build and use the sample and demo applications prov
|
|||||||
To build all the demos and samples:
|
To build all the demos and samples:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd $INTEL_OPENVINO_DIR/inference_engine_samples/cpp
|
cd $INTEL_OPENVINO_DIR/samples/cpp
|
||||||
# to compile C samples, go here also: cd <INSTALL_DIR>/inference_engine/samples/c
|
# to compile C samples, go here also: cd <INSTALL_DIR>/samples/c
|
||||||
build_samples.sh
|
build_samples.sh
|
||||||
cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos
|
cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos
|
||||||
build_demos.sh
|
build_demos.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -503,11 +398,11 @@ Template to call sample code or a demo application:
|
|||||||
With the sample information specified, the command might look like this:
|
With the sample information specified, the command might look like this:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd $INTEL_OPENVINO_DIR/deployment_tools/open_model_zoo/demos/object_detection_demo
|
cd $INTEL_OPENVINO_DIR/extras/open_model_zoo/demos/object_detection_demo
|
||||||
./object_detection_demo -i ~/Videos/catshow.mp4 -m ~/ir/fp32/mobilenet-ssd.xml -d CPU
|
./object_detection_demo -i ~/Videos/catshow.mp4 -m ~/ir/fp32/mobilenet-ssd.xml -d CPU
|
||||||
```
|
```
|
||||||
|
|
||||||
## <a name="advanced-samples"></a> Advanced Demo Use
|
## <a name="advanced-samples"></a> Advanced Demo Use
|
||||||
|
|
||||||
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
|
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
|
||||||
|
|
||||||
|
@ -22,12 +22,12 @@ This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit
|
|||||||
|
|
||||||
The OpenVINO toolkit for Raspbian* OS is distributed without installer. This document refers to the directory to which you unpacked the toolkit package as `<INSTALL_DIR>`.
|
The OpenVINO toolkit for Raspbian* OS is distributed without installer. This document refers to the directory to which you unpacked the toolkit package as `<INSTALL_DIR>`.
|
||||||
|
|
||||||
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>/deployment_tools` directory.
|
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>/tools` directory.
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for the <code>deployment_tools</code> directory structure</strong></summary>
|
<summary><strong>Click for the <code>tools</code> directory structure</strong></summary>
|
||||||
|
|
||||||
|
|
||||||
| Directory | Description |
|
|
||||||
|
| Directory | Description |
|
||||||
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
||||||
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
| `inference_engine/` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
||||||
| `external/` | Third-party dependencies and drivers.|
|
| `external/` | Third-party dependencies and drivers.|
|
||||||
@ -42,7 +42,7 @@ The primary tools for deploying your models and applications are installed to th
|
|||||||
|
|
||||||
The OpenVINO™ workflow on Raspbian* OS is as follows:
|
The OpenVINO™ workflow on Raspbian* OS is as follows:
|
||||||
1. **Get a pre-trained model** for your inference task. If you want to use your model for inference, the model must be converted to the `.bin` and `.xml` Intermediate Representation (IR) files, which are used as input by Inference Engine. On Raspberry PI, OpenVINO™ toolkit includes only the Inference Engine module. The Model Optimizer is not supported on this platform. To get the optimized models you can use one of the following options:
|
1. **Get a pre-trained model** for your inference task. If you want to use your model for inference, the model must be converted to the `.bin` and `.xml` Intermediate Representation (IR) files, which are used as input by Inference Engine. On Raspberry PI, OpenVINO™ toolkit includes only the Inference Engine module. The Model Optimizer is not supported on this platform. To get the optimized models you can use one of the following options:
|
||||||
|
|
||||||
* Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using [Model Downloader tool](@ref omz_tools_downloader).
|
* Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using [Model Downloader tool](@ref omz_tools_downloader).
|
||||||
<br> For more information on pre-trained models, see [Pre-Trained Models Documentation](@ref omz_models_group_intel)
|
<br> For more information on pre-trained models, see [Pre-Trained Models Documentation](@ref omz_models_group_intel)
|
||||||
|
|
||||||
@ -62,7 +62,7 @@ Follow the steps below to run pre-trained Face Detection network using Inference
|
|||||||
```
|
```
|
||||||
2. Build the Object Detection Sample with the following command:
|
2. Build the Object Detection Sample with the following command:
|
||||||
```sh
|
```sh
|
||||||
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/deployment_tools/inference_engine/samples/cpp
|
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2022/samples/cpp
|
||||||
make -j2 object_detection_sample_ssd
|
make -j2 object_detection_sample_ssd
|
||||||
```
|
```
|
||||||
3. Download the pre-trained Face Detection model with the [Model Downloader tool](@ref omz_tools_downloader):
|
3. Download the pre-trained Face Detection model with the [Model Downloader tool](@ref omz_tools_downloader):
|
||||||
@ -82,10 +82,10 @@ The application outputs an image (`out_0.bmp`) with detected faced enclosed in r
|
|||||||
|
|
||||||
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples:
|
Following are some basic guidelines for executing the OpenVINO™ workflow using the code samples:
|
||||||
|
|
||||||
1. Before using the OpenVINO™ samples, always set up the environment:
|
1. Before using the OpenVINO™ samples, always set up the environment:
|
||||||
```sh
|
```sh
|
||||||
source <INSTALL_DIR>/bin/setupvars.sh
|
source <INSTALL_DIR>/setupvars.sh
|
||||||
```
|
```
|
||||||
2. Have the directory path for the following:
|
2. Have the directory path for the following:
|
||||||
- Code Sample binaries
|
- Code Sample binaries
|
||||||
- Media: Video or image. Many sources are available from which you can download video media to use the code samples and demo applications, like https://videos.pexels.com and https://images.google.com.
|
- Media: Video or image. Many sources are available from which you can download video media to use the code samples and demo applications, like https://videos.pexels.com and https://images.google.com.
|
||||||
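Putting these guidelines together, a face detection run on the Raspberry Pi might look like the following sketch (the image and model paths are placeholders, and `-d MYRIAD` assumes an attached Intel® Neural Compute Stick 2; check the sample's `-h` output for the exact options in your release):
```sh
# Run the Object Detection Sample built earlier against the downloaded Face Detection IR
./object_detection_sample_ssd -i <path_to_image> -m <path_to_face_detection_model_xml> -d MYRIAD
```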
|
@ -4,7 +4,7 @@ The OpenVINO™ toolkit optimizes and runs Deep Learning Neural Network models o
|
|||||||
|
|
||||||
In this guide, you will:
|
In this guide, you will:
|
||||||
* Learn the OpenVINO™ inference workflow
|
* Learn the OpenVINO™ inference workflow
|
||||||
* Run demo scripts that illustrate the workflow and perform the steps for you
|
* Run sample scripts that illustrate the workflow and perform the steps for you
|
||||||
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application
|
* Run the workflow steps yourself, using detailed instructions with a code sample and demo application
|
||||||
|
|
||||||
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
|
## <a name="openvino-components"></a>OpenVINO™ toolkit Components
|
||||||
@ -14,8 +14,8 @@ The toolkit consists of three primary components:
|
|||||||
* **Inference Engine:** The software libraries that run inference against the IR (optimized model) to produce inference results.
|
* **Inference Engine:** The software libraries that run inference against the IR (optimized model) to produce inference results.
|
||||||
|
|
||||||
|
|
||||||
In addition, demo scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
|
In addition, sample scripts, code samples and demo applications are provided to help you get up and running with the toolkit:
|
||||||
* **Demo Scripts** - Batch scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
|
* **Sample Scripts** - Batch scripts that automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios.
|
||||||
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
|
* **[Code Samples](../IE_DG/Samples_Overview.md)** - Small console applications that show you how to:
|
||||||
* Utilize specific OpenVINO capabilities in an application.
|
* Utilize specific OpenVINO capabilities in an application.
|
||||||
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
|
* Perform specific tasks, such as loading a model, running inference, querying specific device capabilities, and more.
|
||||||
@ -24,16 +24,16 @@ In addition, demo scripts, code samples and demo applications are provided to he
|
|||||||
## <a name="openvino-installation"></a>Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure
|
## <a name="openvino-installation"></a>Intel® Distribution of OpenVINO™ toolkit Installation and Deployment Tools Directory Structure
|
||||||
This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Windows*](../install_guides/installing-openvino-windows.md).
|
This guide assumes you completed all Intel® Distribution of OpenVINO™ toolkit installation and configuration steps. If you have not yet installed and configured the toolkit, see [Install Intel® Distribution of OpenVINO™ toolkit for Windows*](../install_guides/installing-openvino-windows.md).
|
||||||
|
|
||||||
By default, the installation directory is `C:\Program Files (x86)\Intel\openvino_<version>`, referred to as `<INSTALL_DIR>`. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2021`.
|
By default, the installation directory is `C:\Program Files (x86)\Intel\openvino_<version>`, referred to as `<INSTALL_DIR>`. If you installed the Intel® Distribution of OpenVINO™ toolkit to a directory other than the default, replace `C:\Program Files (x86)\Intel` with the directory in which you installed the software. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2022`.
|
||||||
|
|
||||||
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>\deployment_tools` directory.
|
The primary tools for deploying your models and applications are installed to the `<INSTALL_DIR>\tools` directory.
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for the <code>deployment_tools</code> directory structure</strong></summary>
|
<summary><strong>Click for the <code>tools</code> directory structure</strong></summary>
|
||||||
|
|
||||||
|
|
||||||
| Directory | Description |
|
|
||||||
|
| Directory | Description |
|
||||||
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
|:----------------------------------------|:--------------------------------------------------------------------------------------|
|
||||||
| `demo\` | Demo scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Demo Scripts](#use-openvino-demo-scripts) section.|
|
| `demo\` | Sample scripts. Demonstrate pipelines for inference scenarios, automatically perform steps and print detailed output to the console. For more information, see the [Use OpenVINO: Sample Scripts](#use-openvino-sample-scripts) section.|
|
||||||
| `inference_engine\` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
| `inference_engine\` | Inference Engine directory. Contains Inference Engine API binaries and source files, samples and extensions source files, and resources like hardware drivers.|
|
||||||
| `bin\` | Inference Engine binaries.|
|
| `bin\` | Inference Engine binaries.|
|
||||||
| `external\` | Third-party dependencies and drivers.|
|
| `external\` | Third-party dependencies and drivers.|
|
||||||
@ -61,45 +61,44 @@ The simplified OpenVINO™ workflow is:
|
|||||||
2. **Run the trained model through the Model Optimizer** to convert the model to an IR, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
|
2. **Run the trained model through the Model Optimizer** to convert the model to an IR, which consists of a pair of `.xml` and `.bin` files that are used as the input for Inference Engine.
|
||||||
3. **Use the Inference Engine API in the application** to run inference against the IR (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
|
3. **Use the Inference Engine API in the application** to run inference against the IR (optimized model) and output inference results. The application can be an OpenVINO™ sample, demo, or your own application.
|
||||||
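To make step 3 concrete, here is a minimal, hedged sketch of loading an IR and running one inference with the Inference Engine Python API; the model file names and the zero-filled input are placeholders for illustration, not files shipped with this guide.

```python
# Minimal sketch: read an IR, load it on a device, and run one inference.
# File names and the dummy input are placeholders for illustration only.
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="squeezenet1.1.xml", weights="squeezenet1.1.bin")
exec_net = ie.load_network(network=net, device_name="CPU")

input_name = next(iter(net.input_info))
input_shape = net.input_info[input_name].input_data.shape  # e.g. [1, 3, 227, 227]
dummy_input = np.zeros(input_shape, dtype=np.float32)

results = exec_net.infer(inputs={input_name: dummy_input})
output_name = next(iter(net.outputs))
print(results[output_name].shape)
```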
|
|
||||||
## Use the Demo Scripts to Learn the Workflow
|
## Use the Sample Scripts to Learn the Workflow
|
||||||
|
|
||||||
The demo scripts in `<INSTALL_DIR>\deployment_tools\demo` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps demonstrate how to:
|
The sample scripts in `<INSTALL_DIR>\samples\scripts` give you a starting point to learn the OpenVINO workflow. These scripts automatically perform the workflow steps to demonstrate running inference pipelines for different scenarios. The demo steps demonstrate how to:
|
||||||
* Compile several samples from the source files delivered as part of the OpenVINO toolkit
|
* Compile several samples from the source files delivered as part of the OpenVINO toolkit
|
||||||
* Download trained models
|
* Download trained models
|
||||||
* Perform pipeline steps and see the output on the console
|
* Perform pipeline steps and see the output on the console
|
||||||
|
|
||||||
> **REQUIRED**: You must have Internet access to run the demo scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
|
> **REQUIRED**: You must have Internet access to run the sample scripts. If your Internet access is through a proxy server, make sure the operating system environment proxy information is configured.
|
||||||
|
|
||||||
The demo scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
|
The sample scripts can run inference on any [supported target device](https://software.intel.com/en-us/openvino-toolkit/hardware). Although the default inference device is CPU, you can use the `-d` parameter to change the inference device. The general command to run the scripts looks as follows:
|
||||||
|
|
||||||
```bat
|
```bat
|
||||||
.\<script_name> -d [CPU, GPU, MYRIAD, HDDL]
|
.\<script_name> -d [CPU, GPU, MYRIAD, HDDL]
|
||||||
```
|
```
|
||||||
|
|
||||||
Before running the demo applications on Intel® Processor Graphics or Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, you must complete additional hardware configuration steps. For details, see the following sections in the [installation instructions](../install_guides/installing-openvino-windows.md):
|
Before running the sample or demo applications on Intel® Processor Graphics or Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, you must complete additional hardware configuration steps. For details, see the following sections in the [installation instructions](../install_guides/installing-openvino-windows.md):
|
||||||
* Additional Installation Steps for Intel® Processor Graphics (GPU)
|
* Additional Installation Steps for Intel® Processor Graphics (GPU)
|
||||||
* Additional Installation Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
|
* Additional Installation Steps for Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
|
||||||
|
|
||||||
The following paragraphs describe each demo script.
|
The following paragraphs describe each sample script.
|
||||||
|
|
||||||
### Image Classification Demo Script
|
### Image Classification Sample Script
|
||||||
The `demo_squeezenet_download_convert_run` script illustrates the image classification pipeline.
|
The `run_sample_squeezenet` script illustrates the image classification pipeline.
|
||||||
|
|
||||||
The script:
|
The script:
|
||||||
1. Downloads a SqueezeNet model.
|
1. Downloads a SqueezeNet model.
|
||||||
2. Runs the Model Optimizer to convert the model to the IR.
|
2. Runs the Model Optimizer to convert the model to the IR.
|
||||||
3. Builds the Image Classification Sample Async application.
|
3. Builds the Image Classification Sample Async application.
|
||||||
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
|
4. Runs the compiled sample with the `car.png` image located in the `demo` directory.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of running the Image Classification demo script</strong></summary>
|
<summary><strong>Click for an example of running the Image Classification sample script</strong></summary>
|
||||||
|
|
||||||
|
To preview the image that the script will classify, open the `car.png` file in any image viewer.
|
||||||
|
|
||||||
To run the script to perform inference on a CPU:
|
To run the script to perform inference on a CPU:
|
||||||
|
|
||||||
1. Open the `car.png` file in any image viewer to see what the demo will be classifying.
|
|
||||||
2. Run the following script:
|
|
||||||
```bat
|
```bat
|
||||||
.\demo_squeezenet_download_convert_run.bat
|
.\run_sample_squeezenet.bat
|
||||||
```
|
```
|
||||||
|
|
||||||
When the script completes, you see the label and confidence for the top-10 categories:
|
When the script completes, you see the label and confidence for the top-10 categories:
|
||||||
@ -108,7 +107,7 @@ When the script completes, you see the label and confidence for the top-10 categ
|
|||||||
|
|
||||||
Top 10 results:
|
Top 10 results:
|
||||||
|
|
||||||
Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png
|
Image C:\Program Files (x86)\Intel\openvino_2022\samples\scripts\car.png
|
||||||
|
|
||||||
classid probability label
|
classid probability label
|
||||||
------- ----------- -----
|
------- ----------- -----
|
||||||
@ -123,56 +122,27 @@ classid probability label
|
|||||||
468 0.0013083 cab, hack, taxi, taxicab
|
468 0.0013083 cab, hack, taxi, taxicab
|
||||||
661 0.0007443 Model T
|
661 0.0007443 Model T
|
||||||
|
|
||||||
[ INFO ] Execution successful
|
[ INFO ] Classification sample execution successful
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
### Inference Pipeline Demo Script
|
### Benchmark Sample Script
|
||||||
The `demo_security_barrier_camera` uses vehicle recognition in which vehicle attributes build on each other to narrow in on a specific attribute.
|
The `run_sample_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
|
||||||
|
|
||||||
The script:
|
The script:
|
||||||
1. Downloads three pre-trained model IRs.
|
|
||||||
2. Builds the Security Barrier Camera Demo application.
|
|
||||||
3. Runs the application with the downloaded models and the `car_1.bmp` image from the `demo` directory to show an inference pipeline.
|
|
||||||
|
|
||||||
This application:
|
|
||||||
|
|
||||||
1. Identifies an object as a vehicle.
|
|
||||||
2. Uses the vehicle identification as input to the second model, which identifies specific vehicle attributes, including the license plate.
|
|
||||||
3. Uses the license plate as input to the third model, which recognizes specific characters in the license plate.
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for an example of Running the Pipeline demo script</strong></summary>
|
|
||||||
|
|
||||||
To run the script performing inference on Intel® Processor Graphics:
|
|
||||||
|
|
||||||
```bat
|
|
||||||
.\demo_security_barrier_camera.bat -d GPU
|
|
||||||
```
|
|
||||||
|
|
||||||
When the verification script completes, you see an image that displays the resulting frame with detections rendered as bounding boxes, and text:
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### Benchmark Demo Script
|
|
||||||
The `demo_benchmark_app` script illustrates how to use the Benchmark Application to estimate deep learning inference performance on supported devices.
|
|
||||||
|
|
||||||
The script:
|
|
||||||
1. Downloads a SqueezeNet model.
|
1. Downloads a SqueezeNet model.
|
||||||
2. Runs the Model Optimizer to convert the model to the IR.
|
2. Runs the Model Optimizer to convert the model to the IR.
|
||||||
3. Builds the Inference Engine Benchmark tool.
|
3. Builds the Inference Engine Benchmark tool.
|
||||||
4. Runs the tool with the `car.png` image located in the `demo` directory.
|
4. Runs the tool with the `car.png` image located in the `demo` directory.
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of running the Benchmark demo script</strong></summary>
|
<summary><strong>Click for an example of running the Benchmark sample script</strong></summary>
|
||||||
|
|
||||||
To run the script that performs inference (runs on CPU by default):
|
To run the script that performs inference (runs on CPU by default):
|
||||||
|
|
||||||
```bat
|
```bat
|
||||||
.\demo_benchmark_app.bat
|
.\run_sample_benchmark_app.bat
|
||||||
```
|
```
|
||||||
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
|
When the verification script completes, you see the performance counters, resulting latency, and throughput values displayed on the screen.
|
||||||
</details>
|
</details>
|
||||||
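For intuition about the latency and throughput values mentioned above, the following is a deliberately simplified, hedged sketch built around synchronous inference calls; the real Benchmark Application additionally uses asynchronous requests, streams, and warm-up iterations, so do not treat this as equivalent.

```python
# Simplified latency/throughput measurement around synchronous inference.
# Illustrates the reported metrics only; it is not a replacement for benchmark_app.
import time
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="squeezenet1.1.xml", weights="squeezenet1.1.bin")
exec_net = ie.load_network(network=net, device_name="CPU")
input_name = next(iter(net.input_info))
blob = np.zeros(net.input_info[input_name].input_data.shape, dtype=np.float32)

latencies = []
for _ in range(100):
    start = time.perf_counter()
    exec_net.infer(inputs={input_name: blob})
    latencies.append(time.perf_counter() - start)

print(f"median latency: {sorted(latencies)[len(latencies) // 2] * 1000:.2f} ms")
print(f"throughput:     {len(latencies) / sum(latencies):.2f} FPS")
```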
@ -181,7 +151,7 @@ When the verification script completes, you see the performance counters, result
|
|||||||
|
|
||||||
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
|
This section guides you through a simplified workflow for the Intel® Distribution of OpenVINO™ toolkit using code samples and demo applications.
|
||||||
|
|
||||||
You will perform the following steps:
|
You will perform the following steps:
|
||||||
|
|
||||||
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
|
1. <a href="#download-models">Use the Model Downloader to download suitable models.</a>
|
||||||
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
|
2. <a href="#convert-models-to-intermediate-representation">Convert the models with the Model Optimizer.</a>
|
||||||
@ -199,7 +169,7 @@ Inputs you need to specify when using a code sample or demo application:
|
|||||||
|
|
||||||
### Build the Code Samples and Demo Applications
|
### Build the Code Samples and Demo Applications
|
||||||
|
|
||||||
To perform sample inference, run the Image Classification code sample and Security Barrier Camera demo application that are automatically compiled when you run the Image Classification and Inference Pipeline demo scripts. The binary files are in the `C:\Users\<USER_ID>\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release` and `C:\Users\<USER_ID>\Intel\OpenVINO\inference_engine_demos_build\intel64\Release` directories, respectively.
|
To perform sample inference, run the Image Classification Sample that was automatically compiled when you ran the Image Classification sample script. The binary file is in the `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release` directory.
|
||||||
|
|
||||||
You can also build all available sample code and demo applications from the source files delivered with the OpenVINO™ toolkit. To learn how to do this, see the instructions in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
|
You can also build all available sample code and demo applications from the source files delivered with the OpenVINO™ toolkit. To learn how to do this, see the instructions in the [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md) and [Demo Applications Overview](@ref omz_demos) sections.
|
||||||
|
|
||||||
@ -214,18 +184,18 @@ Options to find a model suitable for the OpenVINO™ toolkit are:
|
|||||||
- Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using the [Model Downloader tool](@ref omz_tools_downloader).
|
- Download public and Intel's pre-trained models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo) using the [Model Downloader tool](@ref omz_tools_downloader).
|
||||||
- Download from GitHub*, Caffe* Zoo, TensorFlow* Zoo, and other resources.
|
- Download from GitHub*, Caffe* Zoo, TensorFlow* Zoo, and other resources.
|
||||||
- Train your own model.
|
- Train your own model.
|
||||||
|
|
||||||
This guide uses the Model Downloader to get pre-trained models. You can use one of the following options to find a model:
|
This guide uses the Model Downloader to get pre-trained models. You can use one of the following options to find a model:
|
||||||
|
|
||||||
* **List the models available in the downloader**:
|
* **List the models available in the downloader**:
|
||||||
```bat
|
```bat
|
||||||
cd <INSTALL_DIR>\deployment_tools\tools\model_downloader\
|
cd <INSTALL_DIR>\tools\model_downloader\
|
||||||
```
|
```
|
||||||
```bat
|
```bat
|
||||||
python info_dumper.py --print_all
|
python info_dumper.py --print_all
|
||||||
```
|
```
|
||||||
|
|
||||||
* **Use `grep` to list models that have a specific name pattern**:
|
* **Use `grep` to list models that have a specific name pattern**:
|
||||||
```bat
|
```bat
|
||||||
python info_dumper.py --print_all | grep <model_name>
|
python info_dumper.py --print_all | grep <model_name>
|
||||||
```
|
```
|
||||||
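Since `grep` is not available on a default Windows installation, a hedged alternative is to filter the downloader's model list in Python; this assumes `info_dumper.py --print_all` emits a JSON array of records with a `name` field, so check the output format of your version first.

```python
# Hypothetical helper: filter the Model Downloader model list by a name pattern.
# Assumes info_dumper.py prints a JSON array of model records with a "name" key.
import json
import subprocess

raw = subprocess.run(
    ["python", "info_dumper.py", "--print_all"],
    capture_output=True, text=True, check=True,
).stdout

pattern = "squeezenet"  # substring to look for
for model in json.loads(raw):
    if pattern in model["name"]:
        print(model["name"])
```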
@ -240,9 +210,6 @@ Download the following models if you want to run the Image Classification Sample
|
|||||||
|Model Name | Code Sample or Demo App |
|
|Model Name | Code Sample or Demo App |
|
||||||
|-----------------------------------------------|-----------------------------------------------------|
|
|-----------------------------------------------|-----------------------------------------------------|
|
||||||
|`squeezenet1.1` | Image Classification Sample |
|
|`squeezenet1.1` | Image Classification Sample |
|
||||||
|`vehicle-license-plate-detection-barrier-0106` | Security Barrier Camera Demo application |
|
|
||||||
|`vehicle-attributes-recognition-barrier-0039` | Security Barrier Camera Demo application |
|
|
||||||
|`license-plate-recognition-barrier-0001` | Security Barrier Camera Demo application |
|
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for an example of downloading the SqueezeNet Caffe* model</strong></summary>
|
<summary><strong>Click for an example of downloading the SqueezeNet Caffe* model</strong></summary>
|
||||||
@ -269,41 +236,6 @@ Your screen looks similar to this after the download:
|
|||||||
```
|
```
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for an example of downloading models for the Security Barrier Camera Demo application</strong></summary>
|
|
||||||
|
|
||||||
To download all three pre-trained models in FP16 precision to the `C:\Users\<USER_ID>\Documents\models` folder:
|
|
||||||
|
|
||||||
```bat
|
|
||||||
python .\downloader.py --name vehicle-license-plate-detection-barrier-0106,vehicle-attributes-recognition-barrier-0039,license-plate-recognition-barrier-0001 --output_dir C:\Users\username\Documents\models --precisions FP16
|
|
||||||
```
|
|
||||||
Your screen looks similar to this after the download:
|
|
||||||
```
|
|
||||||
################|| Downloading models ||################
|
|
||||||
|
|
||||||
========== Downloading C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.xml
|
|
||||||
... 100%, 207 KB, 13810 KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.bin
|
|
||||||
... 100%, 1256 KB, 70 KB/s, 17 seconds passed
|
|
||||||
|
|
||||||
========== Downloading C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.xml
|
|
||||||
... 100%, 32 KB, ? KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.bin
|
|
||||||
... 100%, 1222 KB, 277 KB/s, 4 seconds passed
|
|
||||||
|
|
||||||
========== Downloading C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.xml
|
|
||||||
... 100%, 47 KB, ? KB/s, 0 seconds passed
|
|
||||||
|
|
||||||
========== Downloading C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.bin
|
|
||||||
... 100%, 2378 KB, 120 KB/s, 19 seconds passed
|
|
||||||
|
|
||||||
################|| Post-processing ||################
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### <a name="convert-models-to-intermediate-representation"></a> Step 2: Convert the Models to the Intermediate Representation
|
### <a name="convert-models-to-intermediate-representation"></a> Step 2: Convert the Models to the Intermediate Representation
|
||||||
|
|
||||||
In this step, your trained models are ready to run through the Model Optimizer to convert them to the Intermediate Representation (IR) format. This is required before using the Inference Engine with the model.
|
In this step, your trained models are ready to run through the Model Optimizer to convert them to the Intermediate Representation (IR) format. This is required before using the Inference Engine with the model.
|
||||||
@ -314,18 +246,17 @@ Models in the Intermediate Representation format always include a pair of `.xml`
|
|||||||
|
|
||||||
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the example to download a model in the <a href="#download-models">Download Models</a> section to learn how to download this model.
|
This guide uses the public SqueezeNet 1.1 Caffe\* model to run the Image Classification Sample. See the example to download a model in the <a href="#download-models">Download Models</a> section to learn how to download this model.
|
||||||
|
|
||||||
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
|
The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the Model Optimizer to convert the model to the IR.
|
||||||
The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognition-barrier-0039`, `license-plate-recognition-barrier-0001` models are downloaded in the IR format. You do not need to use the Model Optimizer to convert these models.
|
|
||||||
|
|
||||||
1. Create an `<ir_dir>` directory to contain the model's IR.
|
1. Create an `<ir_dir>` directory to contain the model's IR.
|
||||||
|
|
||||||
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
|
2. The Inference Engine can perform inference on different precision formats, such as `FP32`, `FP16`, `INT8`. To prepare an IR with specific precision, run the Model Optimizer with the appropriate `--data_type` option.
|
||||||
|
|
||||||
3. Run the Model Optimizer script:
|
3. Run the Model Optimizer script:
|
||||||
```bat
|
```bat
|
||||||
cd <INSTALL_DIR>\deployment_tools\model_optimizer
|
cd <INSTALL_DIR>\tools\model_optimizer
|
||||||
```
|
```
|
||||||
```bat
|
```bat
|
||||||
python .\mo.py --input_model <model_dir>\<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
python .\mo.py --input_model <model_dir>\<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||||
```
|
```
|
||||||
The produced IR files are in the `<ir_dir>` directory.
|
The produced IR files are in the `<ir_dir>` directory.
|
||||||
@ -336,43 +267,43 @@ The `vehicle-license-plate-detection-barrier-0106`, `vehicle-attributes-recognit
|
|||||||
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves it to the `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` output directory:
|
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves it to the `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` output directory:
|
||||||
|
|
||||||
```bat
|
```bat
|
||||||
cd <INSTALL_DIR>\deployment_tools\model_optimizer
|
cd <INSTALL_DIR>\tools\model_optimizer
|
||||||
```
|
```
|
||||||
```bat
|
```bat
|
||||||
python .\mo.py --input_model C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir C:\Users\username\Documents\models\public\squeezenet1.1\ir
|
python .\mo.py --input_model C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir C:\Users\username\Documents\models\public\squeezenet1.1\ir
|
||||||
```
|
```
|
||||||
|
|
||||||
After the Model Optimizer script completes, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` directory.
|
After the Model Optimizer script completes, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` directory.
|
||||||
|
|
||||||
Copy the `squeezenet1.1.labels` file from the `<INSTALL_DIR>\deployment_tools\demo\` to `<ir_dir>`. This file contains the classes that ImageNet uses. Therefore, the inference results show text instead of classification numbers:
|
Copy the `squeezenet1.1.labels` file from the `<INSTALL_DIR>\samples\scripts\` directory to `<ir_dir>`. This file contains the classes that ImageNet uses. Therefore, the inference results show text labels instead of classification numbers:
|
||||||
```batch
|
```batch
|
||||||
cp <INSTALL_DIR>\deployment_tools\demo\squeezenet1.1.labels <ir_dir>
|
cp <INSTALL_DIR>\samples\scripts\squeezenet1.1.labels <ir_dir>
|
||||||
```
|
```
|
||||||
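For reference, the labels file is assumed here to be plain text with one class name per line, indexed by class id; a short hedged sketch of how a result id from the earlier sample output maps to its label:

```python
# Hedged sketch: map a classification id to its label via squeezenet1.1.labels.
# Assumes one label per line, where the line index equals the class id.
with open("squeezenet1.1.labels", encoding="utf-8") as f:
    labels = [line.strip() for line in f]

class_id = 468  # e.g. "cab, hack, taxi, taxicab" in the earlier sample output
print(class_id, labels[class_id])
```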
</details>
|
</details>
|
||||||
|
|
||||||
### <a name="download-media"></a> Step 3: Download a Video or a Still Photo as Media
|
### <a name="download-media"></a> Step 3: Download a Video or a Still Photo as Media
|
||||||
|
|
||||||
Many sources are available from which you can download video media to use with the code samples and demo applications. Possibilities include:
|
Many sources are available from which you can download video media to use with the code samples and demo applications. Possibilities include:
|
||||||
- https://videos.pexels.com
|
- https://videos.pexels.com
|
||||||
- https://images.google.com
|
- https://images.google.com
|
||||||
|
|
||||||
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
|
As an alternative, the Intel® Distribution of OpenVINO™ toolkit includes two sample images that you can use for running code samples and demo applications:
|
||||||
* `<INSTALL_DIR>\deployment_tools\demo\car.png`
|
* `<INSTALL_DIR>\samples\scripts\car.png`
|
||||||
* `<INSTALL_DIR>\deployment_tools\demo\car_1.bmp`
|
* `<INSTALL_DIR>\samples\scripts\car_1.bmp`
|
||||||
|
|
||||||
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
|
### <a name="run-image-classification"></a>Step 4: Run the Image Classification Code Sample
|
||||||
|
|
||||||
> **NOTE**: The Image Classification code sample is automatically compiled when you run the Image Classification demo script. If you want to compile it manually, see the Build the Sample Applications on Microsoft Windows* OS section in [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md).
|
> **NOTE**: The Image Classification code sample is automatically compiled when you run the Image Classification sample script. If you want to compile it manually, see the Build the Sample Applications on Microsoft Windows* OS section in [Inference Engine Code Samples Overview](../IE_DG/Samples_Overview.md).
|
||||||
|
|
||||||
To run the **Image Classification** code sample with an input image on the IR:
|
To run the **Image Classification** code sample with an input image on the IR:
|
||||||
|
|
||||||
1. Set up the OpenVINO environment variables:
|
1. Set up the OpenVINO environment variables:
|
||||||
```bat
|
```bat
|
||||||
<INSTALL_DIR>\openvino\bin\setupvars.sh
|
<INSTALL_DIR>\setupvars.sh
|
||||||
```
|
```
|
||||||
2. Go to the code samples build directory:
|
2. Go to the code samples build directory:
|
||||||
```bat
|
```bat
|
||||||
cd C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_samples_build\intel64\Release
|
cd C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release
|
||||||
```
|
```
|
||||||
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
|
3. Run the code sample executable, specifying the input media file, the IR of your model, and a target device on which you want to perform inference:
|
||||||
```bat
|
```bat
|
||||||
@ -381,31 +312,31 @@ To run the **Image Classification** code sample with an input image on the IR:
|
|||||||
<details>
|
<details>
|
||||||
<summary><strong>Click for examples of running the Image Classification code sample on different devices</strong></summary>
|
<summary><strong>Click for examples of running the Image Classification code sample on different devices</strong></summary>
|
||||||
|
|
||||||
The following commands run the Image Classification Code Sample using the `car.png` file from the `<INSTALL_DIR>\deployment_tools\demo` directory as an input image, the IR of your model from `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` and on different hardware devices:
|
The following commands run the Image Classification Code Sample using the `car.png` file from the `<INSTALL_DIR>\samples\scripts` directory as an input image, the IR of your model from `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` and on different hardware devices:
|
||||||
|
|
||||||
**CPU:**
|
**CPU:**
|
||||||
```bat
|
```bat
|
||||||
.\classification_sample_async -i <INSTALL_DIR>\deployment_tools\demo\car.png -m C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d CPU
|
.\classification_sample_async -i <INSTALL_DIR>\samples\scripts\car.png -m C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d CPU
|
||||||
```
|
```
|
||||||
|
|
||||||
**GPU:**
|
**GPU:**
|
||||||
|
|
||||||
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md).
|
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md).
|
||||||
```bat
|
```bat
|
||||||
.\classification_sample_async -i <INSTALL_DIR>\deployment_tools\demo\car.png -m C:\Users\<USER_ID>\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d GPU
|
.\classification_sample_async -i <INSTALL_DIR>\samples\scripts\car.png -m C:\Users\<USER_ID>\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d GPU
|
||||||
```
|
```
|
||||||
|
|
||||||
**MYRIAD:**
|
**MYRIAD:**
|
||||||
|
|
||||||
```bat
|
```bat
|
||||||
.\classification_sample_async -i <INSTALL_DIR>\deployment_tools\demo\car.png -m C:\Users\<USER_ID>\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d MYRIAD
|
.\classification_sample_async -i <INSTALL_DIR>\samples\scripts\car.png -m C:\Users\<USER_ID>\models\public\squeezenet1.1\ir\squeezenet1.1.xml -d MYRIAD
|
||||||
```
|
```
|
||||||
|
|
||||||
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
|
When the Sample Application completes, you see the label and confidence for the top-10 categories on the display. Below is a sample output with inference results on CPU:
|
||||||
```bat
|
```bat
|
||||||
Top 10 results:
|
Top 10 results:
|
||||||
|
|
||||||
Image C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\demo\car.png
|
Image C:\Program Files (x86)\Intel\openvino_2022\samples\scripts\car.png
|
||||||
|
|
||||||
classid probability label
|
classid probability label
|
||||||
------- ----------- -----
|
------- ----------- -----
|
||||||
@ -420,50 +351,7 @@ classid probability label
|
|||||||
864 0.0012045 tow truck, tow car, wrecker
|
864 0.0012045 tow truck, tow car, wrecker
|
||||||
581 0.0005833 grille, radiator grille
|
581 0.0005833 grille, radiator grille
|
||||||
|
|
||||||
[ INFO ] Execution successful
|
[ INFO ] Classification sample execution successful
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### <a name="run-security-barrier"></a>Step 5: Run the Security Barrier Camera Demo Application
|
|
||||||
|
|
||||||
> **NOTE**: The Security Barrier Camera Demo Application is automatically compiled when you run the Inference Pipeline demo scripts. If you want to build it manually, see the instructions in the [Demo Applications Overview](@ref omz_demos) section.
|
|
||||||
|
|
||||||
To run the **Security Barrier Camera Demo Application** using an input image on the prepared IRs:
|
|
||||||
|
|
||||||
1. Set up the OpenVINO environment variables:
|
|
||||||
```bat
|
|
||||||
<INSTALL_DIR>\bin\setupvars.bat
|
|
||||||
```
|
|
||||||
2. Go to the demo application build directory:
|
|
||||||
```bat
|
|
||||||
cd C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_demos_build\intel64\Release
|
|
||||||
```
|
|
||||||
3. Run the demo executable, specifying the input media file, list of model IRs, and a target device on which to perform inference:
|
|
||||||
```bat
|
|
||||||
.\security_barrier_camera_demo -i <path_to_media> -m <path_to_vehicle-license-plate-detection_model_xml> -m_va <path_to_vehicle_attributes_model_xml> -m_lpr <path_to_license_plate_recognition_model_xml> -d <target_device>
|
|
||||||
```
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary><strong>Click for examples of running the Security Barrier Camera demo application on different devices</strong></summary>
|
|
||||||
|
|
||||||
**CPU:**
|
|
||||||
|
|
||||||
```bat
|
|
||||||
.\security_barrier_camera_demo -i <INSTALL_DIR>\deployment_tools\demo\car_1.bmp -m C:\Users\username\Documents\models\intel\vehicle-license-plate-detection-barrier-0106\FP16\vehicle-license-plate-detection-barrier-0106.xml -m_va C:\Users\username\Documents\models\intel\vehicle-attributes-recognition-barrier-0039\FP16\vehicle-attributes-recognition-barrier-0039.xml -m_lpr C:\Users\username\Documents\models\intel\license-plate-recognition-barrier-0001\FP16\license-plate-recognition-barrier-0001.xml -d CPU
|
|
||||||
```
|
|
||||||
|
|
||||||
**GPU:**
|
|
||||||
|
|
||||||
> **NOTE**: Running inference on Intel® Processor Graphics (GPU) requires additional hardware configuration steps. For details, see the Steps for Intel® Processor Graphics (GPU) section in the [installation instructions](../install_guides/installing-openvino-windows.md).
|
|
||||||
```bat
|
|
||||||
.\security_barrier_camera_demo -i <INSTALL_DIR>\deployment_tools\demo\car_1.bmp -m <path_to_model>/vehicle-license-plate-detection-barrier-0106.xml -m_va <path_to_model>/vehicle-attributes-recognition-barrier-0039.xml -m_lpr <path_to_model>/license-plate-recognition-barrier-0001.xml -d GPU
|
|
||||||
```
|
|
||||||
|
|
||||||
**MYRIAD:**
|
|
||||||
|
|
||||||
```bat
|
|
||||||
.\classification_sample_async -i <INSTALL_DIR>\inference-engine\samples\sample_data\car.png -m <ir_dir>\squeezenet1.1.xml -d MYRIAD
|
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
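As a side note, the top-10 listing shown by the sample is essentially an argsort over the classification output; a minimal hedged sketch with placeholder data:

```python
# Hedged sketch: derive a top-10 listing from a classification output vector.
# The probabilities and label names are placeholders, not real inference results.
import numpy as np

probs = np.random.rand(1000)
probs = probs / probs.sum()                    # pretend this is the model output
labels = [f"class_{i}" for i in range(1000)]   # stand-in for squeezenet1.1.labels

top10 = np.argsort(probs)[::-1][:10]
for class_id in top10:
    print(f"{class_id:7d} {probs[class_id]:.7f} {labels[class_id]}")
```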
@ -472,10 +360,10 @@ To run the **Security Barrier Camera Demo Application** using an input image on
|
|||||||
|
|
||||||
Below you can find basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
|
Below you can find basic guidelines for executing the OpenVINO™ workflow using the code samples and demo applications:
|
||||||
|
|
||||||
1. Before using the OpenVINO™ samples, always set up the environment:
|
1. Before using the OpenVINO™ samples, always set up the environment:
|
||||||
```bat
|
```bat
|
||||||
<INSTALL_DIR>\bin\setupvars.bat
|
<INSTALL_DIR>\setupvars.bat
|
||||||
```
|
```
|
||||||
2. Make sure you have the directory paths for the following:
|
2. Make sure you have the directory paths for the following:
|
||||||
- Code Sample binaries located in `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release`
|
- Code Sample binaries located in `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\intel64\Release`
|
||||||
- Demo Application binaries located in `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_demos_build\intel64\Release`
|
- Demo Application binaries located in `C:\Users\<USER_ID>\Documents\Intel\OpenVINO\inference_engine_demos_build\intel64\Release`
|
||||||
@ -490,9 +378,9 @@ To build all the demos and samples:
|
|||||||
|
|
||||||
```sh
|
```sh
|
||||||
cd $INTEL_OPENVINO_DIR\inference_engine_samples\cpp
|
cd $INTEL_OPENVINO_DIR\inference_engine_samples\cpp
|
||||||
# to compile C samples, go here also: cd <INSTALL_DIR>\inference_engine\samples\c
|
# to compile C samples, go here also: cd <INSTALL_DIR>\samples\c
|
||||||
build_samples_msvc.bat
|
build_samples_msvc.bat
|
||||||
cd $INTEL_OPENVINO_DIR\deployment_tools\open_model_zoo\demos
|
cd $INTEL_OPENVINO_DIR\extras\open_model_zoo\demos
|
||||||
build_demos_msvc.bat
|
build_demos_msvc.bat
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -515,7 +403,7 @@ With the sample information specified, the command might look like this:
|
|||||||
-m C:\Users\<USER_ID>\Documents\ir\fp32\mobilenet-ssd.xml -d CPU
|
-m C:\Users\<USER_ID>\Documents\ir\fp32\mobilenet-ssd.xml -d CPU
|
||||||
```
|
```
|
||||||
|
|
||||||
## <a name="advanced-samples"></a> Advanced Demo Use
|
## <a name="advanced-samples"></a> Advanced Demo Use
|
||||||
|
|
||||||
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
|
Some demo applications let you use multiple models for different purposes. In these cases, the output of the first model is usually used as the input for later models.
|
||||||
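The chaining idea can be sketched in a few lines with the Inference Engine Python API; the model file names, the SSD-style detection layout, and the confidence threshold below are assumptions for illustration, not the layout of any specific demo.

```python
# Hedged sketch: feed the output of a detection model into a second model.
# Model paths and the assumed [image_id, label, conf, x_min, y_min, x_max, y_max]
# detection layout are illustrative only.
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
det_net = ie.read_network(model="detector.xml", weights="detector.bin")
cls_net = ie.read_network(model="classifier.xml", weights="classifier.bin")
detector = ie.load_network(network=det_net, device_name="CPU")
classifier = ie.load_network(network=cls_net, device_name="CPU")

det_in = next(iter(det_net.input_info))
det_out = next(iter(det_net.outputs))
cls_in = next(iter(cls_net.input_info))
cls_shape = cls_net.input_info[cls_in].input_data.shape

frame = np.zeros(det_net.input_info[det_in].input_data.shape, dtype=np.float32)
detections = detector.infer(inputs={det_in: frame})[det_out]

for det in detections[0][0]:
    if det[2] < 0.5:  # confidence threshold (assumption)
        continue
    # A real pipeline would crop the frame by the detected box and resize the
    # crop to cls_shape; a zero blob stands in for that step here.
    crop = np.zeros(cls_shape, dtype=np.float32)
    attributes = classifier.infer(inputs={cls_in: crop})
```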
|
|
||||||
|
@ -11,7 +11,7 @@ Tested on OpenVINO™ 2021, Ubuntu 18.04.
|
|||||||
|
|
||||||
Define the OpenVINO™ install directory:
|
Define the OpenVINO™ install directory:
|
||||||
```
|
```
|
||||||
export OV=/opt/intel/openvino_2021/
|
export OV=/opt/intel/openvino_2022/
|
||||||
```
|
```
|
||||||
Define the working directory. Make sure the directory exists:
|
Define the working directory. Make sure the directory exists:
|
||||||
```
|
```
|
||||||
@ -22,19 +22,19 @@ export WD=~/MonoDepth_Python/
|
|||||||
|
|
||||||
Initialize OpenVINO™:
|
Initialize OpenVINO™:
|
||||||
```
|
```
|
||||||
source $OV/bin/setupvars.sh
|
source $OV/setupvars.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
Install the Model Optimizer prerequisites:
|
Install the Model Optimizer prerequisites:
|
||||||
```
|
```
|
||||||
cd $OV/deployment_tools/model_optimizer/install_prerequisites/
|
cd $OV/tools/model_optimizer/install_prerequisites/
|
||||||
sudo ./install_prerequisites.sh
|
sudo ./install_prerequisites.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
Install the Model Downloader prerequisites:
|
Install the Model Downloader prerequisites:
|
||||||
|
|
||||||
```
|
```
|
||||||
cd $OV/deployment_tools/tools/model_downloader/
|
cd $OV/extras/open_model_zoo/tools/downloader/
|
||||||
python3 -mpip install --user -r ./requirements.in
|
python3 -mpip install --user -r ./requirements.in
|
||||||
sudo python3 -mpip install --user -r ./requirements-pytorch.in
|
sudo python3 -mpip install --user -r ./requirements-pytorch.in
|
||||||
sudo python3 -mpip install --user -r ./requirements-caffe2.in
|
sudo python3 -mpip install --user -r ./requirements-caffe2.in
|
||||||
@ -44,7 +44,7 @@ sudo python3 -mpip install --user -r ./requirements-caffe2.in
|
|||||||
|
|
||||||
Download all models from the Demo Models list:
|
Download all models from the Demo Models list:
|
||||||
```
|
```
|
||||||
python3 $OV/deployment_tools/tools/model_downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD
|
python3 $OV/extras/open_model_zoo/tools/downloader/downloader.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst -o $WD
|
||||||
```
|
```
|
||||||
|
|
||||||
## 4. Convert Models to Intermediate Representation (IR)
|
## 4. Convert Models to Intermediate Representation (IR)
|
||||||
@ -52,7 +52,7 @@ python3 $OV/deployment_tools/tools/model_downloader/downloader.py --list $OV/dep
|
|||||||
Use the convert script to convert the models to ONNX*, and then to IR format:
|
Use the convert script to convert the models to ONNX*, and then to IR format:
|
||||||
```
|
```
|
||||||
cd $WD
|
cd $WD
|
||||||
python3 $OV/deployment_tools/tools/model_downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst
|
python3 $OV/extras/open_model_zoo/tools/downloader/converter.py --list $OV/deployment_tools/inference_engine/demos/python_demos/monodepth_demo/models.lst
|
||||||
```
|
```
|
||||||
|
|
||||||
## 5. Run Demo
|
## 5. Run Demo
|
||||||
|
@ -16,21 +16,21 @@ Install OpenVINO™ toolkit and Model Optimizer, Accuracy Checker, and Post-trai
|
|||||||
|
|
||||||
1. Define the OpenVINO™ install directory:
|
1. Define the OpenVINO™ install directory:
|
||||||
```
|
```
|
||||||
export OV=/opt/intel/openvino_2021/
|
export OV=/opt/intel/openvino_2022/
|
||||||
```
|
```
|
||||||
2. Install the Model Optimizer prerequisites:
|
2. Install the Model Optimizer prerequisites:
|
||||||
```
|
```
|
||||||
cd $OV/deployment_tools/model_optimizer/install_prerequisites
|
cd $OV/tools/model_optimizer/install_prerequisites
|
||||||
sudo ./install_prerequisites.sh
|
sudo ./install_prerequisites.sh
|
||||||
```
|
```
|
||||||
3. Install the Accuracy Checker requirements:
|
3. Install the Accuracy Checker requirements:
|
||||||
```
|
```
|
||||||
cd $OV/deployment_tools/open_model_zoo/tools/accuracy_checker
|
cd $OV/tools/accuracy_checker
|
||||||
sudo python3 setup.py install
|
sudo python3 setup.py install
|
||||||
```
|
```
|
||||||
4. Install the Post-training Optimization Tool:
|
4. Install the Post-training Optimization Tool:
|
||||||
```
|
```
|
||||||
cd $OV/deployment_tools/tools/post_training_optimization_toolkit
|
cd $OV/tools/post_training_optimization_toolkit
|
||||||
sudo python3 setup.py install
|
sudo python3 setup.py install
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -46,14 +46,14 @@ mkdir ~/POT
|
|||||||
cd ~/POT
|
cd ~/POT
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
python3 $OV/deployment_tools/tools/model_downloader/downloader.py --name mobilenet-v2-pytorch -o .
|
python3 $OV/extras/open_model_zoo/tools/downloader/downloader.py --name mobilenet-v2-pytorch -o .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 3. Prepare Model for Inference
|
## 3. Prepare Model for Inference
|
||||||
|
|
||||||
Install requirements for PyTorch using the commands below:
|
Install requirements for PyTorch using the commands below:
|
||||||
```
|
```
|
||||||
cd $OV/deployment_tools/open_model_zoo/tools/downloader
|
cd $OV/extras/open_model_zoo/tools/downloader
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
python3 -mpip install --user -r ./requirements-pytorch.in
|
python3 -mpip install --user -r ./requirements-pytorch.in
|
||||||
@ -61,13 +61,13 @@ python3 -mpip install --user -r ./requirements-pytorch.in
|
|||||||
|
|
||||||
You can find the parameters for MobileNet v2 conversion here:
|
You can find the parameters for MobileNet v2 conversion here:
|
||||||
```
|
```
|
||||||
vi /opt/intel/openvino_2021/deployment_tools/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml
|
vi /opt/intel/openvino_2022/extras/open_model_zoo/models/public/mobilenet-v2-pytorch/model.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Convert the model from PyTorch to ONNX*:
|
Convert the model from PyTorch to ONNX*:
|
||||||
```
|
```
|
||||||
cd ~/POT/public/mobilenet-v2-pytorch
|
cd ~/POT/public/mobilenet-v2-pytorch
|
||||||
python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/pytorch_to_onnx.py \
|
python3 /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/pytorch_to_onnx.py \
|
||||||
--model-name=MobileNetV2 \
|
--model-name=MobileNetV2 \
|
||||||
--model-path=. \
|
--model-path=. \
|
||||||
--weights=mobilenet-v2.pth \
|
--weights=mobilenet-v2.pth \
|
||||||
@ -100,17 +100,17 @@ mv mobilenet-v2.bin ~/POT/model.bin
|
|||||||
|
|
||||||
Edit the configuration files:
|
Edit the configuration files:
|
||||||
```
|
```
|
||||||
sudo vi $OV/deployment_tools/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml
|
sudo vi $OV/tools/accuracy_checker/dataset_definitions.yml
|
||||||
(edit imagenet_1000_classes)
|
(edit imagenet_1000_classes)
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
export DEFINITIONS_FILE=/opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/accuracy_checker/dataset_definitions.yml
|
export DEFINITIONS_FILE=/opt/intel/openvino_2022/tools/accuracy_checker/dataset_definitions.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Copy the JSON file to your working directory and edit it:
|
Copy the JSON file to your working directory and edit it:
|
||||||
|
|
||||||
```
|
```
|
||||||
cp $OV/deployment_tools/tools/post_training_optimization_toolkit/configs/examples/quantization/classification/mobilenetV2_pytorch_int8.json ~/POT
|
cp $OV/tools/post_training_optimization_toolkit/configs/examples/quantization/classification/mobilenetV2_pytorch_int8.json ~/POT
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
vi mobilenetV2_pytorch_int8.json
|
vi mobilenetV2_pytorch_int8.json
|
||||||
@ -119,7 +119,7 @@ vi mobilenetV2_pytorch_int8.json
|
|||||||
Copy the YML file to your working directory and edit it:
|
Copy the YML file to your working directory and edit it:
|
||||||
|
|
||||||
```
|
```
|
||||||
cp /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT
|
cp /opt/intel/openvino_2022/tools/accuracy_checker/configs/mobilenet-v2.yml ~/POT
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
vi mobilenet-v2.yml
|
vi mobilenet-v2.yml
|
||||||
|
@ -1,3 +0,0 @@
|
|||||||
version https://git-lfs.github.com/spec/v1
|
|
||||||
oid sha256:9ca0811c19b4108054bfa66d99107e469409d7a0200745da96dd3e8fdac79daf
|
|
||||||
size 397011
|
|
@ -1,3 +0,0 @@
|
|||||||
version https://git-lfs.github.com/spec/v1
|
|
||||||
oid sha256:619da8838c460aa26253fa6cfed3d3346fcf7c7c5deb8f178e9bd55dc78c9c8f
|
|
||||||
size 2017750
|
|
@ -1,3 +0,0 @@
|
|||||||
version https://git-lfs.github.com/spec/v1
|
|
||||||
oid sha256:c08b4d12634d3e17a7ed198cdc15be7b8e4b1fe33728d5f38d0998faa7ea8e7e
|
|
||||||
size 568383
|
|
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
The Deployment Manager of Intel® Distribution of OpenVINO™ creates a deployment package by assembling the model, IR files, your application, and associated dependencies into a runtime package for your target device.
|
The Deployment Manager of Intel® Distribution of OpenVINO™ creates a deployment package by assembling the model, IR files, your application, and associated dependencies into a runtime package for your target device.
|
||||||
|
|
||||||
The Deployment Manager is a Python\* command-line tool that is delivered within the Intel® Distribution of OpenVINO™ toolkit for Linux\* and Windows\* release packages and available after installation in the `<INSTALL_DIR>/deployment_tools/tools/deployment_manager` directory.
|
The Deployment Manager is a Python\* command-line tool that is delivered within the Intel® Distribution of OpenVINO™ toolkit for Linux\* and Windows\* release packages and available after installation in the `<INSTALL_DIR>/tools/deployment_manager` directory.
|
||||||
|
|
||||||
## Pre-Requisites
|
## Pre-Requisites
|
||||||
|
|
||||||
@ -32,7 +32,7 @@ Interactive mode provides a user-friendly command-line interface that will guide
|
|||||||
|
|
||||||
1. To launch the Deployment Manager in the interactive mode, open a new terminal window, go to the Deployment Manager tool directory and run the tool script without parameters:
|
1. To launch the Deployment Manager in the interactive mode, open a new terminal window, go to the Deployment Manager tool directory and run the tool script without parameters:
|
||||||
```sh
|
```sh
|
||||||
<INSTALL_DIR>/deployment_tools/tools/deployment_manager
|
<INSTALL_DIR>/tools/deployment_manager
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
./deployment_manager.py
|
./deployment_manager.py
|
||||||
@ -92,9 +92,9 @@ To deploy the Inference Engine components from the development machine to the ta
|
|||||||
```
|
```
|
||||||
* For Windows, use an archiver you prefer.
|
* For Windows, use an archiver you prefer.
|
||||||
|
|
||||||
The package is unpacked to the destination directory and the following subdirectories are created:
|
The package is unpacked to the destination directory and the following files and subdirectories are created:
|
||||||
* `bin` — Snapshot of the `bin` directory from the OpenVINO installation directory.
|
* `setupvars.sh` — Copy of the `setupvars.sh` script from the OpenVINO installation directory.
|
||||||
* `deployment_tools/inference_engine` — Contains the Inference Engine binary files.
|
* `runtime` — Contains the OpenVINO runtime binary files.
|
||||||
* `install_dependencies` — Snapshot of the `install_dependencies` directory from the OpenVINO installation directory.
|
* `install_dependencies` — Snapshot of the `install_dependencies` directory from the OpenVINO installation directory.
|
||||||
* `<user_data>` — The directory with the user data (IRs, datasets, etc.) you specified while configuring the package.
|
* `<user_data>` — The directory with the user data (IRs, datasets, etc.) you specified while configuring the package.
|
||||||
3. For Linux, to run inference on a target Intel® GPU, Intel® Movidius™ VPU, or Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, you need to install additional dependencies by running the `install_openvino_dependencies.sh` script:
|
3. For Linux, to run inference on a target Intel® GPU, Intel® Movidius™ VPU, or Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, you need to install additional dependencies by running the `install_openvino_dependencies.sh` script:
|
||||||
@ -110,14 +110,14 @@ To deploy the Inference Engine components from the development machine to the ta
|
|||||||
cd <destination_dir>/openvino/
|
cd <destination_dir>/openvino/
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
source ./bin/setupvars.sh
|
source ./setupvars.sh
|
||||||
```
|
```
|
||||||
* For Windows:
|
* For Windows:
|
||||||
```
|
```
|
||||||
cd <destination_dir>\openvino\
|
cd <destination_dir>\openvino\
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
.\bin\setupvars.bat
|
.\setupvars.bat
|
||||||
```
|
```
|
||||||
|
|
||||||
Congratulations, you have finished the deployment of the Inference Engine components to the target host.
|
Congratulations, you have finished the deployment of the Inference Engine components to the target host.
|
@ -193,7 +193,7 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
|
|||||||
|
|
||||||
WORKDIR /opt/libusb-1.0.22/
|
WORKDIR /opt/libusb-1.0.22/
|
||||||
RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
||||||
cp /opt/intel/openvino_2021/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
|
cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
|
||||||
ldconfig
|
ldconfig
|
||||||
```
|
```
|
||||||
- **CentOS 7**:
|
- **CentOS 7**:
|
||||||
@ -223,11 +223,11 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
|
|||||||
/bin/mkdir -p '/usr/local/include/libusb-1.0' && \
|
/bin/mkdir -p '/usr/local/include/libusb-1.0' && \
|
||||||
/usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
|
/usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \
|
||||||
/bin/mkdir -p '/usr/local/lib/pkgconfig' && \
|
/bin/mkdir -p '/usr/local/lib/pkgconfig' && \
|
||||||
printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2021/bin/setupvars.sh
|
printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2022/setupvars.sh
|
||||||
|
|
||||||
WORKDIR /opt/libusb-1.0.22/
|
WORKDIR /opt/libusb-1.0.22/
|
||||||
RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
RUN /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
|
||||||
cp /opt/intel/openvino_2021/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
|
cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
|
||||||
ldconfig
|
ldconfig
|
||||||
```
|
```
|
||||||
2. Run the Docker* image:
|
2. Run the Docker* image:
|
||||||
@ -323,44 +323,36 @@ docker run -it --rm --net=host -v /var/tmp:/var/tmp –-ipc=host <image_name>
|
|||||||
|
|
||||||
### Run Demos in the Docker* Image
|
### Run Demos in the Docker* Image
|
||||||
|
|
||||||
To run the Security Barrier Camera Demo on a specific inference device, run the following commands with the root privileges (additional third-party dependencies will be installed):
|
To run the Classification Demo Using SqueezeNet on a specific inference device, run the following commands with root privileges (additional third-party dependencies will be installed):
|
||||||
|
|
||||||
**CPU**:
|
**CPU**:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker run -itu root:root --rm <image_name>
|
docker run -itu root:root --rm <image_name>
|
||||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d CPU -sample-options -no_show"
|
/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d CPU"
|
||||||
```
|
```
|
||||||
|
|
||||||
**GPU**:
|
**GPU**:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker run -itu root:root --rm --device /dev/dri:/dev/dri <image_name>
|
docker run -itu root:root --rm --device /dev/dri:/dev/dri <image_name>
|
||||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d GPU -sample-options -no_show"
|
/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d GPU"
|
||||||
```
|
```
|
||||||
|
|
||||||
**MYRIAD**:
|
**MYRIAD**:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker run -itu root:root --rm --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
docker run -itu root:root --rm --device-cgroup-rule='c 189:* rmw' -v /dev/bus/usb:/dev/bus/usb <image_name>
|
||||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d MYRIAD -sample-options -no_show"
|
/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d MYRIAD"
|
||||||
```
|
```
|
||||||
|
|
||||||
**HDDL**:
|
**HDDL**:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp <image_name>
|
docker run -itu root:root --rm --device=/dev/ion:/dev/ion -v /var/tmp:/var/tmp <image_name>
|
||||||
/bin/bash -c "apt update && apt install sudo && deployment_tools/demo/demo_security_barrier_camera.sh -d HDDL -sample-options -no_show"
|
/bin/bash -c "apt update && apt install sudo && samples/scripts/run_sample_squeezenet.sh -d HDDL"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Use a Docker* Image for FPGA
|
|
||||||
|
|
||||||
Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA.
|
|
||||||
|
|
||||||
Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates.
|
|
||||||
|
|
||||||
For instructions for previous releases with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_installing_openvino_docker_linux.html#use_a_docker_image_for_fpga) or lower.
|
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
If you have proxy issues, set up the proxy settings for Docker. See the Proxy section in the [Install the DL Workbench from Docker Hub*](@ref workbench_docs_Workbench_DG_Run_Locally) topic.
|
If you have proxy issues, set up the proxy settings for Docker. See the Proxy section in the [Install the DL Workbench from Docker Hub*](@ref workbench_docs_Workbench_DG_Run_Locally) topic.
|
||||||
|
@ -83,7 +83,7 @@ docker run -it --rm <image_name>
|
|||||||
If you want to try some demos, run the image with root privileges (some additional third-party dependencies will be installed):
|
If you want to try some demos, run the image with root privileges (some additional third-party dependencies will be installed):
|
||||||
|
|
||||||
```bat
|
```bat
|
||||||
docker run -itu ContainerAdministrator --rm <image_name> cmd /S /C "cd deployment_tools\demo && demo_security_barrier_camera.bat -d CPU -sample-options -no_show"
|
docker run -itu ContainerAdministrator --rm <image_name> cmd /S /C "cd samples\scripts && run_sample_squeezenet.bat -d CPU"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Configure and Run the Docker* Image for GPU
|
## Configure and Run the Docker* Image for GPU
|
||||||
@ -138,9 +138,9 @@ GPU Acceleration in Windows containers feature requires to meet Windows host, Op
|
|||||||
```bat
|
```bat
|
||||||
copy C:\tmp\OpenCL.dll C:\Windows\System32\ && reg add "HKLM\SOFTWARE\Khronos\OpenCL\Vendors" /v "C:\Windows\System32\DriverStore\FileRepository\iigd_dch.inf_amd64_518f2921ba495409\ocl\bin\x64\intelocl64.dll" /t REG_DWORD /d 0
|
copy C:\tmp\OpenCL.dll C:\Windows\System32\ && reg add "HKLM\SOFTWARE\Khronos\OpenCL\Vendors" /v "C:\Windows\System32\DriverStore\FileRepository\iigd_dch.inf_amd64_518f2921ba495409\ocl\bin\x64\intelocl64.dll" /t REG_DWORD /d 0
|
||||||
```
|
```
|
||||||
3. For example, run the `demo_security_barrier_camera` demo with the command below:
|
3. For example, run the `run_sample_squeezenet` demo with the command below:
|
||||||
```bat
|
```bat
|
||||||
cd bin && setupvars.bat && cd ../ && cd deployment_tools\demo && demo_security_barrier_camera.bat -d GPU -sample-options -no_show
|
cd samples\scripts && run_sample_squeezenet.bat -d GPU
|
||||||
```
|
```
|
||||||
> **NOTE**: Additional third-party dependencies will be installed.
|
> **NOTE**: Additional third-party dependencies will be installed.
|
||||||
|
|
||||||
|
@ -1,21 +0,0 @@
|
|||||||
# Install Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support {#openvino_docs_install_guides_installing_openvino_linux_fpga}
|
|
||||||
|
|
||||||
## Product Change Notice
|
|
||||||
Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<td><strong>Change Notice Begins</strong></td>
|
|
||||||
<td>July 2020</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td><strong>Change Date</strong></td>
|
|
||||||
<td>October 2020</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA.
|
|
||||||
|
|
||||||
Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates.
|
|
||||||
|
|
||||||
For installation instructions for the last release of Intel® Distribution of OpenVINO™ toolkit for Linux* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_installing_openvino_linux_fpga.html).
|
|
@ -11,9 +11,9 @@ For Intel® Vision Accelerator Design with Intel® Movidius™ VPUs, the followi
|
|||||||
|
|
||||||
1. Set the environment variables:
|
1. Set the environment variables:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `<openvino_install_dir>/deployment_tools/inference_engine/external/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2021//deployment_tools/inference_engine/external/hddl`.
|
> **NOTE**: The `HDDL_INSTALL_DIR` variable is set to `<openvino_install_dir>/runtime/3rdparty/hddl`. If you installed the Intel® Distribution of OpenVINO™ to the default install directory, the `HDDL_INSTALL_DIR` was set to `/opt/intel/openvino_2022/runtime/3rdparty/hddl`.
|
||||||
|
|
||||||
2. Install dependencies:
|
2. Install dependencies:
|
||||||
```sh
|
```sh
|
||||||
@ -52,7 +52,7 @@ E: [ncAPI] [ 965618] [MainThread] ncDeviceOpen:677 Failed to find a device,
|
|||||||
```sh
|
```sh
|
||||||
kill -9 $(pidof hddldaemon autoboot)
|
kill -9 $(pidof hddldaemon autoboot)
|
||||||
pidof hddldaemon autoboot # Make sure none of them is alive
|
pidof hddldaemon autoboot # Make sure none of them is alive
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
${HDDL_INSTALL_DIR}/bin/bsl_reset
|
${HDDL_INSTALL_DIR}/bin/bsl_reset
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -133,7 +133,7 @@ sudo ./install.sh -s silent.cfg
|
|||||||
By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as `<INSTALL_DIR>`:
|
By default, the Intel® Distribution of OpenVINO™ is installed to the following directory, referred to as `<INSTALL_DIR>`:
|
||||||
* For root or administrator: `/opt/intel/openvino_<version>/`
|
* For root or administrator: `/opt/intel/openvino_<version>/`
|
||||||
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
||||||
For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2021/`.
|
For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2022/`.
|
||||||
|
|
||||||
8. **Optional**: You can choose **Customize** to change the installation directory or the components you want to install:
|
8. **Optional**: You can choose **Customize** to change the installation directory or the components you want to install:
|
||||||
> **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for subsequent installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions.
|
> **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for subsequent installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions.
|
||||||
@ -156,7 +156,7 @@ These dependencies are required for:
|
|||||||
|
|
||||||
1. Change to the `install_dependencies` directory:
|
1. Change to the `install_dependencies` directory:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/install_dependencies
|
cd /opt/intel/openvino_2022/install_dependencies
|
||||||
```
|
```
|
||||||
2. Run a script to download and install the external software dependencies:
|
2. Run a script to download and install the external software dependencies:
|
||||||
```sh
|
```sh
|
||||||
@ -169,7 +169,7 @@ sudo -E ./install_openvino_dependencies.sh
|
|||||||
You must update several environment variables before you can compile and run OpenVINO™ applications. Run the following script to temporarily set your environment variables:
|
You must update several environment variables before you can compile and run OpenVINO™ applications. Run the following script to temporarily set your environment variables:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
**Optional:** The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows:
|
**Optional:** The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows:
|
||||||
@ -181,7 +181,7 @@ vi <user_directory>/.bashrc
|
|||||||
|
|
||||||
2. Add this line to the end of the file:
|
2. Add this line to the end of the file:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Save and close the file: press the **Esc** key and type `:wq`.
|
3. Save and close the file: press the **Esc** key and type `:wq`.
|
||||||
@ -217,7 +217,7 @@ You can choose to either configure all supported frameworks at once **OR** confi
|
|||||||
|
|
||||||
1. Go to the Model Optimizer prerequisites directory:
|
1. Go to the Model Optimizer prerequisites directory:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites
|
cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites
|
||||||
```
|
```
|
||||||
2. Run the script to configure the Model Optimizer for Caffe,
|
2. Run the script to configure the Model Optimizer for Caffe,
|
||||||
TensorFlow 1.x, MXNet, Kaldi\*, and ONNX:
|
TensorFlow 1.x, MXNet, Kaldi\*, and ONNX:
|
||||||
@ -231,7 +231,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti
|
|||||||
|
|
||||||
1. Go to the Model Optimizer prerequisites directory:
|
1. Go to the Model Optimizer prerequisites directory:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites
|
cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites
|
||||||
```
|
```
|
||||||
2. Run the script for your model framework. You can run more than one script:
|
2. Run the script for your model framework. You can run more than one script:
|
||||||
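For example (a sketch that assumes the per-framework scripts follow the `install_prerequisites_<framework>.sh` naming used in this directory), configuring only the ONNX and TensorFlow 1.x front ends could look like:
```sh
# Run only the scripts for the frameworks you actually need
sudo ./install_prerequisites_onnx.sh
sudo ./install_prerequisites_tf.sh
```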
|
|
||||||
@ -281,7 +281,7 @@ The steps in this section are required only if you want to enable the toolkit co
|
|||||||
|
|
||||||
1. Go to the install_dependencies directory:
|
1. Go to the install_dependencies directory:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/install_dependencies/
|
cd /opt/intel/openvino_2022/install_dependencies/
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Install the **Intel® Graphics Compute Runtime for OpenCL™** driver components required to use the GPU plugin and write custom layers for Intel® Integrated Graphics. The drivers are not included in the package and must be installed separately.
|
2. Install the **Intel® Graphics Compute Runtime for OpenCL™** driver components required to use the GPU plugin and write custom layers for Intel® Integrated Graphics. The drivers are not included in the package and must be installed separately.
|
||||||
@ -315,7 +315,7 @@ sudo usermod -a -G users "$(whoami)"
|
|||||||
Log out and log in for it to take effect.
|
Log out and log in for it to take effect.
|
||||||
2. To perform inference on Intel® Neural Compute Stick 2, install the USB rules as follows:
|
2. To perform inference on Intel® Neural Compute Stick 2, install the USB rules as follows:
|
||||||
```sh
|
```sh
|
||||||
sudo cp /opt/intel/openvino_2021/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/
|
sudo cp /opt/intel/openvino_2022/runtime/3rdparty/97-myriad-usbboot.rules /etc/udev/rules.d/
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
sudo udevadm control --reload-rules
|
sudo udevadm control --reload-rules
|
||||||
@ -341,17 +341,12 @@ After configuration is done, you are ready to run the verification scripts with
|
|||||||
|
|
||||||
1. Go to the **Inference Engine demo** directory:
|
1. Go to the **Inference Engine demo** directory:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/demo
|
cd /opt/intel/openvino_2022/samples/scripts
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Run the **Image Classification verification script**. If you have Internet access only through a proxy server, make sure it is configured in your OS environment.
|
2. Run the **Image Classification verification script**. If you have Internet access only through a proxy server, make sure it is configured in your OS environment.
|
||||||
```sh
|
```sh
|
||||||
./demo_squeezenet_download_convert_run.sh -d HDDL
|
./run_sample_squeezenet.sh -d HDDL
|
||||||
```
|
|
||||||
|
|
||||||
3. Run the **Inference Pipeline verification script**:
|
|
||||||
```sh
|
|
||||||
./demo_security_barrier_camera.sh -d HDDL
|
|
||||||
```
|
```
|
||||||
|
|
||||||
You've completed all required configuration steps to perform inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs.
|
You've completed all required configuration steps to perform inference on Intel® Vision Accelerator Design with Intel® Movidius™ VPUs.
|
||||||
|
@ -119,7 +119,7 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_<version>` and autom
|
|||||||
* For root or administrator: `/opt/intel/openvino_<version>/`
|
* For root or administrator: `/opt/intel/openvino_<version>/`
|
||||||
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
* For regular users: `/home/<USER>/intel/openvino_<version>/`
|
||||||
|
|
||||||
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2021/`.
|
For simplicity, a symbolic link to the latest installation is also created: `/home/<user>/intel/openvino_2022/`.
|
||||||
9. If needed, click **Customize** to change the installation directory or the components you want to install:
|
9. If needed, click **Customize** to change the installation directory or the components you want to install:
|
||||||

|

|
||||||
> **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for subsequent installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions.
|
> **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for subsequent installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions.
|
||||||
@ -138,10 +138,10 @@ The disk image is mounted to `/Volumes/m_openvino_toolkit_p_<version>` and autom
|
|||||||
You need to update several environment variables before you can compile and run OpenVINO™ applications. Open the macOS Terminal\* or a command-line interface shell you prefer and run the following script to temporarily set your environment variables:
|
You need to update several environment variables before you can compile and run OpenVINO™ applications. Open the macOS Terminal\* or a command-line interface shell you prefer and run the following script to temporarily set your environment variables:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory.
|
If you didn't choose the default installation option, replace `/opt/intel/openvino_2022` with your directory.
|
||||||
|
|
||||||
<strong>Optional</strong>: The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows:
|
<strong>Optional</strong>: The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows:
|
||||||
|
|
||||||
@ -153,10 +153,10 @@ If you didn't choose the default installation option, replace `/opt/intel/openvi
|
|||||||
|
|
||||||
3. Add this line to the end of the file:
|
3. Add this line to the end of the file:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
If you didn't choose the default installation option, replace `/opt/intel/openvino_2021` with your directory.
|
If you didn't choose the default installation option, replace `/opt/intel/openvino_2022` with your directory.
|
||||||
|
|
||||||
4. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key.
|
4. Save and close the file: press the **Esc** key, type `:wq` and press the **Enter** key.
|
||||||
|
|
||||||
@ -189,7 +189,7 @@ You can choose to either configure the Model Optimizer for all supported framewo
|
|||||||
|
|
||||||
1. Go to the Model Optimizer prerequisites directory:
|
1. Go to the Model Optimizer prerequisites directory:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites
|
cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX:
|
2. Run the script to configure the Model Optimizer for Caffe, TensorFlow 1.x, MXNet, Kaldi\*, and ONNX:
|
||||||
@ -203,7 +203,7 @@ Configure individual frameworks separately **ONLY** if you did not select **Opti
|
|||||||
|
|
||||||
1. Go to the Model Optimizer prerequisites directory:
|
1. Go to the Model Optimizer prerequisites directory:
|
||||||
```sh
|
```sh
|
||||||
cd /opt/intel/openvino_2021/deployment_tools/model_optimizer/install_prerequisites
|
cd /opt/intel/openvino_2022/tools/model_optimizer/install_prerequisites
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Run the script for your model framework. You can run more than one script:
|
2. Run the script for your model framework. You can run more than one script:
|
||||||
@ -272,14 +272,14 @@ Now you are ready to get started. To continue, see the following pages:
|
|||||||
|
|
||||||
Follow the steps below to uninstall the Intel® Distribution of OpenVINO™ Toolkit from your system:
|
Follow the steps below to uninstall the Intel® Distribution of OpenVINO™ Toolkit from your system:
|
||||||
|
|
||||||
1. From the installation directory (by default, `/opt/intel/openvino_2021`), locate and open `openvino_toolkit_uninstaller.app`.
|
1. From the installation directory (by default, `/opt/intel/openvino_2022`), locate and open `openvino_toolkit_uninstaller.app`.
|
||||||
2. Follow the uninstallation wizard instructions.
|
2. Follow the uninstallation wizard instructions.
|
||||||
3. When uninstallation is complete, click **Finish**.
|
3. When uninstallation is complete, click **Finish**.
|
||||||
|
|
||||||
|
|
||||||
## Additional Resources
|
## Additional Resources
|
||||||
|
|
||||||
- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2021/deployment_tools/demo/`.
|
- To learn more about the verification applications, see `README.txt` in `/opt/intel/openvino_2022/samples/scripts/`.
|
||||||
|
|
||||||
- For detailed description of the pre-trained models, go to the [Overview of OpenVINO toolkit Pre-Trained Models](@ref omz_models_group_intel) page.
|
- For detailed description of the pre-trained models, go to the [Overview of OpenVINO toolkit Pre-Trained Models](@ref omz_models_group_intel) page.
|
||||||
|
|
||||||
|
@ -75,11 +75,11 @@ The guide assumes you downloaded the OpenVINO toolkit for Raspbian* OS. If you d
|
|||||||
By default, the package file is saved as `l_openvino_toolkit_runtime_raspbian_p_<version>.tgz`.
|
By default, the package file is saved as `l_openvino_toolkit_runtime_raspbian_p_<version>.tgz`.
|
||||||
3. Create an installation folder.
|
3. Create an installation folder.
|
||||||
```sh
|
```sh
|
||||||
sudo mkdir -p /opt/intel/openvino_2021
|
sudo mkdir -p /opt/intel/openvino_2022
|
||||||
```
|
```
|
||||||
4. Unpack the archive:
|
4. Unpack the archive:
|
||||||
```sh
|
```sh
|
||||||
sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_<version>.tgz --strip 1 -C /opt/intel/openvino_2021
|
sudo tar -xf l_openvino_toolkit_runtime_raspbian_p_<version>.tgz --strip 1 -C /opt/intel/openvino_2022
|
||||||
```
|
```
|
||||||
|
|
||||||
Now the OpenVINO toolkit components are installed. Additional configuration steps are still required. Continue to the next sections to install External Software Dependencies, configure the environment and set up USB rules.
|
Now the OpenVINO toolkit components are installed. Additional configuration steps are still required. Continue to the next sections to install External Software Dependencies, configure the environment and set up USB rules.
|
||||||
@ -97,12 +97,12 @@ CMake is installed. Continue to the next section to set the environment variable
|
|||||||
|
|
||||||
You must update several environment variables before you can compile and run OpenVINO toolkit applications. Run the following script to temporarily set the environment variables:
|
You must update several environment variables before you can compile and run OpenVINO toolkit applications. Run the following script to temporarily set the environment variables:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
**(Optional)** The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows:
|
**(Optional)** The OpenVINO environment variables are removed when you close the shell. You can permanently set the environment variables as follows:
|
||||||
```sh
|
```sh
|
||||||
echo "source /opt/intel/openvino_2021/bin/setupvars.sh" >> ~/.bashrc
|
echo "source /opt/intel/openvino_2022/setupvars.sh" >> ~/.bashrc
|
||||||
```
|
```
|
||||||
|
|
||||||
To test your change, open a new terminal. You will see the following:
|
To test your change, open a new terminal. You will see the following:
|
||||||
@ -120,11 +120,11 @@ This task applies only if you have an Intel® Neural Compute Stick 2 device.
|
|||||||
Log out and log in for it to take effect.
|
Log out and log in for it to take effect.
|
||||||
2. If you didn't modify `.bashrc` to permanently set the environment variables, run `setupvars.sh` again after logging in:
|
2. If you didn't modify `.bashrc` to permanently set the environment variables, run `setupvars.sh` again after logging in:
|
||||||
```sh
|
```sh
|
||||||
source /opt/intel/openvino_2021/bin/setupvars.sh
|
source /opt/intel/openvino_2022/setupvars.sh
|
||||||
```
|
```
|
||||||
3. To perform inference on the Intel® Neural Compute Stick 2, install the USB rules running the `install_NCS_udev_rules.sh` script:
|
3. To perform inference on the Intel® Neural Compute Stick 2, install the USB rules running the `install_NCS_udev_rules.sh` script:
|
||||||
```sh
|
```sh
|
||||||
sh /opt/intel/openvino_2021/install_dependencies/install_NCS_udev_rules.sh
|
sh /opt/intel/openvino_2022/install_dependencies/install_NCS_udev_rules.sh
|
||||||
```
|
```
|
||||||
4. Plug in your Intel® Neural Compute Stick 2.
|
4. Plug in your Intel® Neural Compute Stick 2.
|
||||||
|
|
||||||
@ -140,7 +140,7 @@ Follow the next steps to use the pre-trained face detection model using Inferenc
|
|||||||
```
|
```
|
||||||
2. Build the Object Detection Sample:
|
2. Build the Object Detection Sample:
|
||||||
```sh
|
```sh
|
||||||
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2021/deployment_tools/inference_engine/samples/cpp
|
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=armv7-a" /opt/intel/openvino_2022/samples/cpp
|
||||||
```
|
```
|
||||||
```sh
|
```sh
|
||||||
make -j2 object_detection_sample_ssd
|
make -j2 object_detection_sample_ssd
|
||||||
|
@ -1,21 +0,0 @@
|
|||||||
# Install Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support {#openvino_docs_install_guides_installing_openvino_windows_fpga}
|
|
||||||
|
|
||||||
## Product Change Notice
|
|
||||||
Intel® Distribution of OpenVINO™ toolkit for Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<td><strong>Change Notice Begins</strong></td>
|
|
||||||
<td>July 2020</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td><strong>Change Date</strong></td>
|
|
||||||
<td>October 2020</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
Intel will be transitioning to the next-generation programmable deep-learning solution based on FPGAs in order to increase the level of customization possible in FPGA deep-learning. As part of this transition, future standard releases (i.e., non-LTS releases) of Intel® Distribution of OpenVINO™ toolkit will no longer include the Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA.
|
|
||||||
|
|
||||||
Intel® Distribution of OpenVINO™ toolkit 2020.3.X LTS release will continue to support Intel® Vision Accelerator Design with an Intel® Arria® 10 FPGA and the Intel® Programmable Acceleration Card with Intel® Arria® 10 GX FPGA. For questions about next-generation programmable deep-learning solutions based on FPGAs, please talk to your sales representative or contact us to get the latest FPGA updates.
|
|
||||||
|
|
||||||
For installation instructions for the last release of Intel® Distribution of OpenVINO™ toolkit for Windows* with FPGA Support, see documentation for the [2020.4 version](https://docs.openvinotoolkit.org/2020.4/openvino_docs_install_guides_installing_openvino_windows_fpga.html).
|
|
@ -107,7 +107,7 @@ The following components are installed by default:
|
|||||||
1. If you have not downloaded the Intel® Distribution of OpenVINO™ toolkit, [download the latest version](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/download.html). By default, the file is saved to the `Downloads` directory as `w_openvino_toolkit_p_<version>.exe`.
|
1. If you have not downloaded the Intel® Distribution of OpenVINO™ toolkit, [download the latest version](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/download.html). By default, the file is saved to the `Downloads` directory as `w_openvino_toolkit_p_<version>.exe`.
|
||||||
2. Go to the `Downloads` folder and double-click `w_openvino_toolkit_p_<version>.exe`. A window opens to let you choose your installation directory and components.
|
2. Go to the `Downloads` folder and double-click `w_openvino_toolkit_p_<version>.exe`. A window opens to let you choose your installation directory and components.
|
||||||

|

|
||||||
The default installation directory is `C:\Program Files (x86)\Intel\openvino_<version>`. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2021`. If you choose a different installation directory, the installer will create the directory for you.
|
The default installation directory is `C:\Program Files (x86)\Intel\openvino_<version>`. For simplicity, a shortcut to the latest installation is also created: `C:\Program Files (x86)\Intel\openvino_2022`. If you choose a different installation directory, the installer will create the directory for you.
|
||||||
> **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for subsequent installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions.
|
> **NOTE**: If there is an OpenVINO™ toolkit version previously installed on your system, the installer will use the same destination directory for subsequent installations. If you want to install a newer version to a different directory, you need to uninstall the previously installed versions.
|
||||||
3. Click **Next**.
|
3. Click **Next**.
|
||||||
4. You are asked if you want to provide consent to gather information. Choose the option of your choice. Click **Next**.
|
4. You are asked if you want to provide consent to gather information. Choose the option of your choice. Click **Next**.
|
||||||
@ -126,7 +126,7 @@ The screen example below indicates you are missing two dependencies:
|
|||||||
|
|
||||||
You must update several environment variables before you can compile and run OpenVINO™ applications. Open the Command Prompt, and run the `setupvars.bat` batch file to temporarily set your environment variables:
|
You must update several environment variables before you can compile and run OpenVINO™ applications. Open the Command Prompt, and run the `setupvars.bat` batch file to temporarily set your environment variables:
|
||||||
```sh
|
```sh
|
||||||
"C:\Program Files (x86)\Intel\openvino_2021\bin\setupvars.bat"
|
"C:\Program Files (x86)\Intel\openvino_2022\setupvars.bat"
|
||||||
```
|
```
|
||||||
> **IMPORTANT**: Windows PowerShell* is not recommended for running the configuration commands; please use the Command Prompt instead.
|
> **IMPORTANT**: Windows PowerShell* is not recommended for running the configuration commands; please use the Command Prompt instead.
|
||||||
|
|
||||||
@ -147,7 +147,7 @@ The Model Optimizer is a key component of the Intel® Distribution of OpenVINO
|
|||||||
|
|
||||||
The Inference Engine reads, loads, and infers the IR files, using a common API across the CPU, GPU, or VPU hardware.
|
The Inference Engine reads, loads, and infers the IR files, using a common API across the CPU, GPU, or VPU hardware.
|
||||||
|
|
||||||
The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.
|
The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.
|
||||||
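As an illustrative sketch (the model file and output directory below are placeholders, not files shipped with the toolkit), a typical conversion call from that directory looks like:
```sh
cd "C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer"
python mo.py --input_model <path_to_model>\model.onnx --output_dir <output_dir>
```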
|
|
||||||
This section explains how to use scripts to configure the Model Optimizer either for all of the supported frameworks at the same time or for individual frameworks. If you want to manually configure the Model Optimizer instead of using scripts, see the **Using Manual Configuration Process** section on the [Configuring the Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) page.
|
This section explains how to use scripts to configure the Model Optimizer either for all of the supported frameworks at the same time or for individual frameworks. If you want to manually configure the Model Optimizer instead of using scripts, see the **Using Manual Configuration Process** section on the [Configuring the Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) page.
|
||||||
|
|
||||||
@ -162,7 +162,7 @@ You can configure the Model Optimizer either for all supported frameworks at onc
|
|||||||
|
|
||||||
> **NOTE**:
|
> **NOTE**:
|
||||||
> In the steps below:
|
> In the steps below:
|
||||||
> - If you want to use the Model Optimizer from another installed version of the Intel® Distribution of OpenVINO™ toolkit, replace `openvino_2021` with `openvino_<version>`, where `<version>` is the required version.
|
> - If you want to use the Model Optimizer from another installed version of the Intel® Distribution of OpenVINO™ toolkit, replace `openvino_2022` with `openvino_<version>`, where `<version>` is the required version.
|
||||||
> - If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\Intel` with the directory where you installed the software.
|
> - If you installed the Intel® Distribution of OpenVINO™ toolkit to the non-default installation directory, replace `C:\Program Files (x86)\Intel` with the directory where you installed the software.
|
||||||
|
|
||||||
These steps use a command prompt to make sure you see error messages.
|
These steps use a command prompt to make sure you see error messages.
|
||||||
@ -176,7 +176,7 @@ Type commands in the opened window:
|
|||||||
|
|
||||||
2. Go to the Model Optimizer prerequisites directory.<br>
|
2. Go to the Model Optimizer prerequisites directory.<br>
|
||||||
```sh
|
```sh
|
||||||
cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites
|
cd C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer\install_prerequisites
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Run the following batch file to configure the Model Optimizer for Caffe\*, TensorFlow\* 1.x, MXNet\*, Kaldi\*, and ONNX\*:<br>
|
3. Run the following batch file to configure the Model Optimizer for Caffe\*, TensorFlow\* 1.x, MXNet\*, Kaldi\*, and ONNX\*:<br>
|
||||||
@ -188,7 +188,7 @@ install_prerequisites.bat
|
|||||||
|
|
||||||
1. Go to the Model Optimizer prerequisites directory:<br>
|
1. Go to the Model Optimizer prerequisites directory:<br>
|
||||||
```sh
|
```sh
|
||||||
cd C:\Program Files (x86)\Intel\openvino_2021\deployment_tools\model_optimizer\install_prerequisites
|
cd C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer\install_prerequisites
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Run the batch file for the framework you will use with the Model Optimizer. You can use more than one:
|
2. Run the batch file for the framework you will use with the Model Optimizer. You can use more than one:
|
||||||
@ -269,7 +269,7 @@ To perform inference on Intel® Vision Accelerator Design with Intel® Movidius
|
|||||||
|
|
||||||
1. Download and install <a href="https://www.microsoft.com/en-us/download/details.aspx?id=48145">Visual C++ Redistributable for Visual Studio 2017</a>
|
1. Download and install <a href="https://www.microsoft.com/en-us/download/details.aspx?id=48145">Visual C++ Redistributable for Visual Studio 2017</a>
|
||||||
2. Check with a support engineer whether your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs card requires an SMBUS connection to the PCIe slot (this is unlikely). Install the SMBUS driver only if confirmed (by default, it's not required):
|
2. Check with a support engineer whether your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs card requires an SMBUS connection to the PCIe slot (this is unlikely). Install the SMBUS driver only if confirmed (by default, it's not required):
|
||||||
1. Go to the `<INSTALL_DIR>\deployment_tools\inference-engine\external\hddl\drivers\SMBusDriver` directory, where `<INSTALL_DIR>` is the directory in which the Intel Distribution of OpenVINO toolkit is installed.
|
1. Go to the `<INSTALL_DIR>\runtime\3rdparty\hddl\drivers\SMBusDriver` directory, where `<INSTALL_DIR>` is the directory in which the Intel Distribution of OpenVINO toolkit is installed.
|
||||||
2. Right click on the `hddlsmbus.inf` file and choose **Install** from the pop-up menu.
|
2. Right click on the `hddlsmbus.inf` file and choose **Install** from the pop-up menu.
|
||||||
|
|
||||||
You are done installing your device driver and are ready to use your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs.
|
You are done installing your device driver and are ready to use your Intel® Vision Accelerator Design with Intel® Movidius™ VPUs.
|
||||||
|
@ -46,7 +46,7 @@ The `hddldaemon` is a system service, a binary executable that is run to manage
|
|||||||
`<IE>` refers to the following default OpenVINO™ Inference Engine directories:
|
`<IE>` refers to the following default OpenVINO™ Inference Engine directories:
|
||||||
- **Linux:**
|
- **Linux:**
|
||||||
```
|
```
|
||||||
/opt/intel/openvino_2021/inference_engine
|
/opt/intel/openvino_2022/inference_engine
|
||||||
```
|
```
|
||||||
- **Windows:**
|
- **Windows:**
|
||||||
```
|
```
|
||||||
|
@ -12,11 +12,13 @@
|
|||||||
The generation algorithm is based on an underlying random integer generator that uses the Philox algorithm. The Philox algorithm
|
The generation algorithm is based on an underlying random integer generator that uses the Philox algorithm. The Philox algorithm
|
||||||
is a counter-based pseudo-random generator, which produces uint32 values. A single invocation of the Philox algorithm returns
|
is a counter-based pseudo-random generator, which produces uint32 values. A single invocation of the Philox algorithm returns
|
||||||
four random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized
|
four random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized
|
||||||
with *seed* and *seed2* attributes respectively.
|
with *global_seed* and *op_seed* attributes respectively.
|
||||||
|
|
||||||
|
If both seed values are equal to zero, RandomUniform generates a non-deterministic sequence.
|
||||||
|
|
||||||
\f[
|
\f[
|
||||||
key = seed\\
|
key = global_seed\\
|
||||||
counter = seed2
|
counter = op_seed
|
||||||
\f]
|
\f]
|
||||||
|
|
||||||
Link to the original paper [Parallel Random Numbers: As Easy as 1, 2, 3](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
|
Link to the original paper [Parallel Random Numbers: As Easy as 1, 2, 3](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
|
||||||
@ -130,7 +132,7 @@ result = x \mod (maxval - minval) + minval,
|
|||||||
where *x* is a uint32 random value.
|
where *x* is a uint32 random value.
|
||||||
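For instance, with hypothetical values `minval` = 5, `maxval` = 15 and a raw value *x* = 123456789, the conversion gives:

\f[
result = 123456789 \mod (15 - 5) + 5 = 9 + 5 = 14
\f]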
|
|
||||||
|
|
||||||
Example 1. *RandomUniform* output with `seed` = 150, `seed2` = 10, `output_type` = f32:
|
Example 1. *RandomUniform* output with `global_seed` = 150, `op_seed` = 10, `output_type` = f32:
|
||||||
|
|
||||||
```
|
```
|
||||||
input_shape = [ 3, 3 ]
|
input_shape = [ 3, 3 ]
|
||||||
@ -139,7 +141,7 @@ output = [[0.7011236 0.30539632 0.93931055]
|
|||||||
[0.5197197 0.22727466 0.991374 ]]
|
[0.5197197 0.22727466 0.991374 ]]
|
||||||
```
|
```
|
||||||
|
|
||||||
Example 2. *RandomUniform* output with `seed` = 80, `seed2` = 100, `output_type` = double:
|
Example 2. *RandomUniform* output with `global_seed` = 80, `op_seed` = 100, `output_type` = double:
|
||||||
|
|
||||||
```
|
```
|
||||||
input_shape = [ 2, 2 ]
|
input_shape = [ 2, 2 ]
|
||||||
@ -152,7 +154,7 @@ output = [[5.65927959 4.23122376]
|
|||||||
[2.67008206 2.36423758]]
|
[2.67008206 2.36423758]]
|
||||||
```
|
```
|
||||||
|
|
||||||
Example 3. *RandomUniform* output with `seed` = 80, `seed2` = 100, `output_type` = i32:
|
Example 3. *RandomUniform* output with `global_seed` = 80, `op_seed` = 100, `output_type` = i32:
|
||||||
|
|
||||||
```
|
```
|
||||||
input_shape = [ 2, 3 ]
|
input_shape = [ 2, 3 ]
|
||||||
@ -175,18 +177,20 @@ output = [[65 70 56]
|
|||||||
* **Type**: string
|
* **Type**: string
|
||||||
* **Required**: *Yes*
|
* **Required**: *Yes*
|
||||||
|
|
||||||
* *seed*
|
* *global_seed*
|
||||||
|
|
||||||
* **Description**: global seed value.
|
* **Description**: global seed value.
|
||||||
* **Range of values**: positive integers
|
* **Range of values**: positive integers
|
||||||
* **Type**: `int`
|
* **Type**: `int`
|
||||||
|
* **Default value**: 0
|
||||||
* **Required**: *Yes*
|
* **Required**: *Yes*
|
||||||
|
|
||||||
* *seed2*
|
* *op_seed*
|
||||||
|
|
||||||
* **Description**: operational seed value.
|
* **Description**: operational seed value.
|
||||||
* **Range of values**: positive integers
|
* **Range of values**: positive integers
|
||||||
* **Type**: `int`
|
* **Type**: `int`
|
||||||
|
* **Default value**: 0
|
||||||
* **Required**: *Yes*
|
* **Required**: *Yes*
|
||||||
|
|
||||||
**Inputs**:
|
**Inputs**:
|
||||||
@ -212,7 +216,7 @@ output = [[65 70 56]
|
|||||||
|
|
||||||
```xml
|
```xml
|
||||||
<layer ... name="RandomUniform" type="RandomUniform">
|
<layer ... name="RandomUniform" type="RandomUniform">
|
||||||
<data output_type="f32" seed="234" seed2="148"/>
|
<data output_type="f32" global_seed="234" op_seed="148"/>
|
||||||
<input>
|
<input>
|
||||||
<port id="0" precision="I32"> <!-- shape value: [2, 3, 10] -->
|
<port id="0" precision="I32"> <!-- shape value: [2, 3, 10] -->
|
||||||
<dim>3</dim>
|
<dim>3</dim>
|
||||||
|
@ -196,16 +196,6 @@ Since Intel® Movidius™ Myriad™ X Visual Processing Unit (Intel® Movidius
|
|||||||
|
|
||||||
Intel® Vision Accelerator Design with Intel® Movidius™ VPUs requires keeping at least 32 inference requests in flight to fully saturate the device.
|
Intel® Vision Accelerator Design with Intel® Movidius™ VPUs requires keeping at least 32 inference requests in flight to fully saturate the device.
|
||||||
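For example (a sketch that assumes the Benchmark App has been built from the samples; the model path is a placeholder), you can keep 32 requests in flight with the `-nireq` option:
```sh
./benchmark_app -m <path_to_model>/model.xml -d HDDL -nireq 32
```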
|
|
||||||
### FPGA <a name="fpga"></a>
|
|
||||||
|
|
||||||
Below are listed the most important tips for the efficient usage of the FPGA:
|
|
||||||
|
|
||||||
- Just like for the Intel® Movidius™ Myriad™ VPU flavors, for the FPGA, it is important to hide the communication overheads by running multiple inference requests in parallel. For examples, refer to the [Benchmark App Sample](../../inference-engine/samples/benchmark_app/README.md).
|
|
||||||
- Since the first inference iteration with FPGA is always significantly slower than the subsequent ones, make sure you run multiple iterations (all samples, except GUI-based demos, have the `-ni` or 'niter' option to do that).
|
|
||||||
- FPGA performance heavily depends on the bitstream.
|
|
||||||
- Number of the infer request per executable network is limited to five, so “channel” parallelism (keeping individual infer request per camera/video input) would not work beyond five inputs. Instead, you need to mux the inputs into some queue that will internally use a pool of (5) requests.
|
|
||||||
- In most scenarios, the FPGA acceleration is leveraged through <a href="heterogeneity">heterogeneous execution</a> with further specific tips.
|
|
||||||
|
|
||||||
## Heterogeneity <a name="heterogeneity"></a>
|
## Heterogeneity <a name="heterogeneity"></a>
|
||||||
|
|
||||||
Heterogeneous execution (provided by the dedicated Inference Engine [“Hetero” plugin](../IE_DG/supported_plugins/HETERO.md)) enables scheduling a network inference across multiple devices.
|
Heterogeneous execution (provided by the dedicated Inference Engine [“Hetero” plugin](../IE_DG/supported_plugins/HETERO.md)) enables scheduling a network inference across multiple devices.
|
||||||
@ -249,23 +239,15 @@ Every Inference Engine sample supports the `-d` (device) option.
|
|||||||
For example, here is a command to run the [Object Detection SSD Sample](../../inference-engine/samples/object_detection_sample_ssd/README.md):
|
For example, here is a command to run the [Object Detection SSD Sample](../../inference-engine/samples/object_detection_sample_ssd/README.md):
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./object_detection_sample_ssd -m <path_to_model>/ModelSSD.xml -i <path_to_pictures>/picture.jpg -d HETERO:FPGA,CPU
|
./object_detection_sample_ssd -m <path_to_model>/ModelSSD.xml -i <path_to_pictures>/picture.jpg -d HETERO:GPU,CPU
|
||||||
```
|
```
|
||||||
|
|
||||||
where:
|
where:
|
||||||
|
|
||||||
- `HETERO` stands for the Heterogeneous plugin.
|
- `HETERO` stands for the Heterogeneous plugin.
|
||||||
- `FPGA,CPU` points to fallback policy with first priority on FPGA and further fallback to CPU.
|
- `GPU,CPU` points to the fallback policy with first priority on GPU and a further fallback to CPU.
|
||||||
|
|
||||||
You can point more than two devices: `-d HETERO:FPGA,GPU,CPU`.
|
You can specify more than two devices: `-d HETERO:GPU,MYRIAD,CPU`.
|
||||||
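For instance (again a sketch with a placeholder model path), the Benchmark App can exercise such a multi-device fallback list directly:
```sh
./benchmark_app -m <path_to_model>/model.xml -d HETERO:GPU,MYRIAD,CPU
```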
|
|
||||||
### Heterogeneous Scenarios with FPGA <a name="heterogeneous-scenarios-fpga"></a>
|
|
||||||
|
|
||||||
As FPGA is considered as an inference accelerator, most performance issues are related to the fact that due to the fallback, the CPU can be still used quite heavily.
|
|
||||||
- Yet in most cases, the CPU does only small/lightweight layers, for example, post-processing (`SoftMax` in most classification models or `DetectionOutput` in the SSD*-based topologies). In that case, limiting the number of CPU threads with [`KEY_CPU_THREADS_NUM`](../IE_DG/supported_plugins/CPU.md) config would further reduce the CPU utilization without significantly degrading the overall performance.
|
|
||||||
- Also, if you are still using OpenVINO™ toolkit version earlier than R1 2019, or if you have recompiled the Inference Engine with OpenMP (say for backward compatibility), setting the `KMP_BLOCKTIME` environment variable to something less than default 200ms (we suggest 1ms) is particularly helpful. Use `KMP_BLOCKTIME=0` if the CPU subgraph is small.
|
|
||||||
|
|
||||||
> **NOTE**: General threading tips (see <a href="#note-on-app-level-threading">Note on the App-Level Threading</a>) apply well, even when the entire topology fits the FPGA, because there is still a host-side code for data pre- and post-processing.
|
|
||||||
|
|
||||||
### General Tips on GPU/CPU Execution <a name="tips-on-gpu-cpu-execution"></a>
|
### General Tips on GPU/CPU Execution <a name="tips-on-gpu-cpu-execution"></a>
|
||||||
|
|
||||||
@ -323,7 +305,7 @@ Other than that, when implementing the kernels, you can try the methods from the
|
|||||||
### A Few Device-Specific Tips <a name="device-specific-tips"></a>
|
### A Few Device-Specific Tips <a name="device-specific-tips"></a>
|
||||||
|
|
||||||
- As already outlined in the <a href="#cpu-checklist">CPU Checklist</a>, align the threading model that you use in your CPU kernels with the model that the rest of the Inference Engine is compiled with.
|
- As already outlined in the <a href="#cpu-checklist">CPU Checklist</a>, align the threading model that you use in your CPU kernels with the model that the rest of the Inference Engine is compiled with.
|
||||||
- For CPU extensions, consider kernel flavor that supports blocked layout, if your kernel is in the hotspots (see <a href="#performance-counters">Internal Inference Performance Counters</a>). Since Intel MKL-DNN internally operates on the blocked layouts, this would save you a data packing (Reorder) on tensor inputs/outputs of your kernel. For example of the blocked layout support, please, refer to the extensions in `<OPENVINO_INSTALL_DIR>/deployment_tools/samples/extension/`.
|
- For CPU extensions, consider a kernel flavor that supports the blocked layout if your kernel is in the hotspots (see <a href="#performance-counters">Internal Inference Performance Counters</a>). Since Intel MKL-DNN internally operates on blocked layouts, this would save you a data packing (Reorder) on the tensor inputs/outputs of your kernel.
|
||||||
|
|
||||||
## Plugging Inference Engine to Applications <a name="plugging-ie-to-applications"></a>
|
## Plugging Inference Engine to Applications <a name="plugging-ie-to-applications"></a>
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@ auto function = network.getFunction();
|
|||||||
|
|
||||||
// This example demonstrates how to perform default affinity initialization and then
|
// This example demonstrates how to perform default affinity initialization and then
|
||||||
// correct affinity manually for some layers
|
// correct affinity manually for some layers
|
||||||
const std::string device = "HETERO:FPGA,CPU";
|
const std::string device = "HETERO:GPU,CPU";
|
||||||
|
|
||||||
// QueryNetworkResult object contains map layer -> device
|
// QueryNetworkResult object contains map layer -> device
|
||||||
InferenceEngine::QueryNetworkResult res = core.QueryNetwork(network, device, { });
|
InferenceEngine::QueryNetworkResult res = core.QueryNetwork(network, device, { });
|
||||||
|
@ -5,7 +5,7 @@ using namespace InferenceEngine;
|
|||||||
//! [part2]
|
//! [part2]
|
||||||
InferenceEngine::Core core;
|
InferenceEngine::Core core;
|
||||||
auto network = core.ReadNetwork("sample.xml");
|
auto network = core.ReadNetwork("sample.xml");
|
||||||
auto executable_network = core.LoadNetwork(network, "HETERO:FPGA,CPU");
|
auto executable_network = core.LoadNetwork(network, "HETERO:GPU,CPU");
|
||||||
//! [part2]
|
//! [part2]
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -1,17 +0,0 @@
|
|||||||
#include <ie_core.hpp>
|
|
||||||
|
|
||||||
int main() {
|
|
||||||
using namespace InferenceEngine;
|
|
||||||
//! [part0]
|
|
||||||
using namespace InferenceEngine::PluginConfigParams;
|
|
||||||
using namespace InferenceEngine::HeteroConfigParams;
|
|
||||||
|
|
||||||
Core ie;
|
|
||||||
auto network = ie.ReadNetwork("sample.xml");
|
|
||||||
// ...
|
|
||||||
|
|
||||||
auto execNetwork = ie.LoadNetwork(network, "HETERO:FPGA,CPU", { {KEY_HETERO_DUMP_GRAPH_DOT, YES} });
|
|
||||||
//! [part0]
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
102
docs/template_plugin/tests/functional/op_reference/atan.cpp
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
|
||||||
|
using namespace ngraph;
|
||||||
|
|
||||||
|
namespace reference_tests {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
struct AtanParams {
|
||||||
|
Tensor input;
|
||||||
|
Tensor expected;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct Builder : ParamsBuilder<AtanParams> {
|
||||||
|
REFERENCE_TESTS_ADD_SET_PARAM(Builder, input);
|
||||||
|
REFERENCE_TESTS_ADD_SET_PARAM(Builder, expected);
|
||||||
|
};
|
||||||
|
|
||||||
|
class ReferenceAtanLayerTest : public testing::TestWithParam<AtanParams>, public CommonReferenceTest {
|
||||||
|
public:
|
||||||
|
void SetUp() override {
|
||||||
|
auto params = GetParam();
|
||||||
|
function = CreateFunction(params.input.shape, params.input.type);
|
||||||
|
inputData = {params.input.data};
|
||||||
|
refOutData = {params.expected.data};
|
||||||
|
}
|
||||||
|
static std::string getTestCaseName(const testing::TestParamInfo<AtanParams>& obj) {
|
||||||
|
auto param = obj.param;
|
||||||
|
std::ostringstream result;
|
||||||
|
result << "shape=" << param.input.shape << "_";
|
||||||
|
result << "type=" << param.input.type;
|
||||||
|
return result.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static std::shared_ptr<Function> CreateFunction(const Shape& shape, const element::Type& type) {
|
||||||
|
const auto in = std::make_shared<op::Parameter>(type, shape);
|
||||||
|
const auto atan = std::make_shared<op::Atan>(in);
|
||||||
|
return std::make_shared<Function>(NodeVector{atan}, ParameterVector{in});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
TEST_P(ReferenceAtanLayerTest, AtanWithHardcodedRefs) {
|
||||||
|
Exec();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_Atan_With_Hardcoded_Refs, ReferenceAtanLayerTest,
|
||||||
|
::testing::Values(
|
||||||
|
Builder{}
|
||||||
|
.input({{11}, element::f16, std::vector<ngraph::float16>{-4.f, -2.f, -1.f, -0.5f, -0.25f, 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f}})
|
||||||
|
.expected({{11}, element::f16, std::vector<ngraph::float16>{-1.32581766f,
|
||||||
|
-1.10714872f,
|
||||||
|
-0.78539816f,
|
||||||
|
-0.46364761f,
|
||||||
|
-0.24497866f,
|
||||||
|
0.00000000f,
|
||||||
|
0.24497866f,
|
||||||
|
0.46364761f,
|
||||||
|
0.78539816f,
|
||||||
|
1.10714872f,
|
||||||
|
1.32581766f}}),
|
||||||
|
Builder{}
|
||||||
|
.input({{11}, element::f32, std::vector<float>{-4.f, -2.f, -1.f, -0.5f, -0.25f, 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f}})
|
||||||
|
.expected({{11}, element::f32, std::vector<float>{-1.32581766f,
|
||||||
|
-1.10714872f,
|
||||||
|
-0.78539816f,
|
||||||
|
-0.46364761f,
|
||||||
|
-0.24497866f,
|
||||||
|
0.00000000f,
|
||||||
|
0.24497866f,
|
||||||
|
0.46364761f,
|
||||||
|
0.78539816f,
|
||||||
|
1.10714872f,
|
||||||
|
1.32581766f}}),
|
||||||
|
Builder{}
|
||||||
|
.input({{5}, element::i32, std::vector<int32_t>{-2, -1, 0, 1, 2}})
|
||||||
|
.expected({{5}, element::i32, std::vector<int32_t>{-1, -1, 0, 1, 1}}),
|
||||||
|
Builder{}
|
||||||
|
.input({{5}, element::i64, std::vector<int64_t>{-2, -1, 0, 1, 2}})
|
||||||
|
.expected({{5}, element::i64, std::vector<int64_t>{-1, -1, 0, 1, 1}}),
|
||||||
|
Builder{}
|
||||||
|
.input({{5}, element::u32, std::vector<uint32_t>{0, 1, 2, 3, 4}})
|
||||||
|
.expected({{5}, element::u32, std::vector<uint32_t>{0, 1, 1, 1, 1}}),
|
||||||
|
Builder{}
|
||||||
|
.input({{5}, element::u64, std::vector<uint64_t>{0, 1, 2, 3, 4}})
|
||||||
|
.expected({{5}, element::u64, std::vector<uint64_t>{0, 1, 1, 1, 1}})),
|
||||||
|
ReferenceAtanLayerTest::getTestCaseName);
|
||||||
|
} // namespace reference_tests
|
112
docs/template_plugin/tests/functional/op_reference/minimum.cpp
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include <ie_core.hpp>
|
||||||
|
#include <ie_ngraph_utils.hpp>
|
||||||
|
#include <ngraph/ngraph.hpp>
|
||||||
|
#include <shared_test_classes/base/layer_test_utils.hpp>
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
|
||||||
|
using namespace ngraph;
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
using namespace reference_tests;
|
||||||
|
|
||||||
|
struct MinimumParams {
|
||||||
|
template <class IT, class OT>
|
||||||
|
MinimumParams(const PartialShape& s,
|
||||||
|
const element::Type& iType, const element::Type& oType,
|
||||||
|
const std::vector<IT>& iValues1, const std::vector<IT>& iValues2,
|
||||||
|
const std::vector<OT>& oValues)
|
||||||
|
: pshape(s),
|
||||||
          inType(iType),
          outType(oType),
          inputData1(CreateBlob(iType, iValues1)),
          inputData2(CreateBlob(iType, iValues2)),
          refData(CreateBlob(oType, oValues)) {}

    PartialShape pshape;
    element::Type inType;
    element::Type outType;
    Blob::Ptr inputData1;
    Blob::Ptr inputData2;
    Blob::Ptr refData;
};

class ReferenceMinimumLayerTest : public testing::TestWithParam<MinimumParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType);
        inputData = {params.inputData1, params.inputData2};
        refOutData = {params.refData};
    }

    static std::string getTestCaseName(const testing::TestParamInfo<MinimumParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& shape, const element::Type& data_type) {
        auto A = std::make_shared<op::Parameter>(data_type, shape);
        auto B = std::make_shared<op::Parameter>(data_type, shape);
        return std::make_shared<Function>(std::make_shared<op::v1::Minimum>(A, B), ParameterVector{A, B});
    }
};

TEST_P(ReferenceMinimumLayerTest, CompareWithHardcodedRefs) {
    Exec();
}

INSTANTIATE_TEST_SUITE_P(
    smoke_Minimum, ReferenceMinimumLayerTest, ::testing::Values(
        MinimumParams(PartialShape {8},
                      element::u8,
                      element::u8,
                      std::vector<uint8_t> {1, 8, 8, 17, 5, 5, 2, 3},
                      std::vector<uint8_t> {1, 2, 4, 8, 0, 2, 1, 200},
                      std::vector<uint8_t> {1, 2, 4, 8, 0, 2, 1, 3}),
        MinimumParams(PartialShape {8},
                      element::u16,
                      element::u16,
                      std::vector<uint16_t> {1, 8, 8, 17, 5, 7, 123, 3},
                      std::vector<uint16_t> {1, 2, 4, 8, 0, 2, 1, 1037},
                      std::vector<uint16_t> {1, 2, 4, 8, 0, 2, 1, 3}),
        MinimumParams(PartialShape {8},
                      element::u32,
                      element::u32,
                      std::vector<uint32_t> {1, 8, 8, 17, 5, 5, 2, 1},
                      std::vector<uint32_t> {1, 2, 4, 8, 0, 2, 1, 222},
                      std::vector<uint32_t> {1, 2, 4, 8, 0, 2, 1, 1}),
        MinimumParams(PartialShape {8},
                      element::u64,
                      element::u64,
                      std::vector<uint64_t> {1, 8, 8, 17, 5, 5, 2, 13},
                      std::vector<uint64_t> {1, 2, 4, 8, 0, 2, 1, 2222},
                      std::vector<uint64_t> {1, 2, 4, 8, 0, 2, 1, 13}),
        MinimumParams(PartialShape {8},
                      element::f32,
                      element::f32,
                      std::vector<float> {1, 8, -8, 17, -0.5, 0.5, 2, 1},
                      std::vector<float> {1, 2, 4, 8, 0, 0, 1, 1.5},
                      std::vector<float> {1, 2, -8, 8, -.5, 0, 1, 1}),
        MinimumParams(PartialShape {8},
                      element::i32,
                      element::i32,
                      std::vector<int32_t> {1, 8, -8, 17, -5, 67635216, 2, 1},
                      std::vector<int32_t> {1, 2, 4, 8, 0, 18448, 1, 6},
                      std::vector<int32_t> {1, 2, -8, 8, -5, 18448, 1, 1}),
        MinimumParams(PartialShape {8},
                      element::i64,
                      element::i64,
                      std::vector<int64_t> {1, 8, -8, 17, -5, 67635216, 2, 17179887632},
                      std::vector<int64_t> {1, 2, 4, 8, 0, 18448, 1, 280592},
                      std::vector<int64_t> {1, 2, -8, 8, -5, 18448, 1, 280592})),
    ReferenceMinimumLayerTest::getTestCaseName);
@ -4,17 +4,12 @@

#include <vector>

#include "behavior/infer_request_dynamic.hpp"
#include "behavior/infer_request/infer_request_dynamic.hpp"

using namespace BehaviorTestsDefinitions;

namespace {

const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::FP16
};

const std::vector<std::map<std::string, std::string>> configs = {
        {}
};
@ -14,6 +14,26 @@ endif()

add_subdirectory(samples)

# TODO: remove this
foreach(sample benchmark_app classification_sample_async hello_classification
        hello_nv12_input_classification hello_query_device hello_reshape_ssd
        ngraph_function_creation_sample object_detection_sample_ssd
        speech_sample style_transfer_sample hello_classification_c
        object_detection_sample_ssd_c hello_nv12_input_classification_c)
    if(TARGET ${sample})
        install(TARGETS ${sample}
                RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
    endif()
endforeach()

foreach(samples_library opencv_c_wrapper format_reader)
    if(TARGET ${samples_library})
        install(TARGETS ${samples_library}
                RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL
                LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL)
    endif()
endforeach()

openvino_developer_export_targets(COMPONENT openvino_common TARGETS format_reader ie_samples_utils)

if(ENABLE_TESTS)
@ -31,7 +51,7 @@ ie_cpack_add_component(cpp_samples DEPENDS cpp_samples_deps core)

if(UNIX)
    install(DIRECTORY samples/
            DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
            DESTINATION samples/cpp
            COMPONENT cpp_samples
            USE_SOURCE_PERMISSIONS
            PATTERN *.bat EXCLUDE
@ -39,7 +59,7 @@ if(UNIX)
            PATTERN .clang-format EXCLUDE)
elseif(WIN32)
    install(DIRECTORY samples/
            DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
            DESTINATION samples/cpp
            COMPONENT cpp_samples
            USE_SOURCE_PERMISSIONS
            PATTERN *.sh EXCLUDE
@ -47,35 +67,26 @@ elseif(WIN32)
            PATTERN .clang-format EXCLUDE)
endif()

install(TARGETS format_reader
        RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL
        LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL)

if(TARGET benchmark_app)
    install(TARGETS benchmark_app
            RUNTIME DESTINATION tests COMPONENT tests EXCLUDE_FROM_ALL)
endif()

# install C samples

ie_cpack_add_component(c_samples DEPENDS core_c)

if(UNIX)
    install(PROGRAMS samples/build_samples.sh
            DESTINATION ${IE_CPACK_IE_DIR}/samples/c
            DESTINATION samples/c
            COMPONENT c_samples)
elseif(WIN32)
    install(PROGRAMS samples/build_samples_msvc.bat
            DESTINATION ${IE_CPACK_IE_DIR}/samples/c
            DESTINATION samples/c
            COMPONENT c_samples)
endif()

install(DIRECTORY ie_bridges/c/samples/
        DESTINATION ${IE_CPACK_IE_DIR}/samples/c
        DESTINATION samples/c
        COMPONENT c_samples
        PATTERN ie_bridges/c/samples/CMakeLists.txt EXCLUDE
        PATTERN ie_bridges/c/samples/.clang-format EXCLUDE)

install(FILES samples/CMakeLists.txt
        DESTINATION ${IE_CPACK_IE_DIR}/samples/c
        DESTINATION samples/c
        COMPONENT c_samples)
@ -22,7 +22,7 @@ Supported Python* versions:

To configure the environment for the Inference Engine C* API, run:

- On Ubuntu 16.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
- On Ubuntu 16.04: `source <INSTALL_DIR>/setupvars.sh .`
- On Windows 10: XXXX

The script automatically detects the latest installed C* version and configures the required environment if the version is supported.
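As a minimal sketch of that setup step (the `/opt/intel/openvino` install location below is an assumption, not a path stated in this document; adjust it to your installation):

```sh
# Assumption: the package was installed to /opt/intel/openvino
source /opt/intel/openvino/setupvars.sh
# setupvars.sh exports INTEL_OPENVINO_DIR and related variables into the current shell
echo "$INTEL_OPENVINO_DIR"
```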
@ -501,7 +501,7 @@ INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_core_get_config(const ie_co
 * @brief Gets available devices for neural network inference.
 * @ingroup Core
 * @param core A pointer to ie_core_t instance.
 * @param avai_devices The devices are returned as { CPU, FPGA.0, FPGA.1, MYRIAD }
 * @param avai_devices The devices are returned as { CPU, GPU.0, GPU.1, MYRIAD }
 * If there is more than one device of a specific type, they are enumerated with a .# suffix
 * @return Status code of the operation: OK(0) for success.
 */
@ -72,7 +72,7 @@ The application outputs top-10 inference results.
```
Top 10 results:

Image C:\images\car.bmp
Image /opt/intel/openvino/samples/scripts/car.png

classid probability
------- -----------
@ -39,8 +39,8 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets
        RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core_c
        ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core_c
        LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core_c
        INCLUDES DESTINATION ${IE_CPACK_IE_DIR}/include/ie)
        INCLUDES DESTINATION runtime/include/ie)

install(DIRECTORY ${InferenceEngine_C_API_SOURCE_DIR}/include/
        DESTINATION ${IE_CPACK_IE_DIR}/include/ie
        DESTINATION runtime/include/ie
        COMPONENT core_c_dev)
@ -99,7 +99,8 @@ install(PROGRAMS src/openvino/__init__.py
ie_cpack_add_component(python_samples)

install(DIRECTORY sample/
        DESTINATION ${IE_CPACK_IE_DIR}/samples/python
        DESTINATION samples/python
        USE_SOURCE_PERMISSIONS
        COMPONENT python_samples)

ie_cpack(${PYTHON_COMPONENT} python_samples)
@ -26,11 +26,11 @@ Supported Python* versions:
## Set Up the Environment

To configure the environment for the Inference Engine Python\* API, run:
* On Ubuntu\* 18.04 or 20.04: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Ubuntu\* 18.04 or 20.04: `source <INSTALL_DIR>/setupvars.sh .`
* On CentOS\* 7.4: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On CentOS\* 7.4: `source <INSTALL_DIR>/setupvars.sh .`
* On macOS\* 10.x: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On macOS\* 10.x: `source <INSTALL_DIR>/setupvars.sh .`
* On Raspbian\* 9,: `source <INSTALL_DIR>/bin/setupvars.sh .`
* On Raspbian\* 9: `source <INSTALL_DIR>/setupvars.sh .`
* On Windows\* 10: `call <INSTALL_DIR>\bin\setupvars.bat`
* On Windows\* 10: `call <INSTALL_DIR>\setupvars.bat`

The script automatically detects the latest installed Python\* version and configures the required environment if the version is supported.
If you want to use a certain version of Python\*, set the environment variable `PYTHONPATH=<INSTALL_DIR>/python/<desired_python_version>`
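A hedged end-to-end sketch of this setup (the install path, the Python 3.8 version, and the exact `python/python3.8` subdirectory below are assumptions, not values taken from this document):

```sh
# Assumptions: default install location /opt/intel/openvino, Python 3.8
source /opt/intel/openvino/setupvars.sh
export PYTHONPATH=/opt/intel/openvino/python/python3.8:$PYTHONPATH
# quick sanity check that the bindings resolve and a device is visible
python3 -c "from openvino.inference_engine import IECore; print(IECore().available_devices)"
```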
@ -85,7 +85,7 @@ The sample application logs each step in a standard output stream and outputs to

```
[ INFO ] Creating Inference Engine
[ INFO ] Loading the network using ngraph function with weights from c:\openvino\deployment_tools\inference_engine\samples\python\ngraph_function_creation_sample\lenet.bin
[ INFO ] Loading the network using ngraph function with weights from c:\openvino\samples\python\ngraph_function_creation_sample\lenet.bin
[ INFO ] Configuring input and output blobs
[ INFO ] Loading the model to the plugin
[ WARNING ] Image c:\images\3.png is inverted to white over black
@ -228,10 +228,9 @@ def main():
# ---------------------------Step 7. Do inference----------------------------------------------------------------------
log.info('Starting inference in synchronous mode')
results = {blob_name: {} for blob_name in output_blobs}
infer_times = []
total_infer_time = 0
perf_counters = []

for key in sorted(input_data):
for i, key in enumerate(sorted(input_data)):
start_infer_time = default_timer()

# Reset states between utterance inferences to remove a memory impact
@ -244,43 +243,49 @@ def main():
for blob_name in result.keys():
results[blob_name][key] = result[blob_name]

infer_times.append(default_timer() - start_infer_time)
infer_time = default_timer() - start_infer_time
perf_counters.append(exec_net.requests[0].get_perf_counts())
total_infer_time += infer_time
num_of_frames = file_data[0][key].shape[0]
avg_infer_time_per_frame = infer_time / num_of_frames

# ---------------------------Step 8. Process output--------------------------------------------------------------------
for blob_name in output_blobs:
log.info('')
for i, key in enumerate(sorted(results[blob_name])):
log.info(f'Utterance {i} ({key}):')
log.info(f'Utterance {i} ({key})')
log.info(f'Total time in Infer (HW and SW): {infer_time * 1000:.2f}ms')
log.info(f'Frames in utterance: {num_of_frames}')
log.info(f'Average Infer time per frame: {avg_infer_time_per_frame * 1000:.2f}ms')

for blob_name in output_blobs:
log.info('')
log.info(f'Output blob name: {blob_name}')
log.info(f'Frames in utterance: {results[blob_name][key].shape[0]}')
log.info(f'Number scores per frame: {results[blob_name][key].shape[1]}')
log.info(f'Total time in Infer (HW and SW): {infer_times[i] * 1000:.2f}ms')

if args.reference:
log.info('')
compare_with_reference(results[blob_name][key], references[blob_name][key])

if args.performance_counter:
if 'GNA' in args.device:
pc = perf_counters[i]
pc = exec_net.requests[0].get_perf_counts()
total_cycles = int(pc['1.1 Total scoring time in HW']['real_time'])
stall_cycles = int(pc['1.2 Stall scoring time in HW']['real_time'])
active_cycles = total_cycles - stall_cycles
frequency = 10**6
if args.arch == 'CORE':
frequency *= GNA_CORE_FREQUENCY
else:
frequency *= GNA_ATOM_FREQUENCY
total_inference_time = total_cycles / frequency
active_time = active_cycles / frequency
stall_time = stall_cycles / frequency
log.info('')
log.info('Performance Statistics of GNA Hardware')
log.info(f'   Total Inference Time: {(total_inference_time * 1000):.4f} ms')
log.info(f'   Active Time: {(active_time * 1000):.4f} ms')
log.info(f'   Stall Time:  {(stall_time * 1000):.4f} ms')

log.info('')
log.info(f'Total sample time: {total_infer_time * 1000:.2f}ms')
log.info(f'Total sample time: {sum(infer_times) * 1000:.2f}ms')

if args.output:
for i, blob_name in enumerate(results):
@ -72,8 +72,10 @@ add_custom_command(TARGET ${TARGET_NAME}
# install

install(TARGETS ${INSTALLED_TARGETS}
        RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT}
        RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine
        LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT})
        COMPONENT ${PYTHON_COMPONENT}
        LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine
        COMPONENT ${PYTHON_COMPONENT})

install(PROGRAMS __init__.py
        DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine
@ -9,7 +9,7 @@ from enum import Enum
supported_precisions = ['FP32', 'FP64', 'FP16', 'I64', 'U64', 'I32', 'U32',
                        'I16', 'I4', 'I8', 'U16', 'U4', 'U8', 'BOOL', 'BIN', 'BF16']

known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']
known_plugins = ['CPU', 'GPU', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']

layout_int_to_str_map = {0: 'ANY', 1: 'NCHW', 2: 'NHWC', 3: 'NCDHW', 4: 'NDHWC', 64: 'OIHW', 95: 'SCALAR', 96: 'C',
                         128: 'CHW', 192: 'HW', 193: 'NC', 194: 'CN', 200: 'BLOCKED'}
@ -39,7 +39,7 @@ cdef class InferRequest:
    cpdef get_perf_counts(self)
    cdef void user_callback(self, int status) with gil
    cdef public:
        _inputs_list, _outputs_list, _py_callback, _py_data, _py_callback_used, _py_callback_called, _user_blobs, _inputs_is_dynamic
        _inputs_list, _outputs_list, _py_callback, _py_data, _user_blobs, _inputs_is_dynamic

cdef class IENetwork:
    cdef C.IENetwork impl
@ -541,7 +541,7 @@ cdef class IECore:
    def get_config(self, device_name: str, config_name: str):
        return self.impl.getConfig(device_name.encode(), config_name.encode())

    ## A list of devices. The devices are returned as \[CPU, FPGA.0, FPGA.1, MYRIAD\].
    ## A list of devices. The devices are returned as \[CPU, GPU.0, GPU.1, MYRIAD\].
    #  If there is more than one device of a specific type, they all are listed followed by a dot and a number.
    @property
    def available_devices(self):
@ -1071,15 +1071,11 @@ cdef class InferRequest:
        self._inputs_list = []
        self._outputs_list = []
        self._py_callback = lambda *args, **kwargs: None
        self._py_callback_used = False
        self._py_callback_called = threading.Event()
        self._py_data = None
        self._inputs_is_dynamic = {}

    cdef void user_callback(self, int status) with gil:
        if self._py_callback:
            # Set flag at first since user can call wait in callback
            self._py_callback_called.set()
            self._py_callback(status, self._py_data)

    ## Description: Sets a callback function that is called on success or failure of an asynchronous request
@ -1103,7 +1099,6 @@ cdef class InferRequest:
    def set_completion_callback(self, py_callback, py_data = None):
        self._py_callback = py_callback
        self._py_data = py_data
        self._py_callback_used = True
        deref(self.impl).setCyCallback(<cb_type> self.user_callback, <void *> self)

    cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name):
@ -1221,8 +1216,6 @@ cdef class InferRequest:
    cpdef async_infer(self, inputs=None):
        if inputs is not None:
            self._fill_inputs(inputs)
        if self._py_callback_used:
            self._py_callback_called.clear()
        with nogil:
            deref(self.impl).infer_async()

@ -1242,24 +1235,6 @@ cdef class InferRequest:
    cpdef wait(self, timeout=None):
        cdef int status
        cdef int64_t c_timeout
        cdef int c_wait_mode
        if self._py_callback_used:
            # check request status to avoid blocking for idle requests
            c_wait_mode = WaitMode.STATUS_ONLY
            with nogil:
                status = deref(self.impl).wait(c_wait_mode)
            if status != StatusCode.RESULT_NOT_READY:
                return status
            if not self._py_callback_called.is_set():
                if timeout == WaitMode.RESULT_READY:
                    timeout = None
                if timeout is not None:
                    # Convert milliseconds to seconds
                    timeout = float(timeout)/1000
                if not self._py_callback_called.wait(timeout):
                    return StatusCode.REQUEST_BUSY
            return StatusCode.OK

        if timeout is None:
            timeout = WaitMode.RESULT_READY
        c_timeout = <int64_t> timeout
@ -545,10 +545,10 @@ void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests)
        auto end_time = Time::now();
        auto execTime = std::chrono::duration_cast<ns>(end_time - infer_request.start_time);
        infer_request.exec_time = static_cast<double>(execTime.count()) * 0.000001;
        infer_request.request_queue_ptr->setRequestIdle(infer_request.index);
        if (infer_request.user_callback) {
            infer_request.user_callback(infer_request.user_data, code);
        }
        infer_request.request_queue_ptr->setRequestIdle(infer_request.index);
    });
}
}
@ -59,9 +59,11 @@ add_custom_command(TARGET ${TARGET_NAME}
# ie_cpack_add_component(${PYTHON_VERSION}_dev DEPENDS ${PYTHON_COMPONENT})

install(TARGETS ${TARGET_NAME}
        RUNTIME DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT}
        RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations
        LIBRARY DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT})
        COMPONENT ${PYTHON_COMPONENT}
        LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations
        COMPONENT ${PYTHON_COMPONENT})

install(PROGRAMS __init__.py
        DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations
        DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations
        COMPONENT ${PYTHON_COMPONENT})
@ -5,6 +5,7 @@ import numpy as np
import os
import pytest
import warnings
import time

from openvino.inference_engine import ie_api as ie
from conftest import model_path, image_path
|
|||||||
del ie_core
|
del ie_core
|
||||||
|
|
||||||
|
|
||||||
|
def test_wait_for_callback(device):
|
||||||
|
def callback(status, callbacks_info):
|
||||||
|
time.sleep(0.01)
|
||||||
|
callbacks_info['finished'] += 1
|
||||||
|
|
||||||
|
ie_core = ie.IECore()
|
||||||
|
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||||
|
num_requests = 3
|
||||||
|
exec_net = ie_core.load_network(net, device, num_requests=num_requests)
|
||||||
|
callbacks_info = {}
|
||||||
|
callbacks_info['finished'] = 0
|
||||||
|
img = read_image()
|
||||||
|
for request in exec_net.requests:
|
||||||
|
request.set_completion_callback(callback, callbacks_info)
|
||||||
|
request.async_infer({'data': img})
|
||||||
|
|
||||||
|
exec_net.wait(num_requests)
|
||||||
|
assert callbacks_info['finished'] == num_requests
|
||||||
|
|
||||||
|
|
||||||
def test_wrong_request_id(device):
|
def test_wrong_request_id(device):
|
||||||
ie_core = ie.IECore()
|
ie_core = ie.IECore()
|
||||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||||
|
@ -7,6 +7,7 @@ import pytest
import warnings
import threading
from datetime import datetime
import time

from openvino.inference_engine import ie_api as ie
from conftest import model_path, image_path
@ -349,7 +350,7 @@ def test_async_infer_callback_wait_in_callback(device):
            self.cv.release()
            status = self.request.wait(ie.WaitMode.RESULT_READY)
            assert status == ie.StatusCode.OK
            assert self.status_code == ie.StatusCode.OK
            assert self.status_code == ie.StatusCode.RESULT_NOT_READY

    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
@ -361,6 +362,24 @@ def test_async_infer_callback_wait_in_callback(device):
    del ie_core


def test_async_infer_wait_while_callback_will_not_finish(device):
    def callback(status, callback_status):
        time.sleep(0.01)
        callback_status['finished'] = True

    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, device, num_requests=1)
    callback_status = {}
    callback_status['finished'] = False
    request = exec_net.requests[0]
    request.set_completion_callback(callback, py_data=callback_status)
    img = read_image()
    request.async_infer({'data': img})
    request.wait()
    assert callback_status['finished'] == True


@pytest.mark.ngraph_dependent_test
def test_get_perf_counts(device):
    ie_core = ie.IECore()
@ -9,8 +9,6 @@ WHEEL_REQUIREMENTS=@WHEEL_REQUIREMENTS@
WHEEL_OVERVIEW=@WHEEL_OVERVIEW@

CMAKE_BUILD_DIR=@CMAKE_BINARY_DIR@
CORE_LIBS_DIR=@IE_CPACK_RUNTIME_PATH@
OV_RUNTIME_LIBS_DIR=@IE_CPACK_RUNTIME_PATH@
PLUGINS_LIBS_DIR=@PLUGINS_LIBS_DIR@
NGRAPH_LIBS_DIR=@NGRAPH_LIBS_DIR@
TBB_LIBS_DIR=@TBB_LIBS_DIR@
PY_PACKAGES_DIR=@PY_PACKAGES_DIR@
@ -18,11 +18,8 @@ set(WHEEL_OVERVIEW "${CMAKE_CURRENT_SOURCE_DIR}/meta/pypi_overview.md" CACHE STR
set(SETUP_PY "${CMAKE_CURRENT_SOURCE_DIR}/setup.py")
set(SETUP_ENV "${CMAKE_CURRENT_SOURCE_DIR}/.env.in")

set(CORE_LIBS_DIR ${IE_CPACK_RUNTIME_PATH})
set(PLUGINS_LIBS_DIR ${IE_CPACK_RUNTIME_PATH})
set(NGRAPH_LIBS_DIR deployment_tools/ngraph/lib)
set(PY_PACKAGES_DIR ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION})
set(TBB_LIBS_DIR deployment_tools/inference_engine/external/tbb/lib)
set(TBB_LIBS_DIR runtime/3rdparty/tbb/lib)

if(APPLE)
    set(WHEEL_PLATFORM macosx_10_15_x86_64)
@ -30,7 +27,7 @@ elseif(UNIX)
    set(WHEEL_PLATFORM manylinux2014_x86_64)
elseif(WIN32)
    set(WHEEL_PLATFORM win_amd64)
    set(TBB_LIBS_DIR deployment_tools/inference_engine/external/tbb/bin)
    set(TBB_LIBS_DIR runtime/3rdparty/tbb/bin)
else()
    message(FATAL_ERROR "This platform is not supported")
endif()
@ -5,7 +5,7 @@ OpenVINO™ toolkit quickly deploys applications and solutions that emulate huma
OpenVINO™ toolkit:

- Enables CNN-based deep learning inference on the edge
- Supports heterogeneous execution across an Intel® CPU, Intel® Integrated Graphics, Intel® FPGA, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
- Supports heterogeneous execution across an Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels
- Includes optimized calls for computer vision standards, including OpenCV\* and OpenCL™
@ -42,10 +42,8 @@ elif machine == 'aarch64':

# The following variables can be defined in environment or .env file
CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.')
CORE_LIBS_DIR = config('CORE_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}')
OV_RUNTIME_LIBS_DIR = config('OV_RUNTIME_LIBS_DIR', f'runtime/{LIBS_DIR}/{ARCH}/{CONFIG}')
PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', f'deployment_tools/inference_engine/{LIBS_DIR}/{ARCH}/{CONFIG}')
TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'runtime/3rdparty/tbb/{LIBS_DIR}')
NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', 'deployment_tools/ngraph/lib')
TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'deployment_tools/inference_engine/external/tbb/{LIBS_DIR}')
PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}')
LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path'
@ -53,43 +51,43 @@ LIB_INSTALL_CFG = {
    'ie_libs': {
        'name': 'core',
        'prefix': 'libs.core',
        'install_dir': CORE_LIBS_DIR,
        'install_dir': OV_RUNTIME_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    'hetero_plugin': {
        'name': 'hetero',
        'prefix': 'libs.plugins',
        'prefix': 'libs.core',
        'install_dir': PLUGINS_LIBS_DIR,
        'install_dir': OV_RUNTIME_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    'gpu_plugin': {
        'name': 'gpu',
        'prefix': 'libs.plugins',
        'prefix': 'libs.core',
        'install_dir': PLUGINS_LIBS_DIR,
        'install_dir': OV_RUNTIME_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    'cpu_plugin': {
        'name': 'cpu',
        'prefix': 'libs.plugins',
        'prefix': 'libs.core',
        'install_dir': PLUGINS_LIBS_DIR,
        'install_dir': OV_RUNTIME_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    'multi_plugin': {
        'name': 'multi',
        'prefix': 'libs.plugins',
        'prefix': 'libs.core',
        'install_dir': PLUGINS_LIBS_DIR,
        'install_dir': OV_RUNTIME_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    'myriad_plugin': {
        'name': 'myriad',
        'prefix': 'libs.plugins',
        'prefix': 'libs.core',
        'install_dir': PLUGINS_LIBS_DIR,
        'install_dir': OV_RUNTIME_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    'ngraph_libs': {
        'name': 'ngraph',
        'prefix': 'libs.ngraph',
        'prefix': 'libs.core',
        'install_dir': NGRAPH_LIBS_DIR,
        'install_dir': OV_RUNTIME_LIBS_DIR,
        'rpath': LIBS_RPATH,
    },
    'tbb_libs': {
@ -1,6 +1,7 @@
# Benchmark C++ Tool {#openvino_inference_engine_samples_benchmark_app_README}

This topic demonstrates how to use the Benchmark C++ Tool to estimate deep learning inference performance on supported devices. Performance can be measured for two inference modes: synchronous (latency-oriented) and asynchronous (throughput-oriented).
This topic demonstrates how to use the Benchmark C++ Tool to estimate deep learning inference performance on supported devices.
Performance can be measured for two inference modes: latency- and throughput-oriented.

> **NOTE:** This topic describes usage of C++ implementation of the Benchmark Tool. For the Python* implementation, refer to [Benchmark Python* Tool](../../../tools/benchmark_tool/README.md).

@ -12,12 +13,19 @@ This topic demonstrates how to use the Benchmark C++ Tool to estimate deep learn

## How It Works

Upon start-up, the application reads command-line parameters and loads a network and images/binary files to the Inference Engine plugin, which is chosen depending on a specified device. The number of infer requests and execution approach depend on the mode defined with the `-api` command-line parameter.
Upon start-up, the application reads command-line parameters and loads a network and inputs (images/binary files) to the specified device.

> **NOTE**: By default, Inference Engine samples, tools and demos expect input with BGR channels order. If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application or reconvert your model using the Model Optimizer tool with `--reverse_input_channels` argument specified. For more information about the argument, refer to **When to Reverse Input Channels** section of [Converting a Model Using General Conversion Parameters](../../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).
> **NOTE**: By default, Inference Engine samples, tools and demos expect input with BGR channels order.
> If you trained your model to work with RGB order, you need to manually rearrange the default channels order in the sample or demo application
> or reconvert your model using the Model Optimizer tool with the `--reverse_input_channels` argument specified.
> For more information about the argument, refer to the **When to Reverse Input Channels** section of
> [Converting a Model Using General Conversion Parameters](../../../docs/MO_DG/prepare_model/convert_model/Converting_Model_General.md).

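For illustration only, the conversion command shown later in this document could be extended with that argument; this is a sketch and the `<...>` paths are placeholders:

```sh
cd <INSTALL_DIR>/tools/model_optimizer
python3 mo.py --input_model <models_dir>/public/googlenet-v1/googlenet-v1.caffemodel \
    --reverse_input_channels --data_type FP32 --output_dir <ir_dir>
```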
If you run the application in the synchronous mode, it creates one infer request and executes the `Infer` method.
Device-specific execution parameters (number of streams, threads, and so on) can be either explicitly specified through the command line
If you run the application in the asynchronous mode, it creates as many infer requests as specified in the `-nireq` command-line parameter and executes the `StartAsync` method for each of them. If `-nireq` is not set, the application will use the default value for specified device.
or left default. In the latter case, the sample logic will select the values for the optimal throughput.
While experimenting with individual parameters allows you to find the performance sweet spot, the parameters are usually not very performance-portable,
so the values from one machine or device are not necessarily optimal for another.
From this perspective, the most portable way is experimenting only with the performance hints, as shown below. To learn more, refer to the section on the command-line parameters below.

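For example, using the model and input from the example section later in this document (paths are placeholders), the hint introduced above can be passed on its own, optionally together with `-nireq`, which the sample forwards to the device along with the hint:

```sh
# Let the device derive throughput-oriented settings from the hint
./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/samples/scripts/car.png -d CPU -hint throughput

# Optimize for low latency instead
./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/samples/scripts/car.png -d CPU -hint latency

# 'tput' is accepted as a shorthand; -nireq is forwarded together with the hint
./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/samples/scripts/car.png -d CPU -hint tput -nireq 4
```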
A number of execution steps is defined by one of the following parameters:
* Number of iterations specified with the `-niter` command-line argument
@ -25,14 +33,9 @@ A number of execution steps is defined by one of the following parameters:
* Both of them (execution will continue until both conditions are met)
* Predefined duration if `-niter` and `-t` are not specified. Predefined duration value depends on a device.

During the execution, the application collects latency for each executed infer request.
During the execution, the application calculates latency (if applicable) and overall throughput:
* By default, the median latency value is reported
Reported latency value is calculated as a median value of all collected latencies. Reported throughput value is reported
* Throughput is calculated as the number of processed requests divided by the overall inference time. Note that the throughput value also depends on batch size (see the small worked example below).
in frames per second (FPS) and calculated as a derivative from:
* Reported latency in the Sync mode
* The total execution time in the Async mode

Throughput value also depends on batch size.

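A small worked example of how the two metrics relate; the numbers below are hypothetical and chosen only to illustrate the arithmetic, they are not measurements from this document:

```
1000 processed requests, batch size 1, overall inference time 13.03 s:
  throughput ≈ (1000 * 1) / 13.03 s ≈ 76.7 FPS
  latency    = median of the per-request latencies collected during the run
```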
The application also collects per-layer Performance Measurement (PM) counters for each executed infer request if you
enable statistics dumping by setting the `-report_type` parameter to one of the possible values:
@ -56,7 +59,7 @@ Note that the benchmark_app usually produces optimal performance for any device
./benchmark_app -m <model> -i <input> -d CPU
```

But it is still may be non-optimal for some cases, especially for very small networks. More details can read in [Introduction to Performance Topics](../../../docs/IE_DG/Intro_to_Performance.md).
But it still may be sub-optimal in some cases, especially for very small networks. More details can be found in [Introduction to Performance Topics](../../../docs/IE_DG/Intro_to_Performance.md).

As explained in the [Introduction to Performance Topics](../../../docs/IE_DG/Intro_to_Performance.md) section, for all devices, including new [MULTI device](../../../docs/IE_DG/supported_plugins/MULTI.md) it is preferable to use the FP16 IR for the model.
Also if latency of the CPU inference on the multi-socket machines is of concern, please refer to the same
@ -83,7 +86,12 @@ Options:
    -l "<absolute_path>"      Required for CPU custom layers. Absolute path to a shared library with the kernels implementations.
          Or
    -c "<absolute_path>"      Required for GPU custom kernels. Absolute path to an .xml file with the kernels description.
    -api "<sync/async>"       Optional. Enable Sync/Async API. Default value is "async".
    -hint "<throughput(or just 'tput')/latency">
                              Optional. Performance hint (optimize for latency or throughput).
                              The hint allows the OpenVINO device to select the right network-specific settings,
                              as opposed to just accepting specific values from the sample command line.
                              So you can specify only the hint without setting explicit 'nstreams' or other device-specific options.
    -api "<sync/async>"       Optional (deprecated). Enable Sync/Async API. Default value is "async".
    -niter "<integer>"        Optional. Number of iterations. If not specified, the number of iterations is calculated depending on a device.
    -nireq "<integer>"        Optional. Number of infer requests. Default value is determined automatically for a device.
    -b "<integer>"            Optional. Batch size value. If not specified, the batch size value is determined from Intermediate Representation.
@ -140,39 +148,39 @@ To run the tool, you can use [public](@ref omz_models_group_public) or [Intel's]

## Examples of Running the Tool

This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or FPGA devices. As an input, the `car.png` file from the `<INSTALL_DIR>/deployment_tools/demo/` directory is used.
This section provides step-by-step instructions on how to run the Benchmark Tool with the `googlenet-v1` public model on CPU or GPU devices. As an input, the `car.png` file from the `<INSTALL_DIR>/samples/scripts/` directory is used.

> **NOTE:** The Internet access is required to execute the following steps successfully. If you have access to the Internet through the proxy server only, please make sure that it is configured in your OS environment.

1. Download the model. Go to the Model Downloader directory and run the `downloader.py` script, specifying the model name and the directory to download the model to:
   ```sh
   cd <INSTAL_DIR>/deployment_tools/open_model_zoo/tools/downloader
   cd <INSTALL_DIR>/extras/open_model_zoo/tools/downloader
   ```
   ```sh
   python3 downloader.py --name googlenet-v1 -o <models_dir>
   ```
2. Convert the model to the Inference Engine IR format. Go to the Model Optimizer directory and run the `mo.py` script, specifying the path to the model, the model format (which must be FP32 for CPU and GPU) and the output directory to generate the IR files:
   ```sh
   cd <INSTALL_DIR>/deployment_tools/model_optimizer
   cd <INSTALL_DIR>/tools/model_optimizer
   ```
   ```sh
   python3 mo.py --input_model <models_dir>/public/googlenet-v1/googlenet-v1.caffemodel --data_type FP32 --output_dir <ir_dir>
   ```
3. Run the tool with specifying the `<INSTALL_DIR>/deployment_tools/demo/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and FPGA devices:
3. Run the tool, specifying the `<INSTALL_DIR>/samples/scripts/car.png` file as an input image, the IR of the `googlenet-v1` model and a device to perform inference on. The following commands demonstrate running the Benchmark Tool in the asynchronous mode on CPU and GPU devices:

   * On CPU:
   ```sh
   ./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/deployment_tools/demo/car.png -d CPU -api async --progress true
   ./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/samples/scripts/car.png -d CPU -api async --progress true
   ```
   * On FPGA:
   * On GPU:
   ```sh
   ./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/deployment_tools/demo/car.png -d HETERO:FPGA,CPU -api async --progress true
   ./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/samples/scripts/car.png -d GPU -api async --progress true
   ```

The application outputs the number of executed iterations, total duration of execution, latency, and throughput.
Additionally, if you set the `-report_type` parameter, the application outputs a statistics report. If you set the `-pc` parameter, the application outputs performance counters. If you set `-exec_graph_path`, the application reports the serialized executable graph information. All measurements including per-layer PM counters are reported in milliseconds.

Below are fragments of sample output for CPU and FPGA devices:
Below are fragments of sample output for CPU and GPU devices:

* For CPU:
   ```
@ -189,7 +197,7 @@ Below are fragments of sample output for CPU and FPGA devices:
   Throughput: 76.73 FPS
   ```

* For FPGA:
* For GPU:
   ```
   [Step 10/11] Measuring performance (Start inference asynchronously, 5 inference requests using 4 streams for CPU, limits: 120000 ms duration)
   Progress: [....................] 100% done
@ -22,8 +22,15 @@ static const char model_message[] =
|
|||||||
"Required. Path to an .xml/.onnx file with a trained model or to a .blob files with "
|
"Required. Path to an .xml/.onnx file with a trained model or to a .blob files with "
|
||||||
"a trained compiled model.";
|
"a trained compiled model.";
|
||||||
|
|
||||||
|
/// @brief message for performance hint
|
||||||
|
static const char hint_message[] =
|
||||||
|
"Optional. Performance hint (optimize for latency or throughput). "
|
||||||
|
"The hint allows the OpenVINO device to select the right network-specific settings,"
|
||||||
|
"as opposite to just accepting specific values from the sample command line."
|
||||||
|
"So you can specify only the hint without setting explicit 'nstreams' or other device-specific options";
|
||||||
|
|
||||||
/// @brief message for execution mode
|
/// @brief message for execution mode
|
||||||
static const char api_message[] = "Optional. Enable Sync/Async API. Default value is \"async\".";
|
static const char api_message[] = "Optional (deprecated). Enable Sync/Async API. Default value is \"async\".";
|
||||||
|
|
||||||
/// @brief message for assigning cnn calculation to device
|
/// @brief message for assigning cnn calculation to device
|
||||||
static const char target_device_message[] =
|
static const char target_device_message[] =
|
||||||
@ -193,6 +200,9 @@ DEFINE_string(i, "", input_message);
|
|||||||
/// It is a required parameter
|
/// It is a required parameter
|
||||||
DEFINE_string(m, "", model_message);
|
DEFINE_string(m, "", model_message);
|
||||||
|
|
||||||
|
/// @brief Define execution mode
|
||||||
|
DEFINE_string(hint, "", hint_message);
|
||||||
|
|
||||||
/// @brief Define execution mode
|
/// @brief Define execution mode
|
||||||
DEFINE_string(api, "async", api_message);
|
DEFINE_string(api, "async", api_message);
|
||||||
|
|
||||||
|
@ -59,7 +59,10 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) {
|
|||||||
if (FLAGS_api != "async" && FLAGS_api != "sync") {
|
if (FLAGS_api != "async" && FLAGS_api != "sync") {
|
||||||
throw std::logic_error("Incorrect API. Please set -api option to `sync` or `async` value.");
|
throw std::logic_error("Incorrect API. Please set -api option to `sync` or `async` value.");
|
||||||
}
|
}
|
||||||
|
if (!FLAGS_hint.empty() && FLAGS_hint != "throughput" && FLAGS_hint != "tput" && FLAGS_hint != "latency") {
|
||||||
|
throw std::logic_error("Incorrect performance hint. Please set -hint option to"
|
||||||
|
"either `throughput`(tput) or `latency' value.");
|
||||||
|
}
|
||||||
if (!FLAGS_report_type.empty() && FLAGS_report_type != noCntReport && FLAGS_report_type != averageCntReport &&
|
if (!FLAGS_report_type.empty() && FLAGS_report_type != noCntReport && FLAGS_report_type != averageCntReport &&
|
||||||
FLAGS_report_type != detailedCntReport) {
|
FLAGS_report_type != detailedCntReport) {
|
||||||
std::string err = "only " + std::string(noCntReport) + "/" + std::string(averageCntReport) + "/" +
|
std::string err = "only " + std::string(noCntReport) + "/" + std::string(averageCntReport) + "/" +
|
||||||
@ -208,6 +211,11 @@ int main(int argc, char* argv[]) {
|
|||||||
// ----------------- 3. Setting device configuration
|
// ----------------- 3. Setting device configuration
|
||||||
// -----------------------------------------------------------
|
// -----------------------------------------------------------
|
||||||
next_step();
|
next_step();
|
||||||
|
std::string ov_perf_hint;
|
||||||
|
if (FLAGS_hint == "throughput" || FLAGS_hint == "tput")
|
||||||
|
ov_perf_hint = CONFIG_VALUE(THROUGHPUT);
|
||||||
|
else if (FLAGS_hint == "latency")
|
||||||
|
ov_perf_hint = CONFIG_VALUE(LATENCY);
|
||||||
|
|
||||||
bool perf_counts = false;
|
bool perf_counts = false;
|
||||||
// Update config per device according to command line parameters
|
// Update config per device according to command line parameters
|
||||||
@ -219,6 +227,13 @@ int main(int argc, char* argv[]) {
|
|||||||
config[device] = {};
|
config[device] = {};
|
||||||
std::map<std::string, std::string>& device_config = config.at(device);
|
std::map<std::string, std::string>& device_config = config.at(device);
|
||||||
|
|
||||||
|
// high-level performance modes
|
||||||
|
if (!ov_perf_hint.empty()) {
|
||||||
|
device_config[CONFIG_KEY(PERFORMANCE_HINT)] = ov_perf_hint;
|
||||||
|
if (FLAGS_nireq != 0)
|
||||||
|
device_config[CONFIG_KEY(PERFORMANCE_HINT_NUM_REQUESTS)] = std::to_string(FLAGS_nireq);
|
||||||
|
}
|
||||||
|
|
||||||
// Set performance counter
|
// Set performance counter
|
||||||
if (isFlagSetInCommandLine("pc")) {
|
if (isFlagSetInCommandLine("pc")) {
|
||||||
// set to user defined value
|
// set to user defined value
|
||||||
@ -241,6 +256,7 @@ int main(int argc, char* argv[]) {
|
|||||||
}
|
}
|
||||||
perf_counts = (device_config.at(CONFIG_KEY(PERF_COUNT)) == CONFIG_VALUE(YES)) ? true : perf_counts;
|
perf_counts = (device_config.at(CONFIG_KEY(PERF_COUNT)) == CONFIG_VALUE(YES)) ? true : perf_counts;
|
||||||
|
|
||||||
|
// the rest are individual per-device settings (overriding the values set with perf modes)
|
||||||
auto setThroughputStreams = [&]() {
|
auto setThroughputStreams = [&]() {
|
||||||
const std::string key = device + "_THROUGHPUT_STREAMS";
|
const std::string key = device + "_THROUGHPUT_STREAMS";
|
||||||
if (device_nstreams.count(device)) {
|
if (device_nstreams.count(device)) {
|
||||||
@ -255,7 +271,7 @@ int main(int argc, char* argv[]) {
|
|||||||
" or via configuration file.");
|
" or via configuration file.");
|
||||||
}
|
}
|
||||||
device_config[key] = device_nstreams.at(device);
|
device_config[key] = device_nstreams.at(device);
|
||||||
} else if (!device_config.count(key) && (FLAGS_api == "async")) {
|
} else if (ov_perf_hint.empty() && !device_config.count(key) && (FLAGS_api == "async")) {
|
||||||
slog::warn << "-nstreams default value is determined automatically for " << device
|
slog::warn << "-nstreams default value is determined automatically for " << device
|
||||||
<< " device. "
|
<< " device. "
|
||||||
"Although the automatic selection usually provides a "
|
"Although the automatic selection usually provides a "
|
||||||
@@ -484,9 +500,24 @@ int main(int argc, char* argv[]) {
                 batchSize = 1;
             }
         }
-        // ----------------- 8. Setting optimal runtime parameters
+        // ----------------- 8. Querying optimal runtime parameters
         // -----------------------------------------------------
         next_step();
+        // output of the actual settings that the device selected based on the hint
+        if (!ov_perf_hint.empty()) {
+            for (const auto& device : devices) {
+                std::vector<std::string> supported_config_keys =
+                    ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+                slog::info << "Device: " << device << slog::endl;
+                for (const auto& cfg : supported_config_keys) {
+                    try {
+                        slog::info << " {" << cfg << " , " << exeNetwork.GetConfig(cfg).as<std::string>();
+                    } catch (...) {
+                    };
+                    slog::info << " }" << slog::endl;
+                }
+            }
+        }
 
         // Update number of streams
         for (auto&& ds : device_nstreams) {
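The added block reads back the configuration values the device actually selected under the hint. Reduced to the two calls it relies on (Core::GetMetric with SUPPORTED_CONFIG_KEYS and ExecutableNetwork::GetConfig), a standalone sketch could look like this; the model path and device name are again placeholders, and not every reported key is readable as a string, hence the same catch-all as in the hunk.

    #include <inference_engine.hpp>
    #include <ie_plugin_config.hpp>

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        InferenceEngine::Core ie;
        auto exeNetwork = ie.LoadNetwork(ie.ReadNetwork("model.xml"), "GPU",
                                         {{CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(LATENCY)}});

        // Keys the device claims to support...
        std::vector<std::string> keys = ie.GetMetric("GPU", METRIC_KEY(SUPPORTED_CONFIG_KEYS));
        for (const auto& key : keys) {
            try {
                // ...and the values the plugin settled on for this executable network.
                std::cout << key << " : " << exeNetwork.GetConfig(key).as<std::string>() << std::endl;
            } catch (...) {
                // some keys cannot be converted to a string or are not readable here
            }
        }
        return 0;
    }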
@@ -59,7 +59,6 @@ uint32_t deviceDefaultDeviceDurationInSeconds(const std::string& device) {
         {"VPU", 60},
         {"MYRIAD", 60},
         {"HDDL", 60},
-        {"FPGA", 120},
         {"UNKNOWN", 120}};
     uint32_t duration = 0;
     for (const auto& deviceDurationInSeconds : deviceDefaultDurationInSeconds) {
@@ -19,12 +19,10 @@ SAMPLES_PATH="$( cd "$( dirname "${BASH_SOURCE[0]-$0}" )" && pwd )"
 printf "\nSetting environment variables for building samples...\n"
 
 if [ -z "$INTEL_OPENVINO_DIR" ]; then
-    if [ -e "$SAMPLES_PATH/../../../bin/setupvars.sh" ]; then
-        setvars_path="$SAMPLES_PATH/../../../bin/setupvars.sh"
-    elif [ -e "$SAMPLES_PATH/../../../../bin/setupvars.sh" ]; then
-        setvars_path="$SAMPLES_PATH/../../../../bin/setupvars.sh"
+    if [ -e "$SAMPLES_PATH/../../setupvars.sh" ]; then
+        setvars_path="$SAMPLES_PATH/../../setupvars.sh"
     else
-        printf "Error: Failed to set the environment variables automatically. To fix, run the following command:\n source <INSTALL_DIR>/bin/setupvars.sh\n where INSTALL_DIR is the OpenVINO installation directory.\n\n"
+        printf "Error: Failed to set the environment variables automatically. To fix, run the following command:\n source <INSTALL_DIR>/setupvars.sh\n where INSTALL_DIR is the OpenVINO installation directory.\n\n"
         exit 1
     fi
     if ! source "$setvars_path" ; then
@@ -33,7 +31,7 @@ if [ -z "$INTEL_OPENVINO_DIR" ]; then
     fi
 else
     # case for run with `sudo -E`
-    source "$INTEL_OPENVINO_DIR/bin/setupvars.sh"
+    source "$INTEL_OPENVINO_DIR/setupvars.sh"
 fi
 
 if ! command -v cmake &>/dev/null; then
@@ -9,7 +9,6 @@ set "ROOT_DIR=%~dp0"
 FOR /F "delims=\" %%i IN ("%ROOT_DIR%") DO set SAMPLES_TYPE=%%~nxi
 
 set "SOLUTION_DIR64=%USERPROFILE%\Documents\Intel\OpenVINO\inference_engine_%SAMPLES_TYPE%_samples_build"
-if "%InferenceEngine_DIR%"=="" set "InferenceEngine_DIR=%ROOT_DIR%\..\share"
 
 set MSBUILD_BIN=
 set VS_PATH=
@@ -30,19 +29,16 @@ if not "%1" == "" (
 )
 
 if "%INTEL_OPENVINO_DIR%"=="" (
-   if exist "%ROOT_DIR%\..\..\..\bin\setupvars.bat" (
-      call "%ROOT_DIR%\..\..\..\bin\setupvars.bat"
+   if exist "%ROOT_DIR%\..\..\setupvars.bat" (
+      call "%ROOT_DIR%\..\..\setupvars.bat"
    ) else (
-      if exist "%ROOT_DIR%\..\..\..\..\bin\setupvars.bat" (
-         call "%ROOT_DIR%\..\..\..\..\bin\setupvars.bat"
-      ) else (
       echo Failed to set the environment variables automatically
-      echo To fix, run the following command: ^<INSTALL_DIR^>\bin\setupvars.bat
+      echo To fix, run the following command: ^<INSTALL_DIR^>\setupvars.bat
       echo where INSTALL_DIR is the OpenVINO installation directory.
       GOTO errorHandling
       )
    )
 )
 
 if "%PROCESSOR_ARCHITECTURE%" == "AMD64" (
    set "PLATFORM=x64"
@@ -72,7 +72,7 @@ The application outputs top-10 inference results.
 ```
 Top 10 results:
 
-Image C:\images\car.bmp
+Image /opt/intel/openvino/samples/scripts/car.png
 
 classid probability
 ------- -----------
@@ -46,8 +46,10 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
     for (auto& kvp : configMap) {
         std::string key = kvp.first;
         std::string val = kvp.second;
+        const auto hints = perfHintsConfig.SupportedKeys();
-        if (key.compare(PluginConfigParams::KEY_PERF_COUNT) == 0) {
+        if (hints.end() != std::find(hints.begin(), hints.end(), key)) {
+            perfHintsConfig.SetConfig(key, val);
+        } else if (key.compare(PluginConfigParams::KEY_PERF_COUNT) == 0) {
             if (val.compare(PluginConfigParams::YES) == 0) {
                 useProfiling = true;
             } else if (val.compare(PluginConfigParams::NO) == 0) {
@@ -341,6 +343,9 @@ void Config::adjustKeyMapValues() {
         key_config_map[GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING] = PluginConfigParams::YES;
     else
         key_config_map[GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING] = PluginConfigParams::NO;
+    key_config_map.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT, perfHintsConfig.ovPerfHint });
+    key_config_map.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS,
+                            std::to_string(perfHintsConfig.ovPerfHintNumRequests) });
 }
 IE_SUPPRESS_DEPRECATED_END
 
@@ -8,7 +8,7 @@
 #include <string>
 
 #include "cldnn_custom_layer.h"
+#include <ie_performance_hints.hpp>
 #include <cldnn/graph/network.hpp>
 
 namespace CLDNNPlugin {
@@ -62,6 +62,7 @@ struct Config {
     bool enable_loop_unrolling;
 
     std::map<std::string, std::string> key_config_map;
+    InferenceEngine::PerfHintsConfig perfHintsConfig;
 };
 
 } // namespace CLDNNPlugin
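With PerfHintsConfig added to the GPU plugin's Config struct above, the hint keys travel through Config::UpdateFromMap and are reported back via adjustKeyMapValues() like any other entry. A minimal sketch of exercising that path from application code, assuming a GPU device is available; the request count of 4 is a placeholder.

    #include <inference_engine.hpp>
    #include <ie_plugin_config.hpp>

    #include <iostream>
    #include <string>

    int main() {
        InferenceEngine::Core ie;

        // Handled by the new branch in Config::UpdateFromMap: the key is found in
        // perfHintsConfig.SupportedKeys() and stored via PerfHintsConfig::SetConfig.
        ie.SetConfig({{CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(THROUGHPUT)},
                      {CONFIG_KEY(PERFORMANCE_HINT_NUM_REQUESTS), "4"}},
                     "GPU");

        // adjustKeyMapValues() publishes the stored values back through key_config_map.
        std::cout << ie.GetConfig("GPU", CONFIG_KEY(PERFORMANCE_HINT)).as<std::string>()
                  << std::endl;
        return 0;
    }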
Some files were not shown because too many files have changed in this diff.