Merge remote-tracking branch 'github/master' into auto-batch-master
# Conflicts:
#	inference-engine/tests/functional/inference_engine/CMakeLists.txt
#	src/plugins/intel_gpu/src/plugin/plugin.cpp
commit c94a288707
@@ -121,8 +121,8 @@ jobs:
  # For running ONNX frontend unit tests
  python3 -m pip install -r $(REPO_DIR)/src/core/tests/requirements_test_onnx.txt
  # For MO unit tests
- python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements.txt
- python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements_dev.txt
+ python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements.txt
+ python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_dev.txt
  # Speed up build
  wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
  unzip ninja-linux.zip

@@ -152,7 +152,7 @@ jobs:
  -DPYTHON_EXECUTABLE=/usr/bin/python3.8
  -DENABLE_WHEEL=ON
  -DENABLE_TESTS=ON
- -DNGRAPH_ONNX_FRONTEND_ENABLE=ON
+ -DENABLE_OV_ONNX_FRONTEND=ON
  -DENABLE_FASTER_BUILD=ON
  -DENABLE_STRICT_DEPENDENCIES=OFF
  -DENABLE_REQUIREMENTS_INSTALL=OFF
@@ -237,14 +237,15 @@ jobs:
  continueOnError: false

  - script: |
-   export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer
-   . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/tests/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml
+   export MO_ROOT=$(INSTALL_DIR)/tools/mo
+   . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_DIR)/tests/mo/unit_tests --junitxml=TEST-ModelOptimizer.xml
  displayName: 'Model Optimizer UT'
  continueOnError: false
+ enabled: false

- - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
  workingDirectory: $(INSTALL_TEST_DIR)
- displayName: 'nGraph UT'
+ displayName: 'OV Core UT'
  continueOnError: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/paddlepaddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-PaddlePaddle.xml
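For reference, the renamed steps above translate into roughly the following local commands. This is only a sketch: the install layout, the `setupvars.sh` entry point, and the Python 3.8 selection are assumptions carried over from the pipeline variables, not a documented interface.

```sh
# Sketch: run the relocated MO unit tests and the renamed core test binary by hand.
INSTALL_DIR=/opt/intel/openvino          # assumed install tree, stands in for $(INSTALL_DIR)
INSTALL_TEST_DIR="$INSTALL_DIR/tests"    # stands in for $(INSTALL_TEST_DIR)

export MO_ROOT="$INSTALL_DIR/tools/mo"   # previously tools/model_optimizer
. "$INSTALL_DIR/setupvars.sh" -pyver 3.8
python3 -m pytest -s "$INSTALL_DIR/tests/mo/unit_tests" --junitxml=TEST-ModelOptimizer.xml

# The former `unit-test` binary is now called `ov_core_unit_tests`:
"$INSTALL_TEST_DIR/ov_core_unit_tests" --gtest_print_time=1 --gtest_filter='-*IE_GPU*' \
    --gtest_output=xml:TEST-NGraphUT.xml
```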
@@ -333,6 +334,7 @@ jobs:
  displayName: 'Samples Smoke Tests'
  continueOnError: false
+ condition: eq(variables['CMAKE_BUILD_SHARED_LIBS'], 'ON')
  enabled: false

  - script: |
    export DATA_PATH=$(MODELS_PATH)

@@ -345,12 +347,13 @@ jobs:
  - script: |
    . $(SETUPVARS)
    python3 -m pip install -r requirements.txt
-   export MO_ROOT=$(INSTALL_DIR)/tools/model_optimizer
+   export MO_ROOT=$(INSTALL_DIR)/tools/mo
    export PYTHONPATH=$(LAYER_TESTS_DIR):$PYTHONPATH
    python3 -m pytest tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=TEST-tf_Roll.xmlTEST
  workingDirectory: $(LAYER_TESTS_DIR)
  displayName: 'Layer Tests'
  continueOnError: false
+ enabled: false

  - task: PublishTestResults@2
  condition: always()
@@ -90,8 +90,8 @@ jobs:
  # For running ONNX frontend unit tests
  python3 -m pip install -r $(REPO_DIR)/src/core/tests/requirements_test_onnx.txt
  # For MO unit tests
- python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements.txt
- python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements_dev.txt
+ python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements.txt
+ python3 -m pip install -r $(REPO_DIR)/tools/mo/requirements_dev.txt
  # Speed up build
  wget https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-linux.zip
  unzip ninja-linux.zip

@@ -118,7 +118,7 @@ jobs:
  -DPYTHON_EXECUTABLE=/usr/bin/python3.8
  -DENABLE_WHEEL=ON
  -DENABLE_TESTS=ON
- -DNGRAPH_ONNX_FRONTEND_ENABLE=ON
+ -DENABLE_OV_ONNX_FRONTEND=ON
  -DENABLE_FASTER_BUILD=ON
  -DENABLE_STRICT_DEPENDENCIES=OFF
  -DENABLE_REQUIREMENTS_INSTALL=OFF

@@ -158,9 +158,9 @@ jobs:
  displayName: 'Clean build dir'
  continueOnError: false

- - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
  workingDirectory: $(INSTALL_TEST_DIR)
- displayName: 'nGraph UT'
+ displayName: 'OV Core UT'
  continueOnError: false

  - task: PublishTestResults@2
@@ -101,8 +101,8 @@ jobs:
  -DENABLE_INTEL_GPU=OFF
  -DENABLE_PROFILING_ITT=OFF
  -DENABLE_SAMPLES=OFF
- -DNGRAPH_ONNX_FRONTEND_ENABLE=ON
- -DOPENVINO_DEBUG_ENABLE=OFF
+ -DENABLE_OV_ONNX_FRONTEND=ON
+ -DENABLE_OPENVINO_DEBUG=OFF
  $(REPO_DIR)
  workingDirectory: $(BUILD_DIR)

@@ -128,9 +128,9 @@ jobs:
  - script: ls -alR $(INSTALL_DIR)
  displayName: 'List install files'

- - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid:IE_CPU/GRUSequenceOp.onnx_model_gru* --gtest_output=xml:TEST-NGraphUT.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU*:IE_CPU.onnx_model_sigmoid:IE_CPU/GRUSequenceOp.onnx_model_gru* --gtest_output=xml:TEST-NGraphUT.xml
  workingDirectory: $(INSTALL_TEST_DIR)
- displayName: 'nGraph UT'
+ displayName: 'OV Core UT'
  continueOnError: false

  - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_filter=-MKLDNNGraphStructureTests.TestNoRedundantReordersBeforeDWConvolution:TestConvolution/MKLDNNGraphConvolutionTests.TestsConvolution/0:TestConvolutionDefaultPrimitivesPriority/MKLDNNGraphConvolutionTests.TestsConvolution/0 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
@@ -114,8 +114,8 @@ jobs:
  rem For running ONNX frontend unit tests
  python -m pip install -r $(REPO_DIR)\src\core\tests\requirements_test_onnx.txt
  rem For MO unit tests
- python -m pip install -r $(REPO_DIR)\model-optimizer\requirements.txt
- python -m pip install -r $(REPO_DIR)\model-optimizer\requirements_dev.txt
+ python -m pip install -r $(REPO_DIR)\tools\mo\requirements.txt
+ python -m pip install -r $(REPO_DIR)\tools\mo\requirements_dev.txt
  rem Speed up build
  certutil -urlcache -split -f https://github.com/Kitware/CMake/releases/download/v$(CMAKE_VERSION)/cmake-$(CMAKE_VERSION)-windows-x86_64.zip cmake-$(CMAKE_VERSION)-windows-x86_64.zip
  powershell -command "Expand-Archive -Force cmake-$(CMAKE_VERSION)-windows-x86_64.zip"

@@ -202,9 +202,9 @@ jobs:
  displayName: 'Clean build dir'
  continueOnError: false

- - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
  workingDirectory: $(INSTALL_TEST_DIR)
- displayName: 'nGraph UT'
+ displayName: 'OV Core UT'
  continueOnError: false

  - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\paddlepaddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-PaddlePaddle.xml
@@ -66,8 +66,8 @@ RUN cmake .. \
  -DENABLE_SAMPLES=OFF \
  -DENABLE_PYTHON=ON \
  -DPYTHON_EXECUTABLE=/usr/bin/python3 \
- -DNGRAPH_ONNX_FRONTEND_ENABLE=ON \
- -DOPENVINO_DEBUG_ENABLE=OFF \
+ -DENABLE_OV_ONNX_FRONTEND=ON \
+ -DENABLE_OPENVINO_DEBUG=OFF \
  -DCMAKE_INSTALL_PREFIX=/openvino/dist
  RUN make -j $(nproc) install
.github/workflows/mo.yml (vendored, 15 lines changed)
@@ -2,10 +2,10 @@ name: MO
  on:
  push:
  paths:
- - 'model-optimizer/**'
+ - 'openvino/tools/mo/**'
  pull_request:
  paths:
- - 'model-optimizer/**'
+ - 'openvino/tools/mo/**'

  jobs:
  Pylint-UT:

@@ -24,7 +24,7 @@ jobs:
  uses: actions/cache@v1
  with:
  path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('model-optimizer/requirements*.txt') }}
+ key: ${{ runner.os }}-pip-${{ hashFiles('openvino/tools/mo/requirements*.txt') }}
  restore-keys: |
  ${{ runner.os }}-pip-
  ${{ runner.os }}-

@@ -43,11 +43,11 @@ jobs:
  # requrements for CMake
  sudo apt update
  sudo apt --assume-yes install libusb-1.0-0-dev
- working-directory: model-optimizer
+ working-directory: openvino/tools/mo

  - name: Pylint
- run: pylint -d C,R,W mo/ mo.py extensions/
- working-directory: model-optimizer
+ run: pylint -d C,R,W openvino/tools/mo/ openvino/tools/mo/mo.py
+ working-directory: openvino/tools/mo

  - name: CMake
  run: |

@@ -62,5 +62,4 @@ jobs:
  env
  mkdir ../mo-ut-logs
  python3 -m xmlrunner discover -p *_test.py --output=../mo-ut-logs
- working-directory: model-optimizer
+ working-directory: openvino/tools/mo
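Taken together, the renamed workflow amounts to roughly the following local run; the working directory and package layout are copied from the diff above and may differ in an actual checkout, so treat this as an illustrative sketch only.

```sh
# Sketch: approximate the Pylint-UT job of the MO workflow from a repository checkout.
cd openvino/tools/mo                      # the workflow's new working-directory

python3 -m pip install -r requirements.txt
pylint -d C,R,W openvino/tools/mo/ openvino/tools/mo/mo.py

mkdir -p ../mo-ut-logs
python3 -m xmlrunner discover -p '*_test.py' --output=../mo-ut-logs
```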
.gitignore (vendored, 22 lines changed)
@@ -48,14 +48,14 @@ __pycache__
  *pylint_report_comments.txt

  # Artifacts
- /model-optimizer/*.bin
- /model-optimizer/*.xml
- /model-optimizer/*.json
- /model-optimizer/*.so
- /model-optimizer/*.txt
- /model-optimizer/*.pb
- /model-optimizer/*.pbtxt
- /model-optimizer/!CMakeLists.txt
- /model-optimizer/*.mapping
- /model-optimizer/*.dat
- /model-optimizer/*.svg
+ /tools/mo/*.bin
+ /tools/mo/*.xml
+ /tools/mo/*.json
+ /tools/mo/*.so
+ /tools/mo/*.txt
+ /tools/mo/*.pb
+ /tools/mo/*.pbtxt
+ /tools/mo/!CMakeLists.txt
+ /tools/mo/*.mapping
+ /tools/mo/*.dat
+ /tools/mo/*.svg
@@ -96,7 +96,6 @@ add_subdirectory(src)
  add_subdirectory(samples)
  add_subdirectory(inference-engine)
  include(cmake/extra_modules.cmake)
- add_subdirectory(model-optimizer)
  add_subdirectory(docs)
  add_subdirectory(tools)
  add_subdirectory(scripts)
CODEOWNERS (34 lines changed)
@@ -6,15 +6,26 @@ CODEOWNERS @openvinotoolkit/openvino-admins @openvinotoolkit/openvino-maintaine

  # CI:
  Jenkinsfile @openvinotoolkit/openvino-admins
  azure-pipelines.yml @openvinotoolkit/openvino-admins
  /.github/ @openvinotoolkit/openvino-admins
  /.ci/ @openvinotoolkit/openvino-admins

  # OpenVINO Samples:
  /samples/ @openvinotoolkit/openvino-samples-maintainers

  # OpenVINO Scripts:
  /scripts/ @openvinotoolkit/openvino-scripts-maintainers

  # QA Tests:
  /tests/ @openvinotoolkit/openvino-tests-maintainers

  # OpenVINO Scripts
  /scripts/ @openvinotoolkit/openvino-scripts-maintainers
  # Tools:
  /tools/ @openvinotoolkit/openvino-tools-maintainers

  # Model Optimizer:
  /tools/mo/ @openvinotoolkit/openvino-mo-maintainers

  # POT:
  /tools/pot/ @openvinotoolkit/openvino-pot-maintainers

  # IE Core:
  /inference-engine/ @openvinotoolkit/openvino-ie-maintainers

@@ -22,6 +33,8 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
  /src/common/transformations/ @GlebKazantaev @ilyachur
  /src/common/legacy/ @openvinotoolkit/openvino-ngraph-maintainers
  /src/common/ @openvinotoolkit/openvino-ie-maintainers
  /src/core/ @openvinotoolkit/openvino-ngraph-maintainers
  /src/frontends/ @openvinotoolkit/openvino-ngraph-maintainers
  /inference-engine/tests_deprecated/readers/ @openvinotoolkit/openvino-ngraph-maintainers

  # IE CPU:

@@ -62,20 +75,7 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
  /inference-engine/tests/functional/inference_engine/ngraph_reader/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers
  /inference-engine/tests/functional/inference_engine/transformations/ @openvinotoolkit/openvino-ie-tests-maintainers @openvinotoolkit/openvino-ngraph-maintainers

- # MO:
- /model-optimizer/ @openvinotoolkit/openvino-mo-maintainers
- # nGraph:
- /src/core/ @openvinotoolkit/openvino-ngraph-maintainers
- /src/frontends/ @openvinotoolkit/openvino-ngraph-maintainers
- # POT Tools
- /tools/pot/ @openvinotoolkit/openvino-pot-maintainers
- # Tools
- /tools/ @openvinotoolkit/openvino-tools-maintainers

- # Documentation
+ # Documentation:
  /docs/ @openvinotoolkit/openvino-docs-maintainers
  /docs/template_plugin/ @openvinotoolkit/openvino-ie-template-maintainers
  *.md @openvinotoolkit/openvino-docs-maintainers
@@ -84,7 +84,7 @@ ie_coverage_extract(INPUT "openvino" OUTPUT "core"
  ie_coverage_genhtml(INFO_FILE "core"
  PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")

- if(NGRAPH_ONNX_FRONTEND_ENABLE)
+ if(ENABLE_OV_ONNX_FRONTEND)
  ie_coverage_extract(INPUT "openvino" OUTPUT "onnx"
  PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/frontends/onnx/*"
  "${OV_COVERAGE_BASE_DIRECTORY}/src/frontends/onnx/*")
@@ -164,33 +164,33 @@ else()
  set(protoc_available ON)
  endif()

- ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" ON "protoc_available" OFF)
- ie_dependent_option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
- ie_option(NGRAPH_IR_FRONTEND_ENABLE "Enable IR FrontEnd" ON)
- ie_dependent_option(NGRAPH_TF_FRONTEND_ENABLE "Enable TensorFlow FrontEnd" ON "protoc_available" OFF)
- ie_dependent_option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system protobuf" OFF
- "NGRAPH_ONNX_FRONTEND_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE OR NGRAPH_TF_FRONTEND_ENABLE;BUILD_SHARED_LIBS" OFF)
- ie_dependent_option(NGRAPH_UNIT_TEST_ENABLE "Enables ngraph unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
- ie_dependent_option(NGRAPH_UNIT_TEST_BACKENDS_ENABLE "Control the building of unit tests using backends" ON
- "NGRAPH_UNIT_TEST_ENABLE" OFF)
- ie_option(OPENVINO_DEBUG_ENABLE "Enable output for OPENVINO_DEBUG statements" OFF)
+ ie_dependent_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ON "protoc_available" OFF)
+ ie_dependent_option(ENABLE_OV_PDPD_FRONTEND "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
+ ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON)
+ ie_dependent_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON "protoc_available" OFF)
+ ie_dependent_option(ENABLE_SYSTEM_PROTOBUF "Use system protobuf" OFF
+ "ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PDPD_FRONTEND OR ENABLE_OV_TF_FRONTEND;BUILD_SHARED_LIBS" OFF)
+ ie_dependent_option(ENABLE_OV_CORE_UNIT_TESTS "Enables OpenVINO core unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
+ ie_dependent_option(ENABLE_OV_CORE_BACKEND_UNIT_TESTS "Control the building of unit tests using backends" ON
+ "ENABLE_OV_CORE_UNIT_TESTS" OFF)
+ ie_option(ENABLE_OPENVINO_DEBUG "Enable output for OPENVINO_DEBUG statements" OFF)
  ie_option(ENABLE_REQUIREMENTS_INSTALL "Dynamic dependencies install" ON)

- if(NOT BUILD_SHARED_LIBS AND NGRAPH_TF_FRONTEND_ENABLE)
+ if(NOT BUILD_SHARED_LIBS AND ENABLE_OV_TF_FRONTEND)
  set(FORCE_FRONTENDS_USE_PROTOBUF ON)
  else()
  set(FORCE_FRONTENDS_USE_PROTOBUF OFF)
  endif()

  # WA for ngraph python build on Windows debug
- list(REMOVE_ITEM IE_OPTIONS NGRAPH_UNIT_TEST_ENABLE NGRAPH_UNIT_TEST_BACKENDS_ENABLE)
+ list(REMOVE_ITEM IE_OPTIONS ENABLE_OV_CORE_UNIT_TESTS ENABLE_OV_CORE_BACKEND_UNIT_TESTS)

  #
  # Process featues
  #

- if(NGRAPH_DEBUG_ENABLE)
-   add_definitions(-DNGRAPH_DEBUG_ENABLE)
+ if(ENABLE_OPENVINO_DEBUG)
+   add_definitions(-DENABLE_OPENVINO_DEBUG)
  endif()

  if (ENABLE_PROFILING_RAW)
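The rename is easiest to see at configure time. A minimal configure sketch using the new option names (source and build paths are placeholders; the retired names are shown in the comments only for comparison with the option table above):

```sh
# Sketch: configure a build with the renamed options.
#   NGRAPH_ONNX_FRONTEND_ENABLE -> ENABLE_OV_ONNX_FRONTEND
#   NGRAPH_PDPD_FRONTEND_ENABLE -> ENABLE_OV_PDPD_FRONTEND
#   NGRAPH_TF_FRONTEND_ENABLE   -> ENABLE_OV_TF_FRONTEND
#   NGRAPH_IR_FRONTEND_ENABLE   -> ENABLE_OV_IR_FRONTEND
#   NGRAPH_UNIT_TEST_ENABLE     -> ENABLE_OV_CORE_UNIT_TESTS
#   OPENVINO_DEBUG_ENABLE       -> ENABLE_OPENVINO_DEBUG
cmake -S openvino -B openvino/build \
      -DENABLE_OV_ONNX_FRONTEND=ON \
      -DENABLE_OV_PDPD_FRONTEND=ON \
      -DENABLE_OV_TF_FRONTEND=ON \
      -DENABLE_OV_CORE_UNIT_TESTS=ON \
      -DENABLE_OPENVINO_DEBUG=OFF
```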
@@ -185,10 +185,10 @@ endif()

  set(${CMAKE_FIND_PACKAGE_NAME}_Runtime_FOUND ON)

- set(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND @NGRAPH_ONNX_FRONTEND_ENABLE@)
- set(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND @NGRAPH_PDPD_FRONTEND_ENABLE@)
- set(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND @NGRAPH_TF_FRONTEND_ENABLE@)
- set(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND @NGRAPH_IR_FRONTEND_ENABLE@)
+ set(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND @ENABLE_OV_ONNX_FRONTEND@)
+ set(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND @ENABLE_OV_PDPD_FRONTEND@)
+ set(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND @ENABLE_OV_TF_FRONTEND@)
+ set(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND @ENABLE_OV_IR_FRONTEND@)

  set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_ONNX_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND})
  set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_PaddlePaddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND})
@@ -19,7 +19,7 @@ function(ov_model_convert SRC DST OUT)
  get_filename_component(name_we "${in_file}" NAME_WE)
  set(model_source_dir "${SRC}/${rel_dir}")

- if(NOT NGRAPH_ONNX_FRONTEND_ENABLE AND ext MATCHES "^\\.(onnx|prototxt)$")
+ if(NOT ENABLE_OV_ONNX_FRONTEND AND ext MATCHES "^\\.(onnx|prototxt)$")
  # don't copy / process ONNX / prototxt files
  continue()
  endif()

@@ -92,7 +92,7 @@ ov_model_convert("${OpenVINO_SOURCE_DIR}/${rel_path}"
  docs_onnx_out_files)

  if(ENABLE_TESTS)
- if(NGRAPH_ONNX_FRONTEND_ENABLE AND ENABLE_REQUIREMENTS_INSTALL)
+ if(ENABLE_OV_ONNX_FRONTEND AND ENABLE_REQUIREMENTS_INSTALL)
  find_package(PythonInterp 3 REQUIRED)

  get_filename_component(PYTHON_EXEC_DIR ${PYTHON_EXECUTABLE} DIRECTORY)

@@ -136,7 +136,7 @@ if(ENABLE_TESTS)
  add_dependencies(test_model_zoo test_pip_prerequsites)
  endif()

- if (NGRAPH_PDPD_FRONTEND_ENABLE AND NGRAPH_UNIT_TEST_ENABLE)
+ if (ENABLE_OV_PDPD_FRONTEND AND ENABLE_OV_CORE_UNIT_TESTS)
  add_dependencies(test_model_zoo paddlepaddle_test_models)
  endif()
@@ -17,7 +17,7 @@ if(NOT ENABLE_DOCKER)
  set(OpenVINO_DIR ${CMAKE_BINARY_DIR})
  endif()

- if(NGRAPH_ONNX_FRONTEND_ENABLE)
+ if(ENABLE_OV_ONNX_FRONTEND)
  add_subdirectory(onnx_custom_op)
  endif()
  add_subdirectory(template_extension)
@@ -137,7 +137,7 @@ and [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Conver
  for more details and command line parameters used for the model conversion.

  ```bash
- ./<MO_INSTALL_DIR>/mo.py --input_model <PATH_TO_MODEL>/wnet_20.pb -b 1
+ mo --input_model <PATH_TO_MODEL>/wnet_20.pb -b 1
  ```
  > **NOTE:** This conversion guide is applicable for the 2021.3 release of OpenVINO and that starting from 2021.4
  > the OpenVINO supports this model out of the box.

@@ -258,7 +258,7 @@ The implementation should be saved to the file `mo_extensions/front/tf/ComplexAb

  Now it is possible to convert the model using the following command line:
  ```bash
- ./<MO_INSTALL_DIR>/mo.py --input_model <PATH_TO_MODEL>/wnet_20.pb -b 1 --extensions mo_extensions/
+ mo --input_model <PATH_TO_MODEL>/wnet_20.pb -b 1 --extensions mo_extensions/
  ```

  The sub-graph corresponding to the originally non-supported one is depicted in the image below:

@@ -322,7 +322,7 @@ The result of this command is a compiled shared library (`.so` or `.dll`). It sh
  application using `Core` class instance method `AddExtension` like this
  `core.AddExtension(std::make_shared<Extension>(compiled_library_file_name), "CPU");`.

- To test that the extension is implemented correctly we can run the "mri_reconstruction_demo.py" with the following content:
+ To test that the extension is implemented correctly we can run the "mri_reconstruction_demo" with the following content:

  @snippet mri_reconstruction_demo.py mri_demo:demo
@@ -21,7 +21,7 @@ The IR is a pair of files describing the model:
  Below is a simple command running Model Optimizer to generate an IR for the input model:

  ```sh
- python3 mo.py --input_model INPUT_MODEL
+ mo --input_model INPUT_MODEL
  ```
  To learn about all Model Optimizer parameters and conversion technics, see the [Converting a Model to IR](prepare_model/convert_model/Converting_Model.md) page.
@@ -28,7 +28,7 @@ For example, to add the description of the `CustomReshape` layer, which is an ar

  2. Generate a new parser:
  ```shell
- cd <INSTALL_DIR>/tools/model_optimizer/mo/front/caffe/proto
+ cd <SITE_PACKAGES_WITH_INSTALLED_OPENVINO>/openvino/tools/mo/front/caffe/proto
  python3 generate_caffe_pb2.py --input_proto <PATH_TO_CUSTOM_CAFFE>/src/caffe/proto/caffe.proto
  ```
  where `PATH_TO_CUSTOM_CAFFE` is the path to the root directory of custom Caffe\*.

@@ -66,7 +66,7 @@ The mean file that you provide for the Model Optimizer must be in a `.binaryprot

  #### 7. What does the message "Invalid proto file: there is neither 'layer' nor 'layers' top-level messages" mean? <a name="question-7"></a>

- The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `<INSTALL_DIR>/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
+ The structure of any Caffe\* topology is described in the `caffe.proto` file of any Caffe version. For example, in the Model Optimizer, you can find the following proto file, used by default: `mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
  ```
  message NetParameter {
  // ... some other parameters

@@ -81,7 +81,7 @@ This means that any topology should contain layers as top-level structures in `p

  #### 8. What does the message "Old-style inputs (via 'input_dims') are not supported. Please specify inputs via 'input_shape'" mean? <a name="question-8"></a>

- The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `<INSTALL_DIR>/tools/model_optimizer/mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
+ The structure of any Caffe\* topology is described in the `caffe.proto` file for any Caffe version. For example, in the Model Optimizer you can find the following `.proto` file, used by default: `mo/front/caffe/proto/my_caffe.proto`. There you can find the structure:
  ```sh
  message NetParameter {

@@ -350,15 +350,15 @@ The specified input shape cannot be parsed. Please, define it in one of the foll

  *
  ```shell
- python3 mo.py --input_model <INPUT_MODEL>.caffemodel --input_shape (1,3,227,227)
+ mo --input_model <INPUT_MODEL>.caffemodel --input_shape (1,3,227,227)
  ```
  *
  ```shell
- python3 mo.py --input_model <INPUT_MODEL>.caffemodel --input_shape [1,3,227,227]
+ mo --input_model <INPUT_MODEL>.caffemodel --input_shape [1,3,227,227]
  ```
  * In case of multi input topology you should also specify inputs:
  ```shell
- python3 mo.py --input_model /path-to/your-model.caffemodel --input data,rois --input_shape (1,3,227,227),(1,6,1,1)
+ mo --input_model /path-to/your-model.caffemodel --input data,rois --input_shape (1,3,227,227),(1,6,1,1)
  ```

  Keep in mind that there is no space between and inside the brackets for input shapes.
@@ -39,9 +39,9 @@ A summary of the steps for optimizing and deploying a model that was trained wit
  To convert a Caffe\* model:

  1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory.
- 2. Use the `mo.py` script to simply convert a model, specifying the path to the input model `.caffemodel` file and the path to an output directory with write permissions:
+ 2. Use the `mo` script to simply convert a model, specifying the path to the input model `.caffemodel` file and the path to an output directory with write permissions:
  ```sh
- python3 mo.py --input_model <INPUT_MODEL>.caffemodel --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model <INPUT_MODEL>.caffemodel --output_dir <OUTPUT_MODEL_DIR>
  ```

  Two groups of parameters are available to convert your model:

@@ -94,13 +94,13 @@ Caffe*-specific parameters:
  * Launching the Model Optimizer for the [bvlc_alexnet.caffemodel](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) with a specified `prototxt` file. This is needed when the name of the Caffe\* model and the `.prototxt` file are different or are placed in different directories. Otherwise, it is enough to provide only the path to the input `model.caffemodel` file. You must have write permissions for the output directory.

  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --input_proto bvlc_alexnet.prototxt --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --input_proto bvlc_alexnet.prototxt --output_dir <OUTPUT_MODEL_DIR>
  ```

  * Launching the Model Optimizer for the [bvlc_alexnet.caffemodel](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet) with a specified `CustomLayersMapping` file. This is the legacy method of quickly enabling model conversion if your model has custom layers. This requires the Caffe\* system on the computer. To read more about this, see [Legacy Mode for Caffe* Custom Layers](../customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md).
  Optional parameters without default values and not specified by the user in the `.prototxt` file are removed from the Intermediate Representation, and nested parameters are flattened:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel -k CustomLayersMapping.xml --disable_omitting_optional --enable_flattening_nested_params --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel -k CustomLayersMapping.xml --disable_omitting_optional --enable_flattening_nested_params --output_dir <OUTPUT_MODEL_DIR>
  ```
  This example shows a multi-input model with input layers: `data`, `rois`
  ```

@@ -124,7 +124,7 @@ layer {

  * Launching the Model Optimizer for a multi-input model with two inputs and providing a new shape for each input in the order they are passed to the Model Optimizer along with a writable output directory. In particular, for data, set the shape to `1,3,227,227`. For rois, set the shape to `1,6,1,1`:
  ```sh
- python3 mo.py --input_model /path-to/your-model.caffemodel --input data,rois --input_shape (1,3,227,227),[1,6,1,1] --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model /path-to/your-model.caffemodel --input data,rois --input_shape (1,3,227,227),[1,6,1,1] --output_dir <OUTPUT_MODEL_DIR>
  ```

  ## Custom Layer Definition
@@ -34,9 +34,9 @@ A summary of the steps for optimizing and deploying a model that was trained wit
  To convert a Kaldi\* model:

  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
- 2. Use the `mo.py` script to simply convert a model with the path to the input model `.nnet` or `.mdl` file and to an output directory where you have write permissions:
+ 2. Use the `mo` script to simply convert a model with the path to the input model `.nnet` or `.mdl` file and to an output directory where you have write permissions:
  ```sh
- python3 mo.py --input_model <INPUT_MODEL>.nnet --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model <INPUT_MODEL>.nnet --output_dir <OUTPUT_MODEL_DIR>
  ```

  Two groups of parameters are available to convert your model:

@@ -60,12 +60,12 @@ Kaldi-specific parameters:

  * To launch the Model Optimizer for the wsj_dnn5b_smbr model with the specified `.nnet` file and an output directory where you have write permissions:
  ```sh
- python3 mo.py --input_model wsj_dnn5b_smbr.nnet --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model wsj_dnn5b_smbr.nnet --output_dir <OUTPUT_MODEL_DIR>
  ```

  * To launch the Model Optimizer for the wsj_dnn5b_smbr model with existing file that contains counts for the last layer with biases and a writable output directory:
  ```sh
- python3 mo.py --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts --output_dir <OUTPUT_MODEL_DIR>_
+ mo --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts --output_dir <OUTPUT_MODEL_DIR>_
  ```
  * The Model Optimizer normalizes counts in the following way:
  \f[

@@ -83,7 +83,7 @@ python3 mo.py --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts -
  * If you want to remove the last SoftMax layer in the topology, launch the Model Optimizer with the
  `--remove_output_softmax` flag.
  ```sh
- python3 mo.py --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts --remove_output_softmax --output_dir <OUTPUT_MODEL_DIR>_
+ mo --input_model wsj_dnn5b_smbr.nnet --counts wsj_dnn5b_smbr.counts --remove_output_softmax --output_dir <OUTPUT_MODEL_DIR>_
  ```
  The Model Optimizer finds the last layer of the topology and removes this layer only if it is a SoftMax layer.
@@ -44,9 +44,9 @@ A summary of the steps for optimizing and deploying a model that was trained wit
  To convert an MXNet\* model:

  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
- 2. To convert an MXNet\* model contained in a `model-file-symbol.json` and `model-file-0000.params`, run the Model Optimizer launch script `mo.py`, specifying a path to the input model file and a path to an output directory with write permissions:
+ 2. To convert an MXNet\* model contained in a `model-file-symbol.json` and `model-file-0000.params`, run the Model Optimizer launch script `mo`, specifying a path to the input model file and a path to an output directory with write permissions:
  ```sh
- python3 mo_mxnet.py --input_model model-file-0000.params --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model model-file-0000.params --output_dir <OUTPUT_MODEL_DIR>
  ```

  Two groups of parameters are available to convert your model:
@@ -60,9 +60,9 @@ The Model Optimizer process assumes you have an ONNX model that was directly dow
  To convert an ONNX\* model:

  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
- 2. Use the `mo.py` script to simply convert a model with the path to the input model `.nnet` file and an output directory where you have write permissions:
+ 2. Use the `mo` script to simply convert a model with the path to the input model `.nnet` file and an output directory where you have write permissions:
  ```sh
- python3 mo.py --input_model <INPUT_MODEL>.onnx --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model <INPUT_MODEL>.onnx --output_dir <OUTPUT_MODEL_DIR>
  ```

  There are no ONNX\* specific parameters, so only [framework-agnostic parameters](Converting_Model_General.md) are available to convert your model.
@@ -33,10 +33,10 @@ A summary of the steps for optimizing and deploying a model trained with Paddle\

  To convert a Paddle\* model:

- 1. Go to the `$INTEL_OPENVINO_DIR/tools/model_optimizer` directory.
- 2. Use the `mo.py` script to simply convert a model, specifying the framework, the path to the input model `.pdmodel` file and the path to an output directory with write permissions:
+ 1. Activate environment with installed OpenVINO if needed
+ 2. Use the `mo` script to simply convert a model, specifying the framework, the path to the input model `.pdmodel` file and the path to an output directory with write permissions:
  ```sh
- python3 mo.py --input_model <INPUT_MODEL>.pdmodel --output_dir <OUTPUT_MODEL_DIR> --framework=paddle
+ mo --input_model <INPUT_MODEL>.pdmodel --output_dir <OUTPUT_MODEL_DIR> --framework=paddle
  ```

  Parameters to convert your model:

@@ -47,7 +47,7 @@ Parameters to convert your model:
  ### Example of Converting a Paddle* Model
  Below is the example command to convert yolo v3 Paddle\* network to OpenVINO IR network with Model Optimizer.
  ```sh
- python3 mo.py --model_name yolov3_darknet53_270e_coco --output_dir <OUTPUT_MODEL_DIR> --framework=paddle --data_type=FP32 --reverse_input_channels --input_shape=[1,3,608,608],[1,2],[1,2] --input=image,im_shape,scale_factor --output=save_infer_model/scale_0.tmp_1,save_infer_model/scale_1.tmp_1 --input_model=yolov3.pdmodel
+ mo --model_name yolov3_darknet53_270e_coco --output_dir <OUTPUT_MODEL_DIR> --framework=paddle --data_type=FP32 --reverse_input_channels --input_shape=[1,3,608,608],[1,2],[1,2] --input=image,im_shape,scale_factor --output=save_infer_model/scale_0.tmp_1,save_infer_model/scale_1.tmp_1 --input_model=yolov3.pdmodel
  ```

  ## Supported Paddle\* Layers
@@ -179,15 +179,15 @@ There are three ways to store non-frozen TensorFlow models and load them to the
  To convert such a TensorFlow model:

  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
- 2. Run the `mo_tf.py` script with the path to the checkpoint file to convert a model and an output directory where you have write permissions:
+ 2. Run the `mo` script with the path to the checkpoint file to convert a model and an output directory where you have write permissions:

  * If input model is in `.pb` format:<br>
  ```sh
- python3 mo_tf.py --input_model <INFERENCE_GRAPH>.pb --input_checkpoint <INPUT_CHECKPOINT> --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model <INFERENCE_GRAPH>.pb --input_checkpoint <INPUT_CHECKPOINT> --output_dir <OUTPUT_MODEL_DIR>
  ```
  * If input model is in `.pbtxt` format:<br>
  ```sh
- python3 mo_tf.py --input_model <INFERENCE_GRAPH>.pbtxt --input_checkpoint <INPUT_CHECKPOINT> --input_model_is_text --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model <INFERENCE_GRAPH>.pbtxt --input_checkpoint <INPUT_CHECKPOINT> --input_model_is_text --output_dir <OUTPUT_MODEL_DIR>
  ```

  2. MetaGraph:

@@ -201,9 +201,9 @@ python3 mo_tf.py --input_model <INFERENCE_GRAPH>.pbtxt --input_checkpoint <INPUT
  To convert such TensorFlow model:

  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
- 2. Run the `mo_tf.py` script with a path to the MetaGraph `.meta` file and a writable output directory to convert a model:<br>
+ 2. Run the `mo` script with a path to the MetaGraph `.meta` file and a writable output directory to convert a model:<br>
  ```sh
- python3 mo_tf.py --input_meta_graph <INPUT_META_GRAPH>.meta --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_meta_graph <INPUT_META_GRAPH>.meta --output_dir <OUTPUT_MODEL_DIR>
  ```

  3. SavedModel format of TensorFlow 1.x and 2.x versions:

@@ -213,9 +213,9 @@ python3 mo_tf.py --input_meta_graph <INPUT_META_GRAPH>.meta --output_dir <OUTPUT
  To convert such TensorFlow model:

  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
- 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory to convert a model:<br>
+ 2. Run the `mo` script with a path to the SavedModel directory and a writable output directory to convert a model:<br>
  ```sh
- python3 mo_tf.py --saved_model_dir <SAVED_MODEL_DIRECTORY> --output_dir <OUTPUT_MODEL_DIR>
+ mo --saved_model_dir <SAVED_MODEL_DIRECTORY> --output_dir <OUTPUT_MODEL_DIR>
  ```

  You can convert TensorFlow 1.x SavedModel format in the environment that has a 1.x or 2.x version of TensorFlow. However, TensorFlow 2.x SavedModel format strictly requires the 2.x version of TensorFlow.

@@ -252,9 +252,9 @@ Where:
  To convert a TensorFlow model:

  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory
- 2. Use the `mo_tf.py` script to simply convert a model with the path to the input model `.pb` file and a writable output directory:
+ 2. Use the `mo` script to simply convert a model with the path to the input model `.pb` file and a writable output directory:
  ```sh
- python3 mo_tf.py --input_model <INPUT_MODEL>.pb --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model <INPUT_MODEL>.pb --output_dir <OUTPUT_MODEL_DIR>
  ```

  Two groups of parameters are available to convert your model:

@@ -310,27 +310,27 @@ TensorFlow*-specific parameters:

  * Launching the Model Optimizer for Inception V1 frozen model when model file is a plain text protobuf, specifying a writable output directory:
  ```sh
- python3 mo_tf.py --input_model inception_v1.pbtxt --input_model_is_text -b 1 --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model inception_v1.pbtxt --input_model_is_text -b 1 --output_dir <OUTPUT_MODEL_DIR>
  ```

  * Launching the Model Optimizer for Inception V1 frozen model and update custom sub-graph replacement file `transform.json` with information about input and output nodes of the matched sub-graph, specifying a writable output directory. For more information about this feature, refer to [Sub-Graph Replacement in the Model Optimizer](../customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md).
  ```sh
- python3 mo_tf.py --input_model inception_v1.pb -b 1 --tensorflow_custom_operations_config_update transform.json --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model inception_v1.pb -b 1 --tensorflow_custom_operations_config_update transform.json --output_dir <OUTPUT_MODEL_DIR>
  ```

  * Launching the Model Optimizer for Inception V1 frozen model and use custom sub-graph replacement file `transform.json` for model conversion. For more information about this feature, refer to [Sub-Graph Replacement in the Model Optimizer](../customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md).
  ```sh
- python3 mo_tf.py --input_model inception_v1.pb -b 1 --transformations_config transform.json --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model inception_v1.pb -b 1 --transformations_config transform.json --output_dir <OUTPUT_MODEL_DIR>
  ```

  * Launching the Model Optimizer for Inception V1 frozen model and dump information about the graph to TensorBoard log dir `/tmp/log_dir`
  ```sh
- python3 mo_tf.py --input_model inception_v1.pb -b 1 --tensorboard_logdir /tmp/log_dir --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model inception_v1.pb -b 1 --tensorboard_logdir /tmp/log_dir --output_dir <OUTPUT_MODEL_DIR>
  ```

  * Launching the Model Optimizer for a model with custom TensorFlow operations (refer to the [TensorFlow* documentation](https://www.tensorflow.org/extend/adding_an_op)) implemented in C++ and compiled into the shared library `my_custom_op.so`. Model Optimizer falls back to TensorFlow to infer output shape of operations implemented in the library if a custom TensorFlow operation library is provided. If it is not provided, a custom operation with an inference function is needed. For more information about custom operations, refer to the [Extending the Model Optimizer with New Primitives](../customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md).
  ```sh
- python3 mo_tf.py --input_model custom_model.pb --tensorflow_custom_layer_libraries ./my_custom_op.so --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model custom_model.pb --tensorflow_custom_layer_libraries ./my_custom_op.so --output_dir <OUTPUT_MODEL_DIR>
  ```

@@ -345,9 +345,9 @@ Below are the instructions on how to convert each of them.
  A model in the SavedModel format consists of a directory with a `saved_model.pb` file and two subfolders: `variables` and `assets`.
  To convert such a model:
  1. Go to the `<INSTALL_DIR>/tools/model_optimizer` directory.
- 2. Run the `mo_tf.py` script with a path to the SavedModel directory and a writable output directory:
+ 2. Run the `mo` script with a path to the SavedModel directory and a writable output directory:
  ```sh
- python3 mo_tf.py --saved_model_dir <SAVED_MODEL_DIRECTORY> --output_dir <OUTPUT_MODEL_DIR>
+ mo --saved_model_dir <SAVED_MODEL_DIRECTORY> --output_dir <OUTPUT_MODEL_DIR>
  ```

  TensorFlow* 2 SavedModel format strictly requires the 2.x version of TensorFlow installed in the
@@ -1,8 +1,8 @@
  # Converting a Model to Intermediate Representation (IR) {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model}

- Use the <code>mo.py</code> script from the `<INSTALL_DIR>/tools/model_optimizer` directory to run the Model Optimizer and convert the model to the Intermediate Representation (IR):
+ Use the <code>mo</code> script to run the Model Optimizer and convert the model to the Intermediate Representation (IR):
  ```sh
- python3 mo.py --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
  ```
  You need to have write permissions for an output directory.
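The change from `python3 mo.py` to a bare `mo` command assumes a Model Optimizer installed as a Python package that registers a console entry point. A plausible setup sketch follows; the `openvino-dev` package name is an assumption and is not stated in this diff:

```sh
# Sketch: obtain the `mo` entry point from pip instead of running mo.py from the install tree.
python3 -m venv mo-env && . mo-env/bin/activate
python3 -m pip install openvino-dev          # assumed package providing the `mo` console script
mo --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
```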
@@ -1,11 +1,11 @@
  # Converting a Model Using General Conversion Parameters {#openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model_General}

- To simply convert a model trained by any supported framework, run the Model Optimizer launch script ``mo.py`` specifying a path to the input model file and an output directory where you have write permissions:
+ To simply convert a model trained by any supported framework, run the Model Optimizer launch script ``mo`` specifying a path to the input model file and an output directory where you have write permissions:
  ```sh
- python3 mo.py --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model INPUT_MODEL --output_dir <OUTPUT_MODEL_DIR>
  ```

- The script is in `$INTEL_OPENVINO_DIR/tools/model_optimizer/`. The output directory must have write permissions, so you can run mo.py from the output directory or specify an output path with the `--output_dir` option.
+ The script is in `$INTEL_OPENVINO_DIR/tools/model_optimizer/`. The output directory must have write permissions, so you can run mo from the output directory or specify an output path with the `--output_dir` option.

  > **NOTE:** The color channel order (RGB or BGR) of an input data should match the channel order of the model training dataset. If they are different, perform the `RGB<->BGR` conversion specifying the command-line parameter: `--reverse_input_channels`. Otherwise, inference results may be incorrect. For details, refer to [When to Reverse Input Channels](#when_to_reverse_input_channels).

@@ -174,54 +174,54 @@ Resulting Intermediate Representation will not be resizable with the help of Inf

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with debug log level:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --log_level DEBUG --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --log_level DEBUG --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with the output IR called `result.*` in the specified `output_dir`:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --model_name result --output_dir /../../models/
+ mo --input_model bvlc_alexnet.caffemodel --model_name result --output_dir /../../models/
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with one input with scale values:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --scale_values [59,59,59] --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --scale_values [59,59,59] --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with multiple inputs with scale values:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --input data,rois --scale_values [59,59,59],[5,5,5] --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --input data,rois --scale_values [59,59,59],[5,5,5] --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with multiple inputs with scale and mean values specified for the particular nodes:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --input data,rois --mean_values data[59,59,59] --scale_values rois[5,5,5] --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --input data,rois --mean_values data[59,59,59] --scale_values rois[5,5,5] --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with specified input layer, overridden input shape, scale 5, batch 8 and specified name of an output operation:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --input "data[1 3 224 224]" --output pool5 -s 5 -b 8 --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --input "data[1 3 224 224]" --output pool5 -s 5 -b 8 --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with disabled fusing for linear operations to Convolution and grouped convolutions:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --disable_fusing --disable_gfusing --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --disable_fusing --disable_gfusing --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with reversed input channels order between RGB and BGR, specified mean values to be used for the input image per channel and specified data type for input tensor values:
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --reverse_input_channels --mean_values [255,255,255] --data_type FP16 --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --reverse_input_channels --mean_values [255,255,255] --data_type FP16 --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for the Caffe bvlc_alexnet model with extensions listed in specified directories, specified mean_images binaryproto file. For more information about extensions, please refer to [this](../customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) page.
  ```sh
- python3 mo.py --input_model bvlc_alexnet.caffemodel --extensions /home/,/some/other/path/ --mean_file /path/to/binaryproto --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model bvlc_alexnet.caffemodel --extensions /home/,/some/other/path/ --mean_file /path/to/binaryproto --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for TensorFlow* FaceNet* model with a placeholder freezing value.
  It replaces the placeholder with a constant layer that contains the passed value.
  For more information about FaceNet conversion, please refer to [this](tf_specific/Convert_FaceNet_From_Tensorflow.md) page
  ```sh
- python3 mo.py --input_model FaceNet.pb --input "phase_train->False" --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model FaceNet.pb --input "phase_train->False" --output_dir <OUTPUT_MODEL_DIR>
  ```

  Launch the Model Optimizer for any model with a placeholder freezing tensor of values.

@@ -231,5 +231,5 @@ Tensor here is represented in square brackets with each value separated from ano
  If data type is set in the model, this tensor will be reshaped to a placeholder shape and casted to placeholder data type.
  Otherwise, it will be casted to data type passed to `--data_type` parameter (by default, it is FP32).
  ```sh
- python3 mo.py --input_model FaceNet.pb --input "placeholder_layer_name->[0.1 1.2 2.3]" --output_dir <OUTPUT_MODEL_DIR>
+ mo --input_model FaceNet.pb --input "placeholder_layer_name->[0.1 1.2 2.3]" --output_dir <OUTPUT_MODEL_DIR>
  ```
@ -40,7 +40,7 @@ In the TensorBoard, it looks the following way together with some predecessors:
|
||||
Convert this model and put the results in a writable output directory:
|
||||
```sh
|
||||
${INTEL_OPENVINO_DIR}/tools/model_optimizer
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
(The other examples on this page assume that you first cd to the `model_optimizer` directory and add the `--output_dir` argument with a directory where you have write permissions.)
|
||||
|
||||
@ -81,9 +81,9 @@ The last layer in the model is `InceptionV1/Logits/Predictions/Reshape_1`, which
|
||||
```
|
||||
Due to automatic identification of inputs and outputs, you do not need to provide the `--input` and `--output` options to convert the whole model. The following commands are equivalent for the Inception V1 model:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --output_dir <OUTPUT_MODEL_DIR>
|
||||
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --input input --output InceptionV1/Logits/Predictions/Reshape_1 --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --input input --output InceptionV1/Logits/Predictions/Reshape_1 --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
The Intermediate Representations are identical for both conversions. The same is true if the model has multiple inputs and/or outputs.
|
||||
|
||||
@ -99,7 +99,7 @@ If you want to cut your model at the end, you have the following options:
|
||||
|
||||
1. The following command cuts off the rest of the model after the `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`, making this node the last in the model:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
The resulting Intermediate Representation has three layers:
|
||||
```xml
|
||||
@ -143,7 +143,7 @@ python3 mo.py --input_model inception_v1.pb -b 1 --output=InceptionV1/InceptionV
|
||||
|
||||
2. The following command cuts the edge that comes from 0 output port of the `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` and the rest of the model, making this node the last one in the model:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu:0 --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu:0 --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
The resulting Intermediate Representation has three layers, which are the same as in the previous case:
|
||||
```xml
|
||||
@ -187,7 +187,7 @@ python3 mo.py --input_model inception_v1.pb -b 1 --output InceptionV1/InceptionV
|
||||
|
||||
3. The following command cuts the edge that comes to 0 input port of the `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` and the rest of the model including `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`, deleting this node and making the previous node `InceptionV1/InceptionV1/Conv2d_1a_7x7/Conv2D` the last in the model:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --output=0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --output=0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
The resulting Intermediate Representation has two layers, which are the same as the first two layers in the previous case:
|
||||
```xml
|
||||
@ -225,7 +225,7 @@ If you want to go further and cut the beginning of the model, leaving only the `
|
||||
|
||||
1. You can use the following command line, where `--input` and `--output` specify the same node in the graph:
|
||||
```sh
|
||||
python3 mo.py --input_model=inception_v1.pb -b 1 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --input InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model=inception_v1.pb -b 1 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --input InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
The resulting Intermediate Representation looks as follows:
|
||||
```xml
|
||||
@ -257,7 +257,7 @@ Even though `--input_shape` is not specified in the command line, the shapes for
|
||||
2. You can cut edge incoming to layer by port number. To specify incoming port use notation `--input=port:input_node`.
|
||||
So, to cut everything before `ReLU` layer, cut edge incoming in port 0 of `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` node:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --input 0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --input 0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
The resulting Intermediate Representation looks as follows:
|
||||
```xml
|
||||
@ -289,7 +289,7 @@ Even though `--input_shape` is not specified in the command line, the shapes for
|
||||
3. You can cut edge outcoming from layer by port number. To specify outcoming port use notation `--input=input_node:port`.
|
||||
So, to cut everything before `ReLU` layer, cut edge from `InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1` node to `ReLU`:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --input InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1:0 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --input InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1:0 --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
The resulting Intermediate Representation looks as follows:
|
||||
```xml
|
||||
@ -320,7 +320,7 @@ python3 mo.py --input_model inception_v1.pb -b 1 --input InceptionV1/InceptionV1
|
||||
|
||||
The input shape can be overridden with `--input_shape`. In this case, the shape is applied to the node referenced in `--input`, not to the original `Placeholder` in the model. For example, this command line
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb --input_shape=[1,5,10,20] --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --input InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb --input_shape=[1,5,10,20] --output InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --input InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
|
||||
gives the following shapes in the `Input` and `ReLU` layers:
|
||||
@ -369,14 +369,14 @@ There are operations that contain more than one input ports. In the example cons
|
||||
Following this behavior, the Model Optimizer creates an `Input` layer for port 0 only, leaving port 1 as a constant. So the result of:
|
||||
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb -b 1 --input InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb -b 1 --input InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
|
||||
is identical to the result of conversion of the model as a whole, because this convolution is the first executable operation in Inception V1.
|
||||
|
||||
A different behavior occurs when `--input_shape` is also used in an attempt to override the input shape:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb --input=InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape [1,224,224,3] --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb --input=InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape [1,224,224,3] --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
An error occurs (for more information, see <a href="MO_FAQ.html#FAQ30">FAQ #30</a>):
|
||||
```sh
|
||||
@ -388,5 +388,5 @@ In this case, when `--input_shape` is specified and the node contains multiple i
|
||||
|
||||
The correct command line is:
|
||||
```sh
|
||||
python3 mo.py --input_model inception_v1.pb --input 0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape=[1,224,224,3] --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model inception_v1.pb --input 0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape=[1,224,224,3] --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
|
@ -7,7 +7,7 @@ for the ASpIRE Chain Time Delay Neural Network (TDNN) from the Kaldi* project of
|
||||
|
||||
To generate the Intermediate Representation (IR) of the model, run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 ./mo_kaldi.py --input_model exp/chain/tdnn_7b/final.mdl --output output
|
||||
mo --input_model exp/chain/tdnn_7b/final.mdl --output output
|
||||
```
|
||||
|
||||
The IR will have two inputs: `input` for data and `ivector` for ivectors.
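If it helps to double-check the result, the generated IR can be inspected with the OpenVINO Python API (2022.1 or later). The sketch below is only an illustration and assumes the IR was written as `final.xml`; the actual file name follows the input model name and the `--output_dir` value.
```python
# Minimal sketch: list the inputs of the generated IR (file name is an assumption).
from openvino.runtime import Core

core = Core()
model = core.read_model("final.xml")
for model_input in model.inputs:
    # Two inputs are expected: 'input' for data and 'ivector' for ivectors
    print(model_input.get_any_name(), model_input.get_partial_shape())
```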
|
||||
|
@ -13,14 +13,14 @@ As a result, you will get an MXNet model representation in `ssd_512_mobilenet1.0
|
||||
2. Run the Model Optimizer tool, specifying the `--enable_ssd_gluoncv` option. Make sure the `--input_shape` parameter matches the input layout of your model (NHWC or NCHW). The examples below illustrate running the Model Optimizer for the SSD and YOLO-v3 models trained with the NHWC layout and located in `<model_directory>`:
|
||||
* **For GluonCV SSD topologies:**
|
||||
```sh
|
||||
python3 mo_mxnet.py --input_model <model_directory>/ssd_512_mobilenet1.0.params --enable_ssd_gluoncv --input_shape [1,512,512,3] --input data --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model <model_directory>/ssd_512_mobilenet1.0.params --enable_ssd_gluoncv --input_shape [1,512,512,3] --input data --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
* **For YOLO-v3 topology:**
|
||||
* To convert the model:
|
||||
```sh
|
||||
python3 mo_mxnet.py --input_model <model_directory>/yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model <model_directory>/yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
* To convert the model with replacing the subgraph with RegionYolo layers:
|
||||
```sh
|
||||
python3 mo_mxnet.py --input_model <model_directory>/models/yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --transformations_config "mo/extensions/front/mxnet/yolo_v3_mobilenet1_voc.json" --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model <model_directory>/models/yolo3_mobilenet1.0_voc-0000.params --input_shape [1,255,255,3] --transformations_config "front/mxnet/yolo_v3_mobilenet1_voc.json" --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
|
@ -112,6 +112,6 @@ cp models/13_decoder_auxs.nd nst_model
|
||||
|
||||
3. Run the Model Optimizer for MXNet. Use the `--nd_prefix_name` option to specify the decoder prefix and `--input_shape` to specify input shapes in [N,C,W,H] order. For example:<br>
|
||||
```sh
|
||||
python3 mo.py --input_symbol <path/to/nst_model>/nst_vgg19-symbol.json --framework mxnet --output_dir <path/to/output_dir> --input_shape [1,3,224,224] --nd_prefix_name 13_decoder --pretrained_model <path/to/nst_model>/vgg19-0000.params
|
||||
mo --input_symbol <path/to/nst_model>/nst_vgg19-symbol.json --framework mxnet --output_dir <path/to/output_dir> --input_shape [1,3,224,224] --nd_prefix_name 13_decoder --pretrained_model <path/to/nst_model>/vgg19-0000.params
|
||||
```
|
||||
4. The IR is generated (`.bin`, `.xml` and `.mapping` files) in the specified output directory and is ready to be consumed by the Inference Engine.
|
||||
|
@ -11,7 +11,7 @@ If you train the model using the [script provided in model repository](https://g
|
||||
|
||||
**Step 2**. To generate the Intermediate Representation (IR) of the model, change your current working directory to the Model Optimizer installation directory and run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 ./mo.py --input_model dlrm_s_pytorch.onnx
|
||||
mo --input_model dlrm_s_pytorch.onnx
|
||||
```
|
||||
|
||||
Note that the PyTorch model uses the `torch.nn.EmbeddingBag` operation. This operation is exported to ONNX as a custom `ATen` layer and is not directly supported by OpenVINO*, but it is possible to convert this operation to:
|
||||
|
@ -6,12 +6,12 @@ These instructions are applicable only to the Faster R-CNN model converted to th
|
||||
|
||||
**Step 2**. To generate the Intermediate Representation (IR) of the model, change your current working directory to the Model Optimizer installation directory and run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 ./mo_onnx.py
|
||||
mo
|
||||
--input_model FasterRCNN-10.onnx \
|
||||
--input_shape [1,3,800,800] \
|
||||
--input 0:2 \
|
||||
--mean_values [102.9801,115.9465,122.7717] \
|
||||
--transformations_config ./extensions/front/onnx/faster_rcnn.json
|
||||
--transformations_config front/onnx/faster_rcnn.json
|
||||
```
|
||||
|
||||
Note that the height and width specified with the `input_shape` command line parameter can be set to different values. Refer to the [documentation](https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/faster-rcnn) for more information about supported input image dimensions and required pre- and post-processing steps.
|
||||
|
@ -13,5 +13,5 @@ To download the model and sample test data, click **Download** on [https://githu
|
||||
|
||||
To generate the Intermediate Representation (IR) of the model GPT-2, run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 mo.py --input_model gpt2-10.onnx --input_shape [X,Y,Z] --output_dir <OUTPUT_MODEL_DIR>
|
||||
mo --input_model gpt2-10.onnx --input_shape [X,Y,Z] --output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
|
@ -6,12 +6,12 @@ These instructions are applicable only to the [Mask R-CNN model](https://onnxzoo
|
||||
|
||||
**Step 2**. To generate the Intermediate Representation (IR) of the model, change your current working directory to the Model Optimizer installation directory and run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 ./mo_onnx.py
|
||||
mo
|
||||
--input_model mask_rcnn_R_50_FPN_1x.onnx \
|
||||
--input "0:2" \
|
||||
--input_shape [1,3,800,800] \
|
||||
--mean_values [102.9801,115.9465,122.7717] \
|
||||
--transformations_config ./extensions/front/onnx/mask_rcnn.json
|
||||
--transformations_config front/onnx/mask_rcnn.json
|
||||
```
|
||||
|
||||
Note that the height and width specified with the `input_shape` command line parameter can be set to different values. Refer to the [documentation](https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/mask-rcnn) for more information about supported input image dimensions and required pre- and post-processing steps.
|
||||
|
@ -49,7 +49,7 @@ The script generates ONNX* model file `bert-ner.onnx`.
|
||||
## Convert ONNX* BERT-NER model to IR
|
||||
|
||||
```bash
|
||||
python mo.py --input_model bert-ner.onnx --input "input_mask[1 128],segment_ids[1 128],input_ids[1 128]"
|
||||
mo --input_model bert-ner.onnx --input "input_mask[1 128],segment_ids[1 128],input_ids[1 128]"
|
||||
```
|
||||
|
||||
where `1` is `batch_size` and `128` is `sequence_length`.
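As an illustration only, the converted IR can then be fed inputs of this shape through the OpenVINO Python API. The sketch below assumes the IR is named `bert-ner.xml`, that the input names from the command above are preserved in the IR, and that `int32` data matches the precision reported for the IR inputs; adjust these if your IR differs.
```python
# Minimal sketch: run the converted BERT-NER IR on zero-filled placeholder inputs.
import numpy as np
from openvino.runtime import Core

core = Core()
model = core.read_model("bert-ner.xml")
compiled = core.compile_model(model, "CPU")
request = compiled.create_infer_request()

batch_size, sequence_length = 1, 128  # must match the shapes used during conversion
feed = {
    "input_ids": np.zeros((batch_size, sequence_length), dtype=np.int32),
    "input_mask": np.ones((batch_size, sequence_length), dtype=np.int32),
    "segment_ids": np.zeros((batch_size, sequence_length), dtype=np.int32),
}
results = request.infer(feed)  # replace the placeholders with real tokenized data
```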
|
@ -28,5 +28,5 @@ The script generates the ONNX\* model file `f3net.onnx`. This model conversion w
|
||||
## Convert ONNX* F3Net Model to IR
|
||||
|
||||
```sh
|
||||
./mo.py --input_model <MODEL_DIR>/f3net.onnx
|
||||
mo --input_model <MODEL_DIR>/f3net.onnx
|
||||
```
|
||||
|
@ -29,12 +29,12 @@ They are `decoder`, `encoder` and a combined `decoder(encoder(x))` models, respe
|
||||
|
||||
If using a combined model:
|
||||
```sh
|
||||
./mo.py --input_model <MODEL_DIR>/qt.onnx --input_shape [B,64,X]
|
||||
mo --input_model <MODEL_DIR>/qt.onnx --input_shape [B,64,X]
|
||||
```
|
||||
If using separate models:
|
||||
```sh
|
||||
./mo.py --input_model <MODEL_DIR>/encoder_qt.onnx --input_shape [B,64,X]
|
||||
./mo.py --input_model <MODEL_DIR>/decoder_qt.onnx --input_shape [B,1024,Y]
|
||||
mo --input_model <MODEL_DIR>/encoder_qt.onnx --input_shape [B,64,X]
|
||||
mo --input_model <MODEL_DIR>/decoder_qt.onnx --input_shape [B,1024,Y]
|
||||
```
|
||||
|
||||
Here the shape is determined by the Mel-spectrogram length of the audio file: B is the batch dimension, X is a dimension based on the input length, and Y is determined by the encoder output, usually `X / 2`.
|
||||
|
@ -27,5 +27,5 @@ The script generates the ONNX\* model file RCAN.onnx. You can find more informat
|
||||
## Convert ONNX* RCAN Model to IR
|
||||
|
||||
```sh
|
||||
./mo.py --input_model RCAN.onnx
|
||||
mo --input_model RCAN.onnx
|
||||
```
|
||||
|
@ -99,9 +99,9 @@ After completing this step, the files `rnnt_encoder.onnx`, `rnnt_prediction.onnx
|
||||
**Step 6**. Run the conversion command:
|
||||
|
||||
```bash
|
||||
python3 {path_to_openvino}/mo.py --input_model rnnt_encoder.onnx --input "input[157 1 240],feature_length->157"
|
||||
python3 {path_to_openvino}/mo.py --input_model rnnt_prediction.onnx --input "symbol[1 1],hidden_in_1[2 1 320],hidden_in_2[2 1 320]"
|
||||
python3 {path_to_openvino}/mo.py --input_model rnnt_joint.onnx --input "0[1 1 1024],1[1 1 320]"
|
||||
mo --input_model rnnt_encoder.onnx --input "input[157 1 240],feature_length->157"
|
||||
mo --input_model rnnt_prediction.onnx --input "symbol[1 1],hidden_in_1[2 1 320],hidden_in_2[2 1 320]"
|
||||
mo --input_model rnnt_joint.onnx --input "0[1 1 1024],1[1 1 320]"
|
||||
```
|
||||
Please note that the hardcoded sequence length value of 157 was taken from MLCommons, but the conversion to IR preserves
|
||||
network [reshapeability](../../../../IE_DG/ShapeInference.md), which means you can change the input shapes manually to any value either during conversion or
|
||||
|
@ -163,7 +163,7 @@ python3 eval.py \
|
||||
**Step 4**. Convert the model to the IR:
|
||||
|
||||
```sh
|
||||
python path/to/model_optimizer/mo.py --input_model /path/to/yolact.onnx
|
||||
mo --input_model /path/to/yolact.onnx
|
||||
```
|
||||
|
||||
**Step 5**. Embed input preprocessing into the IR:
|
||||
@ -173,7 +173,7 @@ To get performance gain by offloading to the OpenVINO application of mean/scale
|
||||
* If the backbone of the model is Resnet50-FPN or Resnet101-FPN, use the following MO command line:
|
||||
|
||||
```sh
|
||||
python path/to/model_optimizer/mo.py \
|
||||
mo \
|
||||
--input_model /path/to/yolact.onnx \
|
||||
--reverse_input_channels \
|
||||
--mean_values "[123.68, 116.78, 103.94]" \
|
||||
@ -183,7 +183,7 @@ python path/to/model_optimizer/mo.py \
|
||||
* If the backbone of the model is Darknet53-FPN, use the following MO command line:
|
||||
|
||||
```sh
|
||||
python path/to/model_optimizer/mo.py \
|
||||
mo \
|
||||
--input_model /path/to/yolact.onnx \
|
||||
--reverse_input_channels \
|
||||
--scale 255
|
||||
|
@ -22,7 +22,7 @@ The original AOCR model contains data preprocessing which consists of the follow
|
||||
|
||||
After that, the resized image is sent to the convolutional neural network (CNN). The Model Optimizer does not support image decoding, so you should cut off the preprocessing part of the model using the `--input` command line parameter.
|
||||
```sh
|
||||
python3 path/to/model_optimizer/mo_tf.py \
|
||||
mo \
|
||||
--input_model=model/path/frozen_graph.pb \
|
||||
--input="map/TensorArrayStack/TensorArrayGatherV3:0[1 32 86 1]" \
|
||||
--output "transpose_1,transpose_2" \
|
||||
|
@ -32,7 +32,7 @@ Pre-trained model meta-graph files are `bert_model.ckpt.*`.
|
||||
|
||||
To generate the BERT Intermediate Representation (IR) of the model, run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 ./mo_tf.py
|
||||
mo \
|
||||
--input_meta_graph uncased_L-12_H-768_A-12/bert_model.ckpt.meta \
|
||||
--output bert/pooler/dense/Tanh \
|
||||
--input Placeholder{i32},Placeholder_1{i32},Placeholder_2{i32}
|
||||
@ -109,9 +109,9 @@ python3 run_classifier.py \
|
||||
|
||||
Run the Model Optimizer with the following command line parameters to generate reshape-able BERT Intermediate Representation (IR):
|
||||
```sh
|
||||
python3 ./mo_tf.py \
|
||||
--input_model inference_graph.pb \
|
||||
--input "IteratorGetNext:0{i32}[1 128],IteratorGetNext:1{i32}[1 128],IteratorGetNext:4{i32}[1 128]"
|
||||
mo \
|
||||
--input_model inference_graph.pb \
|
||||
--input "IteratorGetNext:0{i32}[1 128],IteratorGetNext:1{i32}[1 128],IteratorGetNext:4{i32}[1 128]"
|
||||
```
|
||||
For other applicable parameters, refer to [Convert Model from TensorFlow](../Convert_Model_From_TensorFlow.md).
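Because the IR generated this way is reshape-able, the batch size and sequence length can also be changed after conversion instead of re-running the Model Optimizer. A minimal sketch with the OpenVINO Python API, assuming the IR file is named `inference_graph.xml`; the target shape values are examples only.
```python
# Minimal sketch: reshape all inputs of the reshape-able BERT IR to a new
# batch size and sequence length before compiling it.
from openvino.runtime import Core

core = Core()
model = core.read_model("inference_graph.xml")
new_shape = [4, 256]  # example values: batch size 4, sequence length 256
model.reshape({model_input.get_any_name(): new_shape for model_input in model.inputs})
compiled = core.compile_model(model, "CPU")
```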
|
||||
|
||||
|
@ -47,7 +47,7 @@ python tools/test_shadownet.py --image_path data/test_images/test_01.jpg --weigh
|
||||
|
||||
**Step 4.** Convert the model into IR:
|
||||
```sh
|
||||
python3 path/to/model_optimizer/mo_tf.py --input_model path/to/your/CRNN_Tensorflow/frozen_graph.pb
|
||||
mo --input_model path/to/your/CRNN_Tensorflow/frozen_graph.pb
|
||||
```
|
||||
|
||||
|
||||
|
@ -68,8 +68,8 @@ There are certain limitations for the model conversion:
|
||||
|
||||
To generate the IR, run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 {path_to_mo}/mo_tf.py \
|
||||
--input_model output_graph.pb \
|
||||
mo \
|
||||
--input_model output_graph.pb \
|
||||
--input "input_lengths->[16],input_node[1 16 19 26],previous_state_h[1 2048],previous_state_c[1 2048]" \
|
||||
--output "cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd_1,cudnn_lstm/rnn/multi_rnn_cell/cell_0/cudnn_compatible_lstm_cell/GatherNd,logits"
|
||||
```
|
||||
|
@ -35,7 +35,7 @@ tar zxvf efficientdet-d4.tar.gz
|
||||
```
|
||||
5. Freeze the model:<br>
|
||||
```sh
|
||||
python3 model_inspect.py --runmode=saved_model --model_name=efficientdet-d4 --ckpt_path=efficientdet-d4 --saved_model_dir=savedmodeldir
|
||||
mo --runmode=saved_model --model_name=efficientdet-d4 --ckpt_path=efficientdet-d4 --saved_model_dir=savedmodeldir
|
||||
```
|
||||
As a result the frozen model file `savedmodeldir/efficientdet-d4_frozen.pb` will be generated.
|
||||
|
||||
@ -47,9 +47,9 @@ As a result the frozen model file `savedmodeldir/efficientdet-d4_frozen.pb` will
|
||||
|
||||
To generate the IR of the EfficientDet TensorFlow model, run:<br>
|
||||
```sh
|
||||
python3 $INTEL_OPENVINO_DIR/tools/model_optimizer/mo.py \
|
||||
mo \
|
||||
--input_model savedmodeldir/efficientdet-d4_frozen.pb \
|
||||
--transformations_config $INTEL_OPENVINO_DIR/tools/model_optimizer/extensions/front/tf/automl_efficientdet.json \
|
||||
--transformations_config front/tf/automl_efficientdet.json \
|
||||
--input_shape [1,$IMAGE_SIZE,$IMAGE_SIZE,3] \
|
||||
--reverse_input_channels
|
||||
```
|
||||
@ -61,7 +61,7 @@ The attribute `image_size` specifies the shape to be specified for the model con
|
||||
|
||||
The `transformations_config` command line parameter specifies the configuration json file containing hints
|
||||
to the Model Optimizer on how to convert the model and trigger transformations implemented in the
|
||||
`$MO_ROOT/extensions/front/tf/AutomlEfficientDet.py`. The json file contains some parameters which must be changed if you
|
||||
`<PYTHON_SITE_PACKAGES>/openvino/tools/mo/front/tf/AutomlEfficientDet.py`. The json file contains some parameters which must be changed if you
|
||||
trained the model yourself and modified the `hparams_config` file, or if the parameters differ from the ones used for EfficientDet-D4.
|
||||
The attribute names are self-explanatory or match the name in the `hparams_config` file.
|
||||
|
||||
|
@ -14,7 +14,7 @@ There are two inputs in this network: boolean `phase_train` which manages state
|
||||
|
||||
To generate the FaceNet IR, provide the TensorFlow FaceNet model to the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 ./mo_tf.py
|
||||
mo
|
||||
--input_model path_to_model/model_name.pb \
|
||||
--freeze_placeholder_with_value "phase_train->False"
|
||||
```
|
||||
|
@ -196,7 +196,7 @@ tgt_vocab_size -= 1
|
||||
**Step 4**. Convert the model to the IR:
|
||||
|
||||
```sh
|
||||
python3 path/to/model_optimizer/mo_tf.py
|
||||
mo
|
||||
--input_model /path/to/dump/model/frozen_GNMT_inference_graph.pb
|
||||
--input "IteratorGetNext:1{i32}[1],IteratorGetNext:0{i32}[1 50],dynamic_seq2seq/hash_table_Lookup_1:0[1]->[2],dynamic_seq2seq/hash_table_Lookup:0[1]->[1]"
|
||||
--output dynamic_seq2seq/decoder/decoder/GatherTree
|
||||
|
@ -30,7 +30,7 @@ it has one input that is split into four `ResourceGather` layers. (Click image t
|
||||
But as the Model Optimizer does not support such data feeding, you should skip it. Cut
|
||||
the edges incoming to port 1 of the `ResourceGather` layers:
|
||||
```sh
|
||||
python3 mo_tf.py --input_model inference_graph.pb \
|
||||
mo --input_model inference_graph.pb \
|
||||
--input 1:embedding/embedding_lookup,1:embedding_1/embedding_lookup, \
|
||||
1:embedding_2/embedding_lookup,1:embedding_3/embedding_lookup \
|
||||
--input_shape [256],[256],[256],[256] \
|
||||
@ -40,7 +40,7 @@ In the `input_shape` parameter, 256 specifies the `batch_size` for your model.
|
||||
|
||||
Alternatively, you can do steps 2 and 3 in one command line:
|
||||
```sh
|
||||
python3 mo_tf.py --input_meta_graph /path/to/model/model.meta \
|
||||
mo --input_meta_graph /path/to/model/model.meta \
|
||||
--input 1:embedding/embedding_lookup,1:embedding_1/embedding_lookup, \
|
||||
1:embedding_2/embedding_lookup,1:embedding_3/embedding_lookup \
|
||||
--input_shape [256],[256],[256],[256] --output rating/BiasAdd \
|
||||
|
@ -11,10 +11,10 @@ You can download TensorFlow\* Object Detection API models from the <a href="http
|
||||
|
||||
<strong>NOTE</strong>: Before converting, make sure you have configured the Model Optimizer. For configuration steps, refer to [Configuring the Model Optimizer](../../Config_Model_Optimizer.md).
|
||||
|
||||
To convert a TensorFlow\* Object Detection API model, go to the `<INSTALL_DIR>/tools/model_optimizer` directory and run the `mo_tf.py` script with the following required parameters:
|
||||
To convert a TensorFlow\* Object Detection API model, go to the `<INSTALL_DIR>/tools/model_optimizer` directory and run the `mo` script with the following required parameters:
|
||||
|
||||
* `--input_model <path_to_frozen.pb>` --- File with a pre-trained model (binary or text .pb file after freezing) OR `--saved_model_dir <path_to_saved_model>` for the TensorFlow\* 2 models
|
||||
* `--transformations_config <path_to_subgraph_replacement_configuration_file.json>` --- A subgraph replacement configuration file with transformations description. For the models downloaded from the TensorFlow\* Object Detection API zoo, you can find the configuration files in the `<INSTALL_DIR>/tools/model_optimizer/extensions/front/tf` directory. Use:
|
||||
* `--transformations_config <path_to_subgraph_replacement_configuration_file.json>` --- A subgraph replacement configuration file with transformations description. For the models downloaded from the TensorFlow\* Object Detection API zoo, you can find the configuration files in the `<PYTHON_SITE_PACKAGES>/openvino/tools/mo/front/tf` directory. Use:
|
||||
* `ssd_v2_support.json` --- for frozen SSD topologies from the models zoo version up to 1.13.X inclusively
|
||||
* `ssd_support_api_v.1.14.json` --- for SSD topologies trained using the TensorFlow\* Object Detection API version 1.14 up to 1.14.X inclusively
|
||||
* `ssd_support_api_v.1.15.json` --- for SSD topologies trained using the TensorFlow\* Object Detection API version 1.15 up to 2.0
|
||||
@ -52,7 +52,7 @@ Additionally to the mandatory parameters listed above you can use optional conve
|
||||
For example, if you downloaded the [pre-trained SSD InceptionV2 topology](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz) and extracted the archive to the directory `/tmp/ssd_inception_v2_coco_2018_01_28`, the sample command line to convert the model looks as follows:
|
||||
|
||||
```
|
||||
<INSTALL_DIR>/tools/model_optimizer/mo_tf.py --input_model=/tmp/ssd_inception_v2_coco_2018_01_28/frozen_inference_graph.pb --transformations_config <INSTALL_DIR>/tools/model_optimizer/extensions/front/tf/ssd_v2_support.json --tensorflow_object_detection_api_pipeline_config /tmp/ssd_inception_v2_coco_2018_01_28/pipeline.config --reverse_input_channels
|
||||
mo --input_model=/tmp/ssd_inception_v2_coco_2018_01_28/frozen_inference_graph.pb --transformations_config front/tf/ssd_v2_support.json --tensorflow_object_detection_api_pipeline_config /tmp/ssd_inception_v2_coco_2018_01_28/pipeline.config --reverse_input_channels
|
||||
```
|
||||
|
||||
## OpenVINO™ Toolkit Samples and Open Model Zoo Demos
|
||||
|
@ -7,7 +7,7 @@ To convert this model to the TensorFlow\* format, you can use [Reproduce Keras*
|
||||
|
||||
After you convert the model to TensorFlow* format, run the Model Optimizer command below:
|
||||
```sh
|
||||
python mo.py --input "input_1[1 1333 1333 3]" --input_model retinanet_resnet50_coco_best_v2.1.0.pb --data_type FP32 --transformations_config ./extensions/front/tf/retinanet.json
|
||||
mo --input "input_1[1 1333 1333 3]" --input_model retinanet_resnet50_coco_best_v2.1.0.pb --data_type FP32 --transformations_config front/tf/retinanet.json
|
||||
```
|
||||
|
||||
Here, the `transformations_config` command-line parameter specifies the configuration json file containing model conversion hints for the Model Optimizer.
|
||||
|
@ -41,7 +41,7 @@ python3 tf_models/research/slim/export_inference_graph.py \
|
||||
Model Optimizer comes with the summarize graph utility, which identifies graph input and output nodes. Run the utility to determine input/output nodes of the Inception V1 model:
|
||||
|
||||
```sh
|
||||
python3 <MODEL_OPTIMIZER_INSTALL_DIR>/mo/utils/summarize_graph.py --input_model ./inception_v1_inference_graph.pb
|
||||
python3 <PYTHON_SITE_PACKAGES>/openvino/tools/mo/utils/summarize_graph.py --input_model ./inception_v1_inference_graph.pb
|
||||
```
|
||||
|
||||
The output looks as follows:<br>
|
||||
@ -56,7 +56,7 @@ The tool finds one input node with name `input`, type `float32`, fixed image siz
|
||||
Step 4. Convert the model with the Model Optimizer:
|
||||
|
||||
```sh
|
||||
<MODEL_OPTIMIZER_INSTALL_DIR>/mo_tf.py --input_model ./inception_v1_inference_graph.pb --input_checkpoint ./inception_v1.ckpt -b 1 --mean_value [127.5,127.5,127.5] --scale 127.5
|
||||
mo --input_model ./inception_v1_inference_graph.pb --input_checkpoint ./inception_v1.ckpt -b 1 --mean_value [127.5,127.5,127.5] --scale 127.5
|
||||
```
|
||||
|
||||
The `-b` command line parameter is required because the Model Optimizer cannot convert a model with undefined input size.
|
||||
|
@ -92,7 +92,7 @@ python census_main.py
|
||||
Use the following command line to convert the saved model file with the checkpoint:
|
||||
|
||||
```sh
|
||||
python mo.py
|
||||
mo
|
||||
--input_checkpoint checkpoint --input_meta_graph model.ckpt.meta
|
||||
--input "IteratorGetNext:0[2],
|
||||
IteratorGetNext:1[2],
|
||||
|
@ -186,7 +186,7 @@ The script should save into `~/XLNet-Large/xlnet`.
|
||||
|
||||
To generate the XLNet Intermediate Representation (IR) of the model, run the Model Optimizer with the following parameters:
|
||||
```sh
|
||||
python3 mo.py --input_model path-to-model/model_frozen.pb \
|
||||
--input "input_mask[50 1],input_ids[50 1],seg_ids[50 1]"
|
||||
mo --input_model path-to-model/model_frozen.pb \
|
||||
--input "input_mask[50 1],input_ids[50 1],seg_ids[50 1]"
|
||||
```
|
||||
|
||||
|
@ -37,7 +37,7 @@ python converter.py
|
||||
|
||||
> **NOTE:** Before you run the conversion, make sure you have installed all the Model Optimizer dependencies for TensorFlow 2.
|
||||
```sh
|
||||
python mo.py --saved_model_dir yolov4 --output_dir models/IRs --input_shape [1,608,608,3] --model_name yolov4
|
||||
mo --saved_model_dir yolov4 --output_dir models/IRs --input_shape [1,608,608,3] --model_name yolov4
|
||||
```
|
||||
|
||||
## <a name="yolov3-to-ir"></a>Convert YOLOv3 Model to IR
|
||||
@ -124,18 +124,18 @@ where:
|
||||
|
||||
To generate an IR of the YOLOv3 TensorFlow model, run:<br>
|
||||
```sh
|
||||
python3 mo_tf.py \
|
||||
mo \
|
||||
--input_model /path/to/yolo_v3.pb \
|
||||
--transformations_config $MO_ROOT/extensions/front/tf/yolo_v3.json \
|
||||
--transformations_config front/tf/yolo_v3.json \
|
||||
--batch 1 \
|
||||
--output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
|
||||
To generate an IR of the YOLOv3-tiny TensorFlow model, run:<br>
|
||||
```sh
|
||||
python3 mo_tf.py \
|
||||
mo \
|
||||
--input_model /path/to/yolo_v3_tiny.pb \
|
||||
--transformations_config $MO_ROOT/extensions/front/tf/yolo_v3_tiny.json \
|
||||
--transformations_config front/tf/yolo_v3_tiny.json \
|
||||
--batch 1 \
|
||||
--output_dir <OUTPUT_MODEL_DIR>
|
||||
```
|
||||
@ -213,11 +213,11 @@ create another configuration file with custom operations and use it for conversi
|
||||
|
||||
To generate the IR of the YOLOv1 model, provide the TensorFlow YOLOv1 or YOLOv2 model to the Model Optimizer with the following parameters:<br>
|
||||
```sh
|
||||
python3 ./mo_tf.py
|
||||
mo
|
||||
--input_model <path_to_model>/<model_name>.pb \
|
||||
--batch 1 \
|
||||
--scale 255 \
|
||||
--transformations_config <OPENVINO_INSTALL_DIR>/tools/model_optimizer/extensions/front/tf/<yolo_config>.json
|
||||
--transformations_config front/tf/<yolo_config>.json
|
||||
```
|
||||
where:
|
||||
|
||||
|
@ -86,7 +86,7 @@ There is a certain limitations for the model conversion:
|
||||
To generate the `lm_1b` Intermediate Representation (IR), provide the TensorFlow `lm_1b` model to the
|
||||
Model Optimizer with parameters:
|
||||
```sh
|
||||
python3 ./mo_tf.py
|
||||
mo
|
||||
--input_model lm_1b/graph-2016-09-10.pbtxt \
|
||||
--input_checkpoint lm_1b/ckpt \
|
||||
--input_model_is_text \
|
||||
|
@ -279,7 +279,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M
|
||||
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||
```
|
||||
```sh
|
||||
python3 ./mo.py --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||
mo --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||
```
|
||||
The produced IR files are in the `<ir_dir>` directory.
|
||||
|
||||
@ -292,7 +292,7 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP
|
||||
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||
```
|
||||
```sh
|
||||
python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
||||
mo --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
||||
```
|
||||
|
||||
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
|
||||
|
@ -261,7 +261,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M
|
||||
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||
```
|
||||
```sh
|
||||
python3 ./mo.py --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||
mo --input_model <model_dir>/<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||
```
|
||||
The produced IR files are in the `<ir_dir>` directory.
|
||||
|
||||
@ -274,7 +274,7 @@ The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP
|
||||
cd /opt/intel/openvino_2022/tools/model_optimizer
|
||||
```
|
||||
```sh
|
||||
python3 ./mo.py --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
||||
mo --input_model ~/models/public/squeezenet1.1/squeezenet1.1.caffemodel --data_type FP16 --output_dir ~/models/public/squeezenet1.1/ir
|
||||
```
|
||||
|
||||
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `~/models/public/squeezenet1.1/ir` directory.
|
||||
|
@ -257,7 +257,7 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M
|
||||
cd <INSTALL_DIR>\tools\model_optimizer
|
||||
```
|
||||
```bat
|
||||
python .\mo.py --input_model <model_dir>\<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||
mo --input_model <model_dir>\<model_file> --data_type <model_precision> --output_dir <ir_dir>
|
||||
```
|
||||
The produced IR files are in the `<ir_dir>` directory.
|
||||
|
||||
@ -266,11 +266,8 @@ The `squeezenet1.1` model is downloaded in the Caffe* format. You must use the M
|
||||
|
||||
The following command converts the public SqueezeNet 1.1 Caffe\* model to the FP16 IR and saves to the `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` output directory:
|
||||
|
||||
```bat
|
||||
cd <INSTALL_DIR>\tools\model_optimizer
|
||||
```
|
||||
```bat
|
||||
python .\mo.py --input_model C:\Users\username\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir C:\Users\username\Documents\models\public\squeezenet1.1\ir
|
||||
mo --input_model C:\Users\<username>\Documents\models\public\squeezenet1.1\squeezenet1.1.caffemodel --data_type FP16 --output_dir C:\Users\username\Documents\models\public\squeezenet1.1\ir
|
||||
```
|
||||
|
||||
After the Model Optimizer script is completed, the produced IR files (`squeezenet1.1.xml`, `squeezenet1.1.bin`) are in the specified `C:\Users\<USER_ID>\Documents\models\public\squeezenet1.1\ir` directory.
|
||||
|
@ -80,7 +80,7 @@ python3 /opt/intel/openvino_2022/extras/open_model_zoo/tools/downloader/pytorch_
|
||||
```
|
||||
Convert the model from ONNX to the OpenVINO™ Intermediate Representation (IR):
|
||||
```
|
||||
mo_onnx.py \
|
||||
mo \
|
||||
-m mobilenet-v2.onnx \
|
||||
--input=data \
|
||||
--mean_values=data[123.675,116.28,103.53] \
|
||||
|
@ -147,7 +147,7 @@ The Model Optimizer is a key component of the Intel® Distribution of OpenVINO
|
||||
|
||||
The Inference Engine reads, loads, and infers the IR files, using a common API across the CPU, GPU, or VPU hardware.
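As a rough illustration of that common API (not part of the installation steps), the same few calls read an IR and compile it for any supported device; `model.xml` and the device names below are placeholders, and the snippet assumes the OpenVINO Python package is installed.
```python
# Minimal sketch: the same code path serves CPU, GPU, or VPU targets.
import numpy as np
from openvino.runtime import Core

core = Core()
model = core.read_model("model.xml")
compiled = core.compile_model(model, "CPU")  # e.g. "GPU" or "MYRIAD" instead of "CPU"
request = compiled.create_infer_request()
dummy = np.zeros(tuple(compiled.inputs[0].shape), dtype=np.float32)
result = request.infer([dummy])
```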
|
||||
|
||||
The Model Optimizer is a Python*-based command line tool (`mo.py`), which is located in `C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.
|
||||
The Model Optimizer is a Python*-based command line tool (`mo`), which is located in `C:\Program Files (x86)\Intel\openvino_2022\tools\model_optimizer`. Use this tool on models trained with popular deep learning frameworks such as Caffe\*, TensorFlow\*, MXNet\*, and ONNX\* to convert them to an optimized IR format that the Inference Engine can use.
|
||||
|
||||
This section explains how to use scripts to configure the Model Optimizer either for all of the supported frameworks at the same time or for individual frameworks. If you want to manually configure the Model Optimizer instead of using scripts, see the **Using Manual Configuration Process** section on the [Configuring the Model Optimizer](../MO_DG/prepare_model/Config_Model_Optimizer.md) page.
|
||||
|
||||
|
@ -12,7 +12,7 @@ OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applicatio
|
||||
|
||||
The Intel® Distribution of OpenVINO™ toolkit\*:
|
||||
- Enables CNN-based deep learning inference on the edge
|
||||
- Supports heterogeneous execution across Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
|
||||
- Supports heterogeneous execution across Intel® CPU and Intel® Integrated Graphics
|
||||
- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels
|
||||
|
||||
The **runtime package** includes the following components installed by default:
|
||||
|
@ -3,7 +3,7 @@
|
||||
nGraph representation provides an API to get detailed information about the graph structure.
|
||||
|
||||
To receive additional messages about applied graph modifications, rebuild the nGraph library with
|
||||
the `-DOPENVINO_DEBUG_ENABLE=ON` option.
|
||||
the `-DENABLE_OPENVINO_DEBUG=ON` option.
|
||||
|
||||
To visualize the nGraph function to the xDot format or to an image file, use the
|
||||
`ngraph::pass::VisualizeTree` graph transformation pass:
|
||||
|
@ -45,7 +45,7 @@ if(OpenCV_FOUND)
|
||||
target_link_libraries(${TARGET_NAME} PRIVATE opencv_core)
|
||||
endif()
|
||||
|
||||
if(NGRAPH_ONNX_FRONTEND_ENABLE)
|
||||
if(ENABLE_OV_ONNX_FRONTEND)
|
||||
target_link_libraries(${TARGET_NAME} PRIVATE onnx_ov_frontend)
|
||||
endif()
|
||||
|
||||
|
@ -284,7 +284,7 @@ static RefPreprocessParams resize_to_network_height() {
|
||||
p.input()
|
||||
.tensor().set_spatial_dynamic_shape();
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
|
||||
p.input().network().set_layout("NHWC");
|
||||
p.input().model().set_layout("NHWC");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -301,7 +301,7 @@ static RefPreprocessParams resize_to_network_width() {
|
||||
p.input()
|
||||
.tensor().set_spatial_dynamic_shape();
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -319,7 +319,7 @@ static RefPreprocessParams resize_from_spatial_dims() {
|
||||
p.input()
|
||||
.tensor().set_spatial_static_shape(1, 4);
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -337,7 +337,7 @@ static RefPreprocessParams resize_i8() {
|
||||
.tensor()
|
||||
.set_spatial_dynamic_shape();
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -356,7 +356,7 @@ static RefPreprocessParams resize_to_network_width_height() {
|
||||
p.input()
|
||||
.tensor().set_spatial_static_shape(5, 5);
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST);
|
||||
p.input().network().set_layout("...HW");
|
||||
p.input().model().set_layout("...HW");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -386,7 +386,7 @@ static RefPreprocessParams resize_to_specified_width_height() {
|
||||
p.input()
|
||||
.tensor().set_spatial_dynamic_shape();
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_NEAREST, 4, 4);
|
||||
p.input().network().set_layout("...HW");
|
||||
p.input().model().set_layout("...HW");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -524,7 +524,7 @@ static RefPreprocessParams resize_and_convert_layout() {
|
||||
p.input().preprocess()
|
||||
.resize(ResizeAlgorithm::RESIZE_LINEAR)
|
||||
.convert_layout();
|
||||
p.input().network().set_layout("NHWC");
|
||||
p.input().model().set_layout("NHWC");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -639,7 +639,7 @@ static RefPreprocessParams convert_color_nv12_layout_resize() {
|
||||
.convert_layout()
|
||||
.convert_element_type(element::f32)
|
||||
.resize(ResizeAlgorithm::RESIZE_NEAREST);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -676,7 +676,7 @@ static RefPreprocessParams element_type_before_convert_color_nv12() {
|
||||
p.input().preprocess()
|
||||
.convert_element_type(element::f32)
|
||||
.convert_color(ColorFormat::RGB);
|
||||
p.input().network().set_layout("NHWC");
|
||||
p.input().model().set_layout("NHWC");
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
@ -766,13 +766,43 @@ static RefPreprocessParams convert_color_i420_single_plane() {
|
||||
return res;
|
||||
}
|
||||
|
||||
static RefPreprocessParams set_shape_custom_crop() {
|
||||
RefPreprocessParams res("set_shape_custom_crop");
|
||||
res.function = []() {
|
||||
auto f = create_simple_function(element::f32, PartialShape{2, 2, 2, 2});
|
||||
auto p = PrePostProcessor(f);
|
||||
p.input().tensor().set_shape({-1, -1, -1, -1});
|
||||
p.input().preprocess().custom([](const Output<Node>& node) {
|
||||
// Add custom crop to model's dimensions using 'Slice' operation
|
||||
// Middle part 2x2x2x2 of original user's 4x4x4x4 input tensor will be extracted
|
||||
auto start = opset8::Constant::create(element::i32, {4}, {1, 1, 1, 1});
|
||||
auto stop = opset8::Constant::create(element::i32, {4}, {3, 3, 3, 3});
|
||||
auto step = opset8::Constant::create(element::i32, {4}, {1, 1, 1, 1});
|
||||
auto axis = opset8::Constant::create(element::i32, {4}, {0, 1, 2, 3});
|
||||
auto slice = std::make_shared<opset8::Slice>(node, start, stop, step, axis);
|
||||
return slice;
|
||||
});
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
auto input_size = 4 * 4 * 4 * 4;
|
||||
std::vector<float> input_values(input_size);
|
||||
std::iota(input_values.begin(), input_values.end(), 0);
|
||||
res.inputs.emplace_back(element::f32, Shape{4, 4, 4, 4}, input_values);
|
||||
res.expected.emplace_back(Shape{2, 2, 2, 2}, element::f32, std::vector<float>{ 85, 86, 89, 90,
|
||||
101, 102, 105, 106,
|
||||
149, 150, 153, 154,
|
||||
165, 166, 169, 170});
|
||||
return res;
|
||||
}
|
||||
|
||||
static RefPreprocessParams postprocess_2_inputs_basic() {
|
||||
RefPreprocessParams res("postprocess_2_inputs_basic");
|
||||
res.function = []() {
|
||||
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 1, 2});
|
||||
auto p = PrePostProcessor(f);
|
||||
p.output("tensor_output1")
|
||||
.network().set_layout("NCHW");
|
||||
.model().set_layout("NCHW");
|
||||
p.output("tensor_output1").postprocess().convert_layout();
|
||||
p.output("tensor_output1").tensor().set_layout("NHWC");
|
||||
p.output("tensor_output2")
|
||||
@ -838,7 +868,7 @@ static RefPreprocessParams pre_and_post_processing() {
|
||||
p.input(0).preprocess().convert_element_type(element::f32).mean(1.f);
|
||||
p.input(1).preprocess().scale(2.f);
|
||||
p.output("tensor_output1")
|
||||
.network().set_layout("NCHW");
|
||||
.model().set_layout("NCHW");
|
||||
p.output("tensor_output1").postprocess().convert_layout();
|
||||
p.output("tensor_output1").tensor().set_layout("NHWC");
|
||||
p.output("tensor_output2")
|
||||
@ -972,6 +1002,25 @@ static RefPreprocessParams reverse_dyn_shape() {
|
||||
return res;
|
||||
}
|
||||
|
||||
static RefPreprocessParams reverse_dyn_channels() {
|
||||
RefPreprocessParams res("reverse_dyn_channels");
|
||||
res.function = []() {
|
||||
auto f = create_simple_function(element::u8, PartialShape{Dimension::dynamic(),
|
||||
2,
|
||||
Dimension::dynamic(),
|
||||
Dimension::dynamic()});
|
||||
auto p = PrePostProcessor(f);
|
||||
p.input().tensor().set_layout("NCHW");
|
||||
p.input().preprocess().reverse_channels();
|
||||
p.build();
|
||||
return f;
|
||||
};
|
||||
|
||||
res.inputs.emplace_back(element::u8, Shape{2, 2, 1, 3}, std::vector<uint8_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
|
||||
res.expected.emplace_back(Shape{2, 2, 1, 3}, element::u8, std::vector<uint8_t>{4, 5, 6, 1, 2, 3, 10, 11, 12, 7, 8, 9});
|
||||
return res;
|
||||
}
|
||||
|
||||
static RefPreprocessParams reverse_fully_dyn_shape() {
|
||||
RefPreprocessParams res("reverse_fully_dyn_shape");
|
||||
res.function = []() {
|
||||
@ -1018,6 +1067,7 @@ std::vector<RefPreprocessParams> allPreprocessTests() {
|
||||
element_type_before_convert_color_nv12(),
|
||||
convert_color_i420_to_bgr_three_planes(),
|
||||
convert_color_i420_single_plane(),
|
||||
set_shape_custom_crop(),
|
||||
postprocess_2_inputs_basic(),
|
||||
post_convert_layout_by_dims(),
|
||||
post_convert_layout_by_dims_multi(),
|
||||
@ -1028,6 +1078,7 @@ std::vector<RefPreprocessParams> allPreprocessTests() {
|
||||
reverse_channels_nchw(),
|
||||
reverse_channels_dyn_layout(),
|
||||
reverse_dyn_shape(),
|
||||
reverse_dyn_channels(),
|
||||
reverse_fully_dyn_shape()
|
||||
};
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ TEST_F(ReferencePreprocessLegacyTest, resize) {
|
||||
auto p = PrePostProcessor(function);
|
||||
p.input().tensor().set_layout("NCHW").set_spatial_static_shape(42, 30);
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
p.build();
|
||||
|
||||
auto &preProcess = legacy_network.getInputsInfo().begin()->second->getPreProcess();
|
||||
@ -126,7 +126,7 @@ TEST_F(ReferencePreprocessLegacyTest, bgrx_to_bgr) {
|
||||
auto& input = p.input();
|
||||
input.tensor().set_color_format(ColorFormat::BGRX).set_element_type(element::u8);
|
||||
input.preprocess().convert_color(ColorFormat::BGR);
|
||||
input.network().set_layout("NCHW");
|
||||
input.model().set_layout("NCHW");
|
||||
function = p.build();
|
||||
inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data());
|
||||
|
||||
@ -156,7 +156,7 @@ TEST_F(ReferencePreprocessLegacyTest, rgbx_to_bgr) {
|
||||
auto& input = p.input();
|
||||
input.tensor().set_color_format(ColorFormat::RGBX).set_element_type(element::u8);
|
||||
input.preprocess().convert_color(ColorFormat::BGR);
|
||||
input.network().set_layout("NCHW");
|
||||
input.model().set_layout("NCHW");
|
||||
function = p.build();
|
||||
inputData.emplace_back(element::u8, Shape{1, h, w, 4}, rgbx_input.data());
|
||||
|
||||
@ -184,7 +184,7 @@ public:
|
||||
auto p = PrePostProcessor(function);
|
||||
p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE);
|
||||
p.input().preprocess().convert_color(ColorFormat::BGR);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
p.build();
|
||||
|
||||
const auto &param = function->get_parameters()[0];
|
||||
@ -261,7 +261,7 @@ public:
|
||||
auto& input_info = p.input();
|
||||
input_info.tensor().set_color_format(ColorFormat::I420_SINGLE_PLANE);
|
||||
input_info.preprocess().convert_color(ColorFormat::BGR);
|
||||
input_info.network().set_layout("NCHW");
|
||||
input_info.model().set_layout("NCHW");
|
||||
function = p.build();
|
||||
|
||||
const auto &param = function->get_parameters()[0];
|
||||
|
@ -165,7 +165,7 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_u8_simple_linear) {
|
||||
auto p = PrePostProcessor(function);
|
||||
p.input().tensor().set_spatial_static_shape(2, 2);
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
function = p.build();
|
||||
|
||||
const auto &param = function->get_parameters()[0];
|
||||
@ -203,7 +203,7 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_u8_large_picture_linear) {
|
||||
auto p = PrePostProcessor(function);
|
||||
p.input().tensor().set_spatial_static_shape(input_height, input_width);
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
function = p.build();
|
||||
|
||||
const auto &param = function->get_parameters()[0];
|
||||
@ -240,7 +240,7 @@ TEST_F(PreprocessOpenCVReferenceTest, resize_f32_large_picture_linear) {
|
||||
auto p = PrePostProcessor(function);
|
||||
p.input().tensor().set_spatial_static_shape(input_height, input_width);
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
function = p.build();
|
||||
|
||||
const auto &param = function->get_parameters()[0];
|
||||
@ -268,7 +268,7 @@ TEST_F(PreprocessOpenCVReferenceTest, DISABLED_resize_f32_large_picture_cubic_sm
|
||||
auto p = PrePostProcessor(function);
|
||||
p.input().tensor().set_spatial_static_shape(input_height, input_width);
|
||||
p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC);
|
||||
p.input().network().set_layout("NCHW");
|
||||
p.input().model().set_layout("NCHW");
|
||||
function = p.build();
|
||||
|
||||
inputData.emplace_back(element_type, input_shape, input_img.data());
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <pot_transformations.hpp>
|
||||
#include <pruning.hpp>
|
||||
#include <transformations/common_optimizations/compress_float_constants.hpp>
|
||||
#include <transformations/common_optimizations/division_by_zero_fp16_resolver.hpp>
|
||||
#include <transformations/common_optimizations/mark_precision_sensitive_subgraphs.hpp>
|
||||
#include <transformations/common_optimizations/moc_transformations.hpp>
|
||||
#include <transformations/control_flow/unroll_tensor_iterator.hpp>
|
||||
@ -60,6 +61,7 @@ void InferenceEnginePython::GenerateMappingFile(InferenceEnginePython::IENetwork
|
||||
|
||||
void InferenceEnginePython::CompressModelTransformation(InferenceEnginePython::IENetwork network) {
|
||||
ngraph::pass::Manager manager;
|
||||
manager.register_pass<ov::pass::DivisionByZeroFP16Resolver>();
|
||||
manager.register_pass<ov::pass::MarkPrecisionSensitiveSubgraphs>();
|
||||
manager.register_pass<ov::pass::CompressFloatConstants>();
|
||||
manager.run_passes(network.actual->getFunction());
|
||||
|
10
inference-engine/ie_bridges/python/wheel/readme.txt
Normal file
@ -0,0 +1,10 @@
|
||||
“LEGAL NOTICE: Your use of this software and any required dependent software (the “Software Package”) is subject to the terms and conditions of the software license agreements for the Software Package, which may also include notices, disclaimers, or license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details.
|
||||
|
||||
------------------------------------------------------------------------
|
||||
Components and their third party programs:
|
||||
|
||||
* OpenVINO(TM) Runtime (Apache 2.0): <install_root>/runtime-third-party-programs.txt, <install_root>/onednn_third-party-programs.txt, <install_root>/tbb_third-party-programs.txt
|
||||
|
||||
------------------------------------------------------------------------
|
||||
Licenses:
|
||||
* Apache 2.0 <install_root>/LICENSE
|
@ -78,12 +78,6 @@ LIB_INSTALL_CFG = {
|
||||
'install_dir': OV_RUNTIME_LIBS_DIR,
|
||||
'rpath': LIBS_RPATH,
|
||||
},
|
||||
'myriad_plugin': {
|
||||
'name': 'myriad',
|
||||
'prefix': 'libs.core',
|
||||
'install_dir': OV_RUNTIME_LIBS_DIR,
|
||||
'rpath': LIBS_RPATH,
|
||||
},
|
||||
'ngraph_libs': {
|
||||
'name': 'ngraph',
|
||||
'prefix': 'libs.core',
|
||||
@ -183,7 +177,7 @@ class CustomBuild(build):
|
||||
self.spawn(['cmake', '-H' + str(openvino_root_dir), '-B' + self.build_temp,
|
||||
'-DCMAKE_BUILD_TYPE={type}'.format(type=self.config),
|
||||
'-DENABLE_PYTHON=ON',
|
||||
'-DNGRAPH_ONNX_FRONTEND_ENABLE=ON'])
|
||||
'-DENABLE_OV_ONNX_FRONTEND=ON'])
|
||||
|
||||
self.announce('Building binaries', level=3)
|
||||
self.spawn(['cmake', '--build', self.build_temp,
|
||||
|
@ -934,7 +934,7 @@ public:
|
||||
normalize_kernel.reset(
|
||||
new jit_uni_normalize_kernel_f32<cpu::x64::avx2>(jcp, *kernel_attrs.get()));
|
||||
} else if (mayiuse(cpu::x64::sse41)) {
|
||||
blk_size = 4;
|
||||
blk_size = jcp.is_blk ? 8 : 4;
|
||||
normalize_modulo_kernel.reset(new jit_uni_normalize_modulo_kernel_f32<cpu::x64::sse41>(jcp));
|
||||
normalize_kernel.reset(
|
||||
new jit_uni_normalize_kernel_f32<cpu::x64::sse41>(jcp, *kernel_attrs.get()));
|
||||
|
@ -2,23 +2,30 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <openvino/core/node.hpp>
|
||||
#include "shape_inference.hpp"
|
||||
|
||||
#include <ngraph/runtime/host_tensor.hpp>
|
||||
#include <openvino/core/node.hpp>
|
||||
#include <openvino/opsets/opset1.hpp>
|
||||
#include <openvino/opsets/opset2.hpp>
|
||||
#include <openvino/opsets/opset4.hpp>
|
||||
#include <openvino/opsets/opset5.hpp>
|
||||
#include <openvino/opsets/opset6.hpp>
|
||||
#include <openvino/opsets/opset8.hpp>
|
||||
#include "static_shape.hpp"
|
||||
#include "utils.hpp"
|
||||
#include "shape_inference.hpp"
|
||||
#include "convolution_shape_inference.hpp"
|
||||
#include "reduce_shape_inference.hpp"
|
||||
#include "shape_nodes.hpp"
|
||||
#include "fake_quantize.hpp"
|
||||
#include "experimental_detectron_detection_output_shape_inference.hpp"
|
||||
|
||||
#include "assign_shape_inference.hpp"
|
||||
#include "convolution_shape_inference.hpp"
|
||||
#include "experimental_detectron_detection_output_shape_inference.hpp"
|
||||
#include "experimental_detectron_prior_grid_generator_shape_inference.hpp"
|
||||
#include "fake_quantize.hpp"
|
||||
#include "lstm_cell_shape_inference.hpp"
|
||||
#include "read_value_shape_inference.hpp"
|
||||
#include "reduce_shape_inference.hpp"
|
||||
#include "shape_inference.hpp"
|
||||
#include "shape_nodes.hpp"
|
||||
#include "static_shape.hpp"
|
||||
#include "tile_shape_inference.hpp"
|
||||
#include "utils.hpp"
|
||||
|
||||
void shape_inference(ov::Node* op,
|
||||
const std::vector<ov::StaticShape>& input_shapes,
|
||||
@ -27,44 +34,53 @@ void shape_inference(ov::Node* op,
|
||||
if (auto node = ov::as_type<ov::opset8::Convolution>(op)) {
|
||||
ov::CoordinateDiff pads_begin, pads_end;
|
||||
bool status = resolve_auto_pad_for_shape(node, pads_begin, pads_end, input_shapes, 2, 2);
|
||||
OPENVINO_ASSERT(status, "Convolution shape inference doesn't have enough information to calculate static shapes");
|
||||
OPENVINO_ASSERT(status,
|
||||
"Convolution shape inference doesn't have enough information to calculate static shapes");
|
||||
shape_infer(node, pads_begin, pads_end, input_shapes, output_shapes);
|
||||
} else if (auto node = ov::as_type<ov::opset8::GroupConvolution>(op)) {
|
||||
ov::CoordinateDiff pads_begin, pads_end;
|
||||
bool status = resolve_auto_pad_for_shape(node, pads_begin, pads_end, input_shapes, 2, 3);
|
||||
OPENVINO_ASSERT(status, "GroupConvolution shape inference doesn't have enough information to calculate static shapes");
|
||||
OPENVINO_ASSERT(status,
|
||||
"GroupConvolution shape inference doesn't have enough information to calculate static shapes");
|
||||
shape_infer(node, pads_begin, pads_end, input_shapes, output_shapes);
|
||||
} else if (auto node = ov::as_type<ov::opset8::ConvolutionBackpropData>(op)) {
|
||||
ov::CoordinateDiff pads_begin, pads_end;
|
||||
ov::StaticShape output_shape_input;
|
||||
if (node->get_input_size() == 3)
|
||||
get_data_as_shape<ov::StaticShape>(2, op, output_shape_input, constant_data);
|
||||
bool status = resolve_auto_pad_for_shape_back_prop(node, pads_begin, pads_end, input_shapes, output_shape_input, 2, 2);
|
||||
OPENVINO_ASSERT(status, "ConvolutionBackpropData shape inference doesn't have enough information to calculate static shapes");
|
||||
bool status =
|
||||
resolve_auto_pad_for_shape_back_prop(node, pads_begin, pads_end, input_shapes, output_shape_input, 2, 2);
|
||||
OPENVINO_ASSERT(
|
||||
status,
|
||||
"ConvolutionBackpropData shape inference doesn't have enough information to calculate static shapes");
|
||||
shape_infer(node, pads_begin, pads_end, output_shape_input, input_shapes, output_shapes);
|
||||
} else if (auto node = ov::as_type<ov::opset8::GroupConvolutionBackpropData>(op)) {
|
||||
ov::CoordinateDiff pads_begin, pads_end;
|
||||
ov::StaticShape output_shape_input;
|
||||
if (node->get_input_size() == 3)
|
||||
get_data_as_shape<ov::StaticShape>(2, op, output_shape_input, constant_data);
|
||||
bool status = resolve_auto_pad_for_shape_back_prop(node, pads_begin, pads_end, input_shapes, output_shape_input, 2, 3);
|
||||
OPENVINO_ASSERT(status, "GroupConvolutionBackpropData shape inference doesn't have enough information to calculate static shapes");
bool status =
resolve_auto_pad_for_shape_back_prop(node, pads_begin, pads_end, input_shapes, output_shape_input, 2, 3);
OPENVINO_ASSERT(
status,
"GroupConvolutionBackpropData shape inference doesn't have enough information to calculate static shapes");
shape_infer(node, pads_begin, pads_end, output_shape_input, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::op::util::ArithmeticReductionKeepDims>(op)) {
shape_infer(node, input_shapes, output_shapes, constant_data);
} else if (auto node = ov::as_type<ov::op::util::LogicalReductionKeepDims>(op)) {
shape_infer(node, input_shapes, output_shapes, constant_data);
} else if (ov::is_type<ov::op::util::UnaryElementwiseArithmetic>(op) ||
ov::is_type<ov::opset1::Convert>(op) || ov::is_type<ov::opset1::Clamp>(op) ||
ov::is_type<ov::opset1::GRN>(op) || ov::is_type<ov::opset1::LRN>(op) ||
ov::is_type<ov::opset1::LogicalNot>(op) || ov::is_type<ov::opset4::Mish>(op) ||
ov::is_type<ov::opset2::MVN>(op) || ov::is_type<ov::opset6::MVN>(op) ||
ov::is_type<ov::opset1::PRelu>(op) || ov::is_type<ov::opset1::Relu>(op) ||
ov::is_type<ov::opset4::Swish>(op) || ov::is_type<ov::opset1::Softmax>(op) ||
ov::is_type<ov::opset1::Elu>(op) || ov::is_type<ov::opset5::Round>(op)) {
} else if (ov::is_type<ov::op::util::UnaryElementwiseArithmetic>(op) || ov::is_type<ov::opset1::Convert>(op) ||
ov::is_type<ov::opset1::Clamp>(op) || ov::is_type<ov::opset1::GRN>(op) ||
ov::is_type<ov::opset1::LRN>(op) || ov::is_type<ov::opset1::LogicalNot>(op) ||
ov::is_type<ov::opset4::Mish>(op) || ov::is_type<ov::opset2::MVN>(op) ||
ov::is_type<ov::opset6::MVN>(op) || ov::is_type<ov::opset1::PRelu>(op) ||
ov::is_type<ov::opset1::Relu>(op) || ov::is_type<ov::opset4::Swish>(op) ||
ov::is_type<ov::opset1::Softmax>(op) || ov::is_type<ov::opset1::Elu>(op) ||
ov::is_type<ov::opset5::Round>(op)) {
copy_shape_infer(node, input_shapes, output_shapes);
} else if (ov::is_type<ov::op::util::BinaryElementwiseArithmetic>(op) ||
ov::is_type<ov::op::util::BinaryElementwiseComparison>(op) || ov::is_type<ov::op::util::BinaryElementwiseLogical>(op)) {
ov::is_type<ov::op::util::BinaryElementwiseComparison>(op) ||
ov::is_type<ov::op::util::BinaryElementwiseLogical>(op)) {
eltwise_shape_infer(op, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset1::FakeQuantize>(op)) {
shape_infer(node, input_shapes, output_shapes);
@ -80,15 +96,30 @@ void shape_inference(ov::Node* op,
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset6::ExperimentalDetectronDetectionOutput>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset3::Assign>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset6::Assign>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset6::ExperimentalDetectronPriorGridGenerator>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset1::LSTMCell>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset6::LSTMCell>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset3::ReadValue>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset6::ReadValue>(op)) {
shape_infer(node, input_shapes, output_shapes);
} else if (auto node = ov::as_type<ov::opset6::Tile>(op)) {
shape_infer(node, input_shapes, output_shapes, constant_data);
} else {
ngraph::OutputVector new_inputs;
for (size_t i = 0; i < op->get_input_size(); ++i) {
if (constant_data.count(i)) {
new_inputs.push_back(std::make_shared<ov::opset1::Constant>(constant_data.at(i)));
} else {
new_inputs.push_back(
std::make_shared<ov::opset1::Parameter>(
op->get_input_element_type(i), input_shapes[i].to_partial_shape()));
new_inputs.push_back(std::make_shared<ov::opset1::Parameter>(op->get_input_element_type(i),
input_shapes[i].to_partial_shape()));
}
}
const auto local_op = op->clone_with_new_inputs(new_inputs);
@ -96,8 +127,10 @@ void shape_inference(ov::Node* op,

output_shapes.resize(op->get_output_size());
for (size_t i = 0; i < output_shapes.size(); ++i) {
const auto &partial_shape = local_op->get_output_partial_shape(i);
OPENVINO_ASSERT(partial_shape.is_static(), "On device shape infer shouldn't support default shape infer for nodes with internal dynamism");
const auto& partial_shape = local_op->get_output_partial_shape(i);
OPENVINO_ASSERT(
partial_shape.is_static(),
"On device shape infer shouldn't support default shape infer for nodes with internal dynamism");
output_shapes[i] = ov::StaticShape(partial_shape.to_shape());
}
}

@ -21,7 +21,7 @@ bool ngraph::pass::Pruning::run_on_function(std::shared_ptr<Function> f) {
manager.register_pass<InitMasks>();
manager.register_pass<PropagateMasks>();

#ifdef OPENVINO_DEBUG_ENABLE
#ifdef ENABLE_OPENVINO_DEBUG
// VisualizeTree modifier helps to print Masks and mark nodes with masks
/*
auto modifier = [](const Node& node, std::vector<std::string>& attributes) {
@ -56,7 +56,7 @@ bool ngraph::pass::Pruning::run_on_function(std::shared_ptr<Function> f) {

manager.register_pass<ShrinkWeights>();

#ifdef OPENVINO_DEBUG_ENABLE
#ifdef ENABLE_OPENVINO_DEBUG
// Uncomment following line and change path to resulting svg file
// manager.register_pass<VisualizeTree>("/tmp/after.svg");
#endif

@ -0,0 +1,17 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <transformations_visibility.hpp>
#include <ngraph/pass/graph_rewrite.hpp>

namespace vpu {

class ConvertGatherND8ToGatherND5 : public ngraph::pass::MatcherPass {
public:
NGRAPH_RTTI_DECLARATION;
ConvertGatherND8ToGatherND5();
};
}
@ -0,0 +1,41 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "vpu/ngraph/transformations/convert_gatherND8.hpp"
#include <ngraph/opsets/opset5.hpp>
#include <ngraph/opsets/opset8.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>

NGRAPH_RTTI_DEFINITION(vpu::ConvertGatherND8ToGatherND5, "ConvertGatherND8ToGatherND5", 0);

namespace vpu {

ConvertGatherND8ToGatherND5::ConvertGatherND8ToGatherND5() {
auto gather_nd_v8_pattern = ngraph::pattern::wrap_type<ngraph::opset8::GatherND>();

ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
auto gather_nd_v8_node = std::dynamic_pointer_cast<ngraph::opset8::GatherND>(m.get_match_root());
if (!gather_nd_v8_node) {
return false;
}
if (gather_nd_v8_node->get_batch_dims() == 0) {
auto gather_nd_v5_node = std::make_shared<ngraph::opset5::GatherND>(gather_nd_v8_node->input_value(0),
gather_nd_v8_node->input_value(1),
gather_nd_v8_node->get_batch_dims());

gather_nd_v5_node->set_friendly_name(gather_nd_v8_node->get_friendly_name());
ngraph::copy_runtime_info(gather_nd_v8_node, gather_nd_v5_node);
ngraph::replace_node(gather_nd_v8_node, gather_nd_v5_node);
return true;
} else {
return false;
}
};

auto m = std::make_shared<ngraph::pattern::Matcher>(gather_nd_v8_pattern, "ConvertGatherND8ToGatherND5");
register_matcher(m, callback);
}

} // namespace vpu
@ -34,6 +34,7 @@
#include <transformations/op_conversions/hswish_decomposition.hpp>
#include <transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp>
#include <transformations/op_conversions/convert_gather_downgrade.hpp>
#include <vpu/ngraph/transformations/convert_gatherND8.hpp>
#include <transformations/convert_precision.hpp>
#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
#include <transformations/common_optimizations/common_optimizations.hpp>
@ -181,6 +182,7 @@ ie::CNNNetwork FrontEnd::convertNetwork(ie::CNNNetwork& network) {
manager.register_pass<ngraph::pass::ConvertNMS3ToNMS5>();
manager.register_pass<ngraph::pass::ConvertNMS4ToNMS5>();
manager.register_pass<ngraph::pass::ConvertGather7ToGather1>();
manager.register_pass<vpu::ConvertGatherND8ToGatherND5>();
manager.register_pass<vpu::MergeGatherGatherElements>();
manager.register_pass<ngraph::pass::CommonOptimizations>();

@ -109,10 +109,8 @@ const std::unordered_set<std::string>& MyriadMetrics::OptimizationCapabilities()
}

std::string MyriadMetrics::DeviceArchitecture(const std::map<std::string, InferenceEngine::Parameter> & options) const {
// TODO: Task 49309. Return the same architecture for devices which can share the same cache.
// E.g. when device "MYRIAD.ma2480-1" is loaded, options.at("DEVICE_ID") will be "ma2480-1".
// For DEVICE_ID="ma2480-0" and DEVICE_ID="ma2480-1" this method shall return the same string, like "ma2480".
// In this case the inference engine will be able to reuse the cached model and reduce the total network loading time.
// Since all supported devices can use the same cache,
// this function returns the same value for everything.
return "MYRIAD";
}

@ -32,7 +32,7 @@ set(DEPENDENCIES
test_model_zoo
)

if(NGRAPH_IR_FRONTEND_ENABLE)
if(ENABLE_OV_IR_FRONTEND)
list(APPEND DEPENDENCIES ir_ov_frontend)
endif()

@ -52,11 +52,11 @@ if(ENABLE_BATCH)
list(APPEND DEPENDENCIES ov_auto_batch_plugin)
endif()

if (NOT NGRAPH_ONNX_FRONTEND_ENABLE)
if (NOT ENABLE_OV_ONNX_FRONTEND)
list(APPEND EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/onnx_reader")
endif()

if (NOT NGRAPH_PDPD_FRONTEND_ENABLE)
if (NOT ENABLE_OV_PDPD_FRONTEND)
list(APPEND EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/paddle_reader")
endif()

@ -74,14 +74,14 @@ addIeTargetTest(

set_ie_threading_interface_for(${TARGET_NAME})

if(NGRAPH_ONNX_FRONTEND_ENABLE)
if(ENABLE_OV_ONNX_FRONTEND)
target_compile_definitions(${TARGET_NAME} PRIVATE
NGRAPH_ONNX_FRONTEND_ENABLE
ENABLE_OV_ONNX_FRONTEND
ONNX_TEST_MODELS="${TEST_MODEL_ZOO}/onnx_reader/models/")
add_dependencies(${TARGET_NAME} onnx_ov_frontend)
endif()

if(NGRAPH_PDPD_FRONTEND_ENABLE)
if(ENABLE_OV_PDPD_FRONTEND)
target_compile_definitions(${TARGET_NAME} PRIVATE
PADDLE_TEST_MODELS="${CMAKE_CURRENT_SOURCE_DIR}/paddle_reader/models/")
add_dependencies(${TARGET_NAME} paddlepaddle_ov_frontend)
@ -95,7 +95,6 @@ ie_faster_build(${TARGET_NAME}

file(GLOB_RECURSE legacy_tests
"${CMAKE_CURRENT_SOURCE_DIR}/transformations/*.cpp" # CVS-55385
"${CMAKE_CURRENT_SOURCE_DIR}/lp_transformations/*.cpp" # CVS-55376
"${CMAKE_CURRENT_SOURCE_DIR}/ngraph_reader/*.cpp" # CVS-55365
"${CMAKE_CURRENT_SOURCE_DIR}/cnn_network/cnn_ngraph_impl_tests.cpp" # CVS-55375
)

@ -11,32 +11,29 @@
|
||||
#include "common_test_utils/file_utils.hpp"
|
||||
#include "common_test_utils/ngraph_test_utils.hpp"
|
||||
#include "ie_core.hpp"
|
||||
#include "openvino/runtime/core.hpp"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "openvino/runtime/core.hpp"
|
||||
#include "transformations/serialize.hpp"
|
||||
|
||||
#ifndef IR_SERIALIZATION_MODELS_PATH // should be already defined by cmake
|
||||
# error "IR_SERIALIZATION_MODELS_PATH is not defined"
|
||||
#error "IR_SERIALIZATION_MODELS_PATH is not defined"
|
||||
#endif
|
||||
|
||||
#ifndef IE_BUILD_POSTFIX // should be already defined by cmake
|
||||
# error "IE_BUILD_POSTFIX is not defined"
|
||||
#error "IE_BUILD_POSTFIX is not defined"
|
||||
#endif
|
||||
|
||||
static std::string get_extension_path() {
|
||||
return FileUtils::makePluginLibraryName<char>(
|
||||
{}, std::string("template_extension") + IE_BUILD_POSTFIX);
|
||||
return FileUtils::makePluginLibraryName<char>({}, std::string("template_extension") + IE_BUILD_POSTFIX);
|
||||
}
|
||||
|
||||
static std::string get_ov_extension_path() {
|
||||
return FileUtils::makePluginLibraryName<char>(
|
||||
{}, std::string("template_ov_extension") + IE_BUILD_POSTFIX);
|
||||
return FileUtils::makePluginLibraryName<char>({}, std::string("template_ov_extension") + IE_BUILD_POSTFIX);
|
||||
}
|
||||
|
||||
class CustomOpsSerializationTest : public ::testing::Test {
|
||||
protected:
|
||||
std::string test_name =
|
||||
::testing::UnitTest::GetInstance()->current_test_info()->name();
|
||||
std::string test_name = ::testing::UnitTest::GetInstance()->current_test_info()->name();
|
||||
std::string m_out_xml_path = test_name + ".xml";
|
||||
std::string m_out_bin_path = test_name + ".bin";
|
||||
|
||||
@ -47,13 +44,10 @@ protected:
|
||||
};
|
||||
|
||||
TEST_F(CustomOpsSerializationTest, CustomOpUser_MO) {
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(
|
||||
IR_SERIALIZATION_MODELS_PATH "custom_op.xml");
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(IR_SERIALIZATION_MODELS_PATH "custom_op.xml");
|
||||
|
||||
InferenceEngine::Core ie;
|
||||
ie.AddExtension(
|
||||
std::make_shared<InferenceEngine::Extension>(
|
||||
get_extension_path()));
|
||||
ie.AddExtension(std::make_shared<InferenceEngine::Extension>(get_extension_path()));
|
||||
|
||||
auto expected = ie.ReadNetwork(model);
|
||||
expected.serialize(m_out_xml_path, m_out_bin_path);
|
||||
@ -61,13 +55,12 @@ TEST_F(CustomOpsSerializationTest, CustomOpUser_MO) {
|
||||
|
||||
bool success;
|
||||
std::string message;
|
||||
std::tie(success, message) =
|
||||
compare_functions(result.getFunction(), expected.getFunction(), true);
|
||||
std::tie(success, message) = compare_functions(result.getFunction(), expected.getFunction(), true);
|
||||
|
||||
ASSERT_TRUE(success) << message;
|
||||
}
|
||||
|
||||
#ifdef NGRAPH_ONNX_FRONTEND_ENABLE
|
||||
#ifdef ENABLE_OV_ONNX_FRONTEND
|
||||
|
||||
// This test will not work because template_extension for ONNX registers
|
||||
// extension via `register_operator` function which registers operator
|
||||
@ -76,13 +69,10 @@ TEST_F(CustomOpsSerializationTest, CustomOpUser_MO) {
|
||||
#ifndef OPENVINO_STATIC_LIBRARY
|
||||
|
||||
TEST_F(CustomOpsSerializationTest, CustomOpUser_ONNXImporter) {
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(
|
||||
IR_SERIALIZATION_MODELS_PATH "custom_op.onnx");
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(IR_SERIALIZATION_MODELS_PATH "custom_op.onnx");
|
||||
|
||||
InferenceEngine::Core ie;
|
||||
ie.AddExtension(
|
||||
std::make_shared<InferenceEngine::Extension>(
|
||||
get_extension_path()));
|
||||
ie.AddExtension(std::make_shared<InferenceEngine::Extension>(get_extension_path()));
|
||||
|
||||
auto expected = ie.ReadNetwork(model);
|
||||
expected.serialize(m_out_xml_path, m_out_bin_path);
|
||||
@ -90,49 +80,39 @@ TEST_F(CustomOpsSerializationTest, CustomOpUser_ONNXImporter) {
|
||||
|
||||
bool success;
|
||||
std::string message;
|
||||
std::tie(success, message) =
|
||||
compare_functions(result.getFunction(), expected.getFunction(), true);
|
||||
std::tie(success, message) = compare_functions(result.getFunction(), expected.getFunction(), true);
|
||||
|
||||
ASSERT_TRUE(success) << message;
|
||||
}
|
||||
|
||||
#endif // OPENVINO_STATIC_LIBRARY
|
||||
#endif // OPENVINO_STATIC_LIBRARY
|
||||
|
||||
#endif // NGRAPH_ONNX_FRONTEND_ENABLE
|
||||
#endif // NGRAPH_ONNX_FRONTEND_ENABLE
|
||||
|
||||
TEST_F(CustomOpsSerializationTest, CustomOpTransformation) {
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(
|
||||
IR_SERIALIZATION_MODELS_PATH "custom_op.xml");
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(IR_SERIALIZATION_MODELS_PATH "custom_op.xml");
|
||||
|
||||
InferenceEngine::Core ie;
|
||||
auto extension =
|
||||
std::make_shared<InferenceEngine::Extension>(
|
||||
get_extension_path());
|
||||
auto extension = std::make_shared<InferenceEngine::Extension>(get_extension_path());
|
||||
ie.AddExtension(extension);
|
||||
auto expected = ie.ReadNetwork(model);
|
||||
ngraph::pass::Manager manager;
|
||||
manager.register_pass<ngraph::pass::Serialize>(
|
||||
m_out_xml_path, m_out_bin_path, extension->getOpSets(),
|
||||
ngraph::pass::Serialize::Version::IR_V10);
|
||||
m_out_xml_path, m_out_bin_path, extension->getOpSets(), ngraph::pass::Serialize::Version::IR_V10);
|
||||
manager.run_passes(expected.getFunction());
|
||||
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
|
||||
|
||||
bool success;
|
||||
std::string message;
|
||||
std::tie(success, message) =
|
||||
compare_functions(result.getFunction(), expected.getFunction(), true);
|
||||
std::tie(success, message) = compare_functions(result.getFunction(), expected.getFunction(), true);
|
||||
|
||||
ASSERT_TRUE(success) << message;
|
||||
}
|
||||
|
||||
class FrameworkNodeExtension : public InferenceEngine::IExtension {
|
||||
public:
|
||||
void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override {
|
||||
static InferenceEngine::Version ExtensionDescription = {
|
||||
{1, 0},
|
||||
"1.0",
|
||||
"framework_node_ext"
|
||||
};
|
||||
void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {
|
||||
static InferenceEngine::Version ExtensionDescription = {{1, 0}, "1.0", "framework_node_ext"};
|
||||
|
||||
versionInfo = &ExtensionDescription;
|
||||
}
|
||||
@ -151,8 +131,7 @@ public:
|
||||
};
|
||||
|
||||
TEST_F(CustomOpsSerializationTest, CustomOpNoExtensions) {
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(
|
||||
IR_SERIALIZATION_MODELS_PATH "custom_op.xml");
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(IR_SERIALIZATION_MODELS_PATH "custom_op.xml");
|
||||
|
||||
InferenceEngine::Core ie;
|
||||
auto extension = std::make_shared<FrameworkNodeExtension>();
|
||||
@ -160,37 +139,34 @@ TEST_F(CustomOpsSerializationTest, CustomOpNoExtensions) {
|
||||
auto expected = ie.ReadNetwork(model);
|
||||
ngraph::pass::Manager manager;
|
||||
manager.register_pass<ngraph::pass::Serialize>(
|
||||
m_out_xml_path, m_out_bin_path, extension->getOpSets(),
|
||||
ngraph::pass::Serialize::Version::IR_V10);
|
||||
m_out_xml_path, m_out_bin_path, extension->getOpSets(), ngraph::pass::Serialize::Version::IR_V10);
|
||||
manager.run_passes(expected.getFunction());
|
||||
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
|
||||
|
||||
bool success;
|
||||
std::string message;
|
||||
std::tie(success, message) =
|
||||
compare_functions(result.getFunction(), expected.getFunction(), true, false, false, true, true);
|
||||
compare_functions(result.getFunction(), expected.getFunction(), true, false, false, true, true);
|
||||
|
||||
ASSERT_TRUE(success) << message;
|
||||
}
|
||||
|
||||
TEST_F(CustomOpsSerializationTest, CustomOpOVExtensions) {
|
||||
const std::string model = CommonTestUtils::getModelFromTestModelZoo(
|
||||
IR_SERIALIZATION_MODELS_PATH "custom_identity.xml");
|
||||
const std::string model =
|
||||
CommonTestUtils::getModelFromTestModelZoo(IR_SERIALIZATION_MODELS_PATH "custom_identity.xml");
|
||||
|
||||
ov::runtime::Core core;
|
||||
core.add_extension(get_ov_extension_path());
|
||||
auto expected = core.read_model(model);
|
||||
ngraph::pass::Manager manager;
|
||||
manager.register_pass<ngraph::pass::Serialize>(
|
||||
m_out_xml_path, m_out_bin_path,
|
||||
ngraph::pass::Serialize::Version::IR_V10);
|
||||
m_out_xml_path, m_out_bin_path, ngraph::pass::Serialize::Version::IR_V10);
|
||||
manager.run_passes(expected);
|
||||
auto result = core.read_model(m_out_xml_path, m_out_bin_path);
|
||||
|
||||
bool success;
|
||||
std::string message;
|
||||
std::tie(success, message) =
|
||||
compare_functions(result, expected, true, false, false, true, true);
|
||||
std::tie(success, message) = compare_functions(result, expected, true, false, false, true, true);
|
||||
|
||||
ASSERT_TRUE(success) << message;
|
||||
}
|
||||
|
@ -1,109 +0,0 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
#include <memory>
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <transformations/utils/utils.hpp>
|
||||
#include <transformations/init_node_info.hpp>
|
||||
#include <legacy/transformations/convert_opset1_to_legacy/reshape_fully_connected.hpp>
|
||||
#include <ngraph/pass/constant_folding.hpp>
|
||||
#include "layer_transformation.hpp"
|
||||
#include "common_test_utils/ngraph_test_utils.hpp"
|
||||
#include "lpt_ngraph_functions/reshape_fully_connected_function.hpp"
|
||||
|
||||
using namespace testing;
|
||||
using namespace ngraph::pass;
|
||||
|
||||
namespace {
|
||||
|
||||
class ReshapeFullyConnectedTransformationTestValues {
|
||||
public:
|
||||
ngraph::Shape inputShape;
|
||||
ngraph::element::Type inputPrecision1;
|
||||
ngraph::element::Type inputPrecision2;
|
||||
ngraph::element::Type inputPrecision3;
|
||||
ngraph::Shape outputShape;
|
||||
ngraph::element::Type outputPrecision;
|
||||
};
|
||||
|
||||
class ReshapeFullyConnectedTransformation :
|
||||
public LayerTransformation,
|
||||
public testing::WithParamInterface<ReshapeFullyConnectedTransformationTestValues> {
|
||||
public:
|
||||
void SetUp() override {
|
||||
using namespace ngraph::builder::subgraph;
|
||||
const ReshapeFullyConnectedTransformationTestValues testValues = GetParam();
|
||||
|
||||
actualFunction = ReshapeFullyConnectedFunction::getOriginal(
|
||||
testValues.inputShape,
|
||||
testValues.inputPrecision1,
|
||||
testValues.inputPrecision2,
|
||||
testValues.inputPrecision3,
|
||||
testValues.outputShape,
|
||||
testValues.outputPrecision);
|
||||
|
||||
ngraph::pass::Manager manager;
|
||||
manager.register_pass<ngraph::pass::ReshapeFullyConnected>();
|
||||
manager.register_pass<ngraph::pass::ConstantFolding>();
|
||||
manager.run_passes(actualFunction);
|
||||
|
||||
referenceFunction = ReshapeFullyConnectedFunction::getReference(
|
||||
testValues.inputShape,
|
||||
testValues.inputPrecision1,
|
||||
testValues.inputPrecision2,
|
||||
testValues.inputPrecision3,
|
||||
testValues.outputShape,
|
||||
testValues.outputPrecision);
|
||||
}
|
||||
|
||||
static std::string getTestCaseName(testing::TestParamInfo<ReshapeFullyConnectedTransformationTestValues> obj) {
|
||||
const ReshapeFullyConnectedTransformationTestValues testValues = obj.param;
|
||||
std::ostringstream result;
|
||||
result <<
|
||||
testValues.inputShape << "_" <<
|
||||
testValues.inputPrecision1 << "_" <<
|
||||
testValues.inputPrecision2 << "_" <<
|
||||
testValues.outputShape << "_" <<
|
||||
testValues.outputPrecision;
|
||||
return result.str();
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(ReshapeFullyConnectedTransformation, CompareFunctions) {
|
||||
actualFunction->validate_nodes_and_infer_types();
|
||||
auto res = compare_functions(referenceFunction, actualFunction, true, true, true);
|
||||
ASSERT_TRUE(res.first) << res.second;
|
||||
|
||||
ASSERT_TRUE(LayerTransformation::allNamesAreUnique(actualFunction)) << "Not all names are unique";
|
||||
}
|
||||
|
||||
std::vector<ReshapeFullyConnectedTransformationTestValues> testValues = {
|
||||
{
|
||||
{ 1, 1, 2048 },
|
||||
ngraph::element::u8,
|
||||
ngraph::element::i8,
|
||||
ngraph::element::f32,
|
||||
{ 1, 1000 },
|
||||
ngraph::element::f32
|
||||
},
|
||||
{
|
||||
{ 1, 1, 2048 },
|
||||
ngraph::element::f32,
|
||||
ngraph::element::f32,
|
||||
ngraph::element::f32,
|
||||
{ 1, 1000 },
|
||||
ngraph::element::f32
|
||||
}
|
||||
};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(
|
||||
smoke_LPT,
|
||||
ReshapeFullyConnectedTransformation,
|
||||
::testing::ValuesIn(testValues),
|
||||
ReshapeFullyConnectedTransformation::getTestCaseName);
|
||||
} // namespace
|
@ -0,0 +1,210 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <string>
|
||||
#include <memory>
|
||||
|
||||
#include <ngraph/function.hpp>
|
||||
#include <openvino/opsets/opset4.hpp>
|
||||
#include <openvino/pass/manager.hpp>
|
||||
#include <transformations/common_optimizations/division_by_zero_fp16_resolver.hpp>
|
||||
#include <transformations/init_node_info.hpp>
|
||||
|
||||
#include "common_test_utils/ngraph_test_utils.hpp"
|
||||
|
||||
using namespace testing;
|
||||
using namespace ov;
|
||||
constexpr float normalized_fp16_min = 6.103515625e-05f; // fp16 minimal normalized value
|
||||
|
||||
|
||||
TEST_F(TransformationTestsF, DivisionByZeroMinimalPattern) {
|
||||
const float eps_value = 1.e-12;
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {eps_value});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto divide = std::make_shared<opset4::Divide>(input_1, add);
|
||||
|
||||
function = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input_1, input_2});
|
||||
|
||||
manager.register_pass<pass::DivisionByZeroFP16Resolver>();
|
||||
}
|
||||
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {normalized_fp16_min});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto divide = std::make_shared<opset4::Divide>(input_1, add);
|
||||
|
||||
function_ref = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input_1, input_2});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
|
||||
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, PowWithNegativeExponent) {
|
||||
const float eps_value = 1.e-12;
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {eps_value});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto pow_exp_const = opset4::Constant::create(element::f32, Shape{1}, {-1.77});
|
||||
auto pow = std::make_shared<opset4::Power>(add, pow_exp_const);
|
||||
auto mul = std::make_shared<opset4::Multiply>(input_1, pow);
|
||||
|
||||
function = std::make_shared<Function>(NodeVector{mul}, ParameterVector{input_1, input_2});
|
||||
|
||||
manager.register_pass<pass::DivisionByZeroFP16Resolver>();
|
||||
}
|
||||
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {normalized_fp16_min});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto pow_exp_const = opset4::Constant::create(element::f32, Shape{1}, {-1.77});
|
||||
auto pow = std::make_shared<opset4::Power>(add, pow_exp_const);
|
||||
auto mul = std::make_shared<opset4::Multiply>(input_1, pow);
|
||||
|
||||
function_ref = std::make_shared<Function>(NodeVector{mul}, ParameterVector{input_1, input_2});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
|
||||
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, PowWithPositiveExponent) {
|
||||
// graph should be left unchanged
|
||||
const float eps_value = 1.e-12;
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {eps_value});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto pow_exp_const = opset4::Constant::create(element::f32, Shape{1}, {1.77});
|
||||
auto pow = std::make_shared<opset4::Power>(add, pow_exp_const);
|
||||
auto mul = std::make_shared<opset4::Multiply>(input_1, pow);
|
||||
|
||||
function = std::make_shared<Function>(NodeVector{mul}, ParameterVector{input_1, input_2});
|
||||
|
||||
manager.register_pass<pass::DivisionByZeroFP16Resolver>();
|
||||
}
|
||||
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {eps_value});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto pow_exp_const = opset4::Constant::create(element::f32, Shape{1}, {1.77});
|
||||
auto pow = std::make_shared<opset4::Power>(add, pow_exp_const);
|
||||
auto mul = std::make_shared<opset4::Multiply>(input_1, pow);
|
||||
|
||||
function_ref = std::make_shared<Function>(NodeVector{mul}, ParameterVector{input_1, input_2});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
|
||||
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, DivisionByZeroMinimalPatternUnchanged) {
|
||||
// if eps_value is greater than normalized_fp16_min then leave graph unchanged
|
||||
const float eps_value = 0.0001f;
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {eps_value});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto divide = std::make_shared<opset4::Divide>(input_1, add);
|
||||
|
||||
function = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input_1, input_2});
|
||||
|
||||
manager.register_pass<pass::DivisionByZeroFP16Resolver>();
|
||||
}
|
||||
|
||||
{
|
||||
auto input_1 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto input_2 = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {eps_value});
|
||||
auto add = std::make_shared<opset4::Add>(input_2, eps_const);
|
||||
auto divide = std::make_shared<opset4::Divide>(input_1, add);
|
||||
|
||||
function_ref = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input_1, input_2});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
|
||||
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, DivisionByZeroInL2NormWithSqrtAndWithMax) {
|
||||
const float eps_value = 1.e-12;
|
||||
{
|
||||
auto input = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto exp = opset4::Constant::create(element::f32, Shape{}, {2.f});
|
||||
auto pow = std::make_shared<opset4::Power>(input, exp);
|
||||
auto axes_const = opset4::Constant::create(element::i64, Shape{2}, {0, 1});
|
||||
auto reduce_sum = std::make_shared<opset4::ReduceSum>(pow, axes_const);
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{}, {eps_value});
|
||||
auto max = std::make_shared<opset4::Maximum>(reduce_sum, eps_const);
|
||||
auto sqrt = std::make_shared<opset4::Sqrt>(max);
|
||||
auto divide = std::make_shared<opset4::Divide>(input, sqrt);
|
||||
|
||||
function = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input});
|
||||
|
||||
manager.register_pass<pass::DivisionByZeroFP16Resolver>();
|
||||
}
|
||||
|
||||
{
|
||||
auto input = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto exp = opset4::Constant::create(element::f32, Shape{}, {2.f});
|
||||
auto pow = std::make_shared<opset4::Power>(input, exp);
|
||||
auto axes_const = opset4::Constant::create(element::i64, Shape{2}, {0, 1});
|
||||
auto reduce_sum = std::make_shared<opset4::ReduceSum>(pow, axes_const);
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{}, {normalized_fp16_min});
|
||||
auto max = std::make_shared<opset4::Maximum>(reduce_sum, eps_const);
|
||||
auto sqrt = std::make_shared<opset4::Sqrt>(max);
|
||||
auto divide = std::make_shared<opset4::Divide>(input, sqrt);
|
||||
|
||||
function_ref = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
|
||||
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
|
||||
}
|
||||
|
||||
|
||||
TEST_F(TransformationTestsF, DivisionByZeroInL2NormWithSqrtAndWithAdd) {
|
||||
const float eps_value = 1.e-12;
|
||||
{
|
||||
auto input = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto exp = opset4::Constant::create(element::f32, Shape{}, {2.f});
|
||||
auto pow = std::make_shared<opset4::Power>(input, exp);
|
||||
auto axes_const = opset4::Constant::create(element::i64, Shape{2}, {0, 1});
|
||||
auto reduce_sum = std::make_shared<opset4::ReduceSum>(pow, axes_const);
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {eps_value});
|
||||
auto add = std::make_shared<opset4::Add>(reduce_sum, eps_const);
|
||||
auto sqrt = std::make_shared<opset4::Sqrt>(add);
|
||||
auto divide = std::make_shared<opset4::Divide>(input, sqrt);
|
||||
|
||||
function = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input});
|
||||
|
||||
manager.register_pass<pass::DivisionByZeroFP16Resolver>();
|
||||
}
|
||||
|
||||
{
|
||||
auto input = std::make_shared<opset4::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto exp = opset4::Constant::create(element::f32, Shape{}, {2.f});
|
||||
auto pow = std::make_shared<opset4::Power>(input, exp);
|
||||
auto axes_const = opset4::Constant::create(element::i64, Shape{2}, {0, 1});
|
||||
auto reduce_sum = std::make_shared<opset4::ReduceSum>(pow, axes_const);
|
||||
auto eps_const = opset4::Constant::create(element::f32, Shape{1}, {normalized_fp16_min});
|
||||
auto add = std::make_shared<opset4::Add>(reduce_sum, eps_const);
|
||||
auto sqrt = std::make_shared<opset4::Sqrt>(add);
|
||||
auto divide = std::make_shared<opset4::Divide>(input, sqrt);
|
||||
|
||||
function_ref = std::make_shared<Function>(NodeVector{divide}, ParameterVector{input});
|
||||
}
|
||||
comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
|
||||
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
|
||||
}
|
@ -37,6 +37,64 @@ TEST(SmartReshapeTests, SS_Squeeze) {
|
||||
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3}));
|
||||
}
|
||||
|
||||
TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end_mask) {
|
||||
std::shared_ptr<ngraph::Function> f(nullptr);
|
||||
{
|
||||
auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128, 768});
|
||||
auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
|
||||
input,
|
||||
ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {0, 1, 0}),
|
||||
ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {0, 2, 768}),
|
||||
ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {1, 1, 1}),
|
||||
std::vector<int64_t>{0}, std::vector<int64_t>{1}); // begin_mask.size() is no larger than axis that is going to be squeezed.
|
||||
auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {1}));
|
||||
|
||||
f = std::make_shared<ngraph::Function>(ngraph::NodeVector{squeeze}, ngraph::ParameterVector{input});
|
||||
}
|
||||
|
||||
InferenceEngine::CNNNetwork network(f);
|
||||
|
||||
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) <<
|
||||
network.getFunction()->get_results()[0]->get_output_partial_shape(0);
|
||||
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 128, 768}));
|
||||
|
||||
auto inputname = network.getFunction()->get_parameters()[0]->get_friendly_name();
|
||||
ASSERT_NO_THROW(network.reshape(InferenceEngine::ICNNNetwork::InputShapes{{inputname, {2, 128, 768}}}));
|
||||
|
||||
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) <<
|
||||
network.getFunction()->get_results()[0]->get_output_partial_shape(0);
|
||||
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 128, 768}));
|
||||
}
|
||||
|
||||
|
||||
TEST(SmartReshapeTests, SS_Squeeze_partial_begin_end) {
|
||||
std::shared_ptr<ngraph::Function> f(nullptr);
|
||||
{
|
||||
auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 768});
|
||||
auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
|
||||
input,
|
||||
ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {0}), // begin.size() is no larger than axis that is going to be squeezed.
|
||||
ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {0}),
|
||||
ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {1}),
|
||||
std::vector<int64_t>{1, 1, 1}, std::vector<int64_t>{1, 1, 1});
|
||||
auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {1}));
|
||||
|
||||
f = std::make_shared<ngraph::Function>(ngraph::NodeVector{squeeze}, ngraph::ParameterVector{input});
|
||||
}
|
||||
|
||||
InferenceEngine::CNNNetwork network(f);
|
||||
|
||||
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 768})) <<
|
||||
network.getFunction()->get_results()[0]->get_output_partial_shape(0);
|
||||
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 1, 768}));
|
||||
|
||||
auto inputname = network.getFunction()->get_parameters()[0]->get_friendly_name();
|
||||
ASSERT_NO_THROW(network.reshape(InferenceEngine::ICNNNetwork::InputShapes{{inputname, {2, 1, 768}}}));
|
||||
|
||||
ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 768})) <<
|
||||
network.getFunction()->get_results()[0]->get_output_partial_shape(0);
|
||||
ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 1, 768}));
|
||||
}
|
||||
|
||||
TEST(SmartReshapeTests, SS_Squeeze_mask_use_negative) {
|
||||
std::shared_ptr<ngraph::Function> f(nullptr);
|
||||
|
@ -0,0 +1,21 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "op_impl_check/op_impl_check.hpp"
|
||||
#include "op_impl_check/single_op_graph.hpp"
|
||||
#include "conformance.hpp"
|
||||
|
||||
namespace ConformanceTests {
|
||||
using namespace ov::test::subgraph;
|
||||
|
||||
namespace {
|
||||
INSTANTIATE_TEST_SUITE_P(conformance,
|
||||
OpImplCheckTest,
|
||||
::testing::Combine(
|
||||
::testing::ValuesIn(createFunctions()),
|
||||
::testing::Values(targetDevice),
|
||||
::testing::Values(std::map<std::string, std::string>())),
|
||||
OpImplCheckTest::getTestCaseName);
|
||||
} // namespace
|
||||
} // namespace ConformanceTests
|
@ -11,7 +11,7 @@ target_link_libraries(cpuSpecificRtInfo PRIVATE ngraph)
|
||||
set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $<TARGET_PROPERTY:MKLDNNPlugin,SOURCE_DIR>)
|
||||
set(DEPENDENCIES MKLDNNPlugin)
|
||||
set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo)
|
||||
if (NGRAPH_ONNX_FRONTEND_ENABLE)
|
||||
if (ENABLE_OV_ONNX_FRONTEND)
|
||||
list(APPEND INCLUDES "${OpenVINO_SOURCE_DIR}/docs/onnx_custom_op")
|
||||
list(APPEND LINK_LIBRARIES onnx_custom_op)
|
||||
list(APPEND DEPENDENCIES template_extension onnx_custom_op)
|
||||
|
@ -165,8 +165,6 @@ std::vector<std::string> disabledTestPatterns() {
|
||||
// Failure happened on win and macos for current seeds.
|
||||
R"(.*CTCLossLayerTest.*CMR=1.*)",
|
||||
R"(.*CTCLossLayerCPUTest.*ctcMergeRepeated=1.*)",
|
||||
// Issue: 72151
|
||||
R"(.*smoke_ROIAlignLayoutTest.*)",
|
||||
};
|
||||
|
||||
#define FIX_62820 0
|
||||
|
@ -94,17 +94,17 @@ protected:
|
||||
auto coordsTensorData = static_cast<float*>(coordsTensor.data());
|
||||
for (size_t i = 0; i < coordsTensor.get_size(); i += 4) {
|
||||
coordsTensorData[i] = 1.f;
|
||||
coordsTensorData[i] = 1.f;
|
||||
coordsTensorData[i] = 19.f;
|
||||
coordsTensorData[i] = 19.f;
|
||||
coordsTensorData[i + 1] = 1.f;
|
||||
coordsTensorData[i + 2] = 19.f;
|
||||
coordsTensorData[i + 3] = 19.f;
|
||||
}
|
||||
} else if (coordsET == ElementType::bf16) {
|
||||
auto coordsTensorData = static_cast<std::int16_t*>(coordsTensor.data());
|
||||
for (size_t i = 0; i < coordsTensor.get_size(); i += 4) {
|
||||
coordsTensorData[i] = static_cast<std::int16_t>(ngraph::bfloat16(1.f).to_bits());
|
||||
coordsTensorData[i] = static_cast<std::int16_t>(ngraph::bfloat16(1.f).to_bits());
|
||||
coordsTensorData[i] = static_cast<std::int16_t>(ngraph::bfloat16(19.f).to_bits());
|
||||
coordsTensorData[i] = static_cast<std::int16_t>(ngraph::bfloat16(19.f).to_bits());
|
||||
coordsTensorData[i + 1] = static_cast<std::int16_t>(ngraph::bfloat16(1.f).to_bits());
|
||||
coordsTensorData[i + 2] = static_cast<std::int16_t>(ngraph::bfloat16(19.f).to_bits());
|
||||
coordsTensorData[i + 3] = static_cast<std::int16_t>(ngraph::bfloat16(19.f).to_bits());
|
||||
}
|
||||
} else {
|
||||
IE_THROW() << "roi align. Unsupported precision: " << coordsET;
|
||||
@ -112,13 +112,10 @@ protected:
|
||||
|
||||
auto roisIdxTensor = ov::runtime::Tensor{ funcInputs[2].get_element_type(), targetInputStaticShapes[2] };
|
||||
auto roisIdxTensorData = static_cast<std::int32_t*>(roisIdxTensor.data());
|
||||
if (roisIdxTensor.get_size() == 1) {
|
||||
roisIdxTensorData[0] = 1;
|
||||
} else if (roisIdxTensor.get_size() == 2) {
|
||||
roisIdxTensorData[0] = 0;
|
||||
roisIdxTensorData[1] = 1;
|
||||
} else {
|
||||
IE_THROW() << "Unexpected roiIdx size: " << roisIdxTensor.get_size();
|
||||
std::int32_t batchIdx = 0;
|
||||
for (int i = 0; i < roisIdxTensor.get_size(); i++) {
|
||||
roisIdxTensorData[i] = batchIdx;
|
||||
batchIdx = (batchIdx + 1) % targetInputStaticShapes[0][0];
|
||||
}
|
||||
|
||||
inputs.insert({ funcInputs[0].get_node_shared_ptr(), data_tensor });
|
||||
|
@ -21,11 +21,6 @@ addIeTargetTest(
|
||||
GPU
|
||||
)
|
||||
|
||||
# CVS-55376
|
||||
set_source_files_properties("${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/low_precision_transformations/layer_transformation.cpp"
|
||||
PROPERTIES INCLUDE_DIRECTORIES
|
||||
$<TARGET_PROPERTY:inference_engine_legacy,INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
|
||||
# try to find VA libraries
|
||||
find_package(PkgConfig QUIET)
|
||||
if(PkgConfig_FOUND)
|
||||
|
@ -1,59 +0,0 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp"
|
||||
|
||||
#include <memory>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
|
||||
#include <ie_core.hpp>
|
||||
|
||||
|
||||
#include <transformations/common_optimizations/common_optimizations.hpp>
|
||||
#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
|
||||
#include <legacy/ngraph_ops/fully_connected.hpp>
|
||||
#include <legacy/net_pass.h>
|
||||
#include <transformations/opset_conversions/convert_opset2_to_opset1.hpp>
|
||||
#include <transformations/opset_conversions/convert_opset3_to_opset2.hpp>
|
||||
#include <transformations/init_node_info.hpp>
|
||||
#include <ngraph/opsets/opset1.hpp>
|
||||
#include <ngraph/opsets/opset2.hpp>
|
||||
#include <ngraph/opsets/opset3.hpp>
|
||||
#include <ngraph/opsets/opset4.hpp>
|
||||
#include <ngraph/op/gelu.hpp>
|
||||
#include <ngraph/pass/manager.hpp>
|
||||
#include "ngraph_functions/pass/convert_prc.hpp"
|
||||
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
#include <legacy/ie_util_internal.hpp>
|
||||
|
||||
#include "functional_test_utils/plugin_cache.hpp"
|
||||
#include "shared_test_classes/base/layer_test_utils.hpp"
|
||||
#include "functional_test_utils/blob_utils.hpp"
|
||||
#include "shared_test_classes/base/layer_test_utils.hpp"
|
||||
#include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp"
|
||||
|
||||
using namespace InferenceEngine::details;
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
#include "functional_test_utils/plugin_cache.hpp"
|
||||
#include "shared_test_classes/base/layer_test_utils.hpp"
|
||||
#include "functional_test_utils/blob_utils.hpp"
|
||||
|
||||
namespace LayerTestsUtils {
|
||||
|
||||
InferenceEngine::Precision LayerTransformation::getDeviceInternalPrecision(const InferenceEngine::Precision precision) {
|
||||
if (precision == InferenceEngine::Precision::FP16) {
|
||||
return InferenceEngine::Precision::FP32;
|
||||
}
|
||||
|
||||
return precision;
|
||||
}
|
||||
|
||||
ngraph::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNGraphFactory::createParams() {
|
||||
return ngraph::pass::low_precision::LayerTransformation::Params();
|
||||
}
|
||||
|
||||
} // namespace LayerTestsUtils
|
@ -94,5 +94,7 @@ std::vector<std::string> disabledTestPatterns() {
|
||||
R"(smoke_PrePostProcess.*cvt_color_i420.*)",
|
||||
// Unsupported
|
||||
R"(smoke_Behavior/InferRequestSetBlobByType.setInputBlobsByType/BlobType=Batched_Device=GPU_Config=().*)",
|
||||
// TODO: Issue 72624
|
||||
R"(smoke_PrePostProcess.*resize_dynamic.*)",
|
||||
};
|
||||
}
|
||||
|
@ -28,6 +28,7 @@ inline std::vector<preprocess_func> GPU_smoke_preprocess_functions() {
|
||||
preprocess_func(resize_nearest, "resize_nearest", 0.01f),
|
||||
preprocess_func(resize_linear_nhwc, "resize_linear_nhwc", 0.01f),
|
||||
preprocess_func(resize_cubic, "resize_cubic", 0.01f),
|
||||
preprocess_func(resize_dynamic, "resize_dynamic", 0.01f, { ov::Shape {1, 3, 123, 123} }),
|
||||
preprocess_func(convert_layout_by_dims, "convert_layout_by_dims", 0.01f),
|
||||
preprocess_func(resize_and_convert_layout, "resize_and_convert_layout", 0.01f),
|
||||
preprocess_func(cvt_color_nv12_to_rgb_single_plane, "cvt_color_nv12_to_rgb_single_plane", 1.f),
|
||||
|
@ -39,6 +39,18 @@ const std::vector<GatherNDParamsSubset> layerParams = {
|
||||
GatherNDParamsSubset{{2, 2, 2, 2}, {2, 2, 1}, 2},
|
||||
};
|
||||
|
||||
const std::vector<GatherNDParamsSubset> layerParamsND8 = {
|
||||
GatherNDParamsSubset{{500, 256, 10, 15}, {25, 125, 3}, 0},
|
||||
GatherNDParamsSubset{{3, 3}, {2, 2}, 0},
|
||||
GatherNDParamsSubset{{5, 3}, {2, 1}, 0},
|
||||
GatherNDParamsSubset{{5, 3, 4}, {2, 2}, 0},
|
||||
GatherNDParamsSubset{{6, 3, 4}, {2, 1, 2}, 0},
|
||||
GatherNDParamsSubset{{5, 2, 6, 8}, {1}, 0},
|
||||
GatherNDParamsSubset{{6, 6, 9, 7}, {2}, 0},
|
||||
GatherNDParamsSubset{{2, 4, 9, 4}, {3}, 0},
|
||||
GatherNDParamsSubset{{5, 2, 3, 7}, {4}, 0},
|
||||
};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(
|
||||
smoke_GatherND,
|
||||
GatherNDLayerTest,
|
||||
@ -50,4 +62,15 @@ INSTANTIATE_TEST_SUITE_P(
|
||||
testing::Values<Config>({{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
|
||||
GatherNDLayerTest::getTestCaseName);
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(
|
||||
smoke_GatherND,
|
||||
GatherND8LayerTest,
|
||||
testing::Combine(
|
||||
testing::ValuesIn(layerParamsND8),
|
||||
testing::ValuesIn(netPrecisions),
|
||||
testing::ValuesIn(indicesPrecisions),
|
||||
testing::Values(CommonTestUtils::DEVICE_MYRIAD),
|
||||
testing::Values<Config>({{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
|
||||
GatherND8LayerTest::getTestCaseName);
|
||||
|
||||
} // namespace
|
||||
|
@ -22,11 +22,11 @@ endif()
|
||||
|
||||
|
||||
# remove once CVS-69781 is fixed
|
||||
if(NGRAPH_IR_FRONTEND_ENABLE)
|
||||
if(ENABLE_OV_IR_FRONTEND)
|
||||
list(APPEND DEPENDENCIES ir_ov_frontend)
|
||||
endif()
|
||||
|
||||
if (NGRAPH_ONNX_FRONTEND_ENABLE)
|
||||
if (ENABLE_OV_ONNX_FRONTEND)
|
||||
list(APPEND DEPENDENCIES test_model_zoo)
|
||||
list(APPEND DEFINES TEST_MODELS="${TEST_MODEL_ZOO}/func_tests/models/")
|
||||
else()
|
||||
@ -56,7 +56,6 @@ addIeTarget(
|
||||
lptNgraphFunctions
|
||||
sharedTestClasses
|
||||
PRIVATE
|
||||
inference_engine_legacy # CVS-55376
|
||||
openvino::util
|
||||
inference_engine_transformations
|
||||
DEPENDENCIES
|
||||
@ -67,11 +66,6 @@ if(ENABLE_GAPI_PREPROCESSING)
|
||||
target_compile_definitions(${TARGET_NAME} PUBLIC ENABLE_GAPI_PREPROCESSING)
|
||||
endif()
|
||||
|
||||
# CVS-55376
|
||||
set_source_files_properties("${CMAKE_CURRENT_SOURCE_DIR}/src/low_precision_transformations/layer_transformation.cpp"
|
||||
PROPERTIES INCLUDE_DIRECTORIES
|
||||
$<TARGET_PROPERTY:inference_engine_legacy,INTERFACE_INCLUDE_DIRECTORIES>)
|
||||
|
||||
ie_faster_build(${TARGET_NAME}
|
||||
PCH PRIVATE "src/precomp.hpp"
|
||||
)
|
||||
|
@ -0,0 +1,41 @@
|
||||
// Copyright (C) 2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
#include "common_test_utils/test_common.hpp"
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
|
||||
#include "functional_test_utils/layer_test_utils/summary.hpp"
|
||||
#include "functional_test_utils/ov_plugin_cache.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace subgraph {
|
||||
|
||||
using OpImplParams = std::tuple<
|
||||
std::pair<ov::DiscreteTypeInfo, std::shared_ptr<ov::Function>>, // Function to check
|
||||
std::string, // Target Device
|
||||
std::map<std::string, std::string>>; // Plugin Config
|
||||
|
||||
class OpImplCheckTest : public testing::WithParamInterface<OpImplParams>,
|
||||
public CommonTestUtils::TestsCommon {
|
||||
protected:
|
||||
LayerTestsUtils::Summary& summary = LayerTestsUtils::Summary::getInstance();
|
||||
std::shared_ptr<ov::runtime::Core> core = ov::test::utils::PluginCache::get().core();
|
||||
std::shared_ptr<ov::Function> function;
|
||||
std::string targetDevice;
|
||||
std::map<std::string, std::string> configuration;
|
||||
|
||||
public:
|
||||
void run();
|
||||
void SetUp() override;
|
||||
static std::string getTestCaseName(const testing::TestParamInfo<OpImplParams> &obj);
|
||||
};
|
||||
|
||||
} // namespace subgraph
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,35 @@
|
||||
// Copyright (C) 2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional_test_utils/layer_test_utils/summary.hpp>
|
||||
#include <ngraph_functions/subgraph_builders.hpp>
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace subgraph {
|
||||
|
||||
using OpGenerator = std::map<ov::DiscreteTypeInfo, std::function<std::shared_ptr<ov::Function>()>>;
|
||||
OpGenerator getOpGeneratorMap();
|
||||
|
||||
static const std::vector<std::pair<ov::DiscreteTypeInfo, std::shared_ptr<ov::Function>>> createFunctions() {
|
||||
std::vector<std::pair<ov::DiscreteTypeInfo, std::shared_ptr<ov::Function>>> res;
|
||||
auto opsets = LayerTestsUtils::Summary::getInstance().getOpSets();
|
||||
auto opGenerator = getOpGeneratorMap();
|
||||
std::set<ngraph::NodeTypeInfo> opsInfo;
|
||||
for (const auto& opset : opsets) {
|
||||
const auto &type_info_set = opset.get_type_info_set();
|
||||
opsInfo.insert(type_info_set.begin(), type_info_set.end());
|
||||
}
|
||||
|
||||
for (const auto& type_info : opsInfo) {
|
||||
res.push_back({type_info, opGenerator.find(type_info)->second()});
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
} // namespace subgraph
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -1,56 +0,0 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp"
|
||||
|
||||
#include <memory>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
|
||||
#include <ie_core.hpp>
|
||||
|
||||
#include <legacy/net_pass.h>
|
||||
#include <legacy/graph_transformer.h>
|
||||
#include <legacy/convert_function_to_cnn_network.hpp>
|
||||
#include <transformations/common_optimizations/common_optimizations.hpp>
|
||||
#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
|
||||
#include <transformations/opset_conversions/convert_opset2_to_opset1.hpp>
|
||||
#include <transformations/opset_conversions/convert_opset3_to_opset2.hpp>
|
||||
#include <transformations/init_node_info.hpp>
|
||||
#include <ngraph/opsets/opset1.hpp>
|
||||
#include <ngraph/opsets/opset2.hpp>
|
||||
#include <ngraph/opsets/opset3.hpp>
|
||||
#include <ngraph/opsets/opset4.hpp>
|
||||
#include "legacy/ngraph_ops/fully_connected.hpp"
|
||||
#include <ngraph/op/gelu.hpp>
|
||||
#include <ngraph/pass/manager.hpp>
|
||||
#include "ngraph_functions/pass/convert_prc.hpp"
|
||||
|
||||
#include "common_test_utils/common_utils.hpp"
|
||||
#include <legacy/ie_util_internal.hpp>
|
||||
|
||||
#include "functional_test_utils/plugin_cache.hpp"
|
||||
#include "shared_test_classes/base/layer_test_utils.hpp"
|
||||
#include "functional_test_utils/blob_utils.hpp"
|
||||
#include "shared_test_classes/base/layer_test_utils.hpp"
|
||||
#include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp"
|
||||
|
||||
#include <low_precision/convolution.hpp>
|
||||
|
||||
namespace LayerTestsUtils {
|
||||
|
||||
InferenceEngine::Precision LayerTransformation::getDeviceInternalPrecision(const InferenceEngine::Precision precision) {
|
||||
if (precision == InferenceEngine::Precision::FP16) {
|
||||
return InferenceEngine::Precision::FP32;
|
||||
}
|
||||
|
||||
return precision;
|
||||
}
|
||||
|
||||
ngraph::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNGraphFactory::createParams() {
|
||||
return ngraph::pass::low_precision::LayerTransformation::Params();
|
||||
}
|
||||
|
||||
} // namespace LayerTestsUtils
|
@ -0,0 +1,67 @@
|
||||
// Copyright (C) 2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
#include <signal.h>
|
||||
#ifdef _WIN32
|
||||
#include <process.h>
|
||||
#endif
|
||||
|
||||
#include "op_impl_check/op_impl_check.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace test {
|
||||
namespace subgraph {
|
||||
|
||||
void OpImplCheckTest::run() {
|
||||
if (function == nullptr) {
|
||||
GTEST_FAIL() << "Target function is empty!";
|
||||
}
|
||||
auto crashHandler = [](int errCode) {
|
||||
auto& s = LayerTestsUtils::Summary::getInstance();
|
||||
s.saveReport();
|
||||
std::cerr << "Unexpected application crash with code: " << errCode << std::endl;
|
||||
std::abort();
|
||||
};
|
||||
signal(SIGSEGV, crashHandler);
|
||||
|
||||
summary.setDeviceName(targetDevice);
|
||||
try {
|
||||
auto executableNetwork = core->compile_model(function, targetDevice, configuration);
|
||||
summary.updateOPsImplStatus(function, true);
|
||||
} catch (...) {
|
||||
summary.updateOPsImplStatus(function, false);
|
||||
GTEST_FAIL() << "Error in the LoadNetwork!";
|
||||
}
|
||||
}
|
||||
|
||||
void OpImplCheckTest::SetUp() {
|
||||
std::pair<ov::DiscreteTypeInfo, std::shared_ptr<ov::Function>> funcInfo;
|
||||
std::tie(funcInfo, targetDevice, configuration) = this->GetParam();
|
||||
function = funcInfo.second;
|
||||
}
|
||||
|
||||
std::string OpImplCheckTest::getTestCaseName(const testing::TestParamInfo<OpImplParams> &obj) {
|
||||
std::pair<ov::DiscreteTypeInfo, std::shared_ptr<ov::Function>> funcInfo;
|
||||
std::string targetDevice;
|
||||
std::map<std::string, std::string> config;
|
||||
std::tie(funcInfo, targetDevice, config) = obj.param;
|
||||
|
||||
std::ostringstream result;
|
||||
std::string friendlyName = funcInfo.first.name + std::string("_") + funcInfo.first.get_version();
|
||||
result << "Function=" << friendlyName << "_";
|
||||
result << "Device=" << targetDevice << "_";
|
||||
result << "Config=(";
|
||||
for (const auto& configItem : config) {
|
||||
result << configItem.first << "=" << configItem.second << "_";
|
||||
}
|
||||
result << ")";
|
||||
return result.str();
|
||||
}
|
||||
|
||||
TEST_P(OpImplCheckTest, checkPluginImplementation) {
|
||||
run();
|
||||
}
|
||||
|
||||
} // namespace subgraph
|
||||
} // namespace test
|
||||
} // namespace ov
|
@ -0,0 +1,75 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <op_impl_check/op_impl_check.hpp>
#include <op_impl_check/single_op_graph.hpp>

namespace ov {
namespace test {
namespace subgraph {

namespace {
std::shared_ptr<ov::Function> generate(const std::shared_ptr<ov::op::Op> &node) {
    return nullptr;
}

std::shared_ptr<ov::Function> generateBinaryEltwise(const std::shared_ptr<ov::op::Op> &node) {
    const auto params = ngraph::builder::makeDynamicParams(ov::element::f32, {{1, 2},
                                                                              {1, 2}});
    std::shared_ptr<ov::Node> eltwiseNode;
    if (ov::is_type<ov::op::v0::SquaredDifference>(node)) {
        eltwiseNode = std::make_shared<ov::op::v0::SquaredDifference>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::Add>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::Add>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::Divide>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::Divide>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::FloorMod>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::FloorMod>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::Maximum>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::Maximum>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::Minimum>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::Minimum>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::Multiply>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::Multiply>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::Power>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::Power>(params.front(), params.back());
    } else if (ov::is_type<ov::op::v1::Subtract>(node)) {
        eltwiseNode = std::make_shared<ov::op::v1::Subtract>(params.front(), params.back());
    } else {
        return nullptr;
    }

    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(eltwiseNode)};
    return std::make_shared<ngraph::Function>(results, params, "BinaryEltwiseGraph");
}
} // namespace
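For readers who have not used the ngraph builder helpers: makeDynamicParams above creates the two f32 inputs that every branch of generateBinaryEltwise wires into the chosen eltwise node. A hand-rolled equivalent of the Add branch is sketched below; it is only an approximation, since it uses plain static {1, 2} shapes instead of the builder's parameter setup, and make_add_graph is an illustrative name, not a function from this commit.

// Rough hand-built equivalent of the Add branch of generateBinaryEltwise (sketch only).
#include <memory>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset8.hpp>

std::shared_ptr<ngraph::Function> make_add_graph() {
    // Two f32 inputs of shape {1, 2}, matching the shapes used in the diff above.
    auto lhs = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2});
    auto rhs = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2});
    auto add = std::make_shared<ngraph::opset8::Add>(lhs, rhs);
    auto result = std::make_shared<ngraph::opset8::Result>(add);
    return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                              ngraph::ParameterVector{lhs, rhs},
                                              "BinaryEltwiseGraph");
}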

template <typename T>
std::shared_ptr<ov::Function> generateGraph() {
    std::shared_ptr<T> node = std::shared_ptr<T>(new T);
    if (ov::is_type<ov::op::util::BinaryElementwiseArithmetic>(node)) {
        return generateBinaryEltwise(node);
    }
    return generate(node);
}

OpGenerator getOpGeneratorMap() {
    static OpGenerator opGeneratorMap{
#define _OPENVINO_OP_REG(NAME, NAMESPACE) {NAMESPACE::NAME::get_type_info_static(), generateGraph<NAMESPACE::NAME>},
#include "openvino/opsets/opset1_tbl.hpp"
#include "openvino/opsets/opset2_tbl.hpp"
#include "openvino/opsets/opset3_tbl.hpp"
#include "openvino/opsets/opset4_tbl.hpp"
#include "openvino/opsets/opset5_tbl.hpp"
#include "openvino/opsets/opset6_tbl.hpp"
#include "openvino/opsets/opset7_tbl.hpp"
#include "openvino/opsets/opset8_tbl.hpp"
#undef _OPENVINO_OP_REG
    };
    return opGeneratorMap;
}

} // namespace subgraph
} // namespace test
} // namespace ov
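The _OPENVINO_OP_REG block above is an X-macro registration: each openvino/opsets/opsetN_tbl.hpp header is simply a list of _OPENVINO_OP_REG(Name, Namespace) entries, so defining the macro as a map-initializer entry and then including the tables expands into one {type_info, generator} pair per op of opsets 1 through 8. The snippet below is a self-contained illustration of the same technique with a made-up three-op table; it inlines the table in a macro instead of a separate header, and none of its names come from this commit.

// Self-contained illustration of the X-macro trick used by getOpGeneratorMap().
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Stand-in for an opset table header: a pure list of per-op entries.
#define OPS_TBL(X) \
    X(Add)         \
    X(Relu)        \
    X(Sigmoid)

std::map<std::string, std::function<std::string()>> buildRegistry() {
    return {
// Redefine the per-entry macro so the same list expands into map initializers.
#define REGISTER_OP(NAME) {#NAME, [] { return std::string("graph for ") + #NAME; }},
        OPS_TBL(REGISTER_OP)
#undef REGISTER_OP
    };
}

int main() {
    for (const auto& entry : buildRegistry()) {
        std::cout << entry.first << ": " << entry.second() << "\n";  // e.g. "Add: graph for Add"
    }
    return 0;
}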
@ -43,8 +43,6 @@ protected:

    static std::string toString(const ngraph::pass::low_precision::LayerTransformation::Params& params);

    static InferenceEngine::Precision getDeviceInternalPrecision(const InferenceEngine::Precision precision);

    static std::string getTestCaseNameByParams(
        const InferenceEngine::Precision precision,
        const InferenceEngine::SizeVector& inputShapes,
@ -61,7 +61,7 @@ protected:
    constexpr static const double disable_threshold = std::numeric_limits<double>::max();
    double abs_threshold = disable_threshold, rel_threshold = disable_threshold;

    LayerTestsUtils::Summary& summary = LayerTestsUtils::Summary::getInstance();;
    LayerTestsUtils::Summary& summary = LayerTestsUtils::Summary::getInstance();

private:
    std::vector<ov::runtime::Tensor> calculate_refs();
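The disable_threshold default above means both limits are effectively switched off until a concrete test sets them. The comparison code itself is not part of this fragment, but a gate built on such a pair of thresholds typically looks like the sketch below (illustrative only; values_match is not a function from this diff).

// Hedged sketch of how an absolute/relative threshold pair is typically applied
// when comparing an expected value against an actual one.
#include <algorithm>
#include <cmath>

bool values_match(double expected, double actual, double abs_threshold, double rel_threshold) {
    const double abs_err = std::fabs(expected - actual);
    const double rel_err = abs_err / std::max(std::fabs(expected), 1e-12);  // guard against division by zero
    return abs_err <= abs_threshold || rel_err <= rel_threshold;            // pass if either limit is met
}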
@ -9,7 +9,7 @@
#include <vector>
#include <memory>

#include "../base/layer_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/preprocess/preprocess_builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
@ -21,7 +21,7 @@ using preprocessParamsTuple = std::tuple<
        std::string>; // Device name

class PrePostProcessTest : public testing::WithParamInterface<preprocessParamsTuple>,
                           virtual public LayerTestsUtils::LayerTestsCommon {
                           virtual public ov::test::SubgraphBaseTest {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<preprocessParamsTuple> &obj);

@ -29,6 +29,10 @@ ngraph::pass::low_precision::LayerTransformation::Params LayerTransformationPara
    return ngraph::pass::low_precision::LayerTransformation::Params();
}

ngraph::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNGraphFactory::createParams() {
    return ngraph::pass::low_precision::LayerTransformation::Params();
}

LayerTransformation::LayerTransformation() {
    threshold = 0.05;
    auto& configuration = GetConfiguration();