Merge remote-tracking branch 'upstream/master'

commit 9ccacedefe
@@ -30,8 +30,8 @@ jobs:
 WORK_DIR: $(Pipeline.Workspace)/_w
 BUILD_DIR: $(WORK_DIR)/build
 BUILD_SAMPLES_DIR: $(WORK_DIR)/build_samples
-BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
 INSTALL_DIR: $(WORK_DIR)/install_pkg
+INSTALL_TEST_DIR: $(INSTALL_DIR)/tests
 SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh

 steps:
@@ -103,7 +103,6 @@ jobs:
 cmakeArgs: >
 -GNinja
 -DVERBOSE_BUILD=ON
--DENABLE_TEMPLATE_PLUGIN=ON
 -DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
 -DENABLE_PYTHON=ON
 -DPYTHON_EXECUTABLE=/usr/bin/python3.6
@@ -117,6 +116,9 @@ jobs:
 $(REPO_DIR)
 workingDirectory: $(BUILD_DIR)

+- script: ls -alR $(REPO_DIR)/inference-engine/temp/
+displayName: 'List temp SDKs'
+
 - script: ninja
 workingDirectory: $(BUILD_DIR)
 displayName: 'Build Lin'
@@ -131,6 +133,15 @@ jobs:
 - script: ls -alR $(INSTALL_DIR)
 displayName: 'List install files'

+- script: |
+mkdir $(INSTALL_DIR)/opencv/
+cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake && cp -R $(REPO_DIR)/inference-engine/temp/opencv_4.5.2_ubuntu18/opencv/* $(INSTALL_DIR)/opencv/
+workingDirectory: $(BUILD_DIR)
+displayName: 'Install tests'
+
+- script: ls -alR $(INSTALL_DIR)
+displayName: 'List install files'
+
 - script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/cpp/build_samples.sh
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build cpp samples'
@@ -139,62 +150,59 @@ jobs:
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build c samples'

-- script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
 displayName: 'nGraph UT'
 continueOnError: false

-# python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(BIN_DIR)/InferenceEngineUnitTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
+# . $(SETUPVARS) && python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
-- script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
 displayName: 'IE UT old'
 continueOnError: false

-- script: $(BIN_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
 displayName: 'IE UT'
 continueOnError: false

-- script: $(BIN_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
 displayName: 'CPU UT'
 continueOnError: false

-- script: $(BIN_DIR)/gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
 displayName: 'GNA UT'
 continueOnError: false

-- script: $(BIN_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
 displayName: 'VPU UT'
 continueOnError: false

-- script: $(BIN_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
 displayName: 'ONNX Importer UT'
 continueOnError: false

-- script: $(BIN_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
 displayName: 'IE FuncTests'
 continueOnError: false

-- script: $(BIN_DIR)/templateFuncTests --gtest_filter=*smoke* --gtest_output=xml:TEST-templateFuncTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/templateFuncTests --gtest_filter=*smoke* --gtest_output=xml:TEST-templateFuncTests.xml
 displayName: 'TEMPLATE FuncTests'
 continueOnError: false

-- script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
 displayName: 'CPU FuncTests'
 continueOnError: false

 - script: |
 export DATA_PATH=$(MODELS_PATH)
 export MODELS_PATH=$(MODELS_PATH)
-$(BIN_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
+. $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
 displayName: 'IE CAPITests'
 continueOnError: false

 - script: |
 export DATA_PATH=$(MODELS_PATH)
 export MODELS_PATH=$(MODELS_PATH)
-export LD_LIBRARY_PATH=$(BIN_DIR)/lib
-export PYTHONPATH=$(BIN_DIR)/lib/python_api/python3.6
-env
 cd $(REPO_DIR)/inference-engine/ie_bridges/python/tests
-pytest pytest --junitxml=TEST-PythonAPI.xml
+. $(SETUPVARS) -pyver 3.6 && pytest pytest --junitxml=TEST-PythonAPI.xml
 displayName: 'Python API Tests'
 continueOnError: false
 enabled: false
@@ -30,14 +30,13 @@ jobs:
 WORK_DIR: $(Pipeline.Workspace)\_w
 BUILD_DIR: D:\build
 BUILD_SAMPLES_DIR: D:\build_samples
-BIN_DIR: $(REPO_DIR)\bin\intel64
 MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
 MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
 INSTALL_DIR: $(WORK_DIR)\install_pkg
+INSTALL_TEST_DIR: $(INSTALL_DIR)\tests
 SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat
 IB_DIR: C:\Program Files (x86)\IncrediBuild
 IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe
-TEST_ENV_PATH: $(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.2\opencv\bin;$(IB_DIR);%PATH%

 steps:
 - script: |
@@ -96,6 +95,9 @@ jobs:
 workingDirectory: $(BUILD_DIR)
 displayName: 'CMake'

+- script: dir $(REPO_DIR)\inference-engine\temp\ /s
+displayName: 'List temp SDKs'
+
 - script: |
 set PATH=$(WORK_DIR)\ninja-win;%PATH%
 call "$(MSVS_VARS_PATH)" && "C:\Program Files (x86)\IncrediBuild\BuildConsole.exe" /COMMAND="ninja"
@@ -112,6 +114,13 @@ jobs:
 - script: dir $(INSTALL_DIR) /s
 displayName: 'List install files'

+- script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake && xcopy $(REPO_DIR)\inference-engine\temp\opencv_4.5.2\opencv\* $(INSTALL_DIR)\opencv\ /e /h /y
+workingDirectory: $(BUILD_DIR)
+displayName: 'Install tests'
+
+- script: dir $(INSTALL_DIR) /s
+displayName: 'List install files'
+
 - script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\cpp\build_samples_msvc.bat
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build cpp samples'
@@ -120,71 +129,55 @@ jobs:
 workingDirectory: $(BUILD_SAMPLES_DIR)
 displayName: 'Build c samples'

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
 displayName: 'nGraph UT'
 continueOnError: false

 - script: |
-set PATH=$(TEST_ENV_PATH)
-"$(IB_TESTCONSOLE)" $(BIN_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
+set PATH=$(IB_DIR);%PATH%
+call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
 displayName: 'IE UT old - IB'
+continueOnError: false

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
 displayName: 'IE UT'
 continueOnError: false

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
 displayName: 'CPU UT'
 continueOnError: false

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
 displayName: 'GNA UT'
 continueOnError: false

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
 displayName: 'VPU UT'
 continueOnError: false

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
 displayName: 'ONNX Importer UT'
 continueOnError: false

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
 displayName: 'IE FuncTests'
 continueOnError: false

-- script: |
-set PATH=$(TEST_ENV_PATH)
-$(BIN_DIR)\templateFuncTests --gtest_output=xml:TEST-templateFuncTests.xml
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\templateFuncTests --gtest_output=xml:TEST-templateFuncTests.xml
 displayName: 'TEMPLATE FuncTests'
 continueOnError: false

+# call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
 - script: |
-set PATH=$(TEST_ENV_PATH)
-rem $(BIN_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
-"$(IB_TESTCONSOLE)" $(BIN_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24
+set PATH=$(IB_DIR);%PATH%
+call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24
 displayName: 'CPU FuncTests - IB'
 continueOnError: false

 - script: |
-set PATH=$(TEST_ENV_PATH)
 set DATA_PATH=$(MODELS_PATH)
 set MODELS_PATH=$(MODELS_PATH)
-$(BIN_DIR)\InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
+call $(SETUPVARS) && $(INSTALL_TEST_DIR)\InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
 displayName: 'IE CAPITests'
 continueOnError: false
.github/org_control/config.json (vendored)
@@ -33,6 +33,7 @@
 "openvino-mo-maintainers": "category: MO",
 "openvino-ngraph-maintainers": "category: nGraph",
 "openvino-tests-maintainers": "category: IE Tests",
-"openvino-tools-maintainers": "category: tools"
+"openvino-tools-maintainers": "category: tools",
+"openvino-configuration-mgmt": "category: dependency_changes"
 }
 }
@@ -73,3 +73,8 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
 # Documentation
 /docs/ @openvinotoolkit/openvino-docs-maintainers
 *.md @openvinotoolkit/openvino-docs-maintainers
+
+# Control 3d party dependencies
+*requirements* @openvino-configuration-mgmt
+*setup.py @openvino-configuration-mgmt
+/scripts/install_dependencies/ @openvino-configuration-mgmt
@@ -24,8 +24,6 @@ Supported values:\

 ie_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON)

-ie_option(ENABLE_TEMPLATE_PLUGIN "Register template plugin into plugins.xml" OFF)
-
 ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \
 In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected InelSEAPI statistics. \
 Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF
@@ -1,36 +1,45 @@
 # Converting a PyTorch* Model {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_PyTorch}

+## Supported Topologies
+
+Here is the list of models that are tested and guaranteed to be supported. However, you can also use these instructions to convert PyTorch\* models that are not presented in the list.
+
+* [Torchvision Models](https://pytorch.org/docs/stable/torchvision/index.html): alexnet, densenet121, densenet161,
+densenet169, densenet201, resnet101, resnet152, resnet18, resnet34, resnet50, vgg11, vgg13, vgg16, vgg19.
+The models can be converted using [regular instructions](#typical-pytorch).
+* [Cadene Pretrained Models](https://github.com/Cadene/pretrained-models.pytorch): alexnet, fbresnet152, resnet101,
+resnet152, resnet18, resnet34, resnet152, resnet18, resnet34, resnet50, resnext101_32x4d, resnext101_64x4d, vgg11.
+The models can be converted using [regular instructions](#typical-pytorch).
+* [ESPNet Models](https://github.com/sacmehta/ESPNet/tree/master/pretrained) can be converted using [regular instructions](#typical-pytorch).
+* [MobileNetV3](https://github.com/d-li14/mobilenetv3.pytorch) can be converted using [regular instructions](#typical-pytorch).
+* [iSeeBetter](https://github.com/amanchadha/iSeeBetter) can be converted using [regular instructions](#typical-pytorch).
+Please refer to [`iSeeBetterTest.py`](https://github.com/amanchadha/iSeeBetter/blob/master/iSeeBetterTest.py) script for code to initialize the model.
+* F3Net topology can be converted using steps described in [Convert PyTorch\* F3Net to the IR](pytorch_specific/Convert_F3Net.md)
+instruction which is used instead of steps 2 and 3 of [regular instructions](#typical-pytorch).
+* QuartzNet topologies from [NeMo project](https://github.com/NVIDIA/NeMo) can be converted using steps described in
+[Convert PyTorch\* QuartzNet to the IR](pytorch_specific/Convert_QuartzNet.md) instruction which is used instead of
+steps 2 and 3 of [regular instructions](#typical-pytorch).
+* YOLACT topology can be converted using steps described in [Convert PyTorch\* YOLACT to the IR](pytorch_specific/Convert_YOLACT.md)
+instruction which is used instead of steps 2 and 3 of [regular instructions](#typical-pytorch).
+* [RCAN](https://github.com/yulunzhang/RCAN) topology can be converted using steps described in [Convert PyTorch\* RCAN to the IR](pytorch_specific/Convert_RCAN.md)
+instruction which is used instead of steps 2 and 3 of [regular instructions](#typical-pytorch).
+* [BERT_NER](https://github.com/kamalkraj/BERT-NER) topology can be converted using steps described in [Convert PyTorch* BERT-NER to the IR](pytorch_specific/Convert_Bert_ner.md)
+instruction which is used instead of steps 2 and 3 of [regular instructions](#typical-pytorch).
+
+## Typical steps to convert PyTorch\* model <a name="typical-pytorch"></a>
+
 PyTorch* framework is supported through export to ONNX\* format. A summary of the steps for optimizing and deploying a model that was trained with the PyTorch\* framework:

-1. [Export PyTorch model to ONNX\*](#export-to-onnx).
-2. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for ONNX\*.
+1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for ONNX\*.
+2. [Export PyTorch model to ONNX\*](#export-to-onnx).
 3. [Convert an ONNX\* model](Convert_Model_From_ONNX.md) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values.
 4. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided [sample applications](../../../IE_DG/Samples_Overview.md).
 5. [Integrate](../../../IE_DG/Samples_Overview.md) the Inference Engine in your application to deploy the model in the target environment.

-## Supported Topologies
-
-Here is the list of models that were tested and are guaranteed to be supported.
-It is not a full list of models that can be converted to ONNX\* and to IR.
-
-|Package Name|Supported Models|
-|:----|:----|
-| [Torchvision Models](https://pytorch.org/docs/stable/torchvision/index.html) | alexnet, densenet121, densenet161, densenet169, densenet201, resnet101, resnet152, resnet18, resnet34, resnet50, vgg11, vgg13, vgg16, vgg19 |
-| [Pretrained Models](https://github.com/Cadene/pretrained-models.pytorch) | alexnet, fbresnet152, resnet101, resnet152, resnet18, resnet34, resnet152, resnet18, resnet34, resnet50, resnext101_32x4d, resnext101_64x4d, vgg11 |
-
-**Other supported topologies**
-
-* [ESPNet Models](https://github.com/sacmehta/ESPNet/tree/master/pretrained)
-* [MobileNetV3](https://github.com/d-li14/mobilenetv3.pytorch)
-* F3Net topology can be converted using [Convert PyTorch\* F3Net to the IR](pytorch_specific/Convert_F3Net.md) instruction.
-* QuartzNet topologies from [NeMo project](https://github.com/NVIDIA/NeMo) can be converted using [Convert PyTorch\* QuartzNet to the IR](pytorch_specific/Convert_QuartzNet.md) instruction.
-* YOLACT topology can be converted using [Convert PyTorch\* YOLACT to the IR](pytorch_specific/Convert_YOLACT.md) instruction.
-* [RCAN](https://github.com/yulunzhang/RCAN) topologies can be converted using [Convert PyTorch\* RCAN to the IR](pytorch_specific/Convert_RCAN.md) instruction.
-* [BERT_NER](https://github.com/kamalkraj/BERT-NER) can be converted using [Convert PyTorch* BERT-NER to the IR](pytorch_specific/Convert_Bert_ner.md) instruction.
-
 ## Export PyTorch\* Model to ONNX\* Format <a name="export-to-onnx"></a>

-PyTorch models are defined in a Python\* code, to export such models use `torch.onnx.export()` method.
+PyTorch models are defined in a Python\* code, to export such models use `torch.onnx.export()` method. Usually code to
+evaluate or test the model is provided with the model code and can be used to initialize and export model.
 Only the basics will be covered here, the step to export to ONNX\* is crucial but it is covered by PyTorch\* framework.
 For more information, please refer to [PyTorch\* documentation](https://pytorch.org/docs/stable/onnx.html).
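The export step referenced in this documentation change can be illustrated with a short script. This is a minimal sketch under assumptions not stated in the diff: torchvision is available, the chosen model accepts a 1x3x224x224 input, and the output file name `model.onnx` is arbitrary.

```python
import torch
import torchvision

# Load one of the supported torchvision models and switch it to inference mode.
model = torchvision.models.resnet50(pretrained=True)
model.eval()

# torch.onnx.export traces the model with a dummy input; the dummy input's
# shape becomes the input shape recorded in the exported ONNX graph.
dummy_input = torch.randn(1, 3, 224, 224)
torch.onnx.export(model, dummy_input, "model.onnx", opset_version=11)
```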
@@ -8,7 +8,8 @@ These instructions are applicable only to the Faster R-CNN model converted to th
 ```sh
 python3 ./mo_onnx.py
 --input_model FasterRCNN-10.onnx \
---input_shape [3,800,800] \
+--input_shape [1,3,800,800] \
+--input 0:2 \
 --mean_values [102.9801,115.9465,122.7717] \
 --transformations_config ./extensions/front/onnx/faster_rcnn.json
 ```
@@ -6,6 +6,15 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t

 > **NOTE**: Intel® Graphics Compute Runtime for OpenCL™ is not a part of OpenVINO™ APT distribution. You can install it from the [Intel® Graphics Compute Runtime for OpenCL™ GitHub repo](https://github.com/intel/compute-runtime).

+## System Requirements
+
+The complete list of supported hardware is available in the [Release Notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html#inpage-nav-8).
+
+**Operating Systems**
+
+- Ubuntu 18.04.x long-term support (LTS), 64-bit
+- Ubuntu 20.04.0 long-term support (LTS), 64-bit
+
 ## Included with Runtime Package

 The following components are installed with the OpenVINO runtime package:
@@ -31,50 +40,46 @@ The following components are installed with the OpenVINO developer package:
 | [Documentation for Pre-Trained Models ](@ref omz_models_group_intel) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/openvinotoolkit/open_model_zoo). |
 | Deep Learning Streamer (DL Streamer) | Streaming analytics framework, based on GStreamer\*, for constructing graphs of media analytics components. For the DL Streamer documentation, see [DL Streamer Samples](@ref gst_samples_README), [API Reference](https://openvinotoolkit.github.io/dlstreamer_gst/), [Elements](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Elements), [Tutorial](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/DL-Streamer-Tutorial). |

-## Set up the Repository
-### Install the GPG key for the repository
+## Install Packages

+### Set up the OpenVINO™ Toolkit APT Repository
+
+#### Install the GPG key for the Repository
+
 1. Download the public key from [https://apt.repos.intel.com/openvino/2021/GPG-PUB-KEY-INTEL-OPENVINO-2021](https://apt.repos.intel.com/openvino/2021/GPG-PUB-KEY-INTEL-OPENVINO-2021) and save it to a file.
 2. Add this key to the system keyring:
 ```sh
 sudo apt-key add <PATH_TO_DOWNLOADED_GPG_KEY>
 ```
+> **NOTE**: You might need to install GnuPG: `sudo apt-get install gnupg`

 3. Check the list of APT keys running the following command:
 ```sh
 sudo apt-key list
 ```

-### Add the APT Repository
+#### Add the Repository

 Run the following command:
 ```sh
 echo "deb https://apt.repos.intel.com/openvino/2021 all main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2021.list
 ```

-### Update the list of packages
+#### Update the List of Packages

 Run the `update` command:
 ```sh
 sudo apt update
 ```
-There are full release Runtime and Developer packages, and also some available components.

-**Runtime Packages**
+#### Verify that the APT Repository is Properly Set Up
-- Ubuntu 18.04: `intel-openvino-runtime-ubuntu18`
-- Ubuntu 20.04: `intel-openvino-runtime-ubuntu20`
-
-**Developer Packages**
-- Ubuntu 18.04: `intel-openvino-dev-ubuntu18`
-- Ubuntu 20.04: `intel-openvino-dev-ubuntu20`
-
-### Get the list of available packages

 Run the `apt-cache` command to see a list of all available OpenVINO packages and components:
 ```sh
 apt-cache search openvino
 ```
+See the example commands below:
-#### Examples

 * **Runtime Packages**

@@ -97,29 +102,23 @@ apt-cache search openvino
 sudo apt-cache search intel-openvino-dev-ubuntu20
 ```

-## Install the runtime or developer packages using the APT Package Manager
-Intel® OpenVINO will be installed in: `/opt/intel/openvino_<VERSION>.<UPDATE>.<BUILD_NUM>`
+### Install Runtime or Developer Packages using the APT Package Manager
+Intel® OpenVINO™ Toolkit will be installed in: `/opt/intel/openvino_<VERSION>.<UPDATE>.<BUILD_NUM>`

 A symlink will be created: `/opt/intel/openvino_<VERSION>`

----
-### To Install a specific version
+#### To Install a Specific Version

-To get a list of OpenVINO packages available for installation:
+1. Get a list of OpenVINO packages available for installation:
 ```sh
 sudo apt-cache search intel-openvino-runtime-ubuntu18
 ```
-To install a specific version of an OpenVINO package:
+2. Install a specific version of an OpenVINO package:
 ```sh
 sudo apt install intel-openvino-<PACKAGE_TYPE>-ubuntu<OS_VERSION>-<VERSION>.<UPDATE>.<BUILD_NUM>
 ```
+See the example commands below:
-#### Examples
-* **Runtime Package**
+* **Runtime Package**<br>

 On Ubuntu 18.04:
 ```sh
 sudo apt install intel-openvino-runtime-ubuntu18-2021.1.105
@@ -138,10 +137,17 @@ sudo apt install intel-openvino-<PACKAGE_TYPE>-ubuntu<OS_VERSION>-<VERSION>.<UPD
 sudo apt install intel-openvino-dev-ubuntu20-2021.1.105
 ```

----
-### To Uninstall a specific version
+#### To check for Installed Packages and Versions

-To uninstall a specific full runtime package:
+To get a list of installed OpenVINO packages:

+```sh
+apt list --installed | grep openvino
+```
+
+#### To Uninstall a Specific Version
+
+To uninstall a specific package:
 ```sh
 sudo apt autoremove intel-openvino-<PACKAGE_TYPE>-ubuntu<OS_VERSION>-<VERSION>.<UPDATE>.<BUILD_NUM>
 ```
@@ -2,22 +2,39 @@

 This guide provides installation steps for Intel® Distribution of OpenVINO™ toolkit distributed through the Anaconda* Cloud.

+> **NOTE**: Only runtime packages are available from Anaconda* Cloud.
+
+## Introduction
+
+OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that solve a variety of tasks including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and many others. Based on latest generations of artificial neural networks, including Convolutional Neural Networks (CNNs), recurrent and attention-based networks, the toolkit extends computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance, AI and deep learning inference deployed from edge to cloud.
+
+The Intel® Distribution of OpenVINO™ toolkit\*:
+- Enables CNN-based deep learning inference on the edge
+- Supports heterogeneous execution across Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
+- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels
+
+The **runtime package** includes the following components installed by default:
+
+| Component | Description |
+|-----------|-------------|
+| [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) | This is the engine that runs the deep learning model. It includes a set of libraries for an easy inference integration into your applications. |
+
 ## System Requirements

+**Software**
+
 - [Anaconda* distribution](https://www.anaconda.com/products/individual/)

 **Operating Systems**

-- Ubuntu* 18.04 long-term support (LTS), 64-bit
-- CentOS* 7.6, 64-bit
-- macOS* 10.14.x versions.
-- Windows 10*, 64-bit Pro, Enterprise or Education (1607 Anniversary Update, Build 14393 or higher) editions
-- Windows Server* 2016 or higher
+| Supported Operating System | [Python* Version (64-bit)](https://www.python.org/) |
+| :------------------------------------------------------------| :---------------------------------------------------|
+| Ubuntu* 18.04 long-term support (LTS), 64-bit | 3.6, 3.7 |
+| Ubuntu* 20.04 long-term support (LTS), 64-bit | 3.6, 3.7 |
+| CentOS* 7.6, 64-bit | 3.6, 3.7 |
+| macOS* 10.15.x | 3.6, 3.7 |
+| Windows 10*, 64-bit | 3.6, 3.7 |

-## Install the runtime package using the Anaconda* Package Manager
+## Install the Runtime Package using the Anaconda* Package Manager

 1. Set up the Anaconda* environment:
 ```sh
@@ -26,11 +43,19 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t
 ```sh
 conda activate py37
 ```
-2. Updated conda to the latest version:
+2. Update Anaconda environment to the latest version:
 ```sh
 conda update --all
 ```
-3. Install the Intel® Distribution of OpenVINO™ Toolkit:
+3. Install pre-requisites:
+```sh
+conda install numpy
+```
+4. Install the Intel® Distribution of OpenVINO™ Toolkit:
+ - Ubuntu* 20.04
+```sh
+conda install openvino-ie4py-ubuntu20 -c intel
+```
 - Ubuntu* 18.04
 ```sh
 conda install openvino-ie4py-ubuntu18 -c intel
@@ -43,19 +68,13 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t
 ```sh
 conda install openvino-ie4py -c intel
 ```
-4. Verify the package installed:
+5. Verify the package is installed:
 ```sh
-python -c "import openvino"
+python -c "from openvino.inference_engine import IECore"
 ```
+If installation was successful, you will not see any error messages (no console output).

-Now you can start to develop and run your application.
+Now you can start developing your application.

-## Known Issues and Limitations
-
-- You cannot use Python bindings included in Intel® Distribution of OpenVINO™ toolkit with [Anaconda* distribution](https://www.anaconda.com/products/individual/)
-- You cannot use Python OpenVINO™ bindings included in Anaconda* package with official [Python distribution](https://www.python.org/).
-
 ## Additional Resources

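The one-line verification above can be expanded into a slightly fuller smoke test. A minimal sketch, assuming the 2021.x Python API installed by the conda packages above:

```python
from openvino.inference_engine import IECore

# Creating an IECore and listing devices exercises the native libraries;
# on a correct installation this prints something like ['CPU'].
ie = IECore()
print(ie.available_devices)
```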
@@ -8,6 +8,14 @@ This guide provides installation steps for the Intel® Distribution of OpenVINO

 > **NOTE**: Only runtime packages are available via the YUM repository.

+## System Requirements
+
+The complete list of supported hardware is available in the [Release Notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html#inpage-nav-8).
+
+**Operating Systems**
+
+- CentOS 7.6, 64-bit
+
 ## Included with Runtime Package

 The following components are installed with the OpenVINO runtime package:
@@ -18,6 +26,8 @@ The following components are installed with the OpenVINO runtime package:
 | [OpenCV*](https://docs.opencv.org/master/) | OpenCV* community version compiled for Intel® hardware. |
 | Deep Learning Stream (DL Streamer) | Streaming analytics framework, based on GStreamer, for constructing graphs of media analytics components. For the DL Streamer documentation, see [DL Streamer Samples](@ref gst_samples_README), [API Reference](https://openvinotoolkit.github.io/dlstreamer_gst/), [Elements](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Elements), [Tutorial](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/DL-Streamer-Tutorial). |

+## Install Packages
+
 ## Set up the Repository

 > **NOTE:** You must be logged in as root to set up and install the repository.
@@ -39,25 +49,23 @@ Configure YUM with the OpenVINO repository to install OpenVINO. You have two opt
 ```

 * **OPTION 2:** Create the repository file manually:
-1. Navigate to the repository directory:
-```sh
-cd /etc/yum.repos.d
-```
-2. Edit the repo file:
-```sh
-vi intel-openvino-2021.repo
-```
-3. Append the following code:
+1. Create the YUM repo file in the /tmp directory as a normal user:
 ```sh
+tee > /tmp/openvino-2021.repo << EOF
 [intel-openvino-2021]
 name=Intel(R) Distribution of OpenVINO 2021
 baseurl=https://yum.repos.intel.com/openvino/2021
 enabled=1
 gpgcheck=1
 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-INTEL-OPENVINO-2021
+EOF
 ```
-4. Save and close the `intel-openvino-2021.repo` file.
-5. Import the gpg public key for the repository:
+2. Move the newly created openvino-2021.repo file to the YUM configuration directory /etc/yum.repos.d:
+```sh
+sudo mv /tmp/openvino-2021.repo /etc/yum.repos.d
+```
+3. Import the gpg public key for the repository:
 ```sh
 sudo rpm --import https://yum.repos.intel.com/openvino/2021/setup/RPM-GPG-KEY-INTEL-OPENVINO-2021
 ```
|
|||||||
sudo yum install intel-openvino-runtime-centos7-<VERSION>.<UPDATE>.<BUILD_NUM>
|
sudo yum install intel-openvino-runtime-centos7-<VERSION>.<UPDATE>.<BUILD_NUM>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Examples
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo yum install intel-openvino-runtime-centos7-2021.3.394
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### To check for installed packages and version
|
||||||
|
|
||||||
|
To check a specific version of an OpenVINO package:
|
||||||
|
```sh
|
||||||
|
yum list installed intel-openvino*
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### To Uninstall a specific version
|
### To Uninstall a specific version
|
||||||
|
@@ -15,11 +15,11 @@ The **developer package** includes the following components installed by default

 | Component | Console Script | Description |
 |------------------|----------------|-------------|
-| [Model Optimizer](https://docs.openvinotoolkit.org/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model Optimizer** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by Intel tools, especially the Inference Engine. <br>Popular frameworks include Caffe\*, TensorFlow\*, MXNet\*, and ONNX\*. |
+| [Model Optimizer](https://docs.openvinotoolkit.org/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model Optimizer** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by Intel tools, especially the Inference Engine. <br>Supported frameworks include Caffe\*, TensorFlow\*, MXNet\*, and ONNX\*. |
 | [Benchmark Tool](https://docs.openvinotoolkit.org/latest/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. |
-| [Accuracy Checker](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker.html) and <br> [Annotation Converter](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker_annotation_converters.html) | `accuracy_check` <br> `convert_annotation` |**Accuracy Checker** is a deep learning accuracy validation tool that allows you to collect accuracy metrics against popular datasets. The main advantages of the tool are the flexibility of configuration and an impressive set of supported datasets, preprocessing, postprocessing, and metrics. <br> **Annotation Converter** is a utility for offline conversion of datasets to the format suitable for metric evaluation used in Accuracy Checker. |
+| [Accuracy Checker](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker.html) and <br> [Annotation Converter](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker_annotation_converters.html) | `accuracy_check` <br> `convert_annotation` |**Accuracy Checker** is a deep learning accuracy validation tool that allows you to collect accuracy metrics against popular datasets. The main advantages of the tool are the flexibility of configuration and a set of supported datasets, preprocessing, postprocessing, and metrics. <br> **Annotation Converter** is a utility that prepares datasets for evaluation with Accuracy Checker. |
 | [Post-Training Optimization Tool](https://docs.openvinotoolkit.org/latest/pot_README.html)| `pot` |**Post-Training Optimization Tool** allows you to optimize trained models with advanced capabilities, such as quantization and low-precision optimizations, without the need to retrain or fine-tune models. Optimizations are also available through the [API](https://docs.openvinotoolkit.org/latest/pot_compression_api_README.html). |
-| [Model Downloader and other Open Model Zoo tools](https://docs.openvinotoolkit.org/latest/omz_tools_downloader.html)| `omz_downloader` <br> `omz_converter` <br> `omz_quantizer` <br> `omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](https://docs.openvinotoolkit.org/latest/omz_models_group_public.html) and [intel](https://docs.openvinotoolkit.org/latest/omz_models_group_intel.html)-trained models. Use these free pre-trained models instead of training your own models to speed up the development and production deployment process. The principle of the tool is as follows: it downloads model files from online sources and, if necessary, patches them with Model Optimizer to make them more usable. A number of additional tools are also provided to automate the process of working with downloaded models:<br> **Model Converter** is a tool for converting the models stored in a format other than the Intermediate Representation (IR) into that format using Model Optimizer. <br> **Model Quantizer** is a tool for automatic quantization of full-precision IR models into low-precision versions using Post-Training Optimization Tool. <br> **Model Information Dumper** is a helper utility for dumping information about the models in a stable machine-readable format.|
+| [Model Downloader and other Open Model Zoo tools](https://docs.openvinotoolkit.org/latest/omz_tools_downloader.html)| `omz_downloader` <br> `omz_converter` <br> `omz_quantizer` <br> `omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](https://docs.openvinotoolkit.org/latest/omz_models_group_public.html) and [Intel](https://docs.openvinotoolkit.org/latest/omz_models_group_intel.html)-trained models. These free pre-trained models can be used to speed up the development and production deployment process without training your own models. The tool downloads model files from online sources and, if necessary, patches them to make them more usable with Model Optimizer. A number of additional tools are also provided to automate the process of working with downloaded models:<br> **Model Converter** is a tool for converting Open Model Zoo models that are stored in an original deep learning framework format into the Inference Engine Intermediate Representation (IR) using Model Optimizer. <br> **Model Quantizer** is a tool for automatic quantization of full-precision models in the IR format into low-precision versions using the Post-Training Optimization Tool. <br> **Model Information Dumper** is a helper utility for dumping information about the models to a stable, machine-readable format.|

 **Developer package** also provides the **runtime package** installed as a dependency. The runtime package includes the following components:
@@ -54,17 +54,14 @@ To avoid dependency conflicts, use a virtual environment. Skip this

 Create virtual environment:

-On Linux and macOS:
-```sh
-# Depending on your OS, this step may require installing python3-venv
-python3 -m venv openvino_env
-```
-
-On Windows:
 ```sh
+python -m pip install --user virtualenv
 python -m venv openvino_env
 ```

+> **NOTE**: On Linux and macOS, you may need to type `python3` instead of
+`python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/). For example, on Ubuntu execute the following command to get pip installed: `sudo apt install python3-venv python3-pip`.

 ### Step 2. Activate Virtual Environment

 On Linux and macOS:
@@ -90,8 +87,8 @@ To install and configure the components of the development package for working w
 | DL Framework | Extra |
 | :------------------------------------------------------------------------------- | :-------------------------------|
 | [Caffe*](https://caffe.berkeleyvision.org/) | caffe |
-| [Caffe2*](https://caffe2.ai/) | caffe2 |
+| [Caffe2*](https://github.com/pytorch/pytorch) | caffe2 |
-| [Kaldi*](https://kaldi-asr.org/) | kaldi |
+| [Kaldi*](https://github.com/kaldi-asr/kaldi) | kaldi |
 | [MXNet*](https://mxnet.apache.org/) | mxnet |
 | [ONNX*](https://github.com/microsoft/onnxruntime/) | onnx |
 | [PyTorch*](https://pytorch.org/) | pytorch |
|
@ -51,7 +51,7 @@ python -m venv openvino_env
```

> **NOTE**: On Linux and macOS, you may need to type `python3` instead of
-`python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/).
+`python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/). For example, on Ubuntu execute the following command to get pip installed: `sudo apt install python3-venv python3-pip`.

### Step 2. Activate Virtual Environment

@ -25,7 +25,6 @@ clamp( x_{i} )=\min\big( \max\left( x_{i}, min\_value \right), max\_value \big)
* **Description**: *min* is the lower bound of values in the output.
* **Range of values**: arbitrary floating point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *max*
@ -33,7 +32,6 @@ clamp( x_{i} )=\min\big( \max\left( x_{i}, min\_value \right), max\_value \big)
* **Description**: *max* is the upper bound of values in the output.
* **Range of values**: arbitrary floating point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

**Inputs**:
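The clamp formula quoted in the hunk header reduces to two nested comparisons; a minimal Python sketch (illustrative only, not OpenVINO API code) makes the behavior at both bounds explicit:

```python
def clamp(x, min_value, max_value):
    # clamp(x) = min(max(x, min_value), max_value), applied element-wise
    return min(max(x, min_value), max_value)

assert clamp(-5.0, 0.0, 6.0) == 0.0   # below the lower bound
assert clamp(3.0, 0.0, 6.0) == 3.0    # inside the range
assert clamp(9.0, 0.0, 6.0) == 6.0    # above the upper bound
```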
@ -29,7 +29,6 @@ where α corresponds to *alpha* attribute.
* **Description**: scale for the negative factor
* **Range of values**: non-negative arbitrary floating-point number
* **Type**: `float`
-* **Default value**: none
* **Required**: *yes*

**Inputs**:
@ -21,7 +21,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(y, x)` axes for 2D convolutions. For example, *strides* equal `2,1` means sliding the filter 2 pixels at a time over the height dimension and 1 over the width dimension.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@ -29,7 +28,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -38,7 +36,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -47,7 +44,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *mode*
@ -56,7 +52,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Range of values**:
  * *xnor-popcount*
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*
* **Note**: value `0` in inputs is interpreted as `-1`, value `1` as `1`

@ -65,7 +60,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *pad_value* is a floating-point value used to fill pad area.
* **Range of values**: a floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
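The *xnor-popcount* mode and the note that input `0` is interpreted as `-1` fit together: a dot product of `{-1, +1}` vectors reduces to XNOR plus popcount. A small sketch of that equivalence, offered as an illustration of the idea rather than the actual kernel:

```python
def binary_dot(a_bits, b_bits):
    # Bit 0 stands for -1 and bit 1 for +1, so the dot product of two
    # {-1, +1} vectors is: matches - mismatches = 2 * popcount(XNOR) - n.
    n = len(a_bits)
    matches = sum(1 for a, b in zip(a_bits, b_bits) if a == b)  # popcount of XNOR
    return 2 * matches - n

a = [1, 0, 1, 1]   # interpreted as [+1, -1, +1, +1]
b = [1, 1, 0, 1]   # interpreted as [+1, +1, -1, +1]
assert binary_dot(a, b) == (1 * 1) + (-1 * 1) + (1 * -1) + (1 * 1)  # == 0
```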
@ -43,7 +43,6 @@ else:
* **Description**: *strides* has the same definition as *strides* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@ -51,7 +50,6 @@ else:
* **Description**: *pads_begin* has the same definition as *pads_begin* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -60,7 +58,6 @@ else:
* **Description**: *pads_end* has the same definition as *pads_end* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -69,7 +66,6 @@ else:
* **Description**: *dilations* has the same definition as *dilations* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
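For these backward-convolution attributes, the standard transposed-convolution shape rule shows how *strides*, *dilations*, and the pads interact; the formula below is common background knowledge, assumed here rather than quoted from this diff:

```python
def deconv_output_dim(input_dim, stride, kernel, dilation, pad_begin, pad_end):
    # Standard transposed-convolution shape rule (background assumption):
    # the forward relation is inverted, so the stride multiplies the input
    # extent instead of dividing it.
    return stride * (input_dim - 1) + dilation * (kernel - 1) + 1 - pad_begin - pad_end

# A 4-pixel row upsampled by stride 2 with a 3-wide kernel and no padding:
assert deconv_output_dim(4, stride=2, kernel=3, dilation=1, pad_begin=0, pad_end=0) == 9
```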
@ -38,7 +38,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(z, y, x)` axes for 3D convolutions and `(y, x)` axes for 2D convolutions. For example, *strides* equal `4,2,1` means sliding the filter 4 pixels at a time over the depth dimension, 2 over the height dimension and 1 over the width dimension.
* **Range of values**: integer values starting from 0
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@ -46,7 +45,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -55,7 +53,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -64,7 +61,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from 0
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
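The forward-convolution counterparts of these attributes combine into the usual output-size formula. Again a background sketch, with the helper name chosen purely for illustration:

```python
def conv_output_dim(input_dim, kernel, stride, dilation, pad_begin, pad_end):
    # Standard convolution shape rule (background assumption): dilation
    # stretches the effective kernel, pads extend the input, and the stride
    # sets how many positions the filter can take.
    effective_kernel = dilation * (kernel - 1) + 1
    return (input_dim + pad_begin + pad_end - effective_kernel) // stride + 1

# 224-wide input, 3x3 kernel, stride 2, dilation 1, pads 1/1 -> 112
assert conv_output_dim(224, kernel=3, stride=2, dilation=1, pad_begin=1, pad_end=1) == 112
```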
@ -35,7 +35,6 @@ Where
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(y,x)` axes. For example, *strides* equal `2,1` means sliding the filter 2 pixels at a time over the height dimension and 1 over the width dimension.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@ -43,7 +42,6 @@ Where
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -52,7 +50,6 @@ Where
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -61,7 +58,6 @@ Where
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
@ -38,7 +38,6 @@ Where
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(y,x)` axes. For example, *strides* equal `2,1` means sliding the filter 2 pixels at a time over the height dimension and 1 over the width dimension.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@ -46,7 +45,6 @@ Where
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -55,7 +53,6 @@ Where
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -64,7 +61,6 @@ Where
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from `0`
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
@ -15,7 +15,6 @@
* **Description**: *strides* has the same definition as *strides* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@ -23,7 +22,6 @@
* **Description**: *pads_begin* has the same definition as *pads_begin* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -32,7 +30,6 @@
* **Description**: *pads_end* has the same definition as *pads_end* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -41,7 +38,6 @@
* **Description**: *dilations* has the same definition as *dilations* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
@ -16,7 +16,6 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(z, y, x)` axes for 3D convolutions and `(y, x)` axes for 2D convolutions. For example, *strides* equal `4,2,1` means sliding the filter 4 pixels at a time over the depth dimension, 2 over the height dimension and 1 over the width dimension.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@ -24,7 +23,6 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -33,7 +31,6 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@ -42,7 +39,6 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
@ -22,7 +22,6 @@ This operation is compatible with [MXNet DeformablePSROIPooling](https://mxnet.a
* **Description**: *output_dim* is the number of output channels; the size of the output `C` dimension.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *spatial_scale*
@ -30,7 +29,6 @@ This operation is compatible with [MXNet DeformablePSROIPooling](https://mxnet.a
* **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input original size to the pooling input. Ratio of the input score map size to the original image size.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *group_size*
@ -17,7 +17,6 @@ At each feature map cell, *DetectionOutput* predicts the offsets relative to the
* **Description**: number of classes to be predicted
* **Range of values**: positive integer number
* **Type**: int
-* **Default value**: None
* **Required**: *yes*

* *background_label_id*
@ -49,7 +48,6 @@ At each feature map cell, *DetectionOutput* predicts the offsets relative to the
* **Description**: maximum number of bounding boxes per batch to be kept after NMS step. -1 means keeping all bounding boxes after NMS step.
* **Range of values**: integer values
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *code_type*
@ -73,7 +71,6 @@ At each feature map cell, *DetectionOutput* predicts the offsets relative to the
* **Description**: threshold to be used in the NMS stage
* **Range of values**: floating point values
* **Type**: float
-* **Default value**: None
* **Required**: *yes*

* *confidence_threshold*
@ -19,7 +19,6 @@ ROIs coordinates are specified in absolute values for the average mode and in no
* **Description**: *output_dim* is a pooled output channel number.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *group_size*
@ -35,7 +34,6 @@ ROIs coordinates are specified in absolute values for the average mode and in no
* **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *mode*
@ -39,7 +39,6 @@
* **Description**: *offset* is a shift of each box relative to the top left corner. For example, *offset* equal to 85 means that the shift of the neighboring prior box centers is 85.
* **Range of values**: floating point positive number
* **Type**: float
-* **Default value**: None
* **Required**: *yes*

* *variance*
@ -92,7 +92,6 @@
* **Description**: *offset* is a shift of each box relative to the top left corner.
* **Range of values**: floating point non-negative number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *variance*
@ -28,21 +28,18 @@
* **Description**: *base_size* is the size of the anchor to which *scale* and *ratio* attributes are applied.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *pre_nms_topn*
* **Description**: *pre_nms_topn* is the number of bounding boxes before the NMS operation. For example, *pre_nms_topn* equal to 15 means to take the top 15 boxes with the highest scores.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *post_nms_topn*
* **Description**: *post_nms_topn* is the number of bounding boxes after the NMS operation. For example, *post_nms_topn* equal to 15 means to keep the top 15 boxes with the highest scores after NMS.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *nms_thresh*
@ -50,7 +47,6 @@
* **Description**: *nms_thresh* is the minimum value of the proposal to be taken into consideration. For example, *nms_thresh* equal to 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *feat_stride*
@ -58,7 +54,6 @@
* **Description**: *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal to 16 means that all boxes are analyzed with a stride of 16.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *min_size*
@ -66,7 +61,6 @@
* **Description**: *min_size* is the minimum size of a box to be taken into consideration. For example, *min_size* equal to 35 means that all boxes with a box size less than 35 are filtered out.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *ratio*
@ -74,7 +68,6 @@
* **Description**: *ratio* is the list of ratios for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
-* **Default value**: None
* **Required**: *yes*

* *scale*
@ -82,7 +75,6 @@
* **Description**: *scale* is the list of scales for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
-* **Default value**: None
* **Required**: *yes*

* *clip_before_nms*
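To make the interplay of *pre_nms_topn*, *nms_thresh*, *min_size*, and *post_nms_topn* concrete, here is a rough sketch of a Proposal-style filtering pipeline. Two caveats: the helper names are invented for this sketch, and the description above reads *nms_thresh* as a score cutoff while the name suggests an IoU overlap threshold; the conventional greedy-IoU reading is assumed here.

```python
def iou(a, b):
    # Boxes are (x1, y1, x2, y2); plain intersection-over-union.
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / (area(a) + area(b) - inter + 1e-9)

def proposal_filter(boxes, scores, pre_nms_topn, nms_thresh, min_size, post_nms_topn):
    # 1. Drop boxes smaller than min_size on either side.
    kept = [(b, s) for b, s in zip(boxes, scores)
            if b[2] - b[0] >= min_size and b[3] - b[1] >= min_size]
    # 2. Keep the pre_nms_topn highest-scoring boxes.
    kept = sorted(kept, key=lambda p: p[1], reverse=True)[:pre_nms_topn]
    # 3. Greedy NMS: suppress boxes overlapping an already-kept box too much.
    out = []
    for b, s in kept:
        if all(iou(b, k) <= nms_thresh for k, _ in out):
            out.append((b, s))
    # 4. Keep at most post_nms_topn survivors.
    return out[:post_nms_topn]

boxes = [(0, 0, 10, 10), (1, 1, 11, 11), (20, 20, 22, 22)]
scores = [0.9, 0.8, 0.7]
kept = proposal_filter(boxes, scores, pre_nms_topn=3, nms_thresh=0.5, min_size=3, post_nms_topn=2)
# The second box overlaps the first heavily and is suppressed; the third is
# only 2 pixels wide and is dropped by min_size, so one box survives.
assert len(kept) == 1
```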
@ -37,21 +37,18 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *base_size* is the size of the anchor to which *scale* and *ratio* attributes are applied.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *pre_nms_topn*
* **Description**: *pre_nms_topn* is the number of bounding boxes before the NMS operation. For example, *pre_nms_topn* equal to 15 means to take the top 15 boxes with the highest scores.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *post_nms_topn*
* **Description**: *post_nms_topn* is the number of bounding boxes after the NMS operation. For example, *post_nms_topn* equal to 15 means to keep the top 15 boxes with the highest scores after NMS.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *nms_thresh*
@ -59,7 +56,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *nms_thresh* is the minimum value of the proposal to be taken into consideration. For example, *nms_thresh* equal to 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *feat_stride*
@ -67,7 +63,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal to 16 means that all boxes are analyzed with a stride of 16.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *min_size*
@ -75,7 +70,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *min_size* is the minimum size of a box to be taken into consideration. For example, *min_size* equal to 35 means that all boxes with a box size less than 35 are filtered out.
* **Range of values**: a positive integer number
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *ratio*
@ -83,7 +77,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *ratio* is the list of ratios for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
-* **Default value**: None
* **Required**: *yes*

* *scale*
@ -91,7 +84,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *scale* is the list of scales for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
-* **Default value**: None
* **Required**: *yes*

* *clip_before_nms*
@ -20,7 +20,6 @@
* **Description**: *pooled_h* is the height of the ROI output feature map.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *pooled_w*
@ -28,7 +27,6 @@
* **Description**: *pooled_w* is the width of the ROI output feature map.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *sampling_ratio*
@ -37,7 +35,6 @@
is equal to 0 then use adaptive number of elements over height and width: `ceil(roi_height / pooled_h)` and `ceil(roi_width / pooled_w)` respectively.
* **Range of values**: a non-negative integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *spatial_scale*
@ -45,7 +42,6 @@
* **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *mode*
@ -55,7 +51,6 @@
  * *max* - maximum pooling
  * *avg* - average pooling
* **Type**: string
-* **Default value**: None
* **Required**: *yes*

**Inputs**:
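The adaptive sampling rule quoted above (`sampling_ratio == 0`) is mechanical enough to show directly. A small sketch, under the assumption that `roi_height` and `roi_width` are already expressed in feature-map coordinates:

```python
import math

def sampling_bins(roi_height, roi_width, pooled_h, pooled_w, sampling_ratio):
    # With sampling_ratio == 0, ROIAlign picks the number of sample points
    # per pooled bin adaptively; otherwise the fixed ratio is used.
    if sampling_ratio == 0:
        return math.ceil(roi_height / pooled_h), math.ceil(roi_width / pooled_w)
    return sampling_ratio, sampling_ratio

assert sampling_bins(13.0, 7.0, 2, 2, 0) == (7, 4)   # adaptive
assert sampling_bins(13.0, 7.0, 2, 2, 2) == (2, 2)   # fixed
```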
@ -109,4 +104,3 @@ The box height and width are calculated the following way: `roi_width = max(spat
</output>
</layer>
```

@ -26,7 +26,6 @@ The box height and width have different representation based on **method** attri
* **Description**: *pooled_h* is the height of the ROI output feature map. For example, *pooled_h* equal to 6 means that the height of the output of *ROIPooling* is 6.
* **Range of values**: a non-negative integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *pooled_w*
@ -34,7 +33,6 @@ The box height and width have different representation based on **method** attri
* **Description**: *pooled_w* is the width of the ROI output feature map. For example, *pooled_w* equal to 6 means that the width of the output of *ROIPooling* is 6.
* **Range of values**: a non-negative integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *spatial_scale*
@ -42,7 +40,6 @@ The box height and width have different representation based on **method** attri
* **Description**: *spatial_scale* is the ratio of the input feature map size to the input image size.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *method*
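Since *spatial_scale* recurs in several pooling operations here, one worked translation may help; the numbers below are illustrative only (a backbone that downsamples by 16):

```python
# spatial_scale maps ROI coordinates given in input-image pixels onto the
# feature map. Illustrative numbers only.
spatial_scale = 1.0 / 16.0          # feature map is 16x smaller than the image
roi_x1_image = 320                  # ROI coordinate in image pixels
roi_x1_feature = roi_x1_image * spatial_scale
assert roi_x1_feature == 20.0       # same point, feature-map coordinates
```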
@ -23,7 +23,6 @@
* **Description**: starting axis index in the input tensor `data` shape that will be flattened in the output; the end of the flattened range is defined by the `end_axis` attribute.
* **Range of values**: `-rank(data) .. rank(data)-1`
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *coords*
@ -31,7 +30,6 @@
* **Description**: *coords* is the number of coordinates for each region.
* **Range of values**: an integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *classes*
@ -39,7 +37,6 @@
* **Description**: *classes* is the number of classes for each region.
* **Range of values**: an integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *end_axis*
@ -47,7 +44,6 @@
* **Description**: ending axis index in the input tensor `data` shape that will be flattened in the output; the beginning of the flattened range is defined by the `axis` attribute.
* **Range of values**: `-rank(data)..rank(data)-1`
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *num*
@ -55,7 +51,6 @@
* **Description**: *num* is the number of regions.
* **Range of values**: an integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *do_softmax*
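The `axis`/`end_axis` pair describes a shape-flattening step that is easy to state in code; a sketch of that calculation, with the function name and example shape chosen for illustration:

```python
from functools import reduce
from operator import mul

def flatten_range(shape, axis, end_axis):
    # Axes axis..end_axis (inclusive, negative indices allowed) are collapsed
    # into one dimension; everything outside the range is preserved.
    rank = len(shape)
    a = axis % rank
    e = end_axis % rank
    merged = reduce(mul, shape[a:e + 1], 1)
    return shape[:a] + (merged,) + shape[e + 1:]

# A typical use: flatten the channel and spatial axes of an NCHW tensor.
assert flatten_range((1, 255, 26, 26), axis=1, end_axis=-1) == (1, 172380)
```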
@ -17,7 +17,6 @@
* **Description**: *stride* is the distance between cut throws in output blobs.
* **Range of values**: positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

**Inputs**:
@ -13,8 +13,7 @@
* **Description**: the output tensor type
* **Range of values**: any numeric type
* **Type**: string
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

**Inputs**:

@ -13,7 +13,6 @@
* **Description**: `axes` specify spatial dimension indices where interpolation is applied. Other dimensions are treated as batch dimensions. The order of elements in the `axes` attribute matters and is mapped directly to elements with the same indices in the 2nd input `target_spatial_shape`.
* **Range of values**: list of non-negative integer numbers
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

* *mode*
@ -21,7 +20,6 @@
* **Description**: specifies type of interpolation
* **Range of values**: one of `nearest`, `linear`, `cubic`, `area`
* **Type**: string
-* **Default value**: none
* **Required**: *yes*

* *align_corners*
@ -13,7 +13,6 @@
* **Description**: specifies type of interpolation
* **Range of values**: one of `nearest`, `linear`, `linear_onnx`, `cubic`
* **Type**: string
-* **Default value**: none
* **Required**: *yes*
**Note**: Only 2D, 3D, 4D, 5D tensors with `axes = {0, 1}`, `axes = {0, 1, 2}`, `axes = {2, 3}`, `axes = {2, 3, 4}` respectively are supported for `"mode" == "linear_onnx"`.

@@ -24,7 +23,6 @@
* `sizes` - an output shape is calculated as `output_shape[axes[i]] = sizes[i]` for all `i in range(0, len(axes))` and `output_shape[j] = input_shape[j] + pads_begin[j] + pads_end[j]` for `j not in axes`, `j in range(0, rank(data))`.
* `scales` - an output shape is calculated as `output_shape[axes[i]] = floor(scales[i] * (input_shape[axes[i]] + pads_begin[axes[i]] + pads_end[axes[i]]))` for all `i in range(0, len(axes))` and `output_shape[j] = input_shape[j] + pads_begin[j] + pads_end[j]` for `j not in axes`, `j in range(0, rank(data))`
* **Type**: string
-* **Default value**: none
* **Required**: *yes*

* *coordinate_transformation_mode*
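To make the two shape formulas in the hunk above concrete, here is a minimal C++ sketch of the `scales` branch (illustrative only; the function and variable names are hypothetical, not from the specification):

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Axes listed in `axes` are rescaled after padding; all other axes are only padded.
std::vector<int64_t> interpolateOutputShape(const std::vector<int64_t>& input_shape,
                                            const std::vector<int64_t>& pads_begin,
                                            const std::vector<int64_t>& pads_end,
                                            const std::vector<int64_t>& axes,
                                            const std::vector<float>& scales) {
    std::vector<int64_t> output_shape(input_shape.size());
    for (size_t j = 0; j < input_shape.size(); ++j)
        output_shape[j] = input_shape[j] + pads_begin[j] + pads_end[j];
    for (size_t i = 0; i < axes.size(); ++i)
        output_shape[axes[i]] = static_cast<int64_t>(std::floor(scales[i] * output_shape[axes[i]]));
    // For the `sizes` mode the second loop would instead assign sizes[i] directly.
    return output_shape;
}
```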
@@ -19,8 +19,7 @@ declared in `variable_id` and returns an error otherwise.
* **Description**: identifier of the variable to be updated
* **Range of values**: any non-empty string
* **Type**: string
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

**Inputs**

@@ -13,7 +13,6 @@
* **Description**: specifies position in binary file with weights where the content of the constant begins; value in bytes
* **Range of values**: non-negative integer value
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *size*
@@ -21,7 +20,6 @@
* **Description**: size of constant content in binary files; value in bytes
* **Range of values**: a positive integer greater than zero
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *element_type*
@@ -29,16 +27,14 @@
* **Description**: the type of element of output tensor
* **Range of values**: u1, u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, boolean, bf16
* **Type**: `string`
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

* *shape*

* **Description**: the shape of the output tensor
* **Range of values**: list of non-negative integers, empty list is allowed, which means 0D or scalar tensor
* **Type**: `int[]`
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*


**Outputs**
@@ -13,7 +13,6 @@
* **Description**: the type of element of output tensor
* **Range of values**: u1, u4, u8, u16, u32, u64, i4, i8, i16, i32, i64, f16, f32, boolean, bf16
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

* *shape*
@@ -21,7 +20,6 @@
* **Description**: the shape of the output tensor
* **Range of values**: list of non-negative integers, empty list is allowed, which means 0D or scalar tensor
* **Type**: `int[]`
-* **Default value**: None
* **Required**: *yes*

@@ -21,8 +21,7 @@ with the shape and type from the 1st input.
* **Description**: identifier of the variable to be read
* **Range of values**: any non-empty string
* **Type**: string
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

**Inputs**

@@ -13,7 +13,6 @@
* **Description**: *axis* specifies dimension to concatenate along
* **Range of values**: integer number. Negative value means counting dimension from the end. The range is `[-R, R-1]`, where `R` is the rank of all inputs.
* **Type**: int
-* **Default value**: None
* **Required**: *yes*

**Inputs**:
@@ -43,7 +43,6 @@ If `mode = depth_first`:
* *blocks_first*: the input depth is divided to `[block_size, ..., block_size, new_depth]`
* *depth_first*: the input depth is divided to `[new_depth, block_size, ..., block_size]`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

**Inputs**
@@ -22,7 +22,6 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
* **Description**: *sizes* is a size `[size_rows, size_cols]` of the extracted patches.
* **Range of values**: non-negative integer number
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *strides*
@@ -30,7 +29,6 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
* **Description**: *strides* is a distance `[stride_rows, stride_cols]` between centers of two consecutive patches in an input tensor.
* **Range of values**: non-negative integer number
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *rates*
@@ -38,7 +36,6 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
* **Description**: *rates* is the input stride `[rate_rows, rate_cols]`, specifying how far two consecutive patch samples are in the input. Equivalent to extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling them spatially by a factor of rates. This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
* **Range of values**: non-negative integer number
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *auto_pad*
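As a quick sanity check of the `patch_sizes_eff` formula in the *rates* description above (a hedged illustration; the helper name is hypothetical):

```cpp
#include <cstdint>

// patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)
int64_t effectivePatchSize(int64_t patch_size, int64_t rate) {
    return patch_size + (patch_size - 1) * (rate - 1);
}
// e.g. a 3-wide patch with rate 2 spans effectivePatchSize(3, 2) == 5 input pixels,
// matching the dilated ("atrous") convolution analogy in the description.
```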
@@ -47,7 +44,6 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
* *same_upper (same_lower)* the input is padded by zeros to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
* *valid* - do not use padding.
* **Type**: string
-* **Default value**: None
* **Required**: *yes*

**Inputs**
@@ -69,7 +69,6 @@ OUTPUT =
* `reflect` - padded values are a reflection of the input `data` tensor; values on the edges are not duplicated. `pads_begin[D]` and `pads_end[D]` must be not greater than `data.shape[D] - 1` for any valid `D`.
* `symmetric` - padded values are symmetrically added from the input `data` tensor. This method is similar to the `reflect`, but values on edges are duplicated. Refer to the examples above for more details. `pads_begin[D]` and `pads_end[D]` must be not greater than `data.shape[D]` for any valid `D`.
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

**Inputs**
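The difference between the two modes in the hunk above is easiest to see on a 1-D example. A minimal sketch, valid for pads within the stated constraints (the helper is hypothetical, not the Pad reference implementation):

```cpp
#include <vector>

// For data = {1, 2, 3} and one pad on each side:
//   reflect   -> {2, 1, 2, 3, 2}  (edge values are not duplicated)
//   symmetric -> {1, 1, 2, 3, 3}  (edge values are duplicated)
std::vector<int> pad1d(const std::vector<int>& data, int pads_begin, int pads_end, bool symmetric) {
    const int n = static_cast<int>(data.size());
    std::vector<int> out;
    for (int i = -pads_begin; i < n + pads_end; ++i) {
        int j = i;
        if (j < 0) j = symmetric ? -j - 1 : -j;                     // mirror at the left edge
        if (j >= n) j = symmetric ? 2 * n - j - 1 : 2 * n - j - 2;  // mirror at the right edge
        out.push_back(data[j]);
    }
    return out;
}
```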
@@ -21,7 +21,6 @@ If no axis specified, that means either the second input is empty if `index` mod
* **Description**: specifies how the second input tensor should be interpreted: as a set of indices or a mask
* **Range of values**: `index`, `mask`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

**Inputs**:
@@ -44,7 +44,6 @@ If `mode = depth_first`:
* *blocks_first*: the output depth is gathered from `[block_size, ..., block_size, C]`
* *depth_first*: the output depth is gathered from `[C, block_size, ..., block_size]`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

**Inputs**
@@ -24,7 +24,6 @@ Where D is the rank of input tensor `data`. The axis being split must be evenly
* **Description**: number of outputs into which the input tensor `data` will be split along `axis` dimension. The dimension of `data` shape along `axis` must be evenly divisible by *num_splits*
* **Range of values**: an integer within the range `[1, data.shape[axis]]`
* **Type**: `int`
-* **Default value**: none
* **Required**: *yes*

**Inputs**
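The divisibility requirement above fully determines the output shapes; a minimal sketch (hypothetical helper):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Each of the num_splits outputs keeps the input shape except along `axis`,
// which shrinks by the factor num_splits. E.g. {6, 12, 10, 24} split along
// axis 1 with num_splits = 3 gives three outputs of shape {6, 4, 10, 24}.
std::vector<int64_t> splitOutputShape(std::vector<int64_t> data_shape, size_t axis, int64_t num_splits) {
    assert(data_shape[axis] % num_splits == 0);  // the "evenly divisible" constraint
    data_shape[axis] /= num_splits;
    return data_shape;
}
```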
@@ -60,7 +60,6 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values
* **Description**: *epsilon* is a constant added to the variance for numerical stability.
* **Range of values**: a floating-point number greater than or equal to zero
* **Type**: `float`
-* **Default value**: none
* **Required**: *yes*

**Inputs**
@@ -60,7 +60,6 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values
* **Description**: *epsilon* is a constant added to the variance for numerical stability.
* **Range of values**: a floating-point number greater than or equal to zero
* **Type**: `float`
-* **Default value**: none
* **Required**: *yes*

**Inputs**
@@ -19,7 +19,6 @@
* **Description**: *bias* is added to the variance.
* **Range of values**: a non-negative floating point value
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

**Inputs**
@@ -34,7 +34,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *alpha* represents the scaling attribute for the normalizing sum. For example, *alpha* equal `0.0001` means that the normalizing sum is multiplied by `0.0001`.
* **Range of values**: no restrictions
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *beta*
@@ -42,7 +41,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *beta* represents the exponent for the normalizing sum. For example, *beta* equal `0.75` means that the normalizing sum is raised to the power of `0.75`.
* **Range of values**: positive number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *bias*
@@ -50,7 +48,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *bias* represents the offset. Usually a positive number to avoid dividing by zero.
* **Range of values**: no restrictions
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *size*
@@ -58,7 +55,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *size* represents the side length of the region to be used for the normalization sum. The region can have one or more dimensions depending on the second input axes indices.
* **Range of values**: positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

**Inputs**
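All four attributes meet in the `output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta` formula quoted in the hunk headers above. A minimal sketch for the common single-axis case (hypothetical helper; the window-centring convention is an assumption, not quoted from the spec):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> lrn1d(const std::vector<float>& data, int size,
                         float alpha, float beta, float bias) {
    const int n = static_cast<int>(data.size());
    std::vector<float> out(n);
    for (int i = 0; i < n; ++i) {
        float sqr_sum = 0.f;
        // sqr_sum over a window of side `size` around element i (assumed centring)
        for (int j = std::max(0, i - (size - 1) / 2); j <= std::min(n - 1, i + size / 2); ++j)
            sqr_sum += data[j] * data[j];
        // with one normalized axis, size ** len(axes) == size
        out[i] = data[i] / std::pow(bias + (alpha / size) * sqr_sum, beta);
    }
    return out;
}
```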
@@ -44,7 +44,6 @@ o_{i}=\frac{o_{i}}{\sum \sqrt {o_{k}^2}+\epsilon}
* **Description**: *eps* is the number to be added to the variance to avoid division by zero when normalizing the value. For example, *epsilon* equal to 0.001 means that 0.001 is added to the variance.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

**Inputs**
@@ -33,7 +33,6 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* `false` -- Do not normalize variance
* `true` -- Normalize variance
* **Type**: `boolean`
-* **Default value**: None
* **Required**: *yes*

* *eps*
@@ -41,7 +40,6 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* **Description**: *eps* is the number to be added to the variance to avoid division by zero when normalizing the value.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *eps_mode*
@@ -51,7 +49,6 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* `inside_sqrt` -- Add epsilon inside sqrt
* `outside_sqrt` -- Add epsilon outside of sqrt
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

**Inputs**
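The two `eps_mode` values above differ only in where *eps* enters the denominator; a one-line illustration (hypothetical helper, `variance` assumed already computed):

```cpp
#include <cmath>

// inside_sqrt : (x - mean) / sqrt(variance + eps)
// outside_sqrt: (x - mean) / (sqrt(variance) + eps)
float mvnDenominator(float variance, float eps, bool inside_sqrt) {
    return inside_sqrt ? std::sqrt(variance + eps) : std::sqrt(variance) + eps;
}
```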
@@ -4,47 +4,57 @@

**Category**: *Normalization*

-**Short description**: *NormalizeL2* operation performs L2 normalization of the 1st input tensor in slices specified by the 2nd input.
+**Short description**: *NormalizeL2* operation performs L2 normalization on a given input `data` along dimensions specified by `axes` input.
+
+**Detailed Description**
+
+Each element in the output is the result of dividing the corresponding element of `data` input by the result of L2 reduction along dimensions specified by the `axes` input:
+
+    output[i0, i1, ..., iN] = x[i0, i1, ..., iN] / sqrt(eps_mode(sum[j0,..., jN](x[j0, ..., jN]**2), eps))
+
+Where indices `i0, ..., iN` run through all valid indices for the `data` input and summation `sum[j0, ..., jN]` has `jk = ik` for those dimensions `k` that are not in the set of indices specified by the `axes` input of the operation.
+`eps_mode` selects how the reduction value and `eps` are combined. It can be `max` or `add` depending on `eps_mode` attribute value.
+
+Particular cases:
+
+1. If `axes` is an empty list, then each input element is divided by itself, resulting in value `1` for all non-zero elements.
+2. If `axes` contains all dimensions of input `data`, a single L2 reduction value is calculated for the entire input tensor and each input element is divided by that value.

**Attributes**

* *eps*

-* **Description**: *eps* is the number to be added/maximized to/with the variance to avoid division by zero when normalizing the value. For example, *eps* equal to 0.001 means that 0.001 is used if all the values in normalization are equal to zero.
+* **Description**: *eps* is the number applied by *eps_mode* function to the sum of squares to avoid division by zero when normalizing the value.
* **Range of values**: a positive floating-point number
* **Type**: `float`
-* **Default value**: None
* **Required**: *yes*

* *eps_mode*

-* **Description**: Specifies how *eps* is combined with L2 value calculated before division.
-* **Range of values**: `add`, `max`
+* **Description**: Specifies how *eps* is combined with the sum of squares to avoid division by zero.
+* **Range of values**: `add` or `max`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

**Inputs**

-* **1**: `data` - input tensor to be normalized. Type of elements is any floating point type. Required.
+* **1**: `data` - A tensor of type *T* and arbitrary shape. **Required.**

-* **2**: `axes` - scalar or 1D tensor with axis indices for the `data` input along which L2 reduction is calculated. Required.
+* **2**: `axes` - Axis indices of `data` input tensor, along which L2 reduction is calculated. A scalar or 1D tensor of unique elements and type *T_IND*. The range of elements is `[-r, r-1]`, where `r` is the rank of `data` input tensor. **Required.**

**Outputs**

-* **1**: Tensor of the same shape and type as the `data` input and normalized slices defined by `axes` input.
+* **1**: The result of *NormalizeL2* function applied to `data` input tensor. Normalized tensor of the same type and shape as the data input.

-**Detailed Description**
+**Types**

-Each element in the output is the result of division of corresponding element from the `data` input tensor by the result of L2 reduction along dimensions specified by the `axes` input:
+* *T*: arbitrary supported floating-point type.
+* *T_IND*: any supported integer type.

-    output[i0, i1, ..., iN] = x[i0, i1, ..., iN] / sqrt(eps_mode(sum[j0,..., jN](x[j0, ..., jN]**2), eps))
+**Examples**

-Where indices `i0, ..., iN` run through all valid indices for the 1st input and summation `sum[j0, ..., jN]` have `jk = ik` for those dimensions `k` that are not in the set of indices specified by the `axes` input of the operation. One of the corner cases is when `axes` is an empty list, then we divide each input element by itself resulting value 1 for all non-zero elements. Another corner case is where `axes` input contains all dimensions from `data` tensor, which means that a single L2 reduction value is calculated for entire input tensor and each input element is divided by that value.
+*Example: Normalization over channel dimension for `NCHW` layout*

-`eps_mode` selects how the reduction value and `eps` are combined. It can be `max` or `add` depending on `eps_mode` attribute value.
-
-**Example**

```xml
<layer id="1" type="NormalizeL2" ...>
@@ -57,7 +67,34 @@ Where indices `i0, ..., iN` run through all valid indices for the 1st input and
        <dim>24</dim>
    </port>
    <port id="1">
-        <dim>2</dim> <!-- value is [2, 3] that means independent normalization in each channel -->
+        <dim>1</dim> <!-- axes list [1] means normalization over channel dimension -->
+    </port>
+</input>
+<output>
+    <port id="2">
+        <dim>6</dim>
+        <dim>12</dim>
+        <dim>10</dim>
+        <dim>24</dim>
+    </port>
+</output>
+</layer>
+```
+
+*Example: Normalization over channel and spatial dimensions for `NCHW` layout*
+
+```xml
+<layer id="1" type="NormalizeL2" ...>
+    <data eps="1e-8" eps_mode="add"/>
+    <input>
+        <port id="0">
+            <dim>6</dim>
+            <dim>12</dim>
+            <dim>10</dim>
+            <dim>24</dim>
+        </port>
+        <port id="1">
+            <dim>3</dim> <!-- axes list [1, 2, 3] means normalization over channel and spatial dimensions -->
    </port>
</input>
<output>
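A worked sketch of the rewritten formula above for the simplest case, where `axes` covers the whole slice being normalized (hypothetical helper, not the reference kernel):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> normalizeL2(const std::vector<float>& x, float eps, bool eps_mode_is_max) {
    float sqr_sum = 0.f;
    for (float v : x) sqr_sum += v * v;
    // eps_mode combines the reduction value with eps before the square root.
    const float denom = std::sqrt(eps_mode_is_max ? std::max(sqr_sum, eps) : sqr_sum + eps);
    std::vector<float> out;
    out.reserve(x.size());
    for (float v : x) out.push_back(v / denom);
    return out;
}
```

For `x = {3, 4}` and a small `eps` this returns approximately `{0.6, 0.8}`, and an all-zero slice stays finite thanks to `eps`.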
@@ -19,7 +19,6 @@
* **Description**: *strides* is a distance (in pixels) to slide the window on the feature map over the (z, y, x) axes for 3D poolings and (y, x) axes for 2D poolings. For example, *strides* equal "4,2,1" means sliding the window 4 pixels at a time over depth dimension, 2 over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@@ -27,7 +26,6 @@
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal "1,2" means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@@ -36,7 +34,6 @@
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal "1,2" means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@@ -45,7 +42,6 @@
* **Description**: *kernel* is a size of each filter. For example, *kernel* equal (2, 3) means that each filter has height equal to 2 and width equal to 3.
* **Range of values**: integer values starting from 1
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *exclude-pad*
@@ -53,7 +49,6 @@
* **Description**: *exclude-pad* is a type of pooling strategy for values in the padding area. For example, if *exclude-pad* is "true", then zero-values that came from padding are not included in averaging calculation.
* **Range of values**: true or false
* **Type**: boolean
-* **Default value**: None
* **Required**: *yes*

* *rounding_type*
@@ -15,7 +15,6 @@
* **Description**: *strides* is a distance (in pixels) to slide the window on the feature map over the (z, y, x) axes for 3D poolings and (y, x) axes for 2D poolings. For example, *strides* equal "4,2,1" means sliding the window 4 pixels at a time over depth dimension, 2 over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *pads_begin*
@@ -23,7 +22,6 @@
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal "1,2" means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@@ -32,7 +30,6 @@
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal "1,2" means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.

@@ -41,7 +38,6 @@
* **Description**: *kernel* is a size of each filter. For example, *kernel* equal (2, 3) means that each filter has height equal to 2 and width equal to 3.
* **Range of values**: integer values starting from 1
* **Type**: int[]
-* **Default value**: None
* **Required**: *yes*

* *rounding_type*
@@ -29,7 +29,6 @@ else:
* **Description**: *levels* is the number of quantization levels (e.g. 2 is for binarization, 255/256 is for int8 quantization)
* **Range of values**: an integer greater than or equal to 2
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *auto_broadcast*
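The role of *levels* is to fix the number of points on the quantization grid. A sketch of the usual fake-quantize mapping (assumed semantics for illustration; only the `else:` branch header of the actual formula is visible in this hunk):

```cpp
#include <cmath>

// `levels` evenly spaced output values between out_low and out_high;
// levels == 2 keeps only the two endpoints, i.e. binarization.
float fakeQuantize(float x, float in_low, float in_high,
                   float out_low, float out_high, int levels) {
    if (x <= in_low) return out_low;
    if (x > in_high) return out_high;
    const float q = std::round((x - in_low) / (in_high - in_low) * (levels - 1)) / (levels - 1);
    return q * (out_high - out_low) + out_low;
}
```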
@@ -13,7 +13,6 @@
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *activations*
@@ -18,7 +18,6 @@ A single cell in the sequence is implemented in the same way as in <a href="#GRU
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *activations*
@@ -50,8 +49,7 @@ A single cell in the sequence is implemented in the same way as in <a href="#GRU
* **Description**: Specify if the RNN is forward, reverse, or bidirectional. If it is one of *forward* or *reverse* then `num_directions = 1`, if it is *bidirectional*, then `num_directions = 2`. This `num_directions` value specifies input/output shape requirements.
* **Range of values**: *forward*, *reverse*, *bidirectional*
* **Type**: `string`
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

* *linear_before_reset*

@@ -30,7 +30,6 @@ tanh - (e^{2x} - 1)/(e^{2x} + 1)
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *activations*
@@ -18,7 +18,6 @@ A single cell in the sequence is implemented in the same way as in <a href="#LST
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *activations*
@@ -50,8 +49,7 @@ A single cell in the sequence is implemented in the same way as in <a href="#LST
* **Description**: Specify if the RNN is forward, reverse, or bidirectional. If it is one of *forward* or *reverse* then `num_directions = 1`, if it is *bidirectional*, then `num_directions = 2`. This `num_directions` value specifies input/output shape requirements.
* **Range of values**: *forward*, *reverse*, *bidirectional*
* **Type**: `string`
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

**Inputs**

@@ -27,7 +27,6 @@ The types of input scalars `on_value` and `off_value` should match and be equal
* **Description**: *axis* is a new axis position in the output shape to fill with one-hot values.
* **Range of values**: an integer. Negative value means counting dimension from the end.
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

**Inputs**:
@@ -13,7 +13,6 @@
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *activations*
@@ -18,7 +18,6 @@ A single cell in the sequence is implemented in the same way as in <a href="#RNN
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *activations*
@@ -50,8 +49,7 @@ A single cell in the sequence is implemented in the same way as in <a href="#RNN
* **Description**: Specify if the RNN is forward, reverse, or bidirectional. If it is one of *forward* or *reverse* then `num_directions = 1`, if it is *bidirectional*, then `num_directions = 2`. This `num_directions` value specifies input/output shape requirements.
* **Range of values**: *forward*, *reverse*, *bidirectional*
* **Type**: `string`
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

**Inputs**

@@ -21,7 +21,6 @@ If `special_zero` is set to `true` index of `0` cannot be larger than the rank o
* **Description**: *special_zero* controls how zero values in `shape` are interpreted. If *special_zero* is `false`, then `0` is interpreted as-is which means that output shape will contain a zero dimension at the specified location. Input and output tensors are empty in this case. If *special_zero* is `true`, then all zeros in `shape` imply the copying of corresponding dimensions from `data.shape` into the output shape *(left aligned)*.
* **Range of values**: `false` or `true`
* **Type**: `boolean`
-* **Default value**: None
* **Required**: *yes*

**Inputs**:
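The *special_zero* rule above can be sketched in a few lines (hypothetical helper; only the zero-copying behaviour described in the hunk is implemented):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// special_zero == true : a 0 in `shape` copies the dimension at the same
//                        (left-aligned) position from data_shape.
// special_zero == false: a 0 stays a zero dimension (empty tensor).
std::vector<int64_t> resolveReshapeShape(const std::vector<int64_t>& data_shape,
                                         std::vector<int64_t> shape, bool special_zero) {
    if (special_zero)
        for (size_t i = 0; i < shape.size(); ++i)
            if (shape[i] == 0)
                shape[i] = data_shape[i];
    return shape;
}
// e.g. data_shape = {2, 3, 4}, shape = {0, 12}, special_zero = true -> {2, 12}
```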
@@ -13,7 +13,6 @@
* **Description**: Specifies the axis along which the values are retrieved.
* **Range of values**: An integer. Negative value means counting dimension from the end.
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *mode*
@@ -21,7 +20,6 @@
* **Description**: Specifies which operation is used to select the biggest element of two.
* **Range of values**: `min`, `max`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

* *sort*
@@ -29,7 +27,6 @@
* **Description**: Specifies order of output elements and/or indices.
* **Range of values**: `value`, `index`, `none`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

* *index_element_type*
@@ -13,7 +13,6 @@
* **Description**: Specifies the axis along which the values are retrieved.
* **Range of values**: An integer. Negative value means counting dimension from the end.
* **Type**: `int`
-* **Default value**: None
* **Required**: *yes*

* *mode*
@@ -21,7 +20,6 @@
* **Description**: Specifies which operation is used to select the biggest element of two.
* **Range of values**: `min`, `max`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

* *sort*
@@ -29,7 +27,6 @@
* **Description**: Specifies order of output elements and/or indices.
* **Range of values**: `value`, `index`, `none`
* **Type**: `string`
-* **Default value**: None
* **Required**: *yes*

* *index_element_type*
@@ -25,8 +25,7 @@ where `a` corresponds to the input tensor.
* **Description**: the destination type.
* **Range of values**: one of the supported types *T*
* **Type**: `string`
-* **Default value**: None
-* **Required**: *Yes*
+* **Required**: *yes*

**Inputs**

@@ -36,10 +36,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE
set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})

# ATTENTION: uncomment to register a plugin in the plugins.xml file
-if(ENABLE_TEMPLATE_PLUGIN)
-    ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
-                        POSSIBLE_PLUGINS ${TARGET_NAME})
-endif()
+# ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
+#                     POSSIBLE_PLUGINS ${TARGET_NAME})
# [cmake:plugin]

# ATTENTION: uncomment to install component
@@ -66,7 +66,16 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
    // TODO: add post-processing based on outputsInfoMap
    // Example: register CommonOptimizations transformation from transformations library
    passManager.register_pass<ngraph::pass::CommonOptimizations>();
-    // Template plugin handles only FP32 networks
+    // GAPI supports only FP32 networks for pre-processing
+    bool needF16toF32 = false;
+    for (const auto& param : function->get_parameters()) {
+        if (param->get_element_type() == ngraph::element::f16 &&
+            inputInfoMap.at(param->get_friendly_name())->getTensorDesc().getPrecision() != InferenceEngine::Precision::FP16) {
+            needF16toF32 = true;
+            break;
+        }
+    }
+    if (needF16toF32)
        passManager.register_pass<ngraph::pass::ConvertPrecision>(precisions_array {{ngraph::element::f16, ngraph::element::f32}});
    // Example: register plugin specific transformation
    passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
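The hunk above only registers `ConvertPrecision` conditionally; the standard ngraph pass-manager pattern that eventually applies the registered passes looks roughly like this (a sketch under assumed header paths; the surrounding manager setup is outside this hunk):

```cpp
#include <memory>
#include <ngraph/pass/manager.hpp>
#include <transformations/common_optimizations/common_optimizations.hpp>
#include <transformations/convert_precision.hpp>

// `network` is a hypothetical name for the cloned function being transformed.
void runTransformations(const std::shared_ptr<ngraph::Function>& network) {
    ngraph::pass::Manager passManager;
    passManager.register_pass<ngraph::pass::CommonOptimizations>();
    passManager.register_pass<ngraph::pass::ConvertPrecision>(
        precisions_array {{ngraph::element::f16, ngraph::element::f32}});
    passManager.run_passes(network);  // applies all registered passes in order
}
```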
@ -0,0 +1,173 @@
|
|||||||
|
// Copyright (C) 2018-2021 Intel Corporation
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
//
|
||||||
|
#include "base_reference_test.hpp"
|
||||||
|
|
||||||
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
|
#include "transformations/utils/utils.hpp"
|
||||||
|
|
||||||
|
using namespace InferenceEngine;
|
||||||
|
|
||||||
|
CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
|
||||||
|
core = PluginCache::get().ie(targetDevice);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CommonReferenceTest::Exec() {
|
||||||
|
LoadNetwork();
|
||||||
|
FillInputs();
|
||||||
|
Infer();
|
||||||
|
Validate();
|
||||||
|
}
|
||||||
|
|
||||||
|
void CommonReferenceTest::LoadNetwork() {
|
||||||
|
InferenceEngine::CNNNetwork cnnNetwork(function);
|
||||||
|
auto inputInfo = cnnNetwork.getInputsInfo();
|
||||||
|
auto outputInfo = cnnNetwork.getOutputsInfo();
|
||||||
|
for (const auto& param : function->get_parameters()) {
|
||||||
|
inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type()));
|
||||||
|
}
|
||||||
|
for (const auto& result : function->get_results()) {
|
||||||
|
outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
|
||||||
|
InferenceEngine::details::convertPrecision(result->get_element_type()));
|
||||||
|
}
|
||||||
|
executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CommonReferenceTest::FillInputs() {
|
||||||
|
const auto& inputInfo = executableNetwork.GetInputsInfo();
|
||||||
|
const auto& params = function->get_parameters();
|
||||||
|
ASSERT_EQ(params.size(), inputData.size());
|
||||||
|
ASSERT_EQ(inputInfo.size(), inputData.size());
|
||||||
|
|
||||||
|
for (size_t i = 0; i < params.size(); i++) {
|
||||||
|
const auto& param = params[i];
|
||||||
|
const auto infoIt = inputInfo.find(param->get_friendly_name());
|
||||||
|
GTEST_ASSERT_NE(infoIt, inputInfo.cend());
|
||||||
|
|
||||||
|
const auto& info = infoIt->second;
|
||||||
|
auto blob = make_blob_with_precision(info->getTensorDesc());
|
||||||
|
blob->allocate();
|
||||||
|
|
||||||
|
ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize());
|
||||||
|
|
||||||
|
MemoryBlob::Ptr mInputData = as<MemoryBlob>(inputData[i]);
|
||||||
|
ASSERT_NE(mInputData, nullptr);
|
||||||
|
auto minputDataHolder = mInputData->rmap();
|
||||||
|
|
||||||
|
MemoryBlob::Ptr mBlob = as<MemoryBlob>(blob);
|
||||||
|
ASSERT_NE(mBlob, nullptr);
|
||||||
|
auto mBlobHolder = mBlob->wmap();
|
||||||
|
|
||||||
|
std::memcpy(mBlobHolder.as<void*>(), minputDataHolder.as<const void*>(), inputData[i]->byteSize());
|
||||||
|
inputData[i] = blob;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void CommonReferenceTest::Infer() {
|
||||||
|
inferRequest = executableNetwork.CreateInferRequest();
|
||||||
|
|
||||||
|
const auto& inputsInfo = executableNetwork.GetInputsInfo();
|
||||||
|
const auto& functionParams = function->get_parameters();
|
||||||
|
for (size_t i = 0; i < functionParams.size(); ++i) {
|
||||||
|
const auto& param = functionParams[i];
|
||||||
|
const auto infoIt = inputsInfo.find(param->get_friendly_name());
|
||||||
|
GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
|
||||||
|
|
||||||
|
const auto& info = infoIt->second;
|
||||||
|
auto blob = inputData[i];
|
||||||
|
|
||||||
|
inferRequest.SetBlob(info->name(), blob);
|
||||||
|
}
|
||||||
|
inferRequest.Infer();
|
||||||
|
}
|
||||||
|
|
||||||
|
void CommonReferenceTest::Validate() {
|
||||||
|
ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size());
|
||||||
|
std::vector<InferenceEngine::Blob::Ptr> outputs;
|
||||||
|
for (const auto& result : function->get_results()) {
|
||||||
|
auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));
|
||||||
|
outputs.emplace_back(inferRequest.GetBlob(name));
|
||||||
|
}
|
||||||
|
|
||||||
|
ASSERT_EQ(refOutData.size(), outputs.size());
|
||||||
|
for (size_t i = 0; i < refOutData.size(); i++) {
|
||||||
|
ValidateBlobs(refOutData[i], outputs[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) {
    ASSERT_TRUE(refBlob != nullptr);
    ASSERT_TRUE(outBlob != nullptr);
    ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision());
    ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize());

    auto mRef = as<InferenceEngine::MemoryBlob>(refBlob);
    IE_ASSERT(mRef);
    const auto refLockMemory = mRef->rmap();
    const auto refBuffer = refLockMemory.as<const std::uint8_t*>();

    auto mOut = as<InferenceEngine::MemoryBlob>(outBlob);
    IE_ASSERT(mOut);
    const auto outLockMemory = mOut->rmap();
    const auto outBuffer = outLockMemory.as<const std::uint8_t*>();

    const auto& precision = refBlob->getTensorDesc().getPrecision();
    switch (precision) {
    case InferenceEngine::Precision::BF16:
        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::bfloat16, ngraph::bfloat16>(
            reinterpret_cast<const ngraph::bfloat16*>(refBuffer), reinterpret_cast<const ngraph::bfloat16*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::FP16:
        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::float16, ngraph::float16>(
            reinterpret_cast<const ngraph::float16*>(refBuffer), reinterpret_cast<const ngraph::float16*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::FP32:
        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(reinterpret_cast<const float*>(refBuffer), reinterpret_cast<const float*>(outBuffer),
                                                                 refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I8:
        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(reinterpret_cast<const int8_t*>(refBuffer), reinterpret_cast<const int8_t*>(outBuffer),
                                                                   refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I16:
        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(reinterpret_cast<const int16_t*>(refBuffer), reinterpret_cast<const int16_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I32:
        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(reinterpret_cast<const int32_t*>(refBuffer), reinterpret_cast<const int32_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I64:
        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(reinterpret_cast<const int64_t*>(refBuffer), reinterpret_cast<const int64_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::BOOL:
    case InferenceEngine::Precision::U8:
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U16:
        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(reinterpret_cast<const uint16_t*>(refBuffer),
                                                                       reinterpret_cast<const uint16_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U32:
        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(reinterpret_cast<const uint32_t*>(refBuffer),
                                                                       reinterpret_cast<const uint32_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::U64:
        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(reinterpret_cast<const uint64_t*>(refBuffer),
                                                                       reinterpret_cast<const uint64_t*>(outBuffer), refBlob->size(), threshold);
        break;
    case InferenceEngine::Precision::I4:
    case InferenceEngine::Precision::U4:
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size() / 2, threshold);
        break;
    case InferenceEngine::Precision::BIN:
        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
                                                                     refBlob->size() / 8, threshold);
        break;
    default:
        FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}
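Note how the I4/U4 and BIN cases compare raw bytes: `refBlob->size()` is an element count, so it is divided by the packing factor (two 4-bit elements or eight 1-bit elements per byte). A minimal sketch of that byte arithmetic, with a hypothetical helper that is not part of the file:

```cpp
#include <cstddef>

// Hypothetical helper (illustration only): bytes occupied by `element_count`
// values of a packed precision with `bits_per_element` bits (4 for I4/U4, 1 for BIN).
static std::size_t packed_byte_count(std::size_t element_count, std::size_t bits_per_element) {
    return (element_count * bits_per_element + 7) / 8;  // round up to whole bytes
}
// e.g. packed_byte_count(4, 4) == 2, matching the refBlob->size() / 2 above.
```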
docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp (new file, 53 lines)
@@ -0,0 +1,53 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>

class CommonReferenceTest {
public:
    CommonReferenceTest();

    void Exec();

    void LoadNetwork();

    void FillInputs();

    void Infer();

    void Validate();

private:
    void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob);

protected:
    const std::string targetDevice;
    std::shared_ptr<InferenceEngine::Core> core;
    std::shared_ptr<ngraph::Function> function;

    InferenceEngine::ExecutableNetwork executableNetwork;
    InferenceEngine::InferRequest inferRequest;
    std::vector<InferenceEngine::Blob::Ptr> inputData;
    std::vector<InferenceEngine::Blob::Ptr> refOutData;
    float threshold = 1e-2f;
};

template <class T>
InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
    size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
    auto blob = make_blob_with_precision(
        InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(element_type), {real_size}, InferenceEngine::Layout::C));
    blob->allocate();
    InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
    IE_ASSERT(minput);
    auto minputHolder = minput->wmap();

    std::memcpy(minputHolder.as<void*>(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));

    return blob;
}
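`CreateBlob` derives the element count from the value vector unless an explicit `size` is passed, which packed types need because the count cannot be recovered from the byte vector alone. A short usage sketch with illustrative values:

```cpp
// Four fp32 values: real_size is derived from the vector, no explicit size needed.
auto fp32Blob = CreateBlob(ngraph::element::f32, std::vector<float> {1.f, 2.f, 3.f, 4.f});

// Four u4 values packed two per byte: pass the element count (4) explicitly,
// mirroring how the ConvertParams cases below construct their packed blobs.
auto u4Blob = CreateBlob(ngraph::element::u4, std::vector<uint8_t> {0x12, 0x03}, 4);
```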
docs/template_plugin/tests/functional/op_reference/convert.cpp (new file, 441 lines)
@@ -0,0 +1,441 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <tuple>

#include "base_reference_test.hpp"

using namespace ngraph;
using namespace InferenceEngine;

struct ConvertParams {
    template <class IT, class OT>
    ConvertParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const ngraph::element::Type& oType, const std::vector<IT>& iValues,
                  const std::vector<OT>& oValues, size_t iSize = 0, size_t oSize = 0)
        : pshape(shape), inType(iType), outType(oType), inputData(CreateBlob(iType, iValues, iSize)), refData(CreateBlob(oType, oValues, oSize)) {}
    ngraph::PartialShape pshape;
    ngraph::element::Type inType;
    ngraph::element::Type outType;
    InferenceEngine::Blob::Ptr inputData;
    InferenceEngine::Blob::Ptr refData;
};

class ReferenceConvertLayerTest : public testing::TestWithParam<ConvertParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        auto params = GetParam();
        function = CreateFunction(params.pshape, params.inType, params.outType);
        inputData = {params.inputData};
        refOutData = {params.refData};
    }
    static std::string getTestCaseName(const testing::TestParamInfo<ConvertParams>& obj) {
        auto param = obj.param;
        std::ostringstream result;
        result << "shape=" << param.pshape << "_";
        result << "iType=" << param.inType << "_";
        result << "oType=" << param.outType;
        return result.str();
    }

private:
    static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
                                                    const element::Type& expected_output_type) {
        const auto in = std::make_shared<op::Parameter>(input_type, input_shape);
        const auto convert = std::make_shared<op::Convert>(in, expected_output_type);
        return std::make_shared<Function>(NodeVector {convert}, ParameterVector {in});
    }
};

TEST_P(ReferenceConvertLayerTest, CompareWithHardcodedRefs) {
    Exec();
}

INSTANTIATE_TEST_SUITE_P(
    smoke_Convert_With_Hardcoded_Refs, ReferenceConvertLayerTest,
    ::testing::Values(
        // destination boolean
        ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::u8, ngraph::element::boolean,
                      std::vector<uint8_t> {0, 12, 23, 0, std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max()},
                      std::vector<char> {0, 1, 1, 0, 0, 1}),
        ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::i32, ngraph::element::boolean,
                      std::vector<int32_t> {0, -12, 23, 0, std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max()},
                      std::vector<char> {0, 1, 1, 0, 1, 1}),
        ConvertParams(ngraph::PartialShape {3, 3}, ngraph::element::f32, ngraph::element::boolean,
                      std::vector<float> {0.f, 1.5745f, 0.12352f, 0.f, std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max(),
                                          std::numeric_limits<float>::min(), std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity()},
                      std::vector<char> {0, 1, 1, 0, 1, 1, 1, 1, 1}),

        // destination bf16
        ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::bf16,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::bf16,
                      std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
                      std::vector<bfloat16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),

        // destination f16
        ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f16,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::f16, std::vector<uint8_t> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
                      std::vector<float16> {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),

        // destination f32
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u1, ngraph::element::f32, std::vector<uint8_t> {0xA0},
                      std::vector<float> {1.0f, 0.0f, 1.0f, 0.0f}, 4),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u4, ngraph::element::f32, std::vector<uint8_t> {0xFB, 0x0A},
                      std::vector<float> {15.0f, 11.0f, 0.0f, 10.0f}, 4),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u8, ngraph::element::f32, std::vector<uint8_t> {255, 128, 32, 0},
                      std::vector<float> {255.0f, 128.0f, 32.0f, 0.0f}),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u16, ngraph::element::f32, std::vector<uint16_t> {64000, 32000, 128, 0},
                      std::vector<float> {64000.0f, 32000.0f, 128.0f, 0.0f}),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u32, ngraph::element::f32, std::vector<uint32_t> {4000000, 2000000, 128, 0},
                      std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u64, ngraph::element::f32, std::vector<uint64_t> {4000000, 2000000, 128, 0},
                      std::vector<float> {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i4, ngraph::element::f32, std::vector<uint8_t> {0xFE, 0xF2},
                      std::vector<float> {-1.0f, -2.0f, -1.0f, 2.0f}, 4),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i8, ngraph::element::f32, std::vector<int8_t> {-127, -0, 0, 127},
                      std::vector<float> {-127.0f, -0.0f, 0.0f, 127.0f}),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i16, ngraph::element::f32, std::vector<int16_t> {-32000, -0, 0, 32000},
                      std::vector<float> {-32000.0f, -0.0f, 0.0f, 32000.0f}),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i32, ngraph::element::f32, std::vector<int32_t> {-64000, -0, 0, 64000},
                      std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
        ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i64, ngraph::element::f32, std::vector<int64_t> {-64000, -0, 0, 64000},
                      std::vector<float> {-64000.0f, -0.0f, 0.0f, 64000.0f}),
        ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::bf16, ngraph::element::f32,
                      std::vector<bfloat16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f16, ngraph::element::f32,
                      std::vector<float16> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
        ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f32,
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
                      std::vector<float> {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),

        // destination i4
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::i4, std::vector<uint8_t> {0xA0}, std::vector<uint8_t> {0x10, 0x10}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i4, std::vector<uint8_t> {0x12, 0x03}, std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i4, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i4, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i4, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i4, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i4, std::vector<uint8_t> {0xFE, 0x03}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i4, std::vector<int8_t> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i4, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i4, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i4, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i4, std::vector<ngraph::float16> {-1, -2, 0, 3},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i4, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i4, std::vector<float> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23}, 4, 4),

        // destination i8
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i8, std::vector<uint8_t> {0x81},
                      std::vector<int8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43}, std::vector<int8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i8, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i8, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i8, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i8, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i8, std::vector<uint8_t> {0x21, 0x43}, std::vector<int8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i8, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i8, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i8, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i8, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i8, std::vector<ngraph::float16> {-1, -2, 0, 3},
                      std::vector<int8_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i8, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
                      std::vector<int8_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i8, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int8_t> {-1, -2, 2, 3}),

        // destination i16
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i16, std::vector<uint8_t> {0x81},
                      std::vector<int16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43}, std::vector<int16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i16, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i16, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i16, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i16, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i16, std::vector<uint8_t> {0x21, 0x43}, std::vector<int16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i16, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i16, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i16, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i16, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i16, std::vector<ngraph::float16> {-1, -2, 0, 3},
                      std::vector<int16_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i16, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
                      std::vector<int16_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i16, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int16_t> {-1, -2, 2, 3}),

        // destination i32
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i32, std::vector<uint8_t> {0x81},
                      std::vector<int32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43}, std::vector<int32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i32, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i32, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i32, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i32, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i32, std::vector<uint8_t> {0x21, 0x43}, std::vector<int32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i32, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i32, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i32, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i32, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i32, std::vector<ngraph::float16> {-1, -2, 0, 3},
                      std::vector<int32_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i32, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
                      std::vector<int32_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i32, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int32_t> {-1, -2, 2, 3}),

        // destination i64
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i64, std::vector<uint8_t> {0x81},
                      std::vector<int64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43}, std::vector<int64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i64, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i64, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i64, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i64, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<int64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i64, std::vector<uint8_t> {0x21, 0x43}, std::vector<int64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i64, std::vector<int8_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i64, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i64, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i64, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i64, std::vector<ngraph::float16> {-1, -2, 0, 3},
                      std::vector<int64_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i64, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
                      std::vector<int64_t> {-1, -2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i64, std::vector<float> {-1, -2, 2, 3},
                      std::vector<int64_t> {-1, -2, 2, 3}),

        // destination u1
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u1, std::vector<uint8_t> {0xA0}, std::vector<uint8_t> {0xA0}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u4, ngraph::element::u1, std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00},
                      std::vector<uint8_t> {0x90}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u8, ngraph::element::u1, std::vector<uint8_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u16, ngraph::element::u1, std::vector<uint16_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u32, ngraph::element::u1, std::vector<uint32_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u64, ngraph::element::u1, std::vector<uint64_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::i4, ngraph::element::u1, std::vector<uint8_t> {0x10, 0x01, 0x00, 0x00},
                      std::vector<uint8_t> {0x90}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::i8, ngraph::element::u1, std::vector<int8_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::i16, ngraph::element::u1, std::vector<int16_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::i32, ngraph::element::u1, std::vector<int32_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::i64, ngraph::element::u1, std::vector<int64_t> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::f16, ngraph::element::u1, std::vector<ngraph::float16> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::bf16, ngraph::element::u1, std::vector<ngraph::bfloat16> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::f32, ngraph::element::u1, std::vector<float> {1, 0, 1, 0, 0, 0, 0, 1},
                      std::vector<uint8_t> {0xA1}, 8, 8),

        // destination u4
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::u4, std::vector<uint8_t> {0xA0}, std::vector<uint8_t> {0x10, 0x10}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u4, std::vector<uint8_t> {0x12, 0x03}, std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u4, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u4, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u4, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u4, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {0x12, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u4, std::vector<uint8_t> {0xFE, 0x03}, std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u4, std::vector<int8_t> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u4, std::vector<int16_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u4, std::vector<int32_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u4, std::vector<int64_t> {-1, -2, 2, 3},
                      std::vector<uint8_t> {0xFE, 0x23}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u4, std::vector<ngraph::float16> {-1, -2, 0, 3},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u4, std::vector<ngraph::bfloat16> {-1, -2, 0, 3},
                      std::vector<uint8_t> {0xFE, 0x03}, 4, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u4, std::vector<float> {-1, -2, 2, 3}, std::vector<uint8_t> {0xFE, 0x23}, 4, 4),

        // destination u8
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u8, std::vector<uint8_t> {0x81},
                      std::vector<uint8_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43}, std::vector<uint8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u8, std::vector<uint8_t> {1, 2, 0, 3}, std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u8, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u8, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u8, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u8, std::vector<uint8_t> {0x21, 0x43}, std::vector<uint8_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u8, std::vector<int8_t> {1, 2, 2, 3}, std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u8, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u8, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u8, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint8_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u8, std::vector<ngraph::float16> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u8, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
                      std::vector<uint8_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u8, std::vector<float> {1, 2, 2, 3}, std::vector<uint8_t> {1, 2, 2, 3}),

        // destination u16
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u16, std::vector<uint8_t> {0x81},
                      std::vector<uint16_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u16, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u16, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u16, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u16, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u16, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint16_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u16, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u16, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u16, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u16, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u16, std::vector<ngraph::float16> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u16, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
                      std::vector<uint16_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u16, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint16_t> {1, 2, 2, 3}),

        // destination u32
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u32, std::vector<uint8_t> {0x81},
                      std::vector<uint32_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u32, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u32, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u32, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u32, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u32, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint32_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u32, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u32, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u32, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u32, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u32, std::vector<ngraph::float16> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u32, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
                      std::vector<uint32_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u32, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint32_t> {1, 2, 2, 3}),

        // destination u64
        ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::u64, std::vector<uint8_t> {0x81},
                      std::vector<uint64_t> {1, 0, 0, 0, 0, 0, 0, 1}, 8),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::u64, std::vector<uint8_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::u64, std::vector<uint16_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::u64, std::vector<uint32_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::u64, std::vector<uint64_t> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::u64, std::vector<uint8_t> {0x21, 0x43},
                      std::vector<uint64_t> {2, 1, 4, 3}, 4),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::u64, std::vector<int8_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::u64, std::vector<int16_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::u64, std::vector<int32_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::u64, std::vector<int64_t> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::u64, std::vector<ngraph::float16> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::u64, std::vector<ngraph::bfloat16> {1, 2, 0, 3},
                      std::vector<uint64_t> {1, 2, 0, 3}),
        ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::u64, std::vector<float> {1, 2, 2, 3},
                      std::vector<uint64_t> {1, 2, 2, 3})),
    ReferenceConvertLayerTest::getTestCaseName);
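The packed test vectors above imply a big-endian ordering within each byte: for u4, `0x21` encodes the elements `{2, 1}` (high nibble first), and for u1, `0x81` encodes `{1, 0, 0, 0, 0, 0, 0, 1}` (most significant bit first). A sketch of nibble unpacking under that assumption, handy when writing new reference vectors; the helper is illustrative only and is not part of the test suite:

```cpp
#include <cstdint>
#include <vector>

// Illustrative only: unpack `count` u4 elements stored high-nibble-first,
// matching the test vectors above (0x21 -> {2, 1}, 0x43 -> {4, 3}).
static std::vector<uint8_t> unpack_u4(const std::vector<uint8_t>& packed, size_t count) {
    std::vector<uint8_t> out;
    out.reserve(count);
    for (size_t i = 0; i < count; ++i) {
        const uint8_t byte = packed[i / 2];
        out.push_back(i % 2 == 0 ? static_cast<uint8_t>(byte >> 4) : static_cast<uint8_t>(byte & 0x0F));
    }
    return out;
}
```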
@@ -73,7 +73,7 @@ using IEClassSetConfigTestHETERO = IEClassNetworkTest;

 TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
     {
-        Core ie;
+        Core ie = createCoreWithTemplate();
         Parameter p;

         ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)}}, "HETERO"));
@@ -84,7 +84,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
     }

     {
-        Core ie;
+        Core ie = createCoreWithTemplate();
         Parameter p;

         ASSERT_NO_THROW(ie.SetConfig({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(NO)}}, "HETERO"));
@@ -95,7 +95,7 @@ TEST_F(IEClassSetConfigTestHETERO, smoke_SetConfigNoThrow) {
     }

     {
-        Core ie;
+        Core ie = createCoreWithTemplate();
         Parameter p;

         ASSERT_NO_THROW(ie.GetMetric("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
@@ -118,7 +118,7 @@ INSTANTIATE_TEST_SUITE_P(
 using IEClassGetConfigTestTEMPLATE = IEClassNetworkTest;

 TEST_F(IEClassGetConfigTestTEMPLATE, smoke_GetConfigNoThrow) {
-    Core ie;
+    Core ie = createCoreWithTemplate();
     Parameter p;
     std::string deviceName = CommonTestUtils::DEVICE_TEMPLATE;

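The hunks above swap each default-constructed `Core` for a `createCoreWithTemplate()` factory, whose body is not part of this diff. A hedged sketch of what such a helper plausibly does; both the plugin library name and the registration step are assumptions here, not confirmed by the patch:

```cpp
// Sketch only: ensure every Core used by these tests can resolve the TEMPLATE device.
inline InferenceEngine::Core createCoreWithTemplate() {
    InferenceEngine::Core ie;
    // Assumption: the template plugin library is named "templatePlugin" and is
    // registered under the "TEMPLATE" device name before any query runs.
    ie.RegisterPlugin("templatePlugin", "TEMPLATE");
    return ie;
}
```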
@@ -47,33 +47,45 @@ To run the sample, you need specify a model and image:
 >
 > - The sample accepts models in ONNX format (\*.onnx) that do not require preprocessing.

-You can do inference of an image using a trained AlexNet network on a GPU using the following command:
-
-```sh
-./hello_classification_c <path_to_model>/alexnet_fp32.xml <path_to_image>/cat.png GPU
+### Example
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+```
+python <path_to_omz_tools>/downloader.py --name alexnet
+```
+
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+```
+python <path_to_omz_tools>/converter.py --name alexnet
+```
+
+3. Perform inference of `car.bmp` using the `alexnet` model on a `GPU`, for example:
+
+```
+<path_to_sample>/hello_classification_c <path_to_model>/alexnet.xml <path_to_image>/car.bmp GPU
 ```

 ## Sample Output

 The application outputs top-10 inference results.

-```sh
+```
 Top 10 results:

-Image /opt/intel/openvino/deployment_tools/demo/car.png
+Image C:\images\car.bmp

 classid probability
 ------- -----------
-479 0.7562205
-511 0.0760381
-436 0.0724111
-817 0.0462140
-656 0.0301231
-661 0.0056171
-581 0.0031622
-468 0.0029917
-717 0.0023081
-627 0.0016193
+656 0.666479
+654 0.112940
+581 0.068487
+874 0.033385
+436 0.026132
+817 0.016731
+675 0.010980
+511 0.010592
+569 0.008178
+717 0.006336

 This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
 ```
@@ -62,17 +62,29 @@ ffmpeg -i cat.jpg -pix_fmt nv12 cat.yuv
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.

-You can perform inference on an NV12 image using a trained AlexNet network on a CPU with the following command:
-
-```sh
-./hello_nv12_input_classification_c <path_to_model>/alexnet_fp32.xml <path_to_image>/cat.yuv 300x300 CPU
+### Example
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+```
+python <path_to_omz_tools>/downloader.py --name alexnet
+```
+
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+```
+python <path_to_omz_tools>/converter.py --name alexnet
+```
+
+3. Perform inference of an NV12 image using the `alexnet` model on a `CPU`, for example:
+
+```
+<path_to_sample>/hello_nv12_input_classification_c <path_to_model>/alexnet.xml <path_to_image>/cat.yuv 300x300 CPU
 ```

 ## Sample Output

 The application outputs top-10 inference results.

-```sh
+```
 Top 10 results:

 Image ./cat.yuv
@@ -45,10 +45,10 @@ To run the sample, you need specify a model and image:
 - you can use [public](@ref omz_models_public_index) or [Intel's](@ref omz_models_intel_index) pre-trained models from the Open Model Zoo. The models can be downloaded using the [Model Downloader](@ref omz_tools_downloader_README).
 - you can use images from the media files collection available at https://storage.openvinotoolkit.org/data/test_data.

-Running the application with the <code>-h</code> option yields the following usage message:
+Running the application with the `-h` option yields the following usage message:

-```sh
-./object_detection_sample_ssd_c -h
+```
+<path_to_sample>/object_detection_sample_ssd_c -h
 [ INFO ] InferenceEngine:
 <version><number>
 [ INFO ] Parsing input parameters
@@ -76,24 +76,36 @@ Options:
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.

-For example, to perform inference on a CPU with the OpenVINO™ toolkit person detection SSD models, run one of the following commands:
+### Example
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+```
+python <path_to_omz_tools>/downloader.py --name person-detection-retail-0013
+```
+
+2. The `person-detection-retail-0013` model does not need to be converted, because it is already in the necessary format, so you can skip this step. If you want to use another model that is not in the Inference Engine IR or ONNX format, you can convert it using the model converter script:
+
+```
+python <path_to_omz_tools>/converter.py --name <model_name>
+```
+
+3. For example, to perform inference on a CPU with the OpenVINO™ toolkit person detection SSD models, run one of the following commands:

 - with one image and [person-detection-retail-0013](https://docs.openvinotoolkit.org/latest/omz_models_intel_person_detection_retail_0013_description_person_detection_retail_0013.html) model

-```sh
-./object_detection_sample_ssd_c -i <path_to_image>/inputImage.bmp -m <path_to_model>/person-detection-retail-0013.xml -d CPU
+```
+<path_to_sample>/object_detection_sample_ssd_c -i <path_to_image>/inputImage.bmp -m <path_to_model>/person-detection-retail-0013.xml -d CPU
 ```

 - with some images and [person-detection-retail-0013](https://docs.openvinotoolkit.org/latest/omz_models_intel_person_detection_retail_0013_description_person_detection_retail_0013.html) model

-```sh
-./object_detection_sample_ssd_c -i <path_to_image>/inputImage1.bmp <path_to_image>/inputImage2.bmp ... -m <path_to_model>/person-detection-retail-0013.xml -d CPU
+```
+<path_to_sample>/object_detection_sample_ssd_c -i <path_to_image>/inputImage1.bmp <path_to_image>/inputImage2.bmp ... -m <path_to_model>/person-detection-retail-0013.xml -d CPU
 ```

 - with [person-detection-retail-0002](https://docs.openvinotoolkit.org/latest/omz_models_intel_person_detection_retail_0002_description_person_detection_retail_0002.html) model

-```sh
-./object_detection_sample_ssd_c -i <path_to_folder_with_images> -m <path_to_model>/person-detection-retail-0002.xml -d CPU
+```
+<path_to_sample>/object_detection_sample_ssd_c -i <path_to_folder_with_images> -m <path_to_model>/person-detection-retail-0002.xml -d CPU
 ```

 ## Sample Output
@@ -101,8 +113,8 @@ For example, to perform inference on a CPU with the OpenVINO™ toolkit pers
 The application outputs several images (`out_0.bmp`, `out_1.bmp`, ... ) with detected objects enclosed in rectangles. It outputs the list of
 classes of the detected objects along with the respective confidence values and the coordinates of the rectangles to the standard output stream.

-```sh
-object_detection_sample_ssd_c -m person-detection-retail-0013.xml -i image_1.png image_2.jpg
+```
+<path_to_sample>/object_detection_sample_ssd_c -m person-detection-retail-0013.xml -i image_1.png image_2.jpg

 [ INFO ] InferenceEngine:
 <version><number>
@@ -28,15 +28,15 @@ each sample step at [Integration Steps](../../../../../docs/IE_DG/Integrate_with

 ## Running

-Run the application with the <code>-h</code> option to see the usage message:
+Run the application with the `-h` option to see the usage message:

-```sh
-python classification_sample_async.py -h
+```
+python <path_to_sample>/classification_sample_async.py -h
 ```

 Usage message:

-```sh
+```
 usage: classification_sample_async.py [-h] -m MODEL -i INPUT [INPUT ...]
                                       [-l EXTENSION] [-c CONFIG] [-d DEVICE]
                                       [--labels LABELS] [-nt NUMBER_TOP]
@ -79,55 +79,67 @@ To run the sample, you need specify a model and image:
|
|||||||
>
|
>
|
||||||
> - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
|
> - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
|
||||||
|
|
||||||
You can do inference of an image using a pre-trained model on a GPU using the following command:
|
### Example
|
||||||
|
1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
|
+   ```
+   python <path_to_omz_tools>/downloader.py --name alexnet
+   ```
+
-```sh
-python classification_sample_async.py -m <path_to_model>/alexnet.xml -i <path_to_image>/cat.bmp <path_to_image>/car.bmp -d GPU
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name alexnet
+   ```
+
+3. Perform inference of `car.bmp` and `cat.jpg` using `alexnet` model on a `GPU`, for example:
+
+   ```
+   python <path_to_sample>/classification_sample_async.py -m <path_to_model>/alexnet.xml -i <path_to_image>/car.bmp <path_to_image>/cat.jpg -d GPU
 ```
 
 ## Sample Output
 
 The sample application logs each step in a standard output stream and outputs top-10 inference results.
 
-```sh
+```
 [ INFO ] Creating Inference Engine
-[ INFO ] Reading the network: models\alexnet.xml
+[ INFO ] Reading the network: c:\openvino\deployment_tools\open_model_zoo\tools\downloader\public\alexnet\FP32\alexnet.xml
 [ INFO ] Configuring input and output blobs
 [ INFO ] Loading the model to the plugin
-[ WARNING ] Image images\cat.bmp is resized from (300, 300) to (227, 227)
-[ WARNING ] Image images\car.bmp is resized from (259, 787) to (227, 227)
+[ WARNING ] Image c:\images\car.bmp is resized from (637, 749) to (227, 227)
+[ WARNING ] Image c:\images\cat.jpg is resized from (300, 300) to (227, 227)
 [ INFO ] Starting inference in asynchronous mode
 [ INFO ] Infer request 0 returned 0
-[ INFO ] Image path: images\cat.bmp
+[ INFO ] Image path: c:\images\car.bmp
 [ INFO ] Top 10 results:
 [ INFO ] classid probability
 [ INFO ] -------------------
-[ INFO ] 435 0.0996898
-[ INFO ] 876 0.0900239
-[ INFO ] 999 0.0691452
-[ INFO ] 587 0.0390186
-[ INFO ] 666 0.0360390
-[ INFO ] 419 0.0308306
-[ INFO ] 285 0.0306287
-[ INFO ] 700 0.0293007
-[ INFO ] 696 0.0202707
-[ INFO ] 631 0.0199126
+[ INFO ] 656 0.6645315
+[ INFO ] 654 0.1121185
+[ INFO ] 581 0.0698451
+[ INFO ] 874 0.0334973
+[ INFO ] 436 0.0259718
+[ INFO ] 817 0.0173190
+[ INFO ] 675 0.0109321
+[ INFO ] 511 0.0109075
+[ INFO ] 569 0.0083093
+[ INFO ] 717 0.0063173
 [ INFO ]
 [ INFO ] Infer request 1 returned 0
-[ INFO ] Image path: images\car.bmp
+[ INFO ] Image path: c:\images\cat.jpg
 [ INFO ] Top 10 results:
 [ INFO ] classid probability
 [ INFO ] -------------------
-[ INFO ] 479 0.7561803
-[ INFO ] 511 0.0755696
-[ INFO ] 436 0.0730265
-[ INFO ] 817 0.0460268
-[ INFO ] 656 0.0303792
-[ INFO ] 661 0.0055282
-[ INFO ] 581 0.0031296
-[ INFO ] 468 0.0029875
-[ INFO ] 717 0.0022792
-[ INFO ] 627 0.0016297
+[ INFO ] 876 0.1320105
+[ INFO ] 435 0.1210389
+[ INFO ] 285 0.0712640
+[ INFO ] 282 0.0570528
+[ INFO ] 281 0.0319335
+[ INFO ] 999 0.0285931
+[ INFO ] 94 0.0270323
+[ INFO ] 36 0.0240510
+[ INFO ] 335 0.0198461
+[ INFO ] 186 0.0183939
 [ INFO ]
 [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
 ```
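The asynchronous behavior in the log above (`Infer request 0 returned 0`, then request 1) can be reproduced with a few Inference Engine Python calls. The sketch below is illustrative, not the sample's exact code; it assumes the 2021-era `openvino.inference_engine` API and placeholder model/image paths:

```python
import cv2
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model='alexnet.xml')  # alexnet.bin is picked up next to the .xml
input_blob = next(iter(net.input_info))
out_blob = next(iter(net.outputs))
_, _, h, w = net.input_info[input_blob].input_data.shape

# One infer request per image lets both run in flight at the same time
exec_net = ie.load_network(network=net, device_name='GPU', num_requests=2)

images = ['car.bmp', 'cat.jpg']  # placeholder paths
for i, path in enumerate(images):
    data = cv2.resize(cv2.imread(path), (w, h)).transpose((2, 0, 1))  # HWC -> CHW
    exec_net.requests[i].async_infer({input_blob: np.expand_dims(data, 0)})

for i in range(len(images)):
    status = exec_net.requests[i].wait()  # blocks; 0 (OK) is the "returned 0" in the log
    probs = exec_net.requests[i].output_blobs[out_blob].buffer.flatten()
    print(f'Infer request {i} returned {status}')
    print(np.argsort(probs)[::-1][:10])  # top-10 class ids
```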
@@ -13,7 +13,7 @@ The following Inference Engine Python API is used in the application:
 
 | Options                    | Values                                                                                                      |
 | :------------------------- | :---------------------------------------------------------------------------------------------------------- |
-| Validated Models           | [alexnet](@ref omz_models_model_alexnet)                                                                    |
+| Validated Models           | [alexnet](@ref omz_models_model_alexnet), [googlenet-v1](@ref omz_models_model_googlenet_v1)                |
 | Model Format               | Inference Engine Intermediate Representation (.xml + .bin), ONNX (.onnx)                                    |
 | Supported devices          | [All](../../../../../docs/IE_DG/supported_plugins/Supported_Devices.md)                                     |
 | Other language realization | [C++](../../../../samples/hello_classification/README.md), [C](../../../c/samples/hello_classification/README.md) |
@@ -29,13 +29,13 @@ each sample step at [Integration Steps](../../../../../docs/IE_DG/Integrate_with
 
 Run the application with the `-h` option to see the usage message:
 
-```sh
-python hello_classification.py -h
+```
+python <path_to_sample>/hello_classification.py -h
 ```
 
 Usage message:
 
-```sh
+```
 usage: hello_classification.py [-h] -m MODEL -i INPUT [-d DEVICE]
                                [--labels LABELS] [-nt NUMBER_TOP]
@@ -68,37 +68,49 @@ To run the sample, you need specify a model and image:
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
-For example, to perform inference of an image using a pre-trained model on a GPU, run the following command:
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name alexnet
+   ```
+
-```sh
-python hello_classification.py -m <path_to_model>/alexnet.xml -i <path_to_image>/cat.bmp -d GPU
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name alexnet
+   ```
+
+3. Perform inference of `car.bmp` using `alexnet` model on a `GPU`, for example:
+
+   ```
+   python <path_to_sample>/hello_classification.py -m <path_to_model>/alexnet.xml -i <path_to_image>/car.bmp -d GPU
 ```
 
 ## Sample Output
 
 The sample application logs each step in a standard output stream and outputs top-10 inference results.
 
-```sh
+```
 [ INFO ] Creating Inference Engine
-[ INFO ] Reading the network: models\alexnet.xml
+[ INFO ] Reading the network: c:\openvino\deployment_tools\open_model_zoo\tools\downloader\public\alexnet\FP32\alexnet.xml
 [ INFO ] Configuring input and output blobs
 [ INFO ] Loading the model to the plugin
-[ WARNING ] Image images\cat.bmp is resized from (300, 300) to (227, 227)
+[ WARNING ] Image c:\images\car.bmp is resized from (637, 749) to (227, 227)
 [ INFO ] Starting inference in synchronous mode
-[ INFO ] Image path: images\cat.bmp
+[ INFO ] Image path: c:\images\car.bmp
 [ INFO ] Top 10 results:
 [ INFO ] classid probability
 [ INFO ] -------------------
-[ INFO ] 435 0.0996890
-[ INFO ] 876 0.0900242
-[ INFO ] 999 0.0691449
-[ INFO ] 587 0.0390189
-[ INFO ] 666 0.0360393
-[ INFO ] 419 0.0308307
-[ INFO ] 285 0.0306287
-[ INFO ] 700 0.0293009
-[ INFO ] 696 0.0202707
-[ INFO ] 631 0.0199126
+[ INFO ] 656 0.6645315
+[ INFO ] 654 0.1121185
+[ INFO ] 581 0.0698451
+[ INFO ] 874 0.0334973
+[ INFO ] 436 0.0259718
+[ INFO ] 817 0.0173190
+[ INFO ] 675 0.0109321
+[ INFO ] 511 0.0109075
+[ INFO ] 569 0.0083093
+[ INFO ] 717 0.0063173
 [ INFO ]
 [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
 ```
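For the synchronous variant above, the same flow collapses to a single blocking `infer()` call. A minimal sketch under the same placeholder-path assumptions:

```python
import cv2
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model='alexnet.xml')
input_blob = next(iter(net.input_info))
out_blob = next(iter(net.outputs))
exec_net = ie.load_network(network=net, device_name='GPU')

data = cv2.resize(cv2.imread('car.bmp'), (227, 227)).transpose((2, 0, 1))
result = exec_net.infer({input_blob: np.expand_dims(data, 0)})  # blocks until done
probs = result[out_blob].flatten()
for class_id in np.argsort(probs)[::-1][:10]:  # top-10 results, highest first
    print(f'{class_id}\t{probs[class_id]:.7f}')
```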
@@ -22,15 +22,15 @@ The sample queries all available Inference Engine devices and prints their suppo
 
 The sample has no command-line parameters. To see the report, run the following command:
 
-```sh
-python hello_query_device.py
+```
+python <path_to_sample>/hello_query_device.py
 ```
 
 ## Sample Output
 
 The application prints all available devices with their supported metrics and default values for configuration parameters. (Some lines are not shown due to length.) For example:
 
-```sh
+```
 [ INFO ] Creating Inference Engine
 [ INFO ] Available devices:
 [ INFO ]         CPU :
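The query logic behind this report is essentially a loop over `available_devices`. A rough sketch with the `openvino.inference_engine` API (the exact formatting and error handling of the real sample differ):

```python
from openvino.inference_engine import IECore

ie = IECore()
for device in ie.available_devices:
    print(f'[ INFO ] {device} :')
    # Each device advertises which metrics it supports
    for metric in ie.get_metric(device, 'SUPPORTED_METRICS'):
        try:
            print(f'         {metric}: {ie.get_metric(device, metric)}')
        except (TypeError, ValueError):
            pass  # some metrics cannot be retrieved or printed directly
```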
@@ -29,15 +29,15 @@ each sample step at [Integration Steps](../../../../../docs/IE_DG/Integrate_with
 
 ## Running
 
-Run the application with the <code>-h</code> option to see the usage message:
+Run the application with the `-h` option to see the usage message:
 
-```sh
-python hello_reshape_ssd.py -h
+```
+python <path_to_sample>/hello_reshape_ssd.py -h
 ```
 
 Usage message:
 
-```sh
+```
 usage: hello_reshape_ssd.py [-h] -m MODEL -i INPUT [-l EXTENSION] [-c CONFIG]
                             [-d DEVICE] [--labels LABELS]
@@ -76,26 +76,38 @@ To run the sample, you need specify a model and image:
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
-You can do inference of an image using a pre-trained model on a GPU using the following command:
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name mobilenet-ssd
+   ```
+
-```sh
-python hello_reshape_ssd.py -m <path_to_model>/mobilenet-ssd.xml -i <path_to_image>/cat.bmp -d GPU
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name mobilenet-ssd
+   ```
+
+3. Perform inference of `car.bmp` using `mobilenet-ssd` model on a `GPU`, for example:
+
+   ```
+   python <path_to_sample>/hello_reshape_ssd.py -m <path_to_model>/mobilenet-ssd.xml -i <path_to_image>/car.bmp -d GPU
 ```
 
 ## Sample Output
 
 The sample application logs each step in a standard output stream and creates an output image, drawing bounding boxes for inference results with an over 50% confidence.
 
-```sh
+```
 [ INFO ] Creating Inference Engine
-[ INFO ] Reading the network: models\mobilenet-ssd.xml
+[ INFO ] Reading the network: c:\openvino\deployment_tools\open_model_zoo\tools\downloader\public\mobilenet-ssd\FP32\mobilenet-ssd.xml
 [ INFO ] Configuring input and output blobs
 [ INFO ] Reshaping the network to the height and width of the input image
 [ INFO ] Input shape before reshape: [1, 3, 300, 300]
-[ INFO ] Input shape after reshape: [1, 3, 300, 300]
+[ INFO ] Input shape after reshape: [1, 3, 637, 749]
 [ INFO ] Loading the model to the plugin
 [ INFO ] Starting inference in synchronous mode
-[ INFO ] Found: label = 8, confidence = 1.00, coords = (115, 64), (189, 182)
+[ INFO ] Found: label = 7, confidence = 0.99, coords = (283, 166), (541, 472)
 [ INFO ] Image out.bmp was created!
 [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
 ```
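The `Input shape after reshape: [1, 3, 637, 749]` lines come from reshaping the network to the input image before compiling it for the device. A sketch of that step, assuming an NCHW input layout and placeholder paths:

```python
import cv2
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model='mobilenet-ssd.xml')
input_blob = next(iter(net.input_info))

image = cv2.imread('car.bmp')
h, w = image.shape[:2]
print('before:', net.input_info[input_blob].input_data.shape)  # e.g. [1, 3, 300, 300]
net.reshape({input_blob: (1, 3, h, w)})  # NCHW: batch, channels, height, width
print('after: ', net.input_info[input_blob].input_data.shape)  # [1, 3, 637, 749]

# Only now is the reshaped network compiled for the target device
exec_net = ie.load_network(network=net, device_name='GPU')
```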
@@ -30,15 +30,15 @@ each sample step at [Integration Steps](../../../../../docs/IE_DG/Integrate_with
 
 ## Running
 
-Run the application with the <code>-h</code> option to see the usage message:
+Run the application with the `-h` option to see the usage message:
 
-```sh
-python ngraph_function_creation_sample.py -h
+```
+python <path_to_sample>/ngraph_function_creation_sample.py -h
 ```
 
 Usage message:
 
-```sh
+```
 usage: ngraph_function_creation_sample.py [-h] -m MODEL -i INPUT [INPUT ...]
                                           [-d DEVICE] [--labels LABELS]
                                           [-nt NUMBER_TOP]
@@ -73,25 +73,25 @@ To run the sample, you need specify a model weights and image:
 >
 > - The white over black images will be automatically inverted in color for a better predictions.
 
-You can do inference of an image using a pre-trained model on a GPU using the following command:
+For example, you can do inference of `3.png` using the pre-trained model on a `GPU`:
 
-```sh
-python ngraph_function_creation_sample.py -m <path_to_model>/lenet.bin -i <path_to_image>/3.png -d GPU
+```
+python <path_to_sample>/ngraph_function_creation_sample.py -m <path_to_sample>/lenet.bin -i <path_to_image>/3.png -d GPU
 ```
 
 ## Sample Output
 
 The sample application logs each step in a standard output stream and outputs top-10 inference results.
 
-```sh
+```
 [ INFO ] Creating Inference Engine
-[ INFO ] Loading the network using ngraph function with weights from <path_to_model>/lenet.bin
+[ INFO ] Loading the network using ngraph function with weights from c:\openvino\deployment_tools\inference_engine\samples\python\ngraph_function_creation_sample\lenet.bin
 [ INFO ] Configuring input and output blobs
 [ INFO ] Loading the model to the plugin
-[ WARNING ] <path_to_image>/3.png is inverted to white over black
-[ WARNING ] <path_to_image>/3.png is is resized from (351, 353) to (28, 28)
+[ WARNING ] Image c:\images\3.png is inverted to white over black
+[ WARNING ] Image c:\images\3.png is resized from (351, 353) to (28, 28)
 [ INFO ] Starting inference in synchronous mode
-[ INFO ] Image path: <path_to_image>/3.png
+[ INFO ] Image path: c:\images\3.png
 [ INFO ] Top 10 results:
 [ INFO ] classid probability
 [ INFO ] -------------------
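The two warnings above correspond to the sample's input preprocessing: a color inversion and a resize to the 28x28 LeNet input. A sketch of equivalent preprocessing; the mean-intensity threshold used to detect polarity here is an assumption, not the sample's exact rule:

```python
import cv2
import numpy as np

image = cv2.imread('3.png', cv2.IMREAD_GRAYSCALE)  # placeholder path
if np.mean(image) > 127:     # assumed heuristic for detecting image polarity
    image = 255 - image      # invert colors, as the warning reports
image = cv2.resize(image, (28, 28))  # (351, 353) -> (28, 28)
blob = image.reshape(1, 1, 28, 28).astype(np.float32)  # NCHW input tensor
```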
@@ -29,15 +29,15 @@ each sample step at [Integration Steps](../../../../../docs/IE_DG/Integrate_with
 
 ## Running
 
-Run the application with the <code>-h</code> option to see the usage message:
+Run the application with the `-h` option to see the usage message:
 
-```sh
-python object_detection_sample_ssd.py -h
+```
+python <path_to_sample>/object_detection_sample_ssd.py -h
 ```
 
 Usage message:
 
-```sh
+```
 usage: object_detection_sample_ssd.py [-h] -m MODEL -i INPUT [-l EXTENSION]
                                       [-c CONFIG] [-d DEVICE]
                                       [--labels LABELS]
@@ -78,23 +78,37 @@ To run the sample, you need specify a model and image:
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
-You can do inference of an image using a pre-trained model on a GPU using the following command:
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name mobilenet-ssd
+   ```
+
-```sh
-python object_detection_sample_ssd.py -m <path_to_model>/mobilenet-ssd.xml -i <path_to_image>/cat.bmp -d GPU
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name mobilenet-ssd
+   ```
+
+3. Perform inference of `car.bmp` using `mobilenet-ssd` model on a `GPU`, for example:
+
+   ```
+   python <path_to_sample>/object_detection_sample_ssd.py -m <path_to_model>/mobilenet-ssd.xml -i <path_to_image>/car.bmp -d GPU
 ```
 
 ## Sample Output
 
 The sample application logs each step in a standard output stream and creates an output image, drawing bounding boxes for inference results with an over 50% confidence.
 
-```sh
+```
 [ INFO ] Creating Inference Engine
-[ INFO ] Reading the network: models\mobilenet-ssd.xml
+[ INFO ] Reading the network: c:\openvino\deployment_tools\open_model_zoo\tools\downloader\public\mobilenet-ssd\FP32\mobilenet-ssd.xml
 [ INFO ] Configuring input and output blobs
 [ INFO ] Loading the model to the plugin
+[ WARNING ] Image c:\images\car.bmp is resized from (637, 749) to (300, 300)
 [ INFO ] Starting inference in synchronous mode
-[ INFO ] Found: label = 8, confidence = 1.00, coords = (115, 64), (189, 182)
+[ INFO ] Found: label = 7, confidence = 1.00, coords = (228, 120), (502, 460)
+[ INFO ] Found: label = 7, confidence = 0.95, coords = (637, 233), (743, 608)
 [ INFO ] Image out.bmp created!
 [ INFO ] This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
 ```
@@ -68,15 +68,15 @@ In addition to performing inference directly from a GNA model file, this option
 
 ## Running
 
-Run the application with the <code>-h</code> option to see the usage message:
+Run the application with the `-h` option to see the usage message:
 
-```sh
-python speech_sample.py -h
+```
+python <path_to_sample>/speech_sample.py -h
 ```
 
 Usage message:
 
-```sh
+```
 usage: speech_sample.py [-h] (-m MODEL | -rg IMPORT_GNA_MODEL) -i INPUT
                         [-o OUTPUT] [-r REFERENCE] [-d DEVICE]
                         [-bs BATCH_SIZE] [-qb QUANTIZATION_BITS]
@@ -131,8 +131,8 @@ Options:
 
 You can use the following model optimizer command to convert a Kaldi nnet1 or nnet2 neural network to Inference Engine Intermediate Representation format:
 
-```sh
-python mo.py --framework kaldi --input_model wsj_dnn5b.nnet --counts wsj_dnn5b.counts --remove_output_softmax --output_dir <OUTPUT_MODEL_DIR>
+```
+python <path_to_mo>/mo.py --framework kaldi --input_model wsj_dnn5b.nnet --counts wsj_dnn5b.counts --remove_output_softmax --output_dir <path_to_dir>
 ```
 
 The following pre-trained models are available:
@@ -147,8 +147,8 @@ All of them can be downloaded from [https://storage.openvinotoolkit.org/models_c
 
 You can do inference on Intel® Processors with the GNA co-processor (or emulation library):
 
-```sh
-python speech_sample.py -d GNA_AUTO -m wsj_dnn5b.xml -i dev93_10.ark -r dev93_scores_10.ark -o result.npz
+```
+python <path_to_sample>/speech_sample.py -m <path_to_model>/wsj_dnn5b.xml -i <path_to_ark>/dev93_10.ark -r <path_to_ark>/dev93_scores_10.ark -d GNA_AUTO -o result.npz
 ```
 
 > **NOTES**:
@@ -161,7 +161,7 @@ python speech_sample.py -d GNA_AUTO -m wsj_dnn5b.xml -i dev93_10.ark -r dev93_sc
 
 The sample application logs each step in a standard output stream.
 
-```sh
+```
 [ INFO ] Creating Inference Engine
 [ INFO ] Reading the network: wsj_dnn5b.xml
 [ INFO ] Configuring input and output blobs
@@ -30,15 +30,15 @@ each sample step at [Integration Steps](../../../../../docs/IE_DG/Integrate_with
 
 ## Running
 
-Run the application with the <code>-h</code> option to see the usage message:
+Run the application with the `-h` option to see the usage message:
 
-```sh
-python style_transfer_sample.py -h
+```
+python <path_to_sample>/style_transfer_sample.py -h
 ```
 
 Usage message:
 
-```sh
+```
 usage: style_transfer_sample.py [-h] -m MODEL -i INPUT [INPUT ...]
                                 [-l EXTENSION] [-c CONFIG] [-d DEVICE]
                                 [--original_size] [--mean_val_r MEAN_VAL_R]
@@ -90,23 +90,35 @@ To run the sample, you need specify a model and image:
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
-You can do inference of an image using a pre-trained model on a GPU using the following command:
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name fast-neural-style-mosaic-onnx
+   ```
+
-```sh
-python style_transfer_sample.py -m <path_to_model>/fast-neural-style-mosaic-onnx.onnx -i <path_to_image>/car.png <path_to_image>/cat.jpg -d GPU
+2. `fast-neural-style-mosaic-onnx` model does not need to be converted, because it is already in necessary format, so you can skip this step. If you want to use a other model that is not in the Inference Engine IR or ONNX format, you can convert it using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name <model_name>
+   ```
+
+3. Perform inference of `car.bmp` and `cat.jpg` using `fast-neural-style-mosaic-onnx` model on a `GPU`, for example:
+
+   ```
+   python <path_to_sample>/style_transfer_sample.py -m <path_to_model>/fast-neural-style-mosaic-onnx.onnx -i <path_to_image>/car.bmp <path_to_image>/cat.jpg -d GPU
 ```
 
 ## Sample Output
 
 The sample application logs each step in a standard output stream and creates an output image (`out_0.bmp`) or a sequence of images (`out_0.bmp`, .., `out_<n>.bmp`) that are redrawn in the style of the style transfer model used.
 
-```sh
+```
 [ INFO ] Creating Inference Engine
-[ INFO ] Reading the network: models\fast-neural-style-mosaic-onnx.onnx
+[ INFO ] Reading the network: c:\openvino\deployment_tools\open_model_zoo\tools\downloader\public\fast-neural-style-mosaic-onnx\fast-neural-style-mosaic-onnx.onnx
 [ INFO ] Configuring input and output blobs
 [ INFO ] Loading the model to the plugin
-[ WARNING ] Image images\car.bmp is resized from (259, 787) to (224, 224)
-[ WARNING ] Image images\cat.bmp is resized from (300, 300) to (224, 224)
+[ WARNING ] Image c:\images\car.bmp is resized from (637, 749) to (224, 224)
+[ WARNING ] Image c:\images\cat.jpg is resized from (300, 300) to (224, 224)
 [ INFO ] Starting inference in synchronous mode
 [ INFO ] Image out_0.bmp created!
 [ INFO ] Image out_1.bmp created!
@@ -7,6 +7,7 @@ import sys
 import errno
 import subprocess  # nosec
 import typing
+from fnmatch import fnmatchcase
 from pathlib import Path
 from shutil import copyfile, rmtree
 from distutils.command.install import install
@@ -120,6 +121,16 @@ class CustomBuild(build):
     def run(self):
         self.run_command('build_clib')
         build.run(self)
+        # Copy extra package_data content filtered by find_packages
+        dst = Path(self.build_lib)
+        src = Path(get_package_dir(PY_INSTALL_CFG))
+        exclude = ignore_patterns('*ez_setup*', '*__pycache__*', '*.egg-info*')
+        for path in src.glob('**/*'):
+            if path.is_dir() or exclude(str(path)):
+                continue
+            path_rel = path.relative_to(src)
+            (dst / path_rel.parent).mkdir(exist_ok=True, parents=True)
+            copyfile(path, dst / path_rel)
 
 
 class CustomInstall(install):
@@ -215,6 +226,13 @@ class CustomClean(clean):
         clean.run(self)
 
 
+def ignore_patterns(*patterns):
+    """
+    Filter names by given patterns
+    """
+    return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
+
+
 def is_tool(name):
     """Check if the command-line tool is available"""
     try:
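The `ignore_patterns` helper added above returns a predicate over names, which the `CustomBuild.run` copy loop uses to skip files. A quick usage sketch with hypothetical file names:

```python
from fnmatch import fnmatchcase

def ignore_patterns(*patterns):
    """Filter names by given patterns"""
    return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)

exclude = ignore_patterns('*ez_setup*', '*__pycache__*', '*.egg-info*')
print(exclude('openvino/__pycache__/ie_api.cpython-36.pyc'))  # True  -> skipped
print(exclude('openvino/inference_engine/ie_api.py'))         # False -> copied
```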
@@ -348,8 +366,7 @@ package_license = config('WHEEL_LICENSE', '')
 if os.path.exists(package_license):
     copyfile(package_license, 'LICENSE')
 
-packages = find_namespace_packages(','.join(get_dir_list(PY_INSTALL_CFG)))
+packages = find_namespace_packages(get_package_dir(PY_INSTALL_CFG))
 package_data: typing.Dict[str, list] = {}
 
 setup(
@@ -199,6 +199,22 @@ public:
      */
     void serialize(const std::string& xmlPath, const std::string& binPath = {}) const;
 
+    /**
+     * @brief Serialize network to IR and weights streams.
+     *
+     * @param xmlBuf output IR stream.
+     * @param binBuf output weights stream.
+     */
+    void serialize(std::ostream& xmlBuf, std::ostream& binBuf) const;
+
+    /**
+     * @brief Serialize network to IR stream and weights Blob::Ptr.
+     *
+     * @param xmlBuf output IR stream.
+     * @param binBlob output weights Blob::Ptr.
+     */
+    void serialize(std::ostream& xmlBuf, Blob::Ptr& binBlob) const;
+
     /**
      * @brief Method maps framework tensor name to OpenVINO name
      * @param orig_name Framework tensor name
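The new stream overloads are C++-only; in this release the Python wrapper exposes serialization through file paths. A hedged sketch of the Python-side equivalent, with a placeholder model path:

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model='model.onnx')  # placeholder model path
# Writes the IR .xml and the weights .bin to the given locations
net.serialize('model.xml', 'model.bin')
```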
@@ -22,12 +22,12 @@ namespace InferenceEngine {
 
 namespace gpu {
 /**
  * @brief This class represents an abstraction for GPU plugin remote context
  * which is shared with VA display object.
  * The plugin object derived from this class can be obtained either with
  * GetContext() method of Executable network or using CreateContext() Core call.
  * @note User can also obtain OpenCL context handle from this class.
  */
 class VAContext : public ClContext {
 public:
     /**
@@ -47,11 +47,11 @@ public:
 };
 
 /**
  * @brief This class represents an abstraction for GPU plugin remote blob
  * which is shared with VA output surface.
  * The plugin object derived from this class can be obtained with CreateBlob() call.
  * @note User can also obtain OpenCL 2D image handle from this class.
  */
 class VASurfaceBlob : public ClImage2DBlob {
 public:
     /**
@@ -11,8 +11,8 @@
 #pragma once
 
 /**
  * @brief Definitions required by Khronos headers
  */
 
 #ifndef CL_HPP_ENABLE_EXCEPTIONS
 # define CL_HPP_ENABLE_EXCEPTIONS
@@ -44,63 +44,65 @@ namespace GPUContextParams {
 #define DECLARE_GPU_PARAM_KEY(name, ...) \
     static constexpr auto PARAM_##name = #name
 /**
  * @brief Shared device context type: can be either pure OpenCL (OCL)
  * or shared video decoder (VA_SHARED) context
  */
 DECLARE_GPU_PARAM_KEY(CONTEXT_TYPE, std::string);
 /**
  * @brief Pure OpenCL device context
  */
 DECLARE_GPU_PARAM_VALUE(OCL);
 /**
  * @brief Shared context (video decoder or D3D)
  */
 DECLARE_GPU_PARAM_VALUE(VA_SHARED);
 
 /**
  * @brief This key identifies OpenCL context handle
  * in a shared context or shared memory blob parameter map
  */
 DECLARE_GPU_PARAM_KEY(OCL_CONTEXT, gpu_handle_param);
 
 /**
  * @brief This key identifies video acceleration device/display handle
  * in a shared context or shared memory blob parameter map
  */
 DECLARE_GPU_PARAM_KEY(VA_DEVICE, gpu_handle_param);
 
 /**
  * @brief This key identifies type of internal shared memory
  * in a shared memory blob parameter map.
  */
 DECLARE_GPU_PARAM_KEY(SHARED_MEM_TYPE, std::string);
 /**
  * @brief Shared OpenCL buffer blob
  */
 DECLARE_GPU_PARAM_VALUE(OCL_BUFFER);
 /**
  * @brief Shared OpenCL 2D image blob
  */
 DECLARE_GPU_PARAM_VALUE(OCL_IMAGE2D);
 
 /**
  * @brief Shared video decoder surface or D3D 2D texture blob
  */
 DECLARE_GPU_PARAM_VALUE(VA_SURFACE);
 
 /**
  * @brief Shared D3D buffer blob
  */
 DECLARE_GPU_PARAM_VALUE(DX_BUFFER);
 
 /**
  * @brief This key identifies OpenCL memory handle
  * in a shared memory blob parameter map
  */
 DECLARE_GPU_PARAM_KEY(MEM_HANDLE, gpu_handle_param);
 
 /**
  * @brief This key identifies video decoder surface handle
  * in a shared memory blob parameter map
  */
 #ifdef _WIN32
 DECLARE_GPU_PARAM_KEY(DEV_OBJECT_HANDLE, gpu_handle_param);
 #else
@@ -108,9 +110,9 @@ DECLARE_GPU_PARAM_KEY(DEV_OBJECT_HANDLE, uint32_t);
 #endif
 
 /**
  * @brief This key identifies video decoder surface plane
  * in a shared memory blob parameter map
  */
 DECLARE_GPU_PARAM_KEY(VA_PLANE, uint32_t);
 
 } // namespace GPUContextParams
@@ -335,12 +335,9 @@ public:
         return size() * element_size();
     }
 
-    /**
-     * @brief Provides the number of bytes per element.
-     * Abstract method.
-     * @return The number of bytes per element.
-     */
-    size_t element_size() const noexcept override = 0;
+    size_t element_size() const noexcept override {
+        return tensorDesc.getPrecision().size();
+    }
 
     /**
      * @brief Allocates memory to store the data.
@@ -569,10 +566,6 @@ public:
      */
     virtual ~TBlob();
 
-    size_t element_size() const noexcept override {
-        return sizeof(T);
-    }
-
     /**
      * @brief Creates an new empty rvalue LockedMemory object.
      *
@@ -200,6 +200,32 @@ public:
     virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
         noexcept = 0;
 
+    /**
+     * @deprecated Use InferenceEngine::CNNNetwork wrapper instead
+     * @brief Serialize network to IR and weights files.
+     *
+     * @param xmlStream A stream for xml content (.xml file)
+     * @param binStream A stream for weights content (.bin file)
+     * @param resp Pointer to the response message that holds a description of an error if any occurred
+     * @return Status code of the operation
+     */
+    INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead")
+    virtual StatusCode serialize(std::ostream& xmlStream, std::ostream& binStream, ResponseDesc* resp) const
+        noexcept = 0;
+
+    /**
+     * @deprecated Use InferenceEngine::CNNNetwork wrapper instead
+     * @brief Serialize network to IR and weights files.
+     *
+     * @param xmlStream A stream for xml content (.xml file)
+     * @param binData A blob for weights content (.bin file)
+     * @param resp Pointer to the response message that holds a description of an error if any occurred
+     * @return Status code of the operation
+     */
+    INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::CNNNetwork wrapper instead")
+    virtual StatusCode serialize(std::ostream& xmlStream, Blob::Ptr& binData, ResponseDesc* resp) const
+        noexcept = 0;
+
     /**
      * @deprecated Use InferenceEngine::CNNNetwork wrapper instead
      * @brief Methods maps framework tensor name to OpenVINO name
@@ -109,11 +109,6 @@ public:
     template <class T>
     bool hasStorageType(const char* typeName = nullptr) const noexcept {
         try {
-            if (precisionInfo.value != BIN) {
-                if (sizeof(T) != size()) {
-                    return false;
-                }
-            }
 #define CASE(x, y) \
     case x: \
         return std::is_same<T, y>()
@@ -247,14 +242,14 @@ public:
     }
 
     /**
-     * @brief Returns size of single element of that precision in bits
+     * @brief Returns size of single element of that precision in bytes
     * @returns Number of bytes per element
     */
     size_t size() const {
         if (precisionInfo.bitsSize == 0) {
             IE_THROW() << " cannot estimate element if precision is " << precisionInfo.name;
         }
-        return precisionInfo.bitsSize >> 3;
+        return (precisionInfo.bitsSize + 7) / 8;
     }
 
     /**
@@ -461,7 +456,7 @@ inline Precision::PrecisionInfo Precision::makePrecisionInfo(const char* name) {
     Precision::PrecisionInfo info;
     info.name = name;
 
-    size_t nBits = precision == BIN ? 1 : 8;
+    size_t nBits = precision == BIN ? 1 : (precision == U4 || precision == I4) ? 4 : 8;
     info.bitsSize = nBits * type_size_or_zero<typename PrecisionTrait<precision>::value_type>();
     info.isFloat = PrecisionTrait<precision>::is_float;
     info.value = precision;
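The two changes above work together: `makePrecisionInfo` now records 4 bits for the `U4`/`I4` precisions, and `size()` rounds the bit count up to whole bytes instead of truncating. The difference is easy to check with plain arithmetic (precision names here are only for illustration):

```python
def size_old(bits):
    return bits >> 3          # truncating: 4 bits -> 0 bytes, 1 bit -> 0 bytes

def size_new(bits):
    return (bits + 7) // 8    # rounding up: 4 bits -> 1 byte, 1 bit -> 1 byte

for name, bits in [('FP32', 32), ('U8', 8), ('U4', 4), ('I4', 4), ('BIN', 1)]:
    print(f'{name}: old={size_old(bits)} new={size_new(bits)}')
# Sub-byte precisions used to report 0 bytes per element; now they report 1
```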
@@ -46,10 +46,6 @@ public:
      */
     explicit RemoteBlob(const TensorDesc& tensorDesc): MemoryBlob(tensorDesc) {}
 
-    size_t element_size() const noexcept override {
-        return tensorDesc.getPrecision().size();
-    }
-
     /**
      * @brief Returns a map of device-specific parameters required for low-level
      * operations with underlying object.
@@ -194,3 +190,4 @@ inline RemoteBlob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::P
 }
 
 } // namespace InferenceEngine
+
@@ -59,4 +59,9 @@ DECLARE_VPU_CONFIG(MYRIAD_USB);
  */
 DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS);
 
+/**
+ * @brief Default key definition for InferenceEngine::MYRIAD_THROUGHPUT_STREAMS option.
+ */
+DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS_AUTO);
+
 } // namespace InferenceEngine
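These config keys are consumed through the regular plugin configuration mechanism. A hedged Python sketch; the exact key string and accepted value are assumptions based on the declaration above:

```python
from openvino.inference_engine import IECore

ie = IECore()
# Ask the MYRIAD plugin for two throughput streams; the key spelling mirrors
# DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS) above (assumed, not verified)
ie.set_config({'MYRIAD_THROUGHPUT_STREAMS': '2'}, 'MYRIAD')
```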
@@ -60,8 +60,8 @@ To run the sample, you need specify a model and image:
 
 Running the application with the `-h` option yields the following usage message:
 
-```sh
-./classification_sample_async -h
+```
+<path_to_sample>/classification_sample_async -h
 InferenceEngine:
     API version ............ <version>
     Build .................. <build>
@@ -85,33 +85,43 @@ Options:
 
 Running the application with the empty list of options yields the usage message given above and an error message.
 
-You can do inference of an image using a trained [AlexNet network](https://docs.openvinotoolkit.org/latest/omz_models_model_alexnet.html) on GPU using the following command:
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name alexnet
+   ```
+
-```sh
-./classification_sample_async -m <path_to_model>/alexnet_fp32.xml -i <path_to_image>/cat.bmp -d GPU
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name alexnet
+   ```
+
+3. Perform inference of `car.bmp` using `alexnet` model on a `GPU`, for example:
+
+   ```
+   <path_to_sample>/classification_sample_async -m <path_to_model>/alexnet.xml -i <path_to_image>/car.bmp -d GPU
 ```
 
 ## Sample Output
 
 By default the application outputs top-10 inference results for each infer request.
 
-```sh
-classification_sample_async -m alexnet_fp32/alexnet.xml -i car_1.bmp -d GPU
+```
 [ INFO ] InferenceEngine:
-    API version ............
-    Build ..................
-    Description ....... API
-[ INFO ] Parsing input parameters
+    IE version ......... 2021.4.0
+    Build ........... 2021.4.0-3839-cd81789d294-releases/2021/4
 [ INFO ] Parsing input parameters
 [ INFO ] Files were added: 1
-[ INFO ]     car_1.bmp
+[ INFO ]     C:\images\car.bmp
 [ INFO ] Loading Inference Engine
 [ INFO ] Device info:
         GPU
-        clDNNPlugin version ......... <version>
-        Build ........... <build>
+        clDNNPlugin version ......... 2021.4.0
+        Build ........... 2021.4.0-3839-cd81789d294-releases/2021/4
 
 [ INFO ] Loading network files:
-    alexnet_fp32/alexnet.xml
+[ INFO ] C:\openvino\deployment_tools\open_model_zoo\tools\downloader\public\alexnet\FP32\alexnet.xml
 [ INFO ] Preparing input blobs
 [ WARNING ] Image is resized from (749, 637) to (227, 227)
 [ INFO ] Batch size is 1
@@ -132,20 +142,20 @@ classification_sample_async -m alexnet_fp32/alexnet.xml -i car_1.bmp -d GPU
 
 Top 10 results:
 
-Image car_1.bmp
+Image C:\images\car.bmp
 
 classid probability
 ------- -----------
-656     0.5491584
-874     0.1101241
-654     0.0559816
-436     0.0488046
-581     0.0330480
-705     0.0307707
-734     0.0185521
-627     0.0162536
-675     0.0145008
-757     0.0125437
+656     0.6645315
+654     0.1121185
+581     0.0698451
+874     0.0334973
+436     0.0259718
+817     0.0173190
+675     0.0109321
+511     0.0109075
+569     0.0083093
+717     0.0063173
 
 [ INFO ] Execution successful
 
@@ -47,33 +47,45 @@ To run the sample, you need specify a model and image:
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
-You can do inference of an image using a trained AlexNet network on a GPU using the following command:
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name alexnet
+   ```
+
-```sh
-./hello_classification <path_to_model>/alexnet_fp32.xml <path_to_image>/car.png GPU
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name alexnet
+   ```
+
+3. Perform inference of `car.bmp` using `alexnet` model on a `GPU`, for example:
+
+   ```
+   <path_to_sample>/hello_classification <path_to_model>/alexnet.xml <path_to_image>/car.bmp GPU
 ```
 
 ## Sample Output
 
 The application outputs top-10 inference results.
 
-```sh
+```
 Top 10 results:
 
-Image /opt/intel/openvino/deployment_tools/demo/car.png
+Image C:\images\car.bmp
 
 classid probability
 ------- -----------
-479     0.7562194
-511     0.0760387
-436     0.0724114
-817     0.0462140
-656     0.0301230
-661     0.0056171
-581     0.0031623
-468     0.0029917
-717     0.0023081
-627     0.0016193
+656     0.6664789
+654     0.1129405
+581     0.0684867
+874     0.0333845
+436     0.0261321
+817     0.0167310
+675     0.0109796
+511     0.0105919
+569     0.0081782
+717     0.0063356
 
 This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
 ```
@@ -64,17 +64,29 @@ ffmpeg -i cat.jpg -pix_fmt nv12 cat.yuv
 >
 > - The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
-You can perform inference on an NV12 image using a trained AlexNet network on CPU with the following command:
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name alexnet
+   ```
+
-```sh
-./hello_nv12_input_classification <path_to_model>/alexnet_fp32.xml <path_to_image>/cat.yuv 300x300 CPU
+2. If a model is not in the Inference Engine IR or ONNX format, it must be converted. You can do this using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name alexnet
+   ```
+
+3. Perform inference of NV12 image using `alexnet` model on a `CPU`, for example:
+
+   ```
+   <path_to_sample>/hello_nv12_input_classification <path_to_model>/alexnet.xml <path_to_image>/cat.yuv 300x300 CPU
 ```
 
 ## Sample Output
 
 The application outputs top-10 inference results.
 
-```sh
+```
 [ INFO ] Files were added: 1
 [ INFO ]     ./cat.yuv
 Batch size is 1
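The `300x300` argument exists because a raw `.yuv` file has no header; the sample needs the dimensions to know how many bytes one NV12 frame occupies. The size arithmetic for the standard NV12 layout with 8-bit samples:

```python
w, h = 300, 300
y_plane = w * h                  # full-resolution luma plane
uv_plane = (w * h) // 2          # interleaved U/V at quarter resolution each
frame_size = y_plane + uv_plane  # = w * h * 3 / 2

with open('cat.yuv', 'rb') as f:  # placeholder path
    data = f.read()
assert len(data) == frame_size, 'unexpected NV12 buffer size'
```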
@@ -27,8 +27,8 @@ To build the sample, please use instructions available at [Build the Sample Appl
 
 To see quired information, run the following:
 
-```sh
-./hello_query_device -h
+```
+<path_to_sample>/hello_query_device -h
 Usage : hello_query_device
 ```
 
@@ -36,8 +36,7 @@ Usage : hello_query_device
 
 The application prints all available devices with their supported metrics and default values for configuration parameters:
 
-```sh
-./hello_query_device
+```
 Available devices:
     Device: CPU
     Metrics:
@@ -51,14 +51,26 @@ To run the sample, you need specify a model and image:
 
 You can use the following command to do inference on CPU of an image using a trained SSD network:
 
-```sh
-hello_reshape_ssd <path_to_model> <path_to_image> <device> <batch>
+```
+<path_to_sample>/hello_reshape_ssd <path_to_model> <path_to_image> <device> <batch>
 ```
 
-with one image and [person-detection-retail-0013](https://docs.openvinotoolkit.org/latest/omz_models_intel_person_detection_retail_0013_description_person_detection_retail_0013.html) model
+### Example
+
+1. Download a pre-trained model using [Model Downloader](@ref omz_tools_downloader_README):
+
+   ```
+   python <path_to_omz_tools>/downloader.py --name person-detection-retail-0013
+   ```
+
-```sh
-hello_reshape_ssd <path_to_model>/person-detection-retail-0013.xml <path_to_image>/inputImage.bmp CPU 1
+2. `person-detection-retail-0013` model does not need to be converted, because it is already in necessary format, so you can skip this step. If you want to use a other model that is not in the Inference Engine IR or ONNX format, you can convert it using the model converter script:
+
+   ```
+   python <path_to_omz_tools>/converter.py --name <model_name>
+   ```
+
+3. Perform inference of `person_detection.png` using `person-detection-retail-0013` model on a `GPU`, for example:
+
+   ```
+   <path_to_sample>/hello_reshape_ssd <path_to_model>/person-detection-retail-0013.xml <path_to_image>/person_detection.png GPU 1
 ```
 
 ## Sample Output
@@ -67,13 +79,11 @@ The application renders an image with detected objects enclosed in rectangles. I
 of the detected objects along with the respective confidence values and the coordinates of the
 rectangles to the standard output stream.
 
-```sh
-hello_reshape_ssd person-detection-retail-0013/FP16/person-detection-retail-0013.xml person_detection.png CPU 1
-
+```
 Resizing network to the image size = [960x1699] with batch = 1
 Resulting input shape = [1,3,960,1699]
 Resulting output shape = [1,1,200,7]
-[0,1] element, prob = 0.721457, bbox = (852.37,187.54)-(983.326,520.672), batch id = 0
+[0,1] element, prob = 0.722292, bbox = (852.382,187.756)-(983.352,520.733), batch id = 0
 The resulting image was saved in the file: hello_reshape_ssd_output.jpg
 
 This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool
@@ -50,7 +50,7 @@ To run the sample, you need specify a model wights and ubyte image:
 
 Running the application with the `-h` option yields the following usage message:
 
-```sh
+```
 ngraph_function_creation_sample -h
 [ INFO ] InferenceEngine:
     API version ............<version>
@@ -75,8 +75,8 @@ Running the application with the empty list of options yields the usage message
 
 You can do inference of an image using a pre-trained model on a GPU using the following command:
 
-```sh
-./ngraph_function_creation_sample -m <path_to_weights_file>/lenet.bin -i <path_to_image> -d GPU
+```
+<path_to_sample>/ngraph_function_creation_sample -m <path_to_weights_file>/lenet.bin -i <path_to_image> -d GPU
 ```
 
 ## Sample Output
Some files were not shown because too many files have changed in this diff