diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml
index 146775f6189..24f59d96be3 100644
--- a/.ci/azure/linux.yml
+++ b/.ci/azure/linux.yml
@@ -30,8 +30,8 @@ jobs:
WORK_DIR: $(Pipeline.Workspace)/_w
BUILD_DIR: $(WORK_DIR)/build
BUILD_SAMPLES_DIR: $(WORK_DIR)/build_samples
- BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE)
INSTALL_DIR: $(WORK_DIR)/install_pkg
+ INSTALL_TEST_DIR: $(INSTALL_DIR)/tests
SETUPVARS: $(INSTALL_DIR)/bin/setupvars.sh
steps:
@@ -103,7 +103,6 @@ jobs:
cmakeArgs: >
-GNinja
-DVERBOSE_BUILD=ON
- -DENABLE_TEMPLATE_PLUGIN=ON
-DCMAKE_BUILD_TYPE=$(BUILD_TYPE)
-DENABLE_PYTHON=ON
-DPYTHON_EXECUTABLE=/usr/bin/python3.6
@@ -117,6 +116,9 @@ jobs:
$(REPO_DIR)
workingDirectory: $(BUILD_DIR)
+ - script: ls -alR $(REPO_DIR)/inference-engine/temp/
+ displayName: 'List temp SDKs'
+
- script: ninja
workingDirectory: $(BUILD_DIR)
displayName: 'Build Lin'
@@ -131,6 +133,15 @@ jobs:
- script: ls -alR $(INSTALL_DIR)
displayName: 'List install files'
+ - script: |
+ mkdir $(INSTALL_DIR)/opencv/
+ cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake && cp -R $(REPO_DIR)/inference-engine/temp/opencv_4.5.2_ubuntu18/opencv/* $(INSTALL_DIR)/opencv/
+ workingDirectory: $(BUILD_DIR)
+ displayName: 'Install tests'
+
+ - script: ls -alR $(INSTALL_DIR)
+ displayName: 'List install files'
+
- script: $(INSTALL_DIR)/deployment_tools/inference_engine/samples/cpp/build_samples.sh
workingDirectory: $(BUILD_SAMPLES_DIR)
displayName: 'Build cpp samples'
@@ -139,62 +150,59 @@ jobs:
workingDirectory: $(BUILD_SAMPLES_DIR)
displayName: 'Build c samples'
- - script: $(BIN_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
displayName: 'nGraph UT'
continueOnError: false
- # python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(BIN_DIR)/InferenceEngineUnitTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
- - script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
+ # . $(SETUPVARS) && python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
displayName: 'IE UT old'
continueOnError: false
- - script: $(BIN_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
displayName: 'IE UT'
continueOnError: false
- - script: $(BIN_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
displayName: 'CPU UT'
continueOnError: false
- - script: $(BIN_DIR)/gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
displayName: 'GNA UT'
continueOnError: false
- - script: $(BIN_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
displayName: 'VPU UT'
continueOnError: false
- - script: $(BIN_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
displayName: 'ONNX Importer UT'
continueOnError: false
- - script: $(BIN_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
displayName: 'IE FuncTests'
continueOnError: false
- - script: $(BIN_DIR)/templateFuncTests --gtest_filter=*smoke* --gtest_output=xml:TEST-templateFuncTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/templateFuncTests --gtest_filter=*smoke* --gtest_output=xml:TEST-templateFuncTests.xml
displayName: 'TEMPLATE FuncTests'
continueOnError: false
- - script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
+ - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
displayName: 'CPU FuncTests'
continueOnError: false
- script: |
export DATA_PATH=$(MODELS_PATH)
export MODELS_PATH=$(MODELS_PATH)
- $(BIN_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
+ . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
displayName: 'IE CAPITests'
continueOnError: false
- script: |
export DATA_PATH=$(MODELS_PATH)
export MODELS_PATH=$(MODELS_PATH)
- export LD_LIBRARY_PATH=$(BIN_DIR)/lib
- export PYTHONPATH=$(BIN_DIR)/lib/python_api/python3.6
- env
cd $(REPO_DIR)/inference-engine/ie_bridges/python/tests
- pytest pytest --junitxml=TEST-PythonAPI.xml
+ . $(SETUPVARS) -pyver 3.6 && pytest pytest --junitxml=TEST-PythonAPI.xml
displayName: 'Python API Tests'
continueOnError: false
enabled: false
diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml
index f5e5d7673c2..c368776c8f4 100644
--- a/.ci/azure/windows.yml
+++ b/.ci/azure/windows.yml
@@ -30,14 +30,13 @@ jobs:
WORK_DIR: $(Pipeline.Workspace)\_w
BUILD_DIR: D:\build
BUILD_SAMPLES_DIR: D:\build_samples
- BIN_DIR: $(REPO_DIR)\bin\intel64
MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
INSTALL_DIR: $(WORK_DIR)\install_pkg
+ INSTALL_TEST_DIR: $(INSTALL_DIR)\tests
SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat
IB_DIR: C:\Program Files (x86)\IncrediBuild
IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe
- TEST_ENV_PATH: $(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.2\opencv\bin;$(IB_DIR);%PATH%
steps:
- script: |
@@ -96,6 +95,9 @@ jobs:
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
+ - script: dir $(REPO_DIR)\inference-engine\temp\ /s
+ displayName: 'List temp SDKs'
+
- script: |
set PATH=$(WORK_DIR)\ninja-win;%PATH%
call "$(MSVS_VARS_PATH)" && "C:\Program Files (x86)\IncrediBuild\BuildConsole.exe" /COMMAND="ninja"
@@ -112,6 +114,13 @@ jobs:
- script: dir $(INSTALL_DIR) /s
displayName: 'List install files'
+ - script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -DCOMPONENT=tests -P cmake_install.cmake && xcopy $(REPO_DIR)\inference-engine\temp\opencv_4.5.2\opencv\* $(INSTALL_DIR)\opencv\ /e /h /y
+ workingDirectory: $(BUILD_DIR)
+ displayName: 'Install tests'
+
+ - script: dir $(INSTALL_DIR) /s
+ displayName: 'List install files'
+
- script: $(INSTALL_DIR)\deployment_tools\inference_engine\samples\cpp\build_samples_msvc.bat
workingDirectory: $(BUILD_SAMPLES_DIR)
displayName: 'Build cpp samples'
@@ -120,71 +129,55 @@ jobs:
workingDirectory: $(BUILD_SAMPLES_DIR)
displayName: 'Build c samples'
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
displayName: 'nGraph UT'
continueOnError: false
- script: |
- set PATH=$(TEST_ENV_PATH)
- "$(IB_TESTCONSOLE)" $(BIN_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
+ set PATH=$(IB_DIR);%PATH%
+ call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
displayName: 'IE UT old - IB'
+ continueOnError: false
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
displayName: 'IE UT'
continueOnError: false
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
displayName: 'CPU UT'
continueOnError: false
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml
displayName: 'GNA UT'
continueOnError: false
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
displayName: 'VPU UT'
continueOnError: false
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
displayName: 'ONNX Importer UT'
continueOnError: false
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
displayName: 'IE FuncTests'
continueOnError: false
- - script: |
- set PATH=$(TEST_ENV_PATH)
- $(BIN_DIR)\templateFuncTests --gtest_output=xml:TEST-templateFuncTests.xml
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\templateFuncTests --gtest_output=xml:TEST-templateFuncTests.xml
displayName: 'TEMPLATE FuncTests'
continueOnError: false
+ # call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
- script: |
- set PATH=$(TEST_ENV_PATH)
- rem $(BIN_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
- "$(IB_TESTCONSOLE)" $(BIN_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24
+ set PATH=$(IB_DIR);%PATH%
+ call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24
displayName: 'CPU FuncTests - IB'
continueOnError: false
- script: |
- set PATH=$(TEST_ENV_PATH)
set DATA_PATH=$(MODELS_PATH)
set MODELS_PATH=$(MODELS_PATH)
- $(BIN_DIR)\InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
+ call $(SETUPVARS) && $(INSTALL_TEST_DIR)\InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
displayName: 'IE CAPITests'
continueOnError: false
diff --git a/.github/org_control/config.json b/.github/org_control/config.json
index ed6a1e1f5aa..e013ab30923 100644
--- a/.github/org_control/config.json
+++ b/.github/org_control/config.json
@@ -33,6 +33,7 @@
"openvino-mo-maintainers": "category: MO",
"openvino-ngraph-maintainers": "category: nGraph",
"openvino-tests-maintainers": "category: IE Tests",
- "openvino-tools-maintainers": "category: tools"
+ "openvino-tools-maintainers": "category: tools",
+ "openvino-configuration-mgmt": "category: dependency_changes"
}
}
diff --git a/CODEOWNERS b/CODEOWNERS
index f6cef156f27..d47170c4716 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -73,3 +73,8 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
# Documentation
/docs/ @openvinotoolkit/openvino-docs-maintainers
*.md @openvinotoolkit/openvino-docs-maintainers
+
+# Control third-party dependencies
+*requirements* @openvinotoolkit/openvino-configuration-mgmt
+*setup.py @openvinotoolkit/openvino-configuration-mgmt
+/scripts/install_dependencies/ @openvinotoolkit/openvino-configuration-mgmt
\ No newline at end of file
diff --git a/cmake/features.cmake b/cmake/features.cmake
index 6042d9d2238..1f0c198913c 100644
--- a/cmake/features.cmake
+++ b/cmake/features.cmake
@@ -24,8 +24,6 @@ Supported values:\
ie_option (ENABLE_PROFILING_FIRST_INFERENCE "Build with ITT tracing of first inference time." ON)
-ie_option(ENABLE_TEMPLATE_PLUGIN "Register template plugin into plugins.xml" OFF)
-
ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \
In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected InelSEAPI statistics. \
Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF
diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
index 0898fd7e222..fdf5dcbb983 100644
--- a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
+++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_PyTorch.md
@@ -1,36 +1,45 @@
# Converting a PyTorch* Model {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_PyTorch}
+## Supported Topologies
+
+Here is the list of models that are tested and guaranteed to be supported. However, you can also use these instructions to convert PyTorch\* models that are not in the list.
+
+* [Torchvision Models](https://pytorch.org/docs/stable/torchvision/index.html): alexnet, densenet121, densenet161,
+ densenet169, densenet201, resnet101, resnet152, resnet18, resnet34, resnet50, vgg11, vgg13, vgg16, vgg19.
+ The models can be converted using [regular instructions](#typical-pytorch).
+* [Cadene Pretrained Models](https://github.com/Cadene/pretrained-models.pytorch): alexnet, fbresnet152, resnet101,
+  resnet152, resnet18, resnet34, resnet50, resnext101_32x4d, resnext101_64x4d, vgg11.
+ The models can be converted using [regular instructions](#typical-pytorch).
+* [ESPNet Models](https://github.com/sacmehta/ESPNet/tree/master/pretrained) can be converted using [regular instructions](#typical-pytorch).
+* [MobileNetV3](https://github.com/d-li14/mobilenetv3.pytorch) can be converted using [regular instructions](#typical-pytorch).
+* [iSeeBetter](https://github.com/amanchadha/iSeeBetter) can be converted using [regular instructions](#typical-pytorch).
+  Please refer to the [`iSeeBetterTest.py`](https://github.com/amanchadha/iSeeBetter/blob/master/iSeeBetterTest.py) script for code to initialize the model.
+* F3Net topology can be converted using the steps described in [Convert PyTorch\* F3Net to the IR](pytorch_specific/Convert_F3Net.md),
+  which replace steps 2 and 3 of the [regular instructions](#typical-pytorch).
+* QuartzNet topologies from the [NeMo project](https://github.com/NVIDIA/NeMo) can be converted using the steps described in
+  [Convert PyTorch\* QuartzNet to the IR](pytorch_specific/Convert_QuartzNet.md), which replace steps 2 and 3 of the
+  [regular instructions](#typical-pytorch).
+* YOLACT topology can be converted using the steps described in [Convert PyTorch\* YOLACT to the IR](pytorch_specific/Convert_YOLACT.md),
+  which replace steps 2 and 3 of the [regular instructions](#typical-pytorch).
+* [RCAN](https://github.com/yulunzhang/RCAN) topology can be converted using the steps described in [Convert PyTorch\* RCAN to the IR](pytorch_specific/Convert_RCAN.md),
+  which replace steps 2 and 3 of the [regular instructions](#typical-pytorch).
+* [BERT_NER](https://github.com/kamalkraj/BERT-NER) topology can be converted using the steps described in [Convert PyTorch* BERT-NER to the IR](pytorch_specific/Convert_Bert_ner.md),
+  which replace steps 2 and 3 of the [regular instructions](#typical-pytorch).
+
+## Typical steps to convert PyTorch\* model
+
PyTorch* framework is supported through export to ONNX\* format. A summary of the steps for optimizing and deploying a model that was trained with the PyTorch\* framework:
-1. [Export PyTorch model to ONNX\*](#export-to-onnx).
-2. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for ONNX\*.
+1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for ONNX\*.
+2. [Export PyTorch model to ONNX\*](#export-to-onnx).
3. [Convert an ONNX\* model](Convert_Model_From_ONNX.md) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values.
4. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided [sample applications](../../../IE_DG/Samples_Overview.md).
5. [Integrate](../../../IE_DG/Samples_Overview.md) the Inference Engine in your application to deploy the model in the target environment.
-## Supported Topologies
-
-Here is the list of models that were tested and are guaranteed to be supported.
-It is not a full list of models that can be converted to ONNX\* and to IR.
-
-|Package Name|Supported Models|
-|:----|:----|
-| [Torchvision Models](https://pytorch.org/docs/stable/torchvision/index.html) | alexnet, densenet121, densenet161, densenet169, densenet201, resnet101, resnet152, resnet18, resnet34, resnet50, vgg11, vgg13, vgg16, vgg19 |
-| [Pretrained Models](https://github.com/Cadene/pretrained-models.pytorch) | alexnet, fbresnet152, resnet101, resnet152, resnet18, resnet34, resnet152, resnet18, resnet34, resnet50, resnext101_32x4d, resnext101_64x4d, vgg11 |
-
-**Other supported topologies**
-
-* [ESPNet Models](https://github.com/sacmehta/ESPNet/tree/master/pretrained)
-* [MobileNetV3](https://github.com/d-li14/mobilenetv3.pytorch)
-* F3Net topology can be converted using [Convert PyTorch\* F3Net to the IR](pytorch_specific/Convert_F3Net.md) instruction.
-* QuartzNet topologies from [NeMo project](https://github.com/NVIDIA/NeMo) can be converted using [Convert PyTorch\* QuartzNet to the IR](pytorch_specific/Convert_QuartzNet.md) instruction.
-* YOLACT topology can be converted using [Convert PyTorch\* YOLACT to the IR](pytorch_specific/Convert_YOLACT.md) instruction.
-* [RCAN](https://github.com/yulunzhang/RCAN) topologies can be converted using [Convert PyTorch\* RCAN to the IR](pytorch_specific/Convert_RCAN.md) instruction.
-* [BERT_NER](https://github.com/kamalkraj/BERT-NER) can be converted using [Convert PyTorch* BERT-NER to the IR](pytorch_specific/Convert_Bert_ner.md) instruction.
-
## Export PyTorch\* Model to ONNX\* Format
-PyTorch models are defined in a Python\* code, to export such models use `torch.onnx.export()` method.
+PyTorch models are defined in Python\* code; to export such models, use the `torch.onnx.export()` method. Usually, code to
+evaluate or test the model is provided with the model code and can be used to initialize and export the model.
Only the basics will be covered here, the step to export to ONNX\* is crucial but it is covered by PyTorch\* framework.
For more information, please refer to [PyTorch\* documentation](https://pytorch.org/docs/stable/onnx.html).
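+
+A minimal export sketch is shown below. It is only an illustration: the model choice, input shape, output file name, and opset version are examples and assume a torchvision\* classification model.
+
+```python
+import torch
+import torchvision
+
+# Initialize the model and switch it to inference mode (example: ResNet-50 from torchvision)
+model = torchvision.models.resnet50(pretrained=True)
+model.eval()
+
+# Dummy input that defines the input shape the exported graph will expect
+dummy_input = torch.randn(1, 3, 224, 224)
+
+# Export to ONNX; the resulting model.onnx can then be converted with Model Optimizer
+torch.onnx.export(model, dummy_input, "model.onnx", opset_version=11)
+```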
diff --git a/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md b/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md
index e749e37780d..fc5920970ce 100644
--- a/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md
+++ b/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md
@@ -8,7 +8,8 @@ These instructions are applicable only to the Faster R-CNN model converted to th
```sh
python3 ./mo_onnx.py
--input_model FasterRCNN-10.onnx \
---input_shape [3,800,800] \
+--input_shape [1,3,800,800] \
+--input 0:2 \
--mean_values [102.9801,115.9465,122.7717] \
--transformations_config ./extensions/front/onnx/faster_rcnn.json
```
diff --git a/docs/install_guides/installing-openvino-apt.md b/docs/install_guides/installing-openvino-apt.md
index 982d71102b3..ae50dd84976 100644
--- a/docs/install_guides/installing-openvino-apt.md
+++ b/docs/install_guides/installing-openvino-apt.md
@@ -4,7 +4,16 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t
> **IMPORTANT**: By downloading and using this container and the included software, you agree to the terms and conditions of the [software license agreements](https://software.intel.com/content/dam/develop/external/us/en/documents/intel-openvino-license-agreements.pdf). Please, review the content inside the `/licensing` folder for more details.
-> **NOTE**: Intel® Graphics Compute Runtime for OpenCL™ is not a part of OpenVINO™ APT distribution. You can install it from the [Intel® Graphics Compute Runtime for OpenCL™ GitHub repo](https://github.com/intel/compute-runtime).
+> **NOTE**: Intel® Graphics Compute Runtime for OpenCL™ is not a part of OpenVINO™ APT distribution. You can install it from the [Intel® Graphics Compute Runtime for OpenCL™ GitHub repo](https://github.com/intel/compute-runtime).
+
+## System Requirements
+
+The complete list of supported hardware is available in the [Release Notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html#inpage-nav-8).
+
+**Operating Systems**
+
+- Ubuntu 18.04.x long-term support (LTS), 64-bit
+- Ubuntu 20.04.0 long-term support (LTS), 64-bit
## Included with Runtime Package
@@ -31,50 +40,46 @@ The following components are installed with the OpenVINO developer package:
| [Documentation for Pre-Trained Models ](@ref omz_models_group_intel) | Documentation for the pre-trained models available in the [Open Model Zoo repo](https://github.com/openvinotoolkit/open_model_zoo). |
| Deep Learning Streamer (DL Streamer) | Streaming analytics framework, based on GStreamer\*, for constructing graphs of media analytics components. For the DL Streamer documentation, see [DL Streamer Samples](@ref gst_samples_README), [API Reference](https://openvinotoolkit.github.io/dlstreamer_gst/), [Elements](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Elements), [Tutorial](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/DL-Streamer-Tutorial). |
-## Set up the Repository
-### Install the GPG key for the repository
+
+## Install Packages
+
+### Set up the OpenVINO™ Toolkit APT Repository
+
+#### Install the GPG key for the Repository
1. Download the public key from [https://apt.repos.intel.com/openvino/2021/GPG-PUB-KEY-INTEL-OPENVINO-2021](https://apt.repos.intel.com/openvino/2021/GPG-PUB-KEY-INTEL-OPENVINO-2021) and save it to a file.
2. Add this key to the system keyring:
```sh
sudo apt-key add
```
+> **NOTE**: You might need to install GnuPG: `sudo apt-get install gnupg`
+
3. Check the list of APT keys running the following command:
```sh
sudo apt-key list
```
-### Add the APT Repository
+#### Add the Repository
Run the following command:
```sh
echo "deb https://apt.repos.intel.com/openvino/2021 all main" | sudo tee /etc/apt/sources.list.d/intel-openvino-2021.list
```
-### Update the list of packages
+#### Update the List of Packages
Run the `update` command:
```sh
sudo apt update
```
-There are full release Runtime and Developer packages, and also some available components.
-**Runtime Packages**
-- Ubuntu 18.04: `intel-openvino-runtime-ubuntu18`
-- Ubuntu 20.04: `intel-openvino-runtime-ubuntu20`
-
-**Developer Packages**
-- Ubuntu 18.04: `intel-openvino-dev-ubuntu18`
-- Ubuntu 20.04: `intel-openvino-dev-ubuntu20`
-
-### Get the list of available packages
+#### Verify that the APT Repository is Properly Set Up
Run the `apt-cache` command to see a list of all available OpenVINO packages and components:
```sh
apt-cache search openvino
```
-
-#### Examples
+See the example commands below:
* **Runtime Packages**
@@ -97,29 +102,23 @@ apt-cache search openvino
sudo apt-cache search intel-openvino-dev-ubuntu20
```
-
-## Install the runtime or developer packages using the APT Package Manager
-Intel® OpenVINO will be installed in: `/opt/intel/openvino_..`
+### Install Runtime or Developer Packages using the APT Package Manager
+Intel® OpenVINO™ Toolkit will be installed in: `/opt/intel/openvino_..`
A symlink will be created: `/opt/intel/openvino_`
----
-### To Install a specific version
-
-To get a list of OpenVINO packages available for installation:
+#### To Install a Specific Version
+1. Get a list of OpenVINO packages available for installation:
```sh
sudo apt-cache search intel-openvino-runtime-ubuntu18
```
-
-To install a specific version of an OpenVINO package:
+2. Install a specific version of an OpenVINO package:
```sh
sudo apt install intel-openvino--ubuntu-..
```
-
-#### Examples
-* **Runtime Package**
-
+See the example commands below:
+* **Runtime Package**
On Ubuntu 18.04:
```sh
sudo apt install intel-openvino-runtime-ubuntu18-2021.1.105
@@ -138,10 +137,17 @@ sudo apt install intel-openvino--ubuntu-.-ubuntu-..
```
diff --git a/docs/install_guides/installing-openvino-conda.md b/docs/install_guides/installing-openvino-conda.md
index a5cefbfb97e..b231b8b93d0 100644
--- a/docs/install_guides/installing-openvino-conda.md
+++ b/docs/install_guides/installing-openvino-conda.md
@@ -2,22 +2,39 @@
This guide provides installation steps for Intel® Distribution of OpenVINO™ toolkit distributed through the Anaconda* Cloud.
+> **NOTE**: Only runtime packages are available from Anaconda* Cloud.
+
+## Introduction
+
+OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that solve a variety of tasks including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and many others. Based on the latest generations of artificial neural networks, including Convolutional Neural Networks (CNNs), recurrent and attention-based networks, the toolkit extends computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance AI and deep learning inference deployed from edge to cloud.
+
+The Intel® Distribution of OpenVINO™ toolkit\*:
+- Enables CNN-based deep learning inference on the edge
+- Supports heterogeneous execution across Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs
+- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels
+
+The **runtime package** includes the following components installed by default:
+
+| Component | Description |
+|-----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) | This is the engine that runs the deep learning model. It includes a set of libraries for an easy inference integration into your applications. |
## System Requirements
+**Software**
+
- [Anaconda* distribution](https://www.anaconda.com/products/individual/)
**Operating Systems**
-- Ubuntu* 18.04 long-term support (LTS), 64-bit
-- CentOS* 7.6, 64-bit
-- macOS* 10.14.x versions.
-- Windows 10*, 64-bit Pro, Enterprise or Education (1607 Anniversary Update, Build 14393 or higher) editions
-- Windows Server* 2016 or higher
+| Supported Operating System | [Python* Version (64-bit)](https://www.python.org/) |
+| :------------------------------------------------------------| :---------------------------------------------------|
+| Ubuntu* 18.04 long-term support (LTS), 64-bit | 3.6, 3.7 |
+| Ubuntu* 20.04 long-term support (LTS), 64-bit | 3.6, 3.7 |
+| CentOS* 7.6, 64-bit | 3.6, 3.7 |
+| macOS* 10.15.x | 3.6, 3.7 |
+| Windows 10*, 64-bit | 3.6, 3.7 |
-
-
-## Install the runtime package using the Anaconda* Package Manager
+## Install the Runtime Package using the Anaconda* Package Manager
1. Set up the Anaconda* environment:
```sh
@@ -26,11 +43,19 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t
```sh
conda activate py37
```
-2. Updated conda to the latest version:
+2. Update the Anaconda environment to the latest version:
```sh
conda update --all
```
-3. Install the Intel® Distribution of OpenVINO™ Toolkit:
+3. Install prerequisites:
+ ```sh
+ conda install numpy
+ ```
+4. Install the Intel® Distribution of OpenVINO™ Toolkit:
+ - Ubuntu* 20.04
+ ```sh
+ conda install openvino-ie4py-ubuntu20 -c intel
+ ```
- Ubuntu* 18.04
```sh
conda install openvino-ie4py-ubuntu18 -c intel
@@ -43,19 +68,13 @@ This guide provides installation steps for Intel® Distribution of OpenVINO™ t
```sh
conda install openvino-ie4py -c intel
```
-4. Verify the package installed:
+5. Verify the package is installed:
```sh
- python -c "import openvino"
+ python -c "from openvino.inference_engine import IECore"
```
-
-Now you can start to develop and run your application.
-
-
-## Known Issues and Limitations
-
-- You cannot use Python bindings included in Intel® Distribution of OpenVINO™ toolkit with [Anaconda* distribution](https://www.anaconda.com/products/individual/)
-- You cannot use Python OpenVINO™ bindings included in Anaconda* package with official [Python distribution](https://www.python.org/).
+   If the installation was successful, you will not see any error messages (no console output).
+
+Now you can start developing your application.
## Additional Resources
diff --git a/docs/install_guides/installing-openvino-yum.md b/docs/install_guides/installing-openvino-yum.md
index ae34d202293..d92e7e891d1 100644
--- a/docs/install_guides/installing-openvino-yum.md
+++ b/docs/install_guides/installing-openvino-yum.md
@@ -8,6 +8,14 @@ This guide provides installation steps for the Intel® Distribution of OpenVINO
> **NOTE**: Only runtime packages are available via the YUM repository.
+## System Requirements
+
+The complete list of supported hardware is available in the [Release Notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html#inpage-nav-8).
+
+**Operating Systems**
+
+- CentOS 7.6, 64-bit
+
## Included with Runtime Package
The following components are installed with the OpenVINO runtime package:
@@ -18,6 +26,8 @@ The following components are installed with the OpenVINO runtime package:
| [OpenCV*](https://docs.opencv.org/master/) | OpenCV* community version compiled for Intel® hardware. |
| Deep Learning Stream (DL Streamer) | Streaming analytics framework, based on GStreamer, for constructing graphs of media analytics components. For the DL Streamer documentation, see [DL Streamer Samples](@ref gst_samples_README), [API Reference](https://openvinotoolkit.github.io/dlstreamer_gst/), [Elements](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Elements), [Tutorial](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/DL-Streamer-Tutorial). |
+## Install Packages
+
## Set up the Repository
> **NOTE:** You must be logged in as root to set up and install the repository.
@@ -39,25 +49,23 @@ Configure YUM with the OpenVINO repository to install OpenVINO. You have two opt
```
* **OPTION 2:** Create the repository file manually:
- 1. Navigate to the repository directory:
- ```sh
- cd /etc/yum.repos.d
- ```
- 2. Edit the repo file:
- ```sh
- vi intel-openvino-2021.repo
- ```
- 3. Append the following code:
+
+ 1. Create the YUM repo file in the /tmp directory as a normal user:
```sh
+ tee > /tmp/openvino-2021.repo << EOF
[intel-openvino-2021]
name=Intel(R) Distribution of OpenVINO 2021
baseurl=https://yum.repos.intel.com/openvino/2021
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-INTEL-OPENVINO-2021
+ EOF
```
- 4. Save and close the `intel-openvino-2021.repo` file.
- 5. Import the gpg public key for the repository:
+ 2. Move the newly created openvino-2021.repo file to the YUM configuration directory /etc/yum.repos.d:
+ ```sh
+ sudo mv /tmp/openvino-2021.repo /etc/yum.repos.d
+ ```
+ 3. Import the gpg public key for the repository:
```sh
sudo rpm --import https://yum.repos.intel.com/openvino/2021/setup/RPM-GPG-KEY-INTEL-OPENVINO-2021
```
@@ -103,6 +111,21 @@ To install the full runtime version of the OpenVINO package:
sudo yum install intel-openvino-runtime-centos7-..
```
+#### Examples
+
+```sh
+sudo yum install intel-openvino-runtime-centos7-2021.3.394
+ ```
+
+---
+
+### To check for installed packages and versions
+
+To list the installed OpenVINO packages and their versions:
+```sh
+yum list installed intel-openvino*
+```
+
---
### To Uninstall a specific version
diff --git a/docs/install_guides/pypi-openvino-dev.md b/docs/install_guides/pypi-openvino-dev.md
index 89bb5f3db61..01616f7e85e 100644
--- a/docs/install_guides/pypi-openvino-dev.md
+++ b/docs/install_guides/pypi-openvino-dev.md
@@ -15,11 +15,11 @@ The **developer package** includes the following components installed by default
| Component | Console Script | Description |
|------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Model Optimizer](https://docs.openvinotoolkit.org/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model Optimizer** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by Intel tools, especially the Inference Engine. Popular frameworks include Caffe\*, TensorFlow\*, MXNet\*, and ONNX\*. |
+| [Model Optimizer](https://docs.openvinotoolkit.org/latest/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) | `mo` |**Model Optimizer** imports, converts, and optimizes models that were trained in popular frameworks to a format usable by Intel tools, especially the Inference Engine. Supported frameworks include Caffe\*, TensorFlow\*, MXNet\*, and ONNX\*. |
| [Benchmark Tool](https://docs.openvinotoolkit.org/latest/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. |
-| [Accuracy Checker](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker.html) and [Annotation Converter](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker_annotation_converters.html) | `accuracy_check` `convert_annotation` |**Accuracy Checker** is a deep learning accuracy validation tool that allows you to collect accuracy metrics against popular datasets. The main advantages of the tool are the flexibility of configuration and an impressive set of supported datasets, preprocessing, postprocessing, and metrics. **Annotation Converter** is a utility for offline conversion of datasets to the format suitable for metric evaluation used in Accuracy Checker. |
+| [Accuracy Checker](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker.html) and [Annotation Converter](https://docs.openvinotoolkit.org/latest/omz_tools_accuracy_checker_annotation_converters.html) | `accuracy_check` `convert_annotation` |**Accuracy Checker** is a deep learning accuracy validation tool that allows you to collect accuracy metrics against popular datasets. The main advantages of the tool are the flexibility of configuration and a set of supported datasets, preprocessing, postprocessing, and metrics. **Annotation Converter** is a utility that prepares datasets for evaluation with Accuracy Checker. |
| [Post-Training Optimization Tool](https://docs.openvinotoolkit.org/latest/pot_README.html)| `pot` |**Post-Training Optimization Tool** allows you to optimize trained models with advanced capabilities, such as quantization and low-precision optimizations, without the need to retrain or fine-tune models. Optimizations are also available through the [API](https://docs.openvinotoolkit.org/latest/pot_compression_api_README.html). |
-| [Model Downloader and other Open Model Zoo tools](https://docs.openvinotoolkit.org/latest/omz_tools_downloader.html)| `omz_downloader` `omz_converter` `omz_quantizer` `omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](https://docs.openvinotoolkit.org/latest/omz_models_group_public.html) and [intel](https://docs.openvinotoolkit.org/latest/omz_models_group_intel.html)-trained models. Use these free pre-trained models instead of training your own models to speed up the development and production deployment process. The principle of the tool is as follows: it downloads model files from online sources and, if necessary, patches them with Model Optimizer to make them more usable. A number of additional tools are also provided to automate the process of working with downloaded models: **Model Converter** is a tool for converting the models stored in a format other than the Intermediate Representation (IR) into that format using Model Optimizer. **Model Quantizer** is a tool for automatic quantization of full-precision IR models into low-precision versions using Post-Training Optimization Tool. **Model Information Dumper** is a helper utility for dumping information about the models in a stable machine-readable format.|
+| [Model Downloader and other Open Model Zoo tools](https://docs.openvinotoolkit.org/latest/omz_tools_downloader.html)| `omz_downloader` `omz_converter` `omz_quantizer` `omz_info_dumper`| **Model Downloader** is a tool for getting access to the collection of high-quality and extremely fast pre-trained deep learning [public](https://docs.openvinotoolkit.org/latest/omz_models_group_public.html) and [Intel](https://docs.openvinotoolkit.org/latest/omz_models_group_intel.html)-trained models. These free pre-trained models can be used to speed up the development and production deployment process without training your own models. The tool downloads model files from online sources and, if necessary, patches them to make them more usable with Model Optimizer. A number of additional tools are also provided to automate the process of working with downloaded models: **Model Converter** is a tool for converting Open Model Zoo models that are stored in an original deep learning framework format into the Inference Engine Intermediate Representation (IR) using Model Optimizer. **Model Quantizer** is a tool for automatic quantization of full-precision models in the IR format into low-precision versions using the Post-Training Optimization Tool. **Model Information Dumper** is a helper utility for dumping information about the models to a stable, machine-readable format.|
**Developer package** also provides the **runtime package** installed as a dependency. The runtime package includes the following components:
@@ -54,17 +54,14 @@ To avoid dependency conflicts, use a virtual environment. Skip this
Create virtual environment:
-On Linux and macOS:
-```sh
-# Depending on your OS, this step may require installing python3-venv
-python3 -m venv openvino_env
-```
-
-On Windows:
```sh
+python -m pip install --user virtualenv
python -m venv openvino_env
```
+> **NOTE**: On Linux and macOS, you may need to type `python3` instead of
+`python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/). For example, on Ubuntu, run the following command to install pip and venv: `sudo apt install python3-venv python3-pip`.
+
### Step 2. Activate Virtual Environment
On Linux and macOS:
@@ -87,11 +84,11 @@ python -m pip install --upgrade pip
To install and configure the components of the development package for working with specific frameworks, use the `pip install openvino-dev[extras]` command, where `extras` is a list of extras from the table below:
-| DL Framework | Extra |
+| DL Framework | Extra |
| :------------------------------------------------------------------------------- | :-------------------------------|
| [Caffe*](https://caffe.berkeleyvision.org/) | caffe |
-| [Caffe2*](https://caffe2.ai/) | caffe2 |
-| [Kaldi*](https://kaldi-asr.org/) | kaldi |
+| [Caffe2*](https://github.com/pytorch/pytorch) | caffe2 |
+| [Kaldi*](https://github.com/kaldi-asr/kaldi) | kaldi |
| [MXNet*](https://mxnet.apache.org/) | mxnet |
| [ONNX*](https://github.com/microsoft/onnxruntime/) | onnx |
| [PyTorch*](https://pytorch.org/) | pytorch |
diff --git a/docs/install_guides/pypi-openvino-rt.md b/docs/install_guides/pypi-openvino-rt.md
index 7c20f9167b0..a41c02e7b2d 100644
--- a/docs/install_guides/pypi-openvino-rt.md
+++ b/docs/install_guides/pypi-openvino-rt.md
@@ -51,7 +51,7 @@ python -m venv openvino_env
```
> **NOTE**: On Linux and macOS, you may need to type `python3` instead of
-`python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/).
+`python`. You may also need to [install pip](https://pip.pypa.io/en/stable/installing/). For example, on Ubuntu, run the following command to install pip and venv: `sudo apt install python3-venv python3-pip`.
### Step 2. Activate Virtual Environment
diff --git a/docs/ops/activation/Clamp_1.md b/docs/ops/activation/Clamp_1.md
index 22d956cfc10..b391371f2d4 100644
--- a/docs/ops/activation/Clamp_1.md
+++ b/docs/ops/activation/Clamp_1.md
@@ -25,7 +25,6 @@ clamp( x_{i} )=\min\big( \max\left( x_{i}, min\_value \right), max\_value \big)
* **Description**: *min* is the lower bound of values in the output.
* **Range of values**: arbitrary floating point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *max*
@@ -33,7 +32,6 @@ clamp( x_{i} )=\min\big( \max\left( x_{i}, min\_value \right), max\_value \big)
* **Description**: *max* is the upper bound of values in the output.
* **Range of values**: arbitrary floating point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
**Inputs**:
diff --git a/docs/ops/activation/Elu_1.md b/docs/ops/activation/Elu_1.md
index ad09c00556e..7635e62ce54 100644
--- a/docs/ops/activation/Elu_1.md
+++ b/docs/ops/activation/Elu_1.md
@@ -29,7 +29,6 @@ where α corresponds to *alpha* attribute.
* **Description**: scale for the negative factor
* **Range of values**: non-negative arbitrary floating-point number
* **Type**: `float`
- * **Default value**: none
* **Required**: *yes*
**Inputs**:
diff --git a/docs/ops/convolution/BinaryConvolution_1.md b/docs/ops/convolution/BinaryConvolution_1.md
index 314e9aad029..3ef2034a2d5 100644
--- a/docs/ops/convolution/BinaryConvolution_1.md
+++ b/docs/ops/convolution/BinaryConvolution_1.md
@@ -6,13 +6,13 @@
**Short description**: Computes 2D convolution of binary input and binary kernel tensors.
-**Detailed description**: The operation behaves as regular *Convolution* but uses specialized algorithm for computations on binary data. More thorough explanation can be found in [Understanding Binary Neural Networks](https://sushscience.wordpress.com/2017/10/01/understanding-binary-neural-networks/) and [Bitwise Neural Networks](https://saige.sice.indiana.edu/wp-content/uploads/icml2015_mkim.pdf).
+**Detailed description**: The operation behaves as regular *Convolution* but uses specialized algorithm for computations on binary data. More thorough explanation can be found in [Understanding Binary Neural Networks](https://sushscience.wordpress.com/2017/10/01/understanding-binary-neural-networks/) and [Bitwise Neural Networks](https://saige.sice.indiana.edu/wp-content/uploads/icml2015_mkim.pdf).
-Computation algorithm for mode *xnor-popcount*:
+Computation algorithm for mode *xnor-popcount*:
- `X = XNOR(input_patch, filter)`, where XNOR is bitwise operation on two bit streams
-- `P = popcount(X)`, where popcount is the number of `1` bits in the `X` bit stream
-- `Output = 2 * P - B`, where `B` is the total number of bits in the `P` bit stream
+- `P = popcount(X)`, where popcount is the number of `1` bits in the `X` bit stream
+- `Output = 2 * P - B`, where `B` is the total number of bits in the `P` bit stream
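+
+For example, for an assumed 3-bit patch `input_patch = 1,0,1` and `filter = 1,1,1` (with `0` interpreted as `-1`): `X = XNOR(input_patch, filter) = 1,0,1`, `P = popcount(X) = 2`, `B = 3`, so `Output = 2 * 2 - 3 = 1`, which matches the regular dot product `1*1 + (-1)*1 + 1*1 = 1`.
+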
**Attributes**:
@@ -21,7 +21,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(y, x)` axes for 2D convolutions. For example, *strides* equal `2,1` means sliding the filter 2 pixel at a time over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -29,7 +28,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -38,7 +36,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -47,7 +44,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *mode*
@@ -56,7 +52,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Range of values**:
* *xnor-popcount*
* **Type**: `string`
- * **Default value**: None
* **Required**: *yes*
* **Note**: value `0` in inputs is interpreted as `-1`, value `1` as `1`
@@ -65,7 +60,6 @@ Computation algorithm for mode *xnor-popcount*:
* **Description**: *pad_value* is a floating-point value used to fill pad area.
* **Range of values**: a floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
@@ -79,7 +73,7 @@ Computation algorithm for mode *xnor-popcount*:
* **Default value**: explicit
* **Required**: *no*
* **Note**: *pads_begin* and *pads_end* attributes are ignored when *auto_pad* is specified.
-
+
**Inputs**:
* **1**: Input tensor of type *T1* and rank 4. Layout is `[N, C_IN, Y, X]` (number of batches, number of channels, spatial axes Y, X). Required.
@@ -89,7 +83,7 @@ Computation algorithm for mode *xnor-popcount*:
**Outputs**:
* **1**: Output tensor of type *T3* and rank 4. Layout is `[N, C_OUT, Y, X]` (number of batches, number of kernel output channels, spatial axes Y, X).
-
+
**Types**:
* *T1*: any numeric type with values `0` or `1`.
diff --git a/docs/ops/convolution/ConvolutionBackpropData_1.md b/docs/ops/convolution/ConvolutionBackpropData_1.md
index f29ce44e259..117ff744995 100644
--- a/docs/ops/convolution/ConvolutionBackpropData_1.md
+++ b/docs/ops/convolution/ConvolutionBackpropData_1.md
@@ -43,7 +43,6 @@ else:
* **Description**: *strides* has the same definition as *strides* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -51,7 +50,6 @@ else:
* **Description**: *pads_begin* has the same definition as *pads_begin* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted specified, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -60,21 +58,19 @@ else:
* **Description**: *pads_end* has the same definition as *pads_end* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
-
+
* *dilations*
* **Description**: *dilations* has the same definition as *dilations* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
- * **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor.
+ * **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor.
* *explicit*: use explicit padding values from `pads_begin` and `pads_end`.
* *same_upper* the input is padded to match the output size. In case of odd padding value an extra padding is added at the end.
* *same_lower* the input is padded to match the output size. In case of odd padding value an extra padding is added at the beginning.
diff --git a/docs/ops/convolution/Convolution_1.md b/docs/ops/convolution/Convolution_1.md
index 8e50b3ffada..64971670154 100644
--- a/docs/ops/convolution/Convolution_1.md
+++ b/docs/ops/convolution/Convolution_1.md
@@ -6,27 +6,27 @@
**Short description**: Computes 1D, 2D or 3D convolution (cross-correlation to be precise) of input and kernel tensors.
-**Detailed description**: Basic building block of convolution is a dot product of input patch and kernel. Whole operation consist of multiple such computations over multiple input patches and kernels. More thorough explanation can be found in [Convolutional Neural Networks](http://cs231n.github.io/convolutional-networks/#conv) and [Convolution operation](https://medium.com/apache-mxnet/convolutions-explained-with-ms-excel-465d6649831c).
+**Detailed description**: Basic building block of convolution is a dot product of input patch and kernel. Whole operation consist of multiple such computations over multiple input patches and kernels. More thorough explanation can be found in [Convolutional Neural Networks](http://cs231n.github.io/convolutional-networks/#conv) and [Convolution operation](https://medium.com/apache-mxnet/convolutions-explained-with-ms-excel-465d6649831c).
-For the convolutional layer, the number of output features in each dimension is calculated using the formula:
+For the convolutional layer, the number of output features in each dimension is calculated using the formula:
\f[
n_{out} = \left ( \frac{n_{in} + 2p - k}{s} \right ) + 1
-\f]
+\f]
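+
+For example, assuming an input size \f$n_{in} = 224\f$, kernel size \f$k = 7\f$, stride \f$s = 2\f$ and padding \f$p = 3\f$, the output size is \f$\left ( \frac{224 + 2 \cdot 3 - 7}{2} \right ) + 1 = 112\f$ (the fractional part of the division is discarded).
+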
-The receptive field in each layer is calculated using the formulas:
-* Jump in the output feature map:
+The receptive field in each layer is calculated using the formulas:
+* Jump in the output feature map:
\f[
j_{out} = j_{in} * s
\f]
-* Size of the receptive field of output feature:
+* Size of the receptive field of output feature:
\f[
r_{out} = r_{in} + ( k - 1 ) * j_{in}
\f]
-* Center position of the receptive field of the first output feature:
+* Center position of the receptive field of the first output feature:
\f[
start_{out} = start_{in} + ( \frac{k - 1}{2} - p ) * j_{in}
\f]
-* Output is calculated using the following formula:
+* Output is calculated using the following formula:
\f[
out = \sum_{i = 0}^{n}w_{i}x_{i} + b
\f]
@@ -38,7 +38,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(z, y, x)` axes for 3D convolutions and `(y, x)` axes for 2D convolutions. For example, *strides* equal `4,2,1` means sliding the filter 4 pixel at a time over depth dimension, 2 over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from 0
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -46,7 +45,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -55,7 +53,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -64,7 +61,6 @@ The receptive field in each layer is calculated using the formulas:
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from 0
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
@@ -182,4 +178,4 @@ The receptive field in each layer is calculated using the formulas:
-```
\ No newline at end of file
+```
diff --git a/docs/ops/convolution/DeformableConvolution_1.md b/docs/ops/convolution/DeformableConvolution_1.md
index 612d3c419d4..69dd28342d1 100644
--- a/docs/ops/convolution/DeformableConvolution_1.md
+++ b/docs/ops/convolution/DeformableConvolution_1.md
@@ -8,15 +8,15 @@
**Detailed description**: *Deformable Convolution* is similar to regular *Convolution* but its receptive field is deformed because of additional spatial offsets used during input sampling. More thorough explanation can be found in [Deformable Convolutions Demystified](https://towardsdatascience.com/deformable-convolutions-demystified-2a77498699e8) and [Deformable Convolutional Networks](https://arxiv.org/abs/1703.06211).
-Output is calculated using the following formula:
+Output is calculated using the following formula:
\f[
y(p) = \sum_{k = 1}^{K}w_{k}x(p + p_{k} + {\Delta}p_{k})
-
+
\f]
-Where
+Where
* K is a number of sampling locations, e.g. for kernel 3x3 and dilation = 1, K = 9
* \f$x(p)\f$ and \f$y(p)\f$ denote the features at location p from the input feature maps x and output feature maps y
@@ -35,7 +35,6 @@ Where
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(y,x)` axes. For example, *strides* equal `2,1` means sliding the filter 2 pixel at a time over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -43,7 +42,6 @@ Where
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -52,7 +50,6 @@ Where
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -61,7 +58,6 @@ Where
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
@@ -109,7 +105,7 @@ Where
**Types**:
* *T*: Any numeric type.
-
+
**Example**
2D DeformableConvolution (deformable_group=1)
diff --git a/docs/ops/convolution/DeformableConvolution_8.md b/docs/ops/convolution/DeformableConvolution_8.md
index a9ae67d790d..0474a71193d 100644
--- a/docs/ops/convolution/DeformableConvolution_8.md
+++ b/docs/ops/convolution/DeformableConvolution_8.md
@@ -10,14 +10,14 @@
Modification of DeformableConvolution using modulating scalars is also supported. Please refer to [Deformable ConvNets v2: More Deformable, Better Results](https://arxiv.org/pdf/1811.11168.pdf).
-Output is calculated using the following formula:
+Output is calculated using the following formula:
\f[
y(p) = \sum_{k = 1}^{K}w_{k}x(p + p_{k} + {\Delta}p_{k}) * {\Delta}m_{k}
-
+
\f]
-Where
+Where
* K is a number of sampling locations, e.g. for kernel 3x3 and dilation = 1, K = 9
* \f$x(p)\f$ and \f$y(p)\f$ denote the features at location p from the input feature maps x and output feature maps y
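The opset-8 variant only adds the modulation scalar \f${\Delta}m_{k}\f$ per sampling location. A tiny sketch of the accumulation with illustrative values, assuming the samples were already gathered as in the opset-1 sketch above:

```python
# y(p) = sum_k w_k * x(p + p_k + delta_p_k) * delta_m_k
weights = [0.1] * 9                       # w_k for a 3x3 kernel, K = 9
samples = [float(k) for k in range(9)]    # x(p + p_k + delta_p_k), already sampled
masks   = [0.5] * 9                       # modulation scalars delta_m_k
y = sum(w * s * m for w, s, m in zip(weights, samples, masks))
print(y)                                  # 1.8
```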
@@ -38,7 +38,6 @@ Where
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(y,x)` axes. For example, *strides* equal `2,1` means sliding the filter 2 pixel at a time over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -46,7 +45,6 @@ Where
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -55,7 +53,6 @@ Where
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -64,7 +61,6 @@ Where
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: integer value starting from `0`
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
@@ -103,7 +99,7 @@ Where
* **Type**: `boolean`
* **Default value**: `False`
* **Required**: *no*
-
+
**Inputs**:
* **1**: Input tensor of type *T* and rank 4. Layout is `NCYX` (number of batches, number of channels, spatial axes Y and X). **Required.**
@@ -122,7 +118,7 @@ Where
**Types**:
* *T*: Any numeric type.
-
+
**Example**
2D DeformableConvolution (deformable_group=1)
diff --git a/docs/ops/convolution/GroupConvolutionBackpropData_1.md b/docs/ops/convolution/GroupConvolutionBackpropData_1.md
index 2d76aa905ea..f56ff5a8fa5 100644
--- a/docs/ops/convolution/GroupConvolutionBackpropData_1.md
+++ b/docs/ops/convolution/GroupConvolutionBackpropData_1.md
@@ -8,14 +8,13 @@
**Detailed description**: Splits input and filters into multiple groups, computes *ConvolutionBackpropData* on them and concatenates the results. It is equivalent to GroupConvolution and Convolution relationship.
-**Attributes**: The operation has the same attributes as a *ConvolutionBackpropData*. Number of groups is derived from the kernel shape.
+**Attributes**: The operation has the same attributes as a *ConvolutionBackpropData*. Number of groups is derived from the kernel shape.
* *strides*
* **Description**: *strides* has the same definition as *strides* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -23,7 +22,6 @@
* **Description**: *pads_begin* has the same definition as *pads_begin* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -32,7 +30,6 @@
* **Description**: *pads_end* has the same definition as *pads_end* for a regular Convolution but applied in the backward way, for the output tensor. May be omitted, in which case pads are calculated automatically.
* **Range of values**: non-negative integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -41,12 +38,11 @@
* **Description**: *dilations* has the same definition as *dilations* for a regular Convolution but applied in the backward way, for the output tensor.
* **Range of values**: positive integers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
- * **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor.
+ * **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor.
* *explicit* - use explicit padding values from *pads_begin* and *pads_end*.
* *same_upper* - the input is padded to match the output size. In case of odd padding value an extra padding is added at the end.
* *same_lower* - the input is padded to match the output size. In case of odd padding value an extra padding is added at the beginning.
@@ -71,12 +67,12 @@
* **2**: Kernel tensor of type `T1` and rank 4, 5 or 6. Layout is `[GROUPS, C_IN, C_OUT, X, Y, Z]` (number of groups, number of input channels, number of output channels, spatial axes X, Y, Z). Required.
* **3**: Output shape tensor of type `T2` and rank 1. It specifies spatial shape of the output. Optional.
-* **Note** Number of groups is derived from the shape of the kernel and not specified by any attribute.
+* **Note** Number of groups is derived from the shape of the kernel and not specified by any attribute.
* **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute:
* 1D convolution (input tensors rank 3) means that there is only one spatial axis X
* 2D convolution (input tensors rank 4) means that there are two spatial axes Y, X
* 3D convolution (input tensors rank 5) means that there are three spatial axes Z, Y, X
-
+
**Outputs**:
* **1**: Output tensor of type `T1` and rank 3, 4 or 5 (the same as input *1*). Layout is `[N, GROUPS * C_OUT, Z, Y, X]` (number of batches, number of kernel output channels, spatial axes Z, Y, X).
diff --git a/docs/ops/convolution/GroupConvolution_1.md b/docs/ops/convolution/GroupConvolution_1.md
index 49d6d9c05a8..632bd6f136f 100644
--- a/docs/ops/convolution/GroupConvolution_1.md
+++ b/docs/ops/convolution/GroupConvolution_1.md
@@ -4,19 +4,18 @@
**Category**: Convolution
-**Short description**: Computes 1D, 2D or 3D GroupConvolution of input and kernel tensors.
+**Short description**: Computes 1D, 2D or 3D GroupConvolution of input and kernel tensors.
**Detailed description**: Splits input into multiple groups, convolves them with group filters as in regular convolution and concatenates the results. More thorough explanation can be found in [ImageNet Classification with Deep Convolutional
Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf)
-**Attributes**: The operation has the same attributes as a regular _Convolution_. Number of groups is derived from the kernel shape.
+**Attributes**: The operation has the same attributes as a regular _Convolution_. Number of groups is derived from the kernel shape.
* *strides*
* **Description**: *strides* is a distance (in pixels) to slide the filter on the feature map over the `(z, y, x)` axes for 3D convolutions and `(y, x)` axes for 2D convolutions. For example, *strides* equal `4,2,1` means sliding the filter 4 pixel at a time over depth dimension, 2 over height dimension and 1 over width dimension.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -24,25 +23,22 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal `1,2` means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
- * **Note**: the attribute is ignored when *auto_pad* attribute is specified.
+ * **Note**: the attribute is ignored when *auto_pad* attribute is specified.
* *pads_end*
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal `1,2` means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
- * **Note**: the attribute is ignored when *auto_pad* attribute is specified.
+ * **Note**: the attribute is ignored when *auto_pad* attribute is specified.
* *dilations*
* **Description**: *dilations* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal `1,1` means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal `2,2` means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.
* **Range of values**: positive integer numbers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
@@ -61,7 +57,7 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
* **1**: Input tensor of type *T* and rank 3, 4 or 5. Layout is `[N, GROUPS * C_IN, Z, Y, X]` (number of batches, number of channels, spatial axes Z, Y, X). Required.
* **2**: Convolution kernel tensor of type *T* and rank 4, 5 or 6. Layout is `[GROUPS, C_OUT, C_IN, Z, Y, X]` (number of groups, number of output channels, number of input channels, spatial axes Z, Y, X),
- * **Note** Number of groups is derived from the shape of the kernel and not specified by any attribute.
+ * **Note** Number of groups is derived from the shape of the kernel and not specified by any attribute.
* **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute:
* 1D convolution (input tensors rank 3) means that there is only one spatial axis X
* 2D convolution (input tensors rank 4) means that there are two spatial axes Y, X
@@ -75,7 +71,7 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
* *T*: any numeric type.
-**Example**:
+**Example**:
1D GroupConvolution
```xml
@@ -161,4 +157,4 @@ Neural Networks](https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76
224
-```
\ No newline at end of file
+```
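The group relationship described in these files can be sketched directly: split the input channels into GROUPS, convolve each slice with that group's kernels, and concatenate the results along the channel axis. This is a naive illustrative implementation, not the library code.

```python
import numpy as np

def conv2d(x, k):
    """Plain 'valid' cross-correlation; x: [C_IN, H, W], k: [C_OUT, C_IN, kh, kw]."""
    c_out, c_in, kh, kw = k.shape
    h_out, w_out = x.shape[1] - kh + 1, x.shape[2] - kw + 1
    y = np.zeros((c_out, h_out, w_out))
    for o in range(c_out):
        for i in range(h_out):
            for j in range(w_out):
                y[o, i, j] = np.sum(x[:, i:i + kh, j:j + kw] * k[o])
    return y

def group_conv2d(x, k):
    """x: [GROUPS * C_IN, H, W], k: [GROUPS, C_OUT, C_IN, kh, kw]."""
    groups = k.shape[0]
    slices = np.split(x, groups, axis=0)               # split input channels per group
    return np.concatenate([conv2d(slices[g], k[g]) for g in range(groups)], axis=0)

x = np.random.rand(4, 8, 8)            # 2 groups x 2 input channels each
k = np.random.rand(2, 3, 2, 3, 3)      # [GROUPS, C_OUT, C_IN, 3, 3]
print(group_conv2d(x, k).shape)        # (6, 6, 6): GROUPS * C_OUT output channels
```

The number of groups is read from the kernel shape, which mirrors the note that no attribute carries it.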
diff --git a/docs/ops/detection/DeformablePSROIPooling_1.md b/docs/ops/detection/DeformablePSROIPooling_1.md
index e9786a0b25a..0008f28eda6 100644
--- a/docs/ops/detection/DeformablePSROIPooling_1.md
+++ b/docs/ops/detection/DeformablePSROIPooling_1.md
@@ -22,15 +22,13 @@ This operation is compatible with [MXNet DeformablePSROIPooling](https://mxnet.a
* **Description**: *output_dim* is the number of the output channels, size of output `C` dimension.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *spatial_scale*
- * **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input original size to the pooling input. Ratio of the input score map size to the original image size.
+ * **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input original size to the pooling input. Ratio of the input score map size to the original image size.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *group_size*
@@ -74,7 +72,7 @@ This operation is compatible with [MXNet DeformablePSROIPooling](https://mxnet.a
* **Required**: *no*
* *part_size*
- * **Description**: *part_size* is the size of `H` and `W` dimensions of the third input (offsets). Basically it is the height and width of the third input
+ * **Description**: *part_size* is the size of `H` and `W` dimensions of the third input (offsets). Basically it is the height and width of the third input
with transformation values.
* **Range of values**: positive integer number
* **Type**: `int`
@@ -96,7 +94,7 @@ Batch indices must be in the range of `[0, N_in-1]`.
**Types**:
* *T*: Any floating point type.
-
+
**Example**
diff --git a/docs/ops/detection/DetectionOutput_1.md b/docs/ops/detection/DetectionOutput_1.md
index 363ef6ae4ea..7615ca830b0 100644
--- a/docs/ops/detection/DetectionOutput_1.md
+++ b/docs/ops/detection/DetectionOutput_1.md
@@ -17,7 +17,6 @@ At each feature map cell, *DetectionOutput* predicts the offsets relative to the
* **Description**: number of classes to be predicted
* **Range of values**: positive integer number
* **Type**: int
- * **Default value**: None
* **Required**: *yes*
* *background_label_id*
@@ -49,7 +48,6 @@ At each feature map cell, *DetectionOutput* predicts the offsets relative to the
* **Description**: maximum number of bounding boxes per batch to be kept after NMS step. -1 means keeping all bounding boxes after NMS step.
* **Range of values**: integer values
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *code_type*
@@ -73,7 +71,6 @@ At each feature map cell, *DetectionOutput* predicts the offsets relative to the
* **Description**: threshold to be used in the NMS stage
* **Range of values**: floating point values
* **Type**: float
- * **Default value**: None
* **Required**: *yes*
* *confidence_threshold*
diff --git a/docs/ops/detection/PSROIPooling_1.md b/docs/ops/detection/PSROIPooling_1.md
index 98841ccf4dc..487a5453059 100644
--- a/docs/ops/detection/PSROIPooling_1.md
+++ b/docs/ops/detection/PSROIPooling_1.md
@@ -8,8 +8,8 @@
**Detailed description**: [Reference](https://arxiv.org/pdf/1703.06211.pdf).
-*PSROIPooling* operation takes two input blobs: with feature maps and with regions of interests (box coordinates).
-The latter is specified as five element tuples: *[batch_id, x_1, y_1, x_2, y_2]*.
+*PSROIPooling* operation takes two input blobs: with feature maps and with regions of interests (box coordinates).
+The latter is specified as five element tuples: *[batch_id, x_1, y_1, x_2, y_2]*.
ROIs coordinates are specified in absolute values for the average mode and in normalized values (to *[0,1]* interval) for bilinear interpolation.
**Attributes**
@@ -19,7 +19,6 @@ ROIs coordinates are specified in absolute values for the average mode and in no
* **Description**: *output_dim* is a pooled output channel number.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *group_size*
@@ -35,7 +34,6 @@ ROIs coordinates are specified in absolute values for the average mode and in no
* **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *mode*
diff --git a/docs/ops/detection/PriorBoxClustered_1.md b/docs/ops/detection/PriorBoxClustered_1.md
index b2047c242ff..3ce48827581 100644
--- a/docs/ops/detection/PriorBoxClustered_1.md
+++ b/docs/ops/detection/PriorBoxClustered_1.md
@@ -28,7 +28,7 @@
* *step (step_w, step_h)*
- * **Description**: *step (step_w, step_h)* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85. If both *step_h* and *step_w* are 0 then they are updated with value of *step*. If after that they are still 0 then they are calculated as input image width(height) divided with first input width(height).
+ * **Description**: *step (step_w, step_h)* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85. If both *step_h* and *step_w* are 0 then they are updated with value of *step*. If after that they are still 0 then they are calculated as input image width(height) divided with first input width(height).
* **Range of values**: floating point positive number
* **Type**: float
* **Default value**: 0.0
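The two-stage fallback for *step_w* and *step_h* described above can be written out as a short sketch (all numbers are illustrative):

```python
# step_w/step_h fallback: take step if both are 0, otherwise derive them
# from the image size (second input) and the feature-map size (first input).
step, step_w, step_h = 64.0, 0.0, 0.0
image_w, image_h = 512.0, 512.0     # input image width/height
layer_w, layer_h = 16.0, 16.0       # first input (feature map) width/height

if step_w == 0 and step_h == 0:
    step_w = step_h = step
if step_w == 0 and step_h == 0:     # still zero, e.g. when step itself is 0
    step_w = image_w / layer_w
    step_h = image_h / layer_h
print(step_w, step_h)               # 64.0 64.0
```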
@@ -39,7 +39,6 @@
* **Description**: *offset* is a shift of box respectively to top left corner. For example, *offset* equal 85 means that the shift of neighborhood prior boxes centers is 85.
* **Range of values**: floating point positive number
* **Type**: float
- * **Default value**: None
* **Required**: *yes*
* *variance*
@@ -126,4 +125,4 @@ If *clip* is defined, the coordinates of prior boxes are recalculated with the f
-```
\ No newline at end of file
+```
diff --git a/docs/ops/detection/PriorBox_1.md b/docs/ops/detection/PriorBox_1.md
index 0cc07eb04dd..b278252dbc6 100644
--- a/docs/ops/detection/PriorBox_1.md
+++ b/docs/ops/detection/PriorBox_1.md
@@ -92,7 +92,6 @@
* **Description**: *offset* is a shift of box respectively to top left corner.
* **Range of values**: floating point non-negative number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *variance*
@@ -172,4 +171,4 @@
-```
\ No newline at end of file
+```
diff --git a/docs/ops/detection/Proposal_1.md b/docs/ops/detection/Proposal_1.md
index be197259123..d7dcece4e64 100644
--- a/docs/ops/detection/Proposal_1.md
+++ b/docs/ops/detection/Proposal_1.md
@@ -28,21 +28,18 @@
* **Description**: *base_size* is the size of the anchor to which *scale* and *ratio* attributes are applied.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *pre_nms_topn*
* **Description**: *pre_nms_topn* is the number of bounding boxes before the NMS operation. For example, *pre_nms_topn* equal to 15 means to take top 15 boxes with the highest scores.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *post_nms_topn*
* **Description**: *post_nms_topn* is the number of bounding boxes after the NMS operation. For example, *post_nms_topn* equal to 15 means to take after NMS top 15 boxes with the highest scores.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *nms_thresh*
@@ -50,7 +47,6 @@
* **Description**: *nms_thresh* is the minimum value of the proposal to be taken into consideration. For example, *nms_thresh* equal to 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *feat_stride*
@@ -58,7 +54,6 @@
* **Description**: *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal to 16 means that all boxes are analyzed with the slide 16.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *min_size*
@@ -66,7 +61,6 @@
* **Description**: *min_size* is the minimum size of box to be taken into consideration. For example, *min_size* equal 35 means that all boxes with box size less than 35 are filtered out.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *ratio*
@@ -74,7 +68,6 @@
* **Description**: *ratio* is the ratios for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
- * **Default value**: None
* **Required**: *yes*
* *scale*
@@ -82,7 +75,6 @@
* **Description**: *scale* is the scales for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
- * **Default value**: None
* **Required**: *yes*
* *clip_before_nms*
diff --git a/docs/ops/detection/Proposal_4.md b/docs/ops/detection/Proposal_4.md
index 7402cf2ff55..e3f4fa7fd5b 100644
--- a/docs/ops/detection/Proposal_4.md
+++ b/docs/ops/detection/Proposal_4.md
@@ -8,12 +8,12 @@
**Detailed description**
-*Proposal* has three inputs: a 4D tensor of shape `[num_batches, 2*K, H, W]` with probabilities whether particular
-bounding box corresponds to background or foreground, a 4D tensor of shape `[num_batches, 4*K, H, W]` with deltas for each
-of the bound box, and a tensor with input image size in the `[image_height, image_width, scale_height_and_width]` or
-`[image_height, image_width, scale_height, scale_width]` format. `K` is number of anchors and `H, W` are height and
-width of the feature map. Operation produces two tensors:
-the first mandatory tensor of shape `[batch_size * post_nms_topn, 5]` with proposed boxes and
+*Proposal* has three inputs: a 4D tensor of shape `[num_batches, 2*K, H, W]` with probabilities whether particular
+bounding box corresponds to background or foreground, a 4D tensor of shape `[num_batches, 4*K, H, W]` with deltas for each
+of the bound box, and a tensor with input image size in the `[image_height, image_width, scale_height_and_width]` or
+`[image_height, image_width, scale_height, scale_width]` format. `K` is number of anchors and `H, W` are height and
+width of the feature map. Operation produces two tensors:
+the first mandatory tensor of shape `[batch_size * post_nms_topn, 5]` with proposed boxes and
the second optional tensor of shape `[batch_size * post_nms_topn]` with probabilities (sometimes referred as scores).
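The tensor shapes involved can be summarized with a small bookkeeping sketch (all sizes are illustrative):

```python
# Shapes of the Proposal inputs and outputs described above.
num_batches, K, H, W = 1, 9, 38, 50          # K anchors per feature-map cell
post_nms_topn = 300

class_probs = (num_batches, 2 * K, H, W)     # background/foreground probabilities
bbox_deltas = (num_batches, 4 * K, H, W)     # per-anchor box deltas
image_info  = (3,)                           # [image_height, image_width, scale]
boxes       = (num_batches * post_nms_topn, 5)   # first (mandatory) output
scores      = (num_batches * post_nms_topn,)     # second (optional) output
print(class_probs, bbox_deltas, boxes, scores)
```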
*Proposal* layer does the following with the input tensor:
@@ -37,21 +37,18 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *base_size* is the size of the anchor to which *scale* and *ratio* attributes are applied.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *pre_nms_topn*
* **Description**: *pre_nms_topn* is the number of bounding boxes before the NMS operation. For example, *pre_nms_topn* equal to 15 means to take top 15 boxes with the highest scores.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *post_nms_topn*
* **Description**: *post_nms_topn* is the number of bounding boxes after the NMS operation. For example, *post_nms_topn* equal to 15 means to take after NMS top 15 boxes with the highest scores.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *nms_thresh*
@@ -59,7 +56,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *nms_thresh* is the minimum value of the proposal to be taken into consideration. For example, *nms_thresh* equal to 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *feat_stride*
@@ -67,7 +63,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal to 16 means that all boxes are analyzed with the slide 16.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *min_size*
@@ -75,7 +70,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *min_size* is the minimum size of box to be taken into consideration. For example, *min_size* equal 35 means that all boxes with box size less than 35 are filtered out.
* **Range of values**: a positive integer number
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *ratio*
@@ -83,7 +77,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *ratio* is the ratios for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
- * **Default value**: None
* **Required**: *yes*
* *scale*
@@ -91,7 +84,6 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil
* **Description**: *scale* is the scales for anchor generation.
* **Range of values**: a list of floating-point numbers
* **Type**: `float[]`
- * **Default value**: None
* **Required**: *yes*
* *clip_before_nms*
diff --git a/docs/ops/detection/ROIAlign_3.md b/docs/ops/detection/ROIAlign_3.md
index 533a84a021d..1bcec40a059 100644
--- a/docs/ops/detection/ROIAlign_3.md
+++ b/docs/ops/detection/ROIAlign_3.md
@@ -20,7 +20,6 @@
* **Description**: *pooled_h* is the height of the ROI output feature map.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *pooled_w*
@@ -28,7 +27,6 @@
* **Description**: *pooled_w* is the width of the ROI output feature map.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *sampling_ratio*
@@ -37,7 +35,6 @@
is equal to 0 then use adaptive number of elements over height and width: `ceil(roi_height / pooled_h)` and `ceil(roi_width / pooled_w)` respectively.
* **Range of values**: a non-negative integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *spatial_scale*
@@ -45,17 +42,15 @@
* **Description**: *spatial_scale* is a multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *mode*
* **Description**: *mode* specifies a method to perform pooling to produce output feature map elements.
- * **Range of values**:
+ * **Range of values**:
* *max* - maximum pooling
- * *avg* - average pooling
+ * *avg* - average pooling
* **Type**: string
- * **Default value**: None
* **Required**: *yes*
**Inputs**:
@@ -63,7 +58,7 @@
* **1**: 4D input tensor of shape `[N, C, H, W]` with feature maps of type *T*. Required.
* **2**: 2D input tensor of shape `[NUM_ROIS, 4]` describing box consisting of 4 element tuples: `[x_1, y_1, x_2, y_2]` in relative coordinates of type *T*.
-The box height and width are calculated the following way: `roi_width = max(spatial_scale * (x_2 - x_1), 1.0)`,
+The box height and width are calculated the following way: `roi_width = max(spatial_scale * (x_2 - x_1), 1.0)`,
`roi_height = max(spatial_scale * (y_2 - y_1), 1.0)`, so the malformed boxes are expressed as a box of size `1 x 1`. Required.
* **3**: 1D input tensor of shape `[NUM_ROIS]` with batch indices of type *IND_T*. Required.
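A short sketch of the box-size rule quoted above, together with the adaptive sampling grid used when *sampling_ratio* is `0` (coordinates and *spatial_scale* are illustrative):

```python
import math

spatial_scale, pooled_h, pooled_w = 0.0625, 2, 2
x_1, y_1, x_2, y_2 = 8.0, 16.0, 72.0, 80.0           # one ROI from the second input

roi_width  = max(spatial_scale * (x_2 - x_1), 1.0)   # malformed boxes clamp to 1 x 1
roi_height = max(spatial_scale * (y_2 - y_1), 1.0)
samples_h  = math.ceil(roi_height / pooled_h)        # adaptive samples per bin
samples_w  = math.ceil(roi_width / pooled_w)
print(roi_width, roi_height, samples_h, samples_w)   # 4.0 4.0 2 2
```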
@@ -106,7 +101,6 @@ The box height and width are calculated the following way: `roi_width = max(spat
66
-
+
```
-
diff --git a/docs/ops/detection/ROIPooling_1.md b/docs/ops/detection/ROIPooling_1.md
index 7699b133f3c..7114aee0d6c 100644
--- a/docs/ops/detection/ROIPooling_1.md
+++ b/docs/ops/detection/ROIPooling_1.md
@@ -26,7 +26,6 @@ The box height and width have different representation based on **method** attri
* **Description**: *pooled_h* is the height of the ROI output feature map. For example, *pooled_h* equal to 6 means that the height of the output of *ROIPooling* is 6.
* **Range of values**: a non-negative integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *pooled_w*
@@ -34,7 +33,6 @@ The box height and width have different representation based on **method** attri
* **Description**: *pooled_w* is the width of the ROI output feature map. For example, *pooled_w* equal to 6 means that the width of the output of *ROIPooling* is 6.
* **Range of values**: a non-negative integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *spatial_scale*
@@ -42,7 +40,6 @@ The box height and width have different representation based on **method** attri
* **Description**: *spatial_scale* is the ratio of the input feature map over the input image size.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *method*
diff --git a/docs/ops/detection/RegionYolo_1.md b/docs/ops/detection/RegionYolo_1.md
index 68826178b47..776d4b8af1b 100644
--- a/docs/ops/detection/RegionYolo_1.md
+++ b/docs/ops/detection/RegionYolo_1.md
@@ -23,7 +23,6 @@
* **Description**: starting axis index in the input tensor `data` shape that will be flattened in the output; the end of flattened range is defined by `end_axis` attribute.
* **Range of values**: `-rank(data) .. rank(data)-1`
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *coords*
@@ -31,7 +30,6 @@
* **Description**: *coords* is the number of coordinates for each region.
* **Range of values**: an integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *classes*
@@ -39,7 +37,6 @@
* **Description**: *classes* is the number of classes for each region.
* **Range of values**: an integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *end_axis*
@@ -47,7 +44,6 @@
* **Description**: ending axis index in the input tensor `data` shape that will be flattened in the output; the beginning of the flattened range is defined by `axis` attribute.
* **Range of values**: `-rank(data)..rank(data)-1`
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *num*
@@ -55,7 +51,6 @@
* **Description**: *num* is the number of regions.
* **Range of values**: an integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *do_softmax*
diff --git a/docs/ops/detection/ReorgYolo_1.md b/docs/ops/detection/ReorgYolo_1.md
index 156657a98ba..0c20ed9ee3b 100644
--- a/docs/ops/detection/ReorgYolo_1.md
+++ b/docs/ops/detection/ReorgYolo_1.md
@@ -17,7 +17,6 @@
* **Description**: *stride* is the distance between cut throws in output blobs.
* **Range of values**: positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
**Inputs**:
diff --git a/docs/ops/generation/Range_4.md b/docs/ops/generation/Range_4.md
index 037784b8576..df4ebb2a9e5 100644
--- a/docs/ops/generation/Range_4.md
+++ b/docs/ops/generation/Range_4.md
@@ -13,8 +13,7 @@
* **Description**: the output tensor type
* **Range of values**: any numeric type
* **Type**: string
- * **Default value**: None
- * **Required**: *Yes*
+ * **Required**: *yes*
**Inputs**:
diff --git a/docs/ops/image/Interpolate_1.md b/docs/ops/image/Interpolate_1.md
index b3a45838394..a4c5e77a43f 100644
--- a/docs/ops/image/Interpolate_1.md
+++ b/docs/ops/image/Interpolate_1.md
@@ -13,7 +13,6 @@
* **Description**: `axes` specify spatial dimension indices where interpolation is applied. Other dimensions are treated as batch dimensions. The order of elements in `axes` attribute matters and mapped directly to elements with the same indices in the 2nd input `target_spatial_shape`.
* **Range of values**: list of non-negative integer numbers
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
* *mode*
@@ -21,7 +20,6 @@
* **Description**: specifies type of interpolation
* **Range of values**: one of `nearest`, `linear`, `cubic`, `area`
* **Type**: string
- * **Default value**: none
* **Required**: *yes*
* *align_corners*
@@ -95,4 +93,4 @@ This is a scalar that specifies padding for each spatial dimension.
-```
\ No newline at end of file
+```
diff --git a/docs/ops/image/Interpolate_4.md b/docs/ops/image/Interpolate_4.md
index b0c9c846f53..375691e1d87 100644
--- a/docs/ops/image/Interpolate_4.md
+++ b/docs/ops/image/Interpolate_4.md
@@ -13,7 +13,6 @@
* **Description**: specifies type of interpolation
* **Range of values**: one of `nearest`, `linear`, `linear_onnx`, `cubic`
* **Type**: string
- * **Default value**: none
* **Required**: *yes*
**Note**: Only 2D, 3D, 4D, 5D tensors with `axes = {0, 1}`, `axes = {0, 1, 2}`, `axes = {2, 3}`, `axes = {2, 3, 4}` respectively are supported for `"mode" == "linear_onnx"`.
@@ -24,7 +23,6 @@
* `sizes` - an output shape is calculated as `output_shape[axes[i]] = sizes[i]` for all `i in range(0, len(axes))` and `output_shape[j] = input_shape[j] + pads_begin[j] + pads_end[j]` for `j not in axes`, `j in range(0, rank(data))`.
* `scales` - an output shape is calculated as `output_shape[axes[i]] = floor(scales[i] * (input_shape[axes[i]] + pads_begin[axes[i]] + pads_end[axes[i]]))` for all `i in range(0, len(axes))` and `output_shape[j] = input_shape[j] + pads_begin[j] + pads_end[j]` for `j not in axes`, `j in range(0, rank(data))`
* **Type**: string
- * **Default value**: none
* **Required**: *yes*
* *coordinate_transformation_mode*
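The two *shape_calculation_mode* rules quoted above are easy to check with a small sketch (shapes, axes, and padding values are illustrative):

```python
import math

input_shape = [1, 3, 30, 40]                  # NCHW
pads_begin, pads_end = [0, 0, 0, 0], [0, 0, 0, 0]
axes = [2, 3]

# "sizes": output spatial dimensions come directly from the sizes input.
sizes = [60, 80]
out_sizes = [d + b + e for d, b, e in zip(input_shape, pads_begin, pads_end)]
for i, a in enumerate(axes):
    out_sizes[a] = sizes[i]

# "scales": output_shape[axes[i]] = floor(scales[i] * padded input dimension).
scales = [2.0, 2.0]
out_scales = [d + b + e for d, b, e in zip(input_shape, pads_begin, pads_end)]
for i, a in enumerate(axes):
    out_scales[a] = math.floor(scales[i] * (input_shape[a] + pads_begin[a] + pads_end[a]))

print(out_sizes, out_scales)                  # [1, 3, 60, 80] [1, 3, 60, 80]
```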
diff --git a/docs/ops/infrastructure/Assign_3.md b/docs/ops/infrastructure/Assign_3.md
index 783492f3e82..5d74add67b3 100644
--- a/docs/ops/infrastructure/Assign_3.md
+++ b/docs/ops/infrastructure/Assign_3.md
@@ -9,18 +9,17 @@
**Detailed description**:
*Assign* operation sets an input value to the `variable_id` variable. This value will be returned by *ReadValue* operation on next infer if variable was not reset.
-The operation checks that the type and shape of the input are the same as
+The operation checks that the type and shape of the input are the same as
declared in `variable_id` and returns an error otherwise.
-**Attributes**:
+**Attributes**:
* *variable_id*
* **Description**: identificator of the variable to be updated
* **Range of values**: any non-empty string
* **Type**: string
- * **Default value**: None
- * **Required**: *Yes*
+ * **Required**: *yes*
**Inputs**
@@ -44,4 +43,4 @@ declared in `variable_id` and returns an error otherwise.
-```
\ No newline at end of file
+```
diff --git a/docs/ops/infrastructure/Constant_1.md b/docs/ops/infrastructure/Constant_1.md
index ea3cbc87c2d..b9211590e72 100644
--- a/docs/ops/infrastructure/Constant_1.md
+++ b/docs/ops/infrastructure/Constant_1.md
@@ -13,7 +13,6 @@
* **Description**: specifies position in binary file with weights where the content of the constant begins; value in bytes
* **Range of values**: non-negative integer value
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *size*
@@ -21,7 +20,6 @@
* **Description**: size of constant content in binary files; value in bytes
* **Range of values**: positive integer bigger than zero
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *element_type*
@@ -29,16 +27,14 @@
* **Description**: the type of element of output tensor
* **Range of values**: u1, u8, u16, u32, u64, i8, i16, i32, i64, f16, f32, boolean, bf16
* **Type**: `string`
- * **Default value**: None
- * **Required**: *Yes*
+ * **Required**: *yes*
* *shape*
* **Description**: the shape of the output tensor
* **Range of values**: list of non-negative integers, empty list is allowed, which means 0D or scalar tensor
* **Type**: `int[]`
- * **Default value**: None
- * **Required**: *Yes*
+ * **Required**: *yes*
**Outputs**
diff --git a/docs/ops/infrastructure/Parameter_1.md b/docs/ops/infrastructure/Parameter_1.md
index 879880002e6..373e8ed2b15 100644
--- a/docs/ops/infrastructure/Parameter_1.md
+++ b/docs/ops/infrastructure/Parameter_1.md
@@ -13,7 +13,6 @@
* **Description**: the type of element of output tensor
* **Range of values**: u1, u4, u8, u16, u32, u64, i4, i8, i16, i32, i64, f16, f32, boolean, bf16
* **Type**: `string`
- * **Default value**: None
* **Required**: *yes*
* *shape*
@@ -21,7 +20,6 @@
* **Description**: the shape of the output tensor
* **Range of values**: list of non-negative integers, empty list is allowed, which means 0D or scalar tensor
* **Type**: `int[]`
- * **Default value**: None
* **Required**: *yes*
diff --git a/docs/ops/infrastructure/ReadValue_3.md b/docs/ops/infrastructure/ReadValue_3.md
index d62113f0d7b..c60ac71dd84 100644
--- a/docs/ops/infrastructure/ReadValue_3.md
+++ b/docs/ops/infrastructure/ReadValue_3.md
@@ -6,9 +6,9 @@
**Short description**: *ReadValue* returns value of the `variable_id` variable.
-**Detailed description**:
+**Detailed description**:
-*ReadValue* returns value from the corresponding `variable_id` variable if the variable was set already by *Assign* operation and was not reset.
+*ReadValue* returns value from the corresponding `variable_id` variable if the variable was set already by *Assign* operation and was not reset.
The operation checks that the type and shape of the output are the same as
declared in `variable_id` and returns an error otherwise. If the corresponding variable was not set or was reset,
the operation returns the value from the 1 input, and initializes the `variable_id` shape and type
@@ -21,8 +21,7 @@ with the shape and type from the 1 input.
* **Description**: identificator of the variable to be read
* **Range of values**: any non-empty string
* **Type**: string
- * **Default value**: None
- * **Required**: *Yes*
+ * **Required**: *yes*
**Inputs**
@@ -54,4 +53,4 @@ with the shape and type from the 1 input.
-```
\ No newline at end of file
+```
diff --git a/docs/ops/movement/Concat_1.md b/docs/ops/movement/Concat_1.md
index 6a8bdb6ab4c..36f2ba620c7 100644
--- a/docs/ops/movement/Concat_1.md
+++ b/docs/ops/movement/Concat_1.md
@@ -13,7 +13,6 @@
* **Description**: *axis* specifies dimension to concatenate along
* **Range of values**: integer number. Negative value means counting dimension from the end. The range is `[-R, R-1]`, where `R` is the rank of all inputs.
* **Type**: int
- * **Default value**: None
* **Required**: *yes*
**Inputs**:
diff --git a/docs/ops/movement/DepthToSpace_1.md b/docs/ops/movement/DepthToSpace_1.md
index 3fcb223c47f..8aa6b54fd19 100644
--- a/docs/ops/movement/DepthToSpace_1.md
+++ b/docs/ops/movement/DepthToSpace_1.md
@@ -43,7 +43,6 @@ If `mode = depth_first`:
* *blocks_first*: the input depth is divided to `[block_size, ..., block_size, new_depth]`
* *depth_first*: the input depth is divided to `[new_depth, block_size, ..., block_size]`
* **Type**: `string`
- * **Default value**: None
* **Required**: *yes*
**Inputs**
diff --git a/docs/ops/movement/ExtractImagePatches_3.md b/docs/ops/movement/ExtractImagePatches_3.md
index 5046854ee22..fc1d724f701 100644
--- a/docs/ops/movement/ExtractImagePatches_3.md
+++ b/docs/ops/movement/ExtractImagePatches_3.md
@@ -22,7 +22,6 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
* **Description**: *sizes* is a size `[size_rows, size_cols]` of the extracted patches.
* **Range of values**: non-negative integer number
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *strides*
@@ -30,15 +29,13 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
* **Description**: *strides* is a distance `[stride_rows, stride_cols]` between centers of two consecutive patches in an input tensor.
* **Range of values**: non-negative integer number
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *rates*
- * **Description**: *rates* is the input stride `[rate_rows, rate_cols]`, specifying how far two consecutive patch samples are in the input. Equivalent to extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling them spatially by a factor of rates. This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
+ * **Description**: *rates* is the input stride `[rate_rows, rate_cols]`, specifying how far two consecutive patch samples are in the input. Equivalent to extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling them spatially by a factor of rates. This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
* **Range of values**: non-negative integer number
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *auto_pad*
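A one-line check of the effective patch size under *rates*, using the expression from the description above:

```python
patch_size, rate = 3, 2
patch_size_eff = patch_size + (patch_size - 1) * (rate - 1)   # dilated patch extent
print(patch_size_eff)   # 5
```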
@@ -47,7 +44,6 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
* *same_upper (same_lower)* the input is padded by zeros to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning).
* *valid* - do not use padding.
* **Type**: string
- * **Default value**: None
* **Required**: *yes*
**Inputs**
@@ -77,7 +73,7 @@ The "auto_pad" attribute has no effect on the size of each patch, it determines
-```
\ No newline at end of file
+```
diff --git a/docs/ops/movement/SpaceToDepth_1.md b/docs/ops/movement/SpaceToDepth_1.md
index 42ade5d6528..6733faefbd8 100644
--- a/docs/ops/movement/SpaceToDepth_1.md
+++ b/docs/ops/movement/SpaceToDepth_1.md
@@ -44,7 +44,6 @@ If `mode = depth_first`:
* *blocks_first*: the output depth is gathered from `[block_size, ..., block_size, C]`
* *depth_first*: the output depth is gathered from `[C, block_size, ..., block_size]`
* **Type**: `string`
- * **Default value**: None
* **Required**: *yes*
**Inputs**
diff --git a/docs/ops/movement/Split_1.md b/docs/ops/movement/Split_1.md
index 6664393d7e0..7dd0ed6f102 100644
--- a/docs/ops/movement/Split_1.md
+++ b/docs/ops/movement/Split_1.md
@@ -24,7 +24,6 @@ Where D is the rank of input tensor `data`. The axis being split must be evenly
* **Description**: number of outputs into which the input tensor `data` will be split along `axis` dimension. The dimension of `data` shape along `axis` must be evenly divisible by *num_splits*
* **Range of values**: an integer within the range `[1, data.shape[axis]]`
* **Type**: `int`
- * **Default value**: none
* **Required**: *yes*
**Inputs**
diff --git a/docs/ops/normalization/BatchNormInference_1.md b/docs/ops/normalization/BatchNormInference_1.md
index 694a9989e9f..a0a9cac3fa1 100644
--- a/docs/ops/normalization/BatchNormInference_1.md
+++ b/docs/ops/normalization/BatchNormInference_1.md
@@ -60,7 +60,6 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values
* **Description**: *epsilon* is a constant added to the variance for numerical stability.
* **Range of values**: a floating-point number greater than or equal to zero
* **Type**: `float`
- * **Default value**: none
* **Required**: *yes*
**Inputs**
diff --git a/docs/ops/normalization/BatchNormInference_5.md b/docs/ops/normalization/BatchNormInference_5.md
index f5019d08b2d..61a9c252603 100644
--- a/docs/ops/normalization/BatchNormInference_5.md
+++ b/docs/ops/normalization/BatchNormInference_5.md
@@ -60,7 +60,6 @@ For a particular activation, consider a mini-batch \f$\mathcal{B}\f$ of m values
* **Description**: *epsilon* is a constant added to the variance for numerical stability.
* **Range of values**: a floating-point number greater than or equal to zero
* **Type**: `float`
- * **Default value**: none
* **Required**: *yes*
**Inputs**
diff --git a/docs/ops/normalization/GRN_1.md b/docs/ops/normalization/GRN_1.md
index 656a46e3cc1..7a068e8e3c9 100644
--- a/docs/ops/normalization/GRN_1.md
+++ b/docs/ops/normalization/GRN_1.md
@@ -19,7 +19,6 @@
* **Description**: *bias* is added to the variance.
* **Range of values**: a non-negative floating point value
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
**Inputs**
@@ -52,4 +51,4 @@
-```
\ No newline at end of file
+```
diff --git a/docs/ops/normalization/LRN_1.md b/docs/ops/normalization/LRN_1.md
index 2e7f049dbfe..0cc8d07d618 100644
--- a/docs/ops/normalization/LRN_1.md
+++ b/docs/ops/normalization/LRN_1.md
@@ -34,7 +34,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *alpha* represents the scaling attribute for the normalizing sum. For example, *alpha* equal `0.0001` means that the normalizing sum is multiplied by `0.0001`.
* **Range of values**: no restrictions
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *beta*
@@ -42,7 +41,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *beta* represents the exponent for the normalizing sum. For example, *beta* equal `0.75` means that the normalizing sum is raised to the power of `0.75`.
* **Range of values**: positive number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *bias*
@@ -50,7 +48,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *bias* represents the offset. Usually positive number to avoid dividing by zero.
* **Range of values**: no restrictions
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *size*
@@ -58,7 +55,6 @@ output = data / (bias + (alpha / size ** len(axes)) * sqr_sum) ** beta
* **Description**: *size* represents the side length of the region to be used for the normalization sum. The region can have one or more dimensions depending on the second input axes indices.
* **Range of values**: positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
**Inputs**
diff --git a/docs/ops/normalization/MVN_1.md b/docs/ops/normalization/MVN_1.md
index 1c55d626679..4f0c5dec07c 100644
--- a/docs/ops/normalization/MVN_1.md
+++ b/docs/ops/normalization/MVN_1.md
@@ -44,7 +44,6 @@ o_{i}=\frac{o_{i}}{\sum \sqrt {o_{k}^2}+\epsilon}
* **Description**: *eps* is the number to be added to the variance to avoid division by zero when normalizing the value. For example, *epsilon* equal to 0.001 means that 0.001 is added to the variance.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
**Inputs**
@@ -77,4 +76,4 @@ o_{i}=\frac{o_{i}}{\sum \sqrt {o_{k}^2}+\epsilon}
-```
\ No newline at end of file
+```
diff --git a/docs/ops/normalization/MVN_6.md b/docs/ops/normalization/MVN_6.md
index cb6ebeaf4d6..3d9f04f19e9 100644
--- a/docs/ops/normalization/MVN_6.md
+++ b/docs/ops/normalization/MVN_6.md
@@ -33,7 +33,6 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* `false` -- Do not normalize variance
* `true` -- Normalize variance
* **Type**: `boolean`
- * **Default value**: None
* **Required**: *yes*
* *eps*
@@ -41,7 +40,6 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* **Description**: *eps* is the number to be added to the variance to avoid division by zero when normalizing the value.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *eps_mode*
@@ -51,7 +49,6 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* `inside_sqrt` -- Add epsilon inside sqrt
* `outside_sqrt` -- Add epsilon outside of sqrt
* **Type**: `string`
- * **Default value**: None
* **Required**: *yes*
**Inputs**
@@ -95,4 +92,4 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
-```
\ No newline at end of file
+```
diff --git a/docs/ops/normalization/NormalizeL2_1.md b/docs/ops/normalization/NormalizeL2_1.md
index 56fd13092ad..4668519030f 100644
--- a/docs/ops/normalization/NormalizeL2_1.md
+++ b/docs/ops/normalization/NormalizeL2_1.md
@@ -4,47 +4,57 @@
**Category**: *Normalization*
-**Short description**: *NormalizeL2* operation performs L2 normalization of the 1st input tensor in slices specified by the 2nd input.
+**Short description**: *NormalizeL2* operation performs L2 normalization on a given input `data` along the dimensions specified by the `axes` input.
+
+**Detailed Description**
+
+Each element in the output is the result of dividing the corresponding element of `data` input by the result of L2 reduction along dimensions specified by the `axes` input:
+
+ output[i0, i1, ..., iN] = x[i0, i1, ..., iN] / sqrt(eps_mode(sum[j0,..., jN](x[j0, ..., jN]**2), eps))
+
+Where indices `i0, ..., iN` run through all valid indices for the `data` input and summation `sum[j0, ..., jN]` has `jk = ik` for those dimensions `k` that are not in the set of indices specified by the `axes` input of the operation.
+`eps_mode` selects how the reduction value and `eps` are combined. It can be `max` or `add` depending on `eps_mode` attribute value.
+
+Particular cases:
+
+1. If `axes` is an empty list, then each input element is divided by itself, resulting in a value of `1` for all non-zero elements.
+2. If `axes` contains all dimensions of input `data`, a single L2 reduction value is calculated for the entire input tensor and each input element is divided by that value.
+
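A minimal numpy sketch of the formula above, assuming `eps_mode` is `add` (the helper below is hypothetical, not the reference implementation):

```python
import numpy as np

def normalize_l2(data, axes, eps=1e-12, eps_mode="add"):
    sq_sum = np.sum(data ** 2, axis=tuple(axes), keepdims=True)
    denom = sq_sum + eps if eps_mode == "add" else np.maximum(sq_sum, eps)
    return data / np.sqrt(denom)

x = np.random.rand(1, 3, 4, 4).astype(np.float32)
y = normalize_l2(x, axes=[1])                    # normalize over the channel axis (NCHW)
print(np.allclose(np.sum(y ** 2, axis=1), 1.0, atol=1e-5))   # True
```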
**Attributes**
* *eps*
- * **Description**: *eps* is the number to be added/maximized to/with the variance to avoid division by zero when normalizing the value. For example, *eps* equal to 0.001 means that 0.001 is used if all the values in normalization are equal to zero.
+   * **Description**: *eps* is the number applied by the *eps_mode* function to the sum of squares to avoid division by zero when normalizing the value.
* **Range of values**: a positive floating-point number
* **Type**: `float`
- * **Default value**: None
* **Required**: *yes*
* *eps_mode*
- * **Description**: Specifies how *eps* is combined with L2 value calculated before division.
- * **Range of values**: `add`, `max`
+ * **Description**: Specifies how *eps* is combined with the sum of squares to avoid division by zero.
+ * **Range of values**: `add` or `max`
* **Type**: `string`
- * **Default value**: None
* **Required**: *yes*
**Inputs**
-* **1**: `data` - input tensor to be normalized. Type of elements is any floating point type. Required.
+* **1**: `data` - A tensor of type *T* and arbitrary shape. **Required.**
-* **2**: `axes` - scalar or 1D tensor with axis indices for the `data` input along which L2 reduction is calculated. Required.
+* **2**: `axes` - Axis indices of `data` input tensor, along which L2 reduction is calculated. A scalar or 1D tensor of unique elements and type *T_IND*. The range of elements is `[-r, r-1]`, where `r` is the rank of `data` input tensor. **Required.**
**Outputs**
-* **1**: Tensor of the same shape and type as the `data` input and normalized slices defined by `axes` input.
+* **1**: The result of the *NormalizeL2* function applied to the `data` input tensor. Normalized tensor of the same type and shape as the `data` input.
-**Detailed Description**
+**Types**
-Each element in the output is the result of division of corresponding element from the `data` input tensor by the result of L2 reduction along dimensions specified by the `axes` input:
+* *T*: arbitrary supported floating-point type.
+* *T_IND*: any supported integer type.
- output[i0, i1, ..., iN] = x[i0, i1, ..., iN] / sqrt(eps_mode(sum[j0,..., jN](x[j0, ..., jN]**2), eps))
+**Examples**
-Where indices `i0, ..., iN` run through all valid indices for the 1st input and summation `sum[j0, ..., jN]` have `jk = ik` for those dimensions `k` that are not in the set of indices specified by the `axes` input of the operation. One of the corner cases is when `axes` is an empty list, then we divide each input element by itself resulting value 1 for all non-zero elements. Another corner case is where `axes` input contains all dimensions from `data` tensor, which means that a single L2 reduction value is calculated for entire input tensor and each input element is divided by that value.
-
-`eps_mode` selects how the reduction value and `eps` are combined. It can be `max` or `add` depending on `eps_mode` attribute value.
-
-**Example**
+*Example: Normalization over channel dimension for `NCHW` layout*
```xml
@@ -57,7 +67,7 @@ Where indices `i0, ..., iN` run through all valid indices for the 1st input and
24
- 2
+ 1
-```
\ No newline at end of file
+```
+
+*Example: Normalization over channel and spatial dimensions for `NCHW` layout*
+
+```xml
+
+
+
+
+ 6
+ 12
+ 10
+ 24
+
+
+ 3
+
+
+
+
+```
diff --git a/docs/ops/pooling/AvgPool_1.md b/docs/ops/pooling/AvgPool_1.md
index 78792d77d11..c698f38afd1 100644
--- a/docs/ops/pooling/AvgPool_1.md
+++ b/docs/ops/pooling/AvgPool_1.md
@@ -6,11 +6,11 @@
**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/pooling.html)
-**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#pool). Average Pool is a pooling operation that performs down-sampling by dividing the input into pooling regions of size specified by kernel attribute and computing the average values of each region. Output shape is calculated as follows:
- `H_out = (H + pads_begin[0] + pads_end[0] - kernel[0] / strides[0]) + 1`
- `W_out = (H + pads_begin[1] + pads_end[1] - kernel[1] / strides[1]) + 1`
- `D_out = (H + pads_begin[2] + pads_end[2] - kernel[2] / strides[2]) + 1`
-
+**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#pool). Average Pool is a pooling operation that performs down-sampling by dividing the input into pooling regions of size specified by kernel attribute and computing the average values of each region. Output shape is calculated as follows:
+ `H_out = (H + pads_begin[0] + pads_end[0] - kernel[0] / strides[0]) + 1`
+ `W_out = (H + pads_begin[1] + pads_end[1] - kernel[1] / strides[1]) + 1`
+ `D_out = (H + pads_begin[2] + pads_end[2] - kernel[2] / strides[2]) + 1`
+
**Attributes**: *Pooling* attributes are specified in the `data` node, which is a child of the layer node.
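The output-shape lines above appear to intend the usual pooling rule, where the padded extent minus the kernel is divided by the stride before adding one; a small sketch of that rule (floor rounding assumed, since *rounding_type* may change it):

```python
import math

def pooled_dim(size, pad_begin, pad_end, kernel, stride):
    # output = (size + pad_begin + pad_end - kernel) / stride + 1, rounded down
    return math.floor((size + pad_begin + pad_end - kernel) / stride) + 1

# A 13x13 input with a 3x3 kernel, stride 2 and no padding gives a 6x6 output.
print(pooled_dim(13, 0, 0, 3, 2), pooled_dim(13, 0, 0, 3, 2))   # 6 6
```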
@@ -19,7 +19,6 @@
* **Description**: *strides* is a distance (in pixels) to slide the window on the feature map over the (z, y, x) axes for 3D poolings and (y, x) axes for 2D poolings. For example, *strides* equal "4,2,1" means sliding the window 4 pixel at a time over depth dimension, 2 over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -27,7 +26,6 @@
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal "1,2" means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -36,7 +34,6 @@
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal "1,2" means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -45,15 +42,13 @@
* **Description**: *kernel* is a size of each filter. For example, *kernel* equal (2, 3) means that each filter has height equal to 2 and width equal to 3.
* **Range of values**: integer values starting from 1
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
-
+
* *exclude-pad*
* **Description**: *exclude-pad* is a type of pooling strategy for values in the padding area. For example, if *exclude-pad* is "true", then zero-values that came from padding are not included in averaging calculation.
- * **Range of values**: true or false
+ * **Range of values**: true or false
* **Type**: boolean
- * **Default value**: None
* **Required**: *yes*
* *rounding_type*
@@ -95,7 +90,7 @@ output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
```xml
-
+
13
@@ -115,7 +110,7 @@ output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
-
+
13
@@ -135,7 +130,7 @@ output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
-
+
13
@@ -155,7 +150,7 @@ output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
-
+
13
@@ -175,7 +170,7 @@ output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
-
+
13
@@ -192,4 +187,4 @@ output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n}
-```
\ No newline at end of file
+```
diff --git a/docs/ops/pooling/MaxPool_1.md b/docs/ops/pooling/MaxPool_1.md
index 8ebc653c18d..8e774821310 100644
--- a/docs/ops/pooling/MaxPool_1.md
+++ b/docs/ops/pooling/MaxPool_1.md
@@ -15,7 +15,6 @@
* **Description**: *strides* is a distance (in pixels) to slide the window on the feature map over the (z, y, x) axes for 3D poolings and (y, x) axes for 2D poolings. For example, *strides* equal "4,2,1" means sliding the window 4 pixel at a time over depth dimension, 2 over height dimension and 1 over width dimension.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *pads_begin*
@@ -23,7 +22,6 @@
* **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal "1,2" means adding 1 pixel to the top of the input and 2 to the left of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -32,7 +30,6 @@
* **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal "1,2" means adding 1 pixel to the bottom of the input and 2 to the right of the input.
* **Range of values**: integer values starting from 0
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* **Note**: the attribute is ignored when *auto_pad* attribute is specified.
@@ -41,7 +38,6 @@
* **Description**: *kernel* is a size of each filter. For example, *kernel* equal (2, 3) means that each filter has height equal to 2 and width equal to 3.
* **Range of values**: integer values starting from 1
* **Type**: int[]
- * **Default value**: None
* **Required**: *yes*
* *rounding_type*
diff --git a/docs/ops/quantization/FakeQuantize_1.md b/docs/ops/quantization/FakeQuantize_1.md
index 70670b6c83b..9a203283974 100644
--- a/docs/ops/quantization/FakeQuantize_1.md
+++ b/docs/ops/quantization/FakeQuantize_1.md
@@ -29,7 +29,6 @@ else:
* **Description**: *levels* is the number of quantization levels (e.g. 2 is for binarization, 255/256 is for int8 quantization)
* **Range of values**: an integer greater than or equal to 2
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *auto_broadcast*
diff --git a/docs/ops/sequence/GRUCell_3.md b/docs/ops/sequence/GRUCell_3.md
index c154c686238..3b5d2256d0a 100644
--- a/docs/ops/sequence/GRUCell_3.md
+++ b/docs/ops/sequence/GRUCell_3.md
@@ -13,7 +13,6 @@
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *activations*
diff --git a/docs/ops/sequence/GRUSequence_5.md b/docs/ops/sequence/GRUSequence_5.md
index f44a1d71078..a18df16bbdc 100644
--- a/docs/ops/sequence/GRUSequence_5.md
+++ b/docs/ops/sequence/GRUSequence_5.md
@@ -18,7 +18,6 @@ A single cell in the sequence is implemented in the same way as in
-```
\ No newline at end of file
+```
diff --git a/docs/ops/sequence/OneHot_1.md b/docs/ops/sequence/OneHot_1.md
index 38ba2a91f93..3df74c1b296 100644
--- a/docs/ops/sequence/OneHot_1.md
+++ b/docs/ops/sequence/OneHot_1.md
@@ -27,7 +27,6 @@ The types of input scalars `on_value` and `off_value` should match and be equal
* **Description**: *axis* is a new axis position in the output shape to fill with one-hot values.
* **Range of values**: an integer. Negative value means counting dimension from the end.
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
**Inputs**:
@@ -95,4 +94,4 @@ The types of input scalars `on_value` and `off_value` should match and be equal
-```
\ No newline at end of file
+```
diff --git a/docs/ops/sequence/RNNCell_3.md b/docs/ops/sequence/RNNCell_3.md
index bb0d5d21b62..11fb4a54868 100644
--- a/docs/ops/sequence/RNNCell_3.md
+++ b/docs/ops/sequence/RNNCell_3.md
@@ -13,7 +13,6 @@
* **Description**: *hidden_size* specifies hidden state size.
* **Range of values**: a positive integer
* **Type**: `int`
- * **Default value**: None
* **Required**: *yes*
* *activations*
@@ -92,4 +91,4 @@
-```
\ No newline at end of file
+```
diff --git a/docs/ops/sequence/RNNSequence_5.md b/docs/ops/sequence/RNNSequence_5.md
index 3bb2c6f1e04..82d72867ef9 100644
--- a/docs/ops/sequence/RNNSequence_5.md
+++ b/docs/ops/sequence/RNNSequence_5.md
@@ -18,7 +18,6 @@ A single cell in the sequence is implemented in the same way as in
diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
new file mode 100644
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "base_reference_test.hpp"
+
+#include <gtest/gtest.h>
+
+#include "functional_test_utils/plugin_cache.hpp"
+#include "transformations/utils/utils.hpp"
+
+using namespace InferenceEngine;
+
+CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
+    core = PluginCache::get().ie(targetDevice);
+}
+
+void CommonReferenceTest::Exec() {
+    LoadNetwork();
+    FillInputs();
+    Infer();
+    Validate();
+}
+
+void CommonReferenceTest::LoadNetwork() {
+    InferenceEngine::CNNNetwork cnnNetwork(function);
+    auto inputInfo = cnnNetwork.getInputsInfo();
+    auto outputInfo = cnnNetwork.getOutputsInfo();
+    for (const auto& param : function->get_parameters()) {
+        inputInfo[param->get_friendly_name()]->setPrecision(InferenceEngine::details::convertPrecision(param->get_element_type()));
+    }
+    for (const auto& result : function->get_results()) {
+        outputInfo[ngraph::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
+            InferenceEngine::details::convertPrecision(result->get_element_type()));
+    }
+    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
+}
+
+void CommonReferenceTest::FillInputs() {
+    const auto& inputInfo = executableNetwork.GetInputsInfo();
+    const auto& params = function->get_parameters();
+    ASSERT_EQ(params.size(), inputData.size());
+    ASSERT_EQ(inputInfo.size(), inputData.size());
+
+    for (size_t i = 0; i < params.size(); i++) {
+        const auto& param = params[i];
+        const auto infoIt = inputInfo.find(param->get_friendly_name());
+        GTEST_ASSERT_NE(infoIt, inputInfo.cend());
+
+        const auto& info = infoIt->second;
+        auto blob = make_blob_with_precision(info->getTensorDesc());
+        blob->allocate();
+
+        ASSERT_EQ(blob->byteSize(), inputData[i]->byteSize());
+
+        MemoryBlob::Ptr mInputData = as<MemoryBlob>(inputData[i]);
+        ASSERT_NE(mInputData, nullptr);
+        auto minputDataHolder = mInputData->rmap();
+
+        MemoryBlob::Ptr mBlob = as<MemoryBlob>(blob);
+        ASSERT_NE(mBlob, nullptr);
+        auto mBlobHolder = mBlob->wmap();
+
+        std::memcpy(mBlobHolder.as<void*>(), minputDataHolder.as<const void*>(), inputData[i]->byteSize());
+        inputData[i] = blob;
+    }
+}
+
+void CommonReferenceTest::Infer() {
+    inferRequest = executableNetwork.CreateInferRequest();
+
+    const auto& inputsInfo = executableNetwork.GetInputsInfo();
+    const auto& functionParams = function->get_parameters();
+    for (size_t i = 0; i < functionParams.size(); ++i) {
+        const auto& param = functionParams[i];
+        const auto infoIt = inputsInfo.find(param->get_friendly_name());
+        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
+
+        const auto& info = infoIt->second;
+        auto blob = inputData[i];
+
+        inferRequest.SetBlob(info->name(), blob);
+    }
+    inferRequest.Infer();
+}
+
+void CommonReferenceTest::Validate() {
+    ASSERT_EQ(executableNetwork.GetOutputsInfo().size(), refOutData.size());
+    std::vector<InferenceEngine::Blob::Ptr> outputs;
+    for (const auto& result : function->get_results()) {
+        auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));
+        outputs.emplace_back(inferRequest.GetBlob(name));
+    }
+
+    ASSERT_EQ(refOutData.size(), outputs.size());
+    for (size_t i = 0; i < refOutData.size(); i++) {
+        ValidateBlobs(refOutData[i], outputs[i]);
+    }
+}
+void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob) {
+    ASSERT_TRUE(refBlob != nullptr);
+    ASSERT_TRUE(outBlob != nullptr);
+    ASSERT_EQ(refBlob->getTensorDesc().getPrecision(), outBlob->getTensorDesc().getPrecision());
+    ASSERT_EQ(refBlob->byteSize(), outBlob->byteSize());
+
+    auto mRef = as<MemoryBlob>(refBlob);
+    IE_ASSERT(mRef);
+    const auto refLockMemory = mRef->rmap();
+    const auto refBuffer = refLockMemory.as<const std::uint8_t*>();
+
+    auto mOut = as<MemoryBlob>(outBlob);
+    IE_ASSERT(mOut);
+    const auto outLockMemory = mOut->rmap();
+    const auto outBuffer = outLockMemory.as<const std::uint8_t*>();
+
+    const auto& precision = refBlob->getTensorDesc().getPrecision();
+    switch (precision) {
+    case InferenceEngine::Precision::BF16:
+        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::bfloat16, ngraph::bfloat16>(
+            reinterpret_cast<const ngraph::bfloat16*>(refBuffer), reinterpret_cast<const ngraph::bfloat16*>(outBuffer), refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::FP16:
+        LayerTestsUtils::LayerTestsCommon::Compare<ngraph::float16, ngraph::float16>(
+            reinterpret_cast<const ngraph::float16*>(refBuffer), reinterpret_cast<const ngraph::float16*>(outBuffer), refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::FP32:
+        LayerTestsUtils::LayerTestsCommon::Compare<float, float>(reinterpret_cast<const float*>(refBuffer), reinterpret_cast<const float*>(outBuffer),
+                                                                 refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::I8:
+        LayerTestsUtils::LayerTestsCommon::Compare<int8_t, int8_t>(reinterpret_cast<const int8_t*>(refBuffer), reinterpret_cast<const int8_t*>(outBuffer),
+                                                                   refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::I16:
+        LayerTestsUtils::LayerTestsCommon::Compare<int16_t, int16_t>(reinterpret_cast<const int16_t*>(refBuffer), reinterpret_cast<const int16_t*>(outBuffer),
+                                                                     refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::I32:
+        LayerTestsUtils::LayerTestsCommon::Compare<int32_t, int32_t>(reinterpret_cast<const int32_t*>(refBuffer), reinterpret_cast<const int32_t*>(outBuffer),
+                                                                     refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::I64:
+        LayerTestsUtils::LayerTestsCommon::Compare<int64_t, int64_t>(reinterpret_cast<const int64_t*>(refBuffer), reinterpret_cast<const int64_t*>(outBuffer),
+                                                                     refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::BOOL:
+    case InferenceEngine::Precision::U8:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
+                                                                     refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::U16:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint16_t, uint16_t>(reinterpret_cast<const uint16_t*>(refBuffer),
+                                                                       reinterpret_cast<const uint16_t*>(outBuffer), refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::U32:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint32_t, uint32_t>(reinterpret_cast<const uint32_t*>(refBuffer),
+                                                                       reinterpret_cast<const uint32_t*>(outBuffer), refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::U64:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint64_t, uint64_t>(reinterpret_cast<const uint64_t*>(refBuffer),
+                                                                       reinterpret_cast<const uint64_t*>(outBuffer), refBlob->size(), threshold);
+        break;
+    case InferenceEngine::Precision::I4:
+    case InferenceEngine::Precision::U4:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
+                                                                     refBlob->size() / 2, threshold);
+        break;
+    case InferenceEngine::Precision::BIN:
+        LayerTestsUtils::LayerTestsCommon::Compare<uint8_t, uint8_t>(reinterpret_cast<const uint8_t*>(refBuffer), reinterpret_cast<const uint8_t*>(outBuffer),
+                                                                     refBlob->size() / 8, threshold);
+        break;
+    default:
+        FAIL() << "Comparator for " << precision << " precision isn't supported";
+    }
+}
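For the sub-byte branches above (I4/U4 and BIN), the comparison runs over packed bytes rather than logical elements, which is why the element count from `Blob::size()` is divided by 2 or 8. A hedged illustration (the helper name is not part of the patch):

```cpp
#include <cstddef>

// Illustrative only: bytes actually compared for a packed precision,
// given the logical element count reported by Blob::size().
size_t packed_byte_count(size_t element_count, size_t bits_per_element) {
    return element_count * bits_per_element / 8;  // 4-bit -> /2, 1-bit -> /8
}
```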
diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
new file mode 100644
index 00000000000..6e3fd942a9e
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
@@ -0,0 +1,53 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ie_core.hpp>
+#include <ie_ngraph_utils.hpp>
+#include <ngraph/ngraph.hpp>
+#include <shared_test_classes/base/layer_test_utils.hpp>
+
+class CommonReferenceTest {
+public:
+    CommonReferenceTest();
+
+    void Exec();
+
+    void LoadNetwork();
+
+    void FillInputs();
+
+    void Infer();
+
+    void Validate();
+
+private:
+    void ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlob, const InferenceEngine::Blob::Ptr& outBlob);
+
+protected:
+    const std::string targetDevice;
+    std::shared_ptr<InferenceEngine::Core> core;
+    std::shared_ptr<ngraph::Function> function;
+
+    InferenceEngine::ExecutableNetwork executableNetwork;
+    InferenceEngine::InferRequest inferRequest;
+    std::vector<InferenceEngine::Blob::Ptr> inputData;
+    std::vector<InferenceEngine::Blob::Ptr> refOutData;
+    float threshold = 1e-2f;
+};
+
+template <typename T>
+InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type, const std::vector<T>& values, size_t size = 0) {
+    size_t real_size = size ? size : values.size() * sizeof(T) / element_type.size();
+    auto blob = make_blob_with_precision(
+        InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(element_type), {real_size}, InferenceEngine::Layout::C));
+    blob->allocate();
+    InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
+    IE_ASSERT(minput);
+    auto minputHolder = minput->wmap();
+
+    std::memcpy(minputHolder.as<void*>(), values.data(), std::min(real_size * element_type.size(), sizeof(T) * values.size()));
+
+    return blob;
+}
+
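A hedged usage sketch of the `CreateBlob` helper above (variable and function names are illustrative): the optional `size` argument carries the logical element count, which matters for packed sub-byte types where the `std::vector` only holds raw bytes.

```cpp
#include <cstdint>
#include <vector>

// Assumes base_reference_test.hpp (above) is on the include path.
#include "base_reference_test.hpp"

void create_blob_usage_example() {
    // Element count equals the vector length for full-byte types.
    auto f32_blob = CreateBlob(ngraph::element::f32, std::vector<float> {1.f, 2.f, 3.f, 4.f});
    // Four logical u4 elements packed into two bytes: pass the element count explicitly.
    auto u4_blob = CreateBlob(ngraph::element::u4, std::vector<uint8_t> {0x12, 0x34}, 4);
    (void)f32_blob;
    (void)u4_blob;
}
```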
diff --git a/docs/template_plugin/tests/functional/op_reference/convert.cpp b/docs/template_plugin/tests/functional/op_reference/convert.cpp
new file mode 100644
index 00000000000..fb32fda4cbb
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/convert.cpp
@@ -0,0 +1,441 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <ie_core.hpp>
+#include <ie_ngraph_utils.hpp>
+#include <limits>
+#include <ngraph/ngraph.hpp>
+#include <shared_test_classes/base/layer_test_utils.hpp>
+
+#include "base_reference_test.hpp"
+
+using namespace ngraph;
+using namespace InferenceEngine;
+
+struct ConvertParams {
+    template <class IT, class OT>
+    ConvertParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const ngraph::element::Type& oType, const std::vector<IT>& iValues,
+                  const std::vector<OT>& oValues, size_t iSize = 0, size_t oSize = 0)
+        : pshape(shape), inType(iType), outType(oType), inputData(CreateBlob(iType, iValues, iSize)), refData(CreateBlob(oType, oValues, oSize)) {}
+    ngraph::PartialShape pshape;
+    ngraph::element::Type inType;
+    ngraph::element::Type outType;
+    InferenceEngine::Blob::Ptr inputData;
+    InferenceEngine::Blob::Ptr refData;
+};
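The templated constructor deduces the element value types from the vectors, so each test row only has to pair the ngraph element types with matching payloads. A minimal hedged sketch, assuming the `ConvertParams` struct above (the helper name is illustrative):

```cpp
// Illustrative only: IT = uint8_t and OT = float are deduced from the vector
// arguments; the trailing iSize/oSize arguments are only needed for packed
// sub-byte element types such as u4 or u1.
ConvertParams make_example_row() {
    return ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u8, ngraph::element::f32,
                         std::vector<uint8_t> {1, 2, 3, 4}, std::vector<float> {1.0f, 2.0f, 3.0f, 4.0f});
}
```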
+
+class ReferenceConvertLayerTest : public testing::TestWithParam<ConvertParams>, public CommonReferenceTest {
+public:
+    void SetUp() override {
+        auto params = GetParam();
+        function = CreateFunction(params.pshape, params.inType, params.outType);
+        inputData = {params.inputData};
+        refOutData = {params.refData};
+    }
+    static std::string getTestCaseName(const testing::TestParamInfo<ConvertParams>& obj) {
+        auto param = obj.param;
+        std::ostringstream result;
+        result << "shape=" << param.pshape << "_";
+        result << "iType=" << param.inType << "_";
+        result << "oType=" << param.outType;
+        return result.str();
+    }
+
+private:
+    static std::shared_ptr<ngraph::Function> CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
+                                                            const element::Type& expected_output_type) {
+        const auto in = std::make_shared<op::Parameter>(input_type, input_shape);
+        const auto convert = std::make_shared<op::Convert>(in, expected_output_type);
+        return std::make_shared<Function>(NodeVector {convert}, ParameterVector {in});
+    }
+};
+
+TEST_P(ReferenceConvertLayerTest, CompareWithHardcodedRefs) {
+    Exec();
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ smoke_Convert_With_Hardcoded_Refs, ReferenceConvertLayerTest,
+ ::testing::Values(
+ // destination boolean
+ ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::u8, ngraph::element::boolean,
+ std::vector {0, 12, 23, 0, std::numeric_limits::lowest(), std::numeric_limits::max()},
+ std::vector {0, 1, 1, 0, 0, 1}),
+ ConvertParams(ngraph::PartialShape {2, 3}, ngraph::element::i32, ngraph::element::boolean,
+ std::vector {0, -12, 23, 0, std::numeric_limits::lowest(), std::numeric_limits::max()},
+ std::vector {0, 1, 1, 0, 1, 1}),
+ ConvertParams(ngraph::PartialShape {3, 3}, ngraph::element::f32, ngraph::element::boolean,
+ std::vector {0.f, 1.5745f, 0.12352f, 0.f, std::numeric_limits::lowest(), std::numeric_limits::max(),
+ std::numeric_limits::min(), std::numeric_limits::infinity(), -std::numeric_limits::infinity()},
+ std::vector {0, 1, 1, 0, 1, 1, 1, 1, 1}),
+
+ // destination bf16
+ ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::bf16,
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
+ ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::bf16,
+ std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
+ std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),
+
+ // destination f16
+ ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f16,
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
+ ConvertParams(ngraph::PartialShape {11}, ngraph::element::u8, ngraph::element::f16, std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142},
+ std::vector {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}),
+
+ // destination f32
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u1, ngraph::element::f32, std::vector {0xA0},
+ std::vector {1.0f, 0.0f, 1.0f, 0.0f}, 4),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u4, ngraph::element::f32, std::vector {0xFB, 0x0A},
+ std::vector {15.0f, 11.0f, 0.0f, 10.0f}, 4),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u8, ngraph::element::f32, std::vector {255, 128, 32, 0},
+ std::vector {255.0f, 128.0f, 32.0f, 0.0f}),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u16, ngraph::element::f32, std::vector {64000, 32000, 128, 0},
+ std::vector {64000.0f, 32000.0f, 128.0f, 0.0f}),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u32, ngraph::element::f32, std::vector {4000000, 2000000, 128, 0},
+ std::vector {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::u64, ngraph::element::f32, std::vector {4000000, 2000000, 128, 0},
+ std::vector {4000000.0f, 2000000.0f, 128.0f, 0.0f}),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i4, ngraph::element::f32, std::vector {0xFE, 0xF2},
+ std::vector {-1.0f, -2.0f, -1.0f, 2.0f}, 4),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i8, ngraph::element::f32, std::vector {-127, -0, 0, 127},
+ std::vector {-127.0f, -0.0f, 0.0f, 127.0f}),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i16, ngraph::element::f32, std::vector {-32000, -0, 0, 32000},
+ std::vector {-32000.0f, -0.0f, 0.0f, 32000.0f}),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i32, ngraph::element::f32, std::vector {-64000, -0, 0, 64000},
+ std::vector {-64000.0f, -0.0f, 0.0f, 64000.0f}),
+ ConvertParams(ngraph::PartialShape {2, 2}, ngraph::element::i64, ngraph::element::f32, std::vector {-64000, -0, 0, 64000},
+ std::vector {-64000.0f, -0.0f, 0.0f, 64000.0f}),
+ ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::bf16, ngraph::element::f32,
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
+ ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f16, ngraph::element::f32,
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
+ ConvertParams(ngraph::PartialShape {1, 1, 3, 5}, ngraph::element::f32, ngraph::element::f32,
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f},
+ std::vector {0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}),
+
+ // destination i4
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u1, ngraph::element::i4, std::vector {0xA0}, std::vector {0x10, 0x10}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i4, std::vector {0x12, 0x03}, std::vector {0x12, 0x03},
+ 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i4, std::vector {1, 2, 0, 3}, std::vector {0x12, 0x03},
+ 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i4, std::vector {1, 2, 0, 3},
+ std::vector {0x12, 0x03}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i4, std::vector {1, 2, 0, 3},
+ std::vector {0x12, 0x03}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i4, std::vector {1, 2, 0, 3},
+ std::vector {0x12, 0x03}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i4, std::vector {0xFE, 0x03}, std::vector {0xFE, 0x03},
+ 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i4, std::vector {-1, -2, 2, 3}, std::vector {0xFE, 0x23},
+ 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i4, std::vector {-1, -2, 2, 3},
+ std::vector {0xFE, 0x23}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i4, std::vector {-1, -2, 2, 3},
+ std::vector {0xFE, 0x23}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i4, std::vector {-1, -2, 2, 3},
+ std::vector {0xFE, 0x23}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i4, std::vector {-1, -2, 0, 3},
+ std::vector {0xFE, 0x03}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i4, std::vector {-1, -2, 0, 3},
+ std::vector {0xFE, 0x03}, 4, 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i4, std::vector {-1, -2, 2, 3}, std::vector {0xFE, 0x23},
+ 4, 4),
+ // destination i8
+ ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i8, std::vector {0x81},
+ std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i8, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3},
+ 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i8, std::vector {1, 2, 0, 3}, std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i8, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i8, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i8, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i8, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3},
+ 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i8, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i8, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i8, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i8, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i8, std::vector {-1, -2, 0, 3},
+ std::vector {-1, -2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i8, std::vector {-1, -2, 0, 3},
+ std::vector {-1, -2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i8, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ // destination i16
+ ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i16, std::vector {0x81},
+ std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i16, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3},
+ 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i16, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i16, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i16, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i16, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i16, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3},
+ 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i16, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i16, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i32, ngraph::element::i16, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i64, ngraph::element::i16, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::f16, ngraph::element::i16, std::vector {-1, -2, 0, 3},
+ std::vector {-1, -2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::bf16, ngraph::element::i16, std::vector {-1, -2, 0, 3},
+ std::vector {-1, -2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::f32, ngraph::element::i16, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ // destination i32
+ ConvertParams(ngraph::PartialShape {8}, ngraph::element::u1, ngraph::element::i32, std::vector {0x81},
+ std::vector {1, 0, 0, 0, 0, 0, 0, 1}, 8),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u4, ngraph::element::i32, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3},
+ 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u8, ngraph::element::i32, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u16, ngraph::element::i32, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u32, ngraph::element::i32, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::u64, ngraph::element::i32, std::vector {1, 2, 0, 3},
+ std::vector {1, 2, 0, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i4, ngraph::element::i32, std::vector {0x21, 0x43}, std::vector {2, 1, 4, 3},
+ 4),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i8, ngraph::element::i32, std::vector {-1, -2, 2, 3},
+ std::vector {-1, -2, 2, 3}),
+ ConvertParams(ngraph::PartialShape {4}, ngraph::element::i16, ngraph::element::i32, std::vector