diff --git a/.ci/azure/linux_ngraph_onnx.yml b/.ci/azure/linux_ngraph_onnx.yml
index c6071fc127f..5521d224630 100644
--- a/.ci/azure/linux_ngraph_onnx.yml
+++ b/.ci/azure/linux_ngraph_onnx.yml
@@ -4,17 +4,13 @@ jobs:
matrix:
Release:
BUILD_TYPE: 'Release'
- PROTOBUF_LITE: 'OFF'
+ PROTOBUF_LITE: 'ON'
TOX_COMMAND: 'tox && tox -e zoo_models'
Debug:
BUILD_TYPE: 'Debug'
- PROTOBUF_LITE: 'OFF'
- TOX_COMMAND: 'tox'
- Protobuf_lite:
- BUILD_TYPE: 'Release'
PROTOBUF_LITE: 'ON'
- TOX_COMMAND: 'tox && tox -e zoo_models'
- maxParallel: 3
+ TOX_COMMAND: 'tox'
+ maxParallel: 2
# About 300% of total time
timeoutInMinutes: 90
@@ -56,10 +52,10 @@ jobs:
- script: |
rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR)
- sudo rm -rf $(TMP_DIR) ; sudo mkdir $(TMP_DIR) ; sudo chmod 777 -R $(TMP_DIR)
sudo mkdir -p $(MODELS_DIR)
sudo apt --assume-yes install nfs-common
sudo mount -vvv -t nfs cinfsshare.file.core.windows.net:/cinfsshare/onnxtestdata $(MODELS_DIR) -o vers=4,minorversion=1,sec=sys
+ mkdir -p $(MODELS_DIR)/models_data
displayName: 'Make dirs'
- checkout: self
@@ -76,15 +72,15 @@ jobs:
workingDirectory: $(WORK_DIR)
displayName: 'Install dependencies'
+ - script: ngraph/python/tests/test_onnx/model_zoo_preprocess.sh -d $(MODELS_DIR)/models_data -o -s "$(ONNX_MODEL_ZOO_SHA)"
+ displayName: 'Update models'
+ condition: ne(variables['BUILD_TYPE'], 'Debug')
+
- script: sudo docker build --tag=openvino-onnx-ci-image --file=.ci/openvino-onnx/Dockerfile --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg PROTOBUF_LITE=$(PROTOBUF_LITE) .
displayName: 'Docker build $(BUILD_TYPE) protobuf-lite: $(PROTOBUF_LITE)'
- - script: ngraph/python/tests/test_onnx/model_zoo_preprocess.sh -d $(TMP_DIR) -o -s "$(ONNX_MODEL_ZOO_SHA)"
- displayName: 'Get models'
- condition: ne(variables['BUILD_TYPE'], 'Debug')
-
- script: sudo fallocate -l 64G /swapfile ; sudo mkswap /swapfile ; sudo swapon /swapfile ; df ; free -h
displayName: 'Create swap'
- - script: sudo docker run --name openvino-onnx-ci-container --volume $(TMP_DIR)/model_zoo/onnx_model_zoo_$(ONNX_MODEL_ZOO_SHA):/root/.onnx/model_zoo/onnx_model_zoo --volume $(MODELS_DIR)/msft:/root/.onnx/model_zoo/MSFT openvino-onnx-ci-image /bin/bash -c "$(TOX_COMMAND)"
+ - script: sudo docker run --name openvino-onnx-ci-container --volume $(MODELS_DIR)/models_data/model_zoo/onnx_model_zoo_$(ONNX_MODEL_ZOO_SHA):/root/.onnx/model_zoo/onnx_model_zoo --volume $(MODELS_DIR)/msft:/root/.onnx/model_zoo/MSFT openvino-onnx-ci-image /bin/bash -c "$(TOX_COMMAND)"
displayName: 'Docker run $(BUILD_TYPE) protobuf-lite: $(PROTOBUF_LITE)'
diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml
index e5ec0486f9b..3d0936c5411 100644
--- a/.ci/azure/windows.yml
+++ b/.ci/azure/windows.yml
@@ -16,7 +16,7 @@ jobs:
timeoutInMinutes: 120
pool:
- name: WIN_VMSS_VENV_F8S_WU2
+ name: WIN_VMSS_VENV_F16S_WU2
variables:
system.debug: true
@@ -34,8 +34,6 @@ jobs:
INSTALL_DIR: $(WORK_DIR)\install_pkg
INSTALL_TEST_DIR: $(INSTALL_DIR)\tests
SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat
- IB_DIR: C:\Program Files (x86)\IncrediBuild
- IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe
steps:
- script: |
@@ -59,12 +57,6 @@ jobs:
rd /Q /S $(BUILD_SAMPLES_DIR) & mkdir $(BUILD_SAMPLES_DIR)
displayName: 'Make dir'
- - script: |
- certutil -urlcache -split -f https://openvinoweb.z5.web.core.windows.net/incredibuild/install_ib_console.bat install_ib_console.bat
- call install_ib_console.bat
- workingDirectory: $(WORK_DIR)
- displayName: 'Install IncrediBuild'
-
- checkout: self
clean: true
lfs: false
@@ -109,9 +101,7 @@ jobs:
- script: dir $(REPO_DIR)\inference-engine\temp\ /s
displayName: 'List temp SDKs'
- - script: |
- set PATH=$(WORK_DIR)\ninja-win;%PATH%
- call "$(MSVS_VARS_PATH)" && "C:\Program Files (x86)\IncrediBuild\BuildConsole.exe" /COMMAND="ninja"
+ - script: call "$(MSVS_VARS_PATH)" && $(WORK_DIR)\ninja-win\ninja
workingDirectory: $(BUILD_DIR)
displayName: 'Build Win'
@@ -153,10 +143,8 @@ jobs:
displayName: 'PaddlePaddle Frontend UT'
continueOnError: false
- - script: |
- set PATH=$(IB_DIR);%PATH%
- call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests-IB.xml
- displayName: 'IE UT old - IB'
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\InferenceEngineUnitTests.exe --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
+ displayName: 'IE UT old'
continueOnError: false
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
@@ -187,11 +175,8 @@ jobs:
displayName: 'TEMPLATE FuncTests'
continueOnError: false
- # call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
- - script: |
- set PATH=$(IB_DIR);%PATH%
- call $(SETUPVARS) && "$(IB_TESTCONSOLE)" $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke*:-*CompareWithRefs/base_size=16_pre_nms_topn=100_post_nms_topn=100_nms_thresh=0.7_feat_stride=1_min_size=1_ratio*:*smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs/mode=CONVERT_TO_TI_MAX_SEQ_LEN_CONST_seq_lengths* --gtest_output=xml:TEST-cpuFuncTests-IB.xml /testlevel=24
- displayName: 'CPU FuncTests - IB'
+ - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\cpuFuncTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-cpuFuncTests.xml
+ displayName: 'CPU FuncTests'
continueOnError: false
- script: |
@@ -213,8 +198,3 @@ jobs:
buildPlatform: 'x64' # Optional
buildConfiguration: 'Windows' # Optional
#publishRunAttachments: true # Optional
-
- - script: echo Stop IncrediBuild_Agent && net stop IncrediBuild_Agent
- displayName: Stop IncrediBuild
- continueOnError: true
- enabled: false
diff --git a/.ci/azure/windows_conditional_compilation.yml b/.ci/azure/windows_conditional_compilation.yml
index 719e02d7574..9024ede46f6 100644
--- a/.ci/azure/windows_conditional_compilation.yml
+++ b/.ci/azure/windows_conditional_compilation.yml
@@ -1,7 +1,7 @@
jobs:
- job: WinCC
# About 150% of total time
- timeoutInMinutes: 120
+ timeoutInMinutes: 60
pool:
name: WIN_VMSS_VENV_F8S_WU2
@@ -10,26 +10,22 @@ jobs:
system.debug: true
VSTS_HTTP_RETRY: 5
VSTS_HTTP_TIMEOUT: 200
- WORKERS_NUMBER: 8
BUILD_TYPE: Release
REPO_DIR: $(Build.Repository.LocalPath)
OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)\..\openvino_contrib
MODELS_PATH: $(REPO_DIR)\..\testdata
WORK_DIR: $(Pipeline.Workspace)\_w
BUILD_DIR: D:\build
- BIN_DIR: $(REPO_DIR)\bin\intel64
MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe
INSTALL_DIR: $(WORK_DIR)\install_pkg
SETUPVARS: $(INSTALL_DIR)\bin\setupvars.bat
- IB_DIR: C:\Program Files (x86)\IncrediBuild
- IB_TESTCONSOLE: $(IB_DIR)\IBTestConsole.exe
- TEST_ENV_PATH: $(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.2\opencv\bin;$(IB_DIR);%PATH%
steps:
- script: |
powershell -command "Invoke-RestMethod -Headers @{\"Metadata\"=\"true\"} -Method GET -Uri http://169.254.169.254/metadata/instance/compute?api-version=2019-06-01 | format-custom"
where python3
+ python3 --version
where python
python --version
where java
@@ -46,12 +42,6 @@ jobs:
rd /Q /S $(BUILD_DIR) & mkdir $(BUILD_DIR)
displayName: 'Make dir'
- - script: |
- certutil -urlcache -split -f https://openvinoweb.z5.web.core.windows.net/incredibuild/install_ib_console.bat install_ib_console.bat
- call install_ib_console.bat
- workingDirectory: $(WORK_DIR)
- displayName: 'Install IncrediBuild'
-
- checkout: self
clean: true
lfs: false
@@ -59,7 +49,8 @@ jobs:
path: openvino
- script: |
- certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip ninja-win.zip
+ rem Speed up build
+ certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-win.zip ninja-win.zip
powershell -command "Expand-Archive -Force ninja-win.zip"
workingDirectory: $(WORK_DIR)
displayName: 'Install dependencies'
@@ -70,20 +61,19 @@ jobs:
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
- - script: |
- set PATH=$(WORK_DIR)\ninja-win;%PATH%
- call "$(MSVS_VARS_PATH)" && "C:\Program Files (x86)\IncrediBuild\BuildConsole.exe" /COMMAND="ninja"
+ - script: dir $(REPO_DIR)\inference-engine\temp\ /s
+ displayName: 'List temp SDKs'
+
+ - script: call "$(MSVS_VARS_PATH)" && $(WORK_DIR)\ninja-win\ninja
workingDirectory: $(BUILD_DIR)
- displayName: 'Build Win'
+ displayName: 'Build Win CC'
- script: dir $(REPO_DIR)\bin\ /s
- displayName: 'List files'
+ displayName: 'List bin files'
- script: cmake -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) -P cmake_install.cmake
workingDirectory: $(BUILD_DIR)
displayName: 'Install'
- - script: echo Stop IncrediBuild_Agent && net stop IncrediBuild_Agent
- displayName: Stop IncrediBuild
- continueOnError: true
- enabled: false
+ - script: dir $(INSTALL_DIR) /s
+ displayName: 'List install files'
diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile
index ca2cbd8afbe..314ab2c1037 100644
--- a/.ci/openvino-onnx/Dockerfile
+++ b/.ci/openvino-onnx/Dockerfile
@@ -4,7 +4,7 @@ LABEL version=2021.03.30.1
# Build configuration arguments
ARG BUILD_TYPE=Release
-ARG PROTOBUF_LITE=OFF
+ARG PROTOBUF_LITE=ON
ARG http_proxy
ARG https_proxy
diff --git a/CODEOWNERS b/CODEOWNERS
index 2894fac8ff3..165bc745563 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -75,6 +75,6 @@ azure-pipelines.yml @openvinotoolkit/openvino-admins
*.md @openvinotoolkit/openvino-docs-maintainers
# Control 3d party dependencies
-*requirements* @openvino-configuration-mgmt
-*setup.py @openvino-configuration-mgmt
+**/*requirements*.* @openvino-configuration-mgmt
+**/setup.py @openvino-configuration-mgmt
/scripts/install_dependencies/ @openvino-configuration-mgmt
diff --git a/cmake/developer_package/ncc_naming_style/openvino.style b/cmake/developer_package/ncc_naming_style/openvino.style
index 1dc53167129..6832847ae3a 100644
--- a/cmake/developer_package/ncc_naming_style/openvino.style
+++ b/cmake/developer_package/ncc_naming_style/openvino.style
@@ -18,9 +18,11 @@ FunctionTemplate: '^(operator.+|\w+)$'
TypeAliasName: '^\w+$'
VariableReference: '^\w+$'
+EnumName: '^[A-Z][\w]+$'
+# exceptions are made for element_type value names
+# TODO: Fix interpolate
+EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$'
# TODO: align
-EnumConstantName: '^.*$'
-EnumName: '^.*$'
UsingDeclaration: '^.*$'
TypedefName: '^.*$'
diff --git a/docs/IE_DG/Paddle_Support.md b/docs/IE_DG/Paddle_Support.md
new file mode 100644
index 00000000000..03dddc6cdcc
--- /dev/null
+++ b/docs/IE_DG/Paddle_Support.md
@@ -0,0 +1,34 @@
+# Paddle Support in OpenVINO™ {#openvino_docs_IE_DG_Paddle_Support}
+
+Starting from the 2022.1 release, OpenVINO™ supports reading native Paddle models.
+The `Core::ReadNetwork()` method provides a uniform way to read models from either the IR or Paddle format and is the recommended approach to reading models.
+
+## Read Paddle Models from IR
+
+After [Converting a Paddle Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md) to the [Intermediate Representation (IR)](../MO_DG/IR_and_opsets.md), the model can be read just like any other IR model. Example:
+
+```cpp
+InferenceEngine::Core core;
+auto network = core.ReadNetwork("model.xml");
+```
+
+## Read Paddle Models from Paddle Format (Paddle `inference model` model type)
+
+**Example:**
+
+```cpp
+InferenceEngine::Core core;
+auto network = core.ReadNetwork("model.pdmodel");
+```
+
+**Reshape feature:**
+
+OpenVINO™ does not provide a mechanism to specify pre-processing, such as mean value subtraction or reversing input channels, for the Paddle format.
+If a Paddle model contains dynamic input shapes, use the `CNNNetwork::reshape` method for shape specialization, as shown in the sketch below.
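+
+A minimal sketch of shape specialization (the input name `image` and the target shape are assumptions for illustration):
+
+```cpp
+InferenceEngine::Core core;
+auto network = core.ReadNetwork("model.pdmodel");
+// collect the current input shapes and override the dynamic one
+auto shapes = network.getInputShapes();
+shapes["image"] = {1, 3, 608, 608};
+network.reshape(shapes);
+```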
+
+## NOTE
+
+* A Paddle [`inference model`](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/doc/doc_en/inference_en.md) mainly consists of two kinds of files, `model.pdmodel` (model file) and `model.pdiparams` (params file), which are used for inference.
+* The list of supported Paddle models and instructions on how to export them are described in [Convert a Paddle Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md).
+* For `Normalize` Paddle models, the input data should be in FP32 format.
+* When reading Paddle models from the Paddle format, make sure that `model.pdmodel` and `model.pdiparams` are in the same directory.
diff --git a/docs/IE_DG/supported_plugins/AUTO.md b/docs/IE_DG/supported_plugins/AUTO.md
new file mode 100644
index 00000000000..55a5e01f212
--- /dev/null
+++ b/docs/IE_DG/supported_plugins/AUTO.md
@@ -0,0 +1,128 @@
+# Auto-Device Plugin {#openvino_docs_IE_DG_supported_plugins_AUTO}
+
+## Auto-Device Plugin Execution
+
+Auto-device is a new special "virtual" or "proxy" device in the OpenVINO™ toolkit.
+
+Use "AUTO" as the device name to delegate selection of an actual accelerator to OpenVINO.
+With the 2021.4 release, Auto-device internally recognizes and selects devices from CPU,
+integrated GPU and discrete Intel GPUs (when available) depending on the device capabilities and the characteristics of CNN models,
+for example, precision. Auto-device then assigns inference requests to the selected device.
+
+From the application's point of view, this is just another device that handles all accelerators in the system.
+
+With the 2021.4 release, Auto-device setup is done in three major steps:
+* Step 1: Configure each device as usual (for example, via the conventional SetConfig method)
+* Step 2: Load a network to the Auto-device plugin. This is the only change needed in your application.
+* Step 3: Just like with any other executable network (resulting from LoadNetwork), create as many requests as needed to saturate the devices.
+These steps are covered below in detail.
+
+
+## Defining and Configuring the Auto-Device Plugin
+Following the OpenVINO notion of “devices”, the Auto-device has the “AUTO” name. The only configuration option for Auto-device is a limited device list:
+
+| Parameter name | Parameter values | Default | Description |
+| :--- | :--- | :--- |:-----------------------------------------------------------------------------|
+| "AUTO_DEVICE_LIST" | comma-separated device names with no spaces| N/A | Device candidate list to be selected |
+
+You can use the configuration name directly as a string or use IE::KEY_AUTO_DEVICE_LIST from ie_plugin_config.hpp,
+which defines the same string.
+
+There are two ways to use Auto-device:
+1. Directly specify the device using “AUTO” or an empty string:
+
+@snippet snippets/AUTO0.cpp part0
+
+2. Use the Auto-device configuration to limit the list of device candidates:
+
+@snippet snippets/AUTO1.cpp part1
+
+Auto-device supports querying device optimization capabilities as a metric:
+
+| Parameter name | Parameter values |
+| :--- | :--- |
+| "OPTIMIZATION_CAPABILITIES" | Auto-Device capabilities |
+
+## Enumerating Available Devices and Auto-Device Selection Logic
+
+### Enumerating Available Devices
+
+Inference Engine now features a dedicated API to enumerate devices and their capabilities.
+See [Hello Query Device C++ Sample](../../../inference-engine/samples/hello_query_device/README.md).
+This is example output from the sample (truncated to device names only):
+
+```sh
+./hello_query_device
+Available devices:
+ Device: CPU
+...
+ Device: GPU.0
+...
+ Device: GPU.1
+```
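+
+For programmatic enumeration, a minimal sketch using `InferenceEngine::Core::GetAvailableDevices()` (the printed device names above are illustrative):
+
+```cpp
+#include <inference_engine.hpp>
+#include <iostream>
+
+int main() {
+    InferenceEngine::Core ie;
+    // enumerate every device visible to the Inference Engine, e.g. "CPU", "GPU.0", "GPU.1"
+    for (const auto& device : ie.GetAvailableDevices()) {
+        std::cout << "Device: " << device << std::endl;
+    }
+    return 0;
+}
+```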
+
+### Default Auto-Device Selection Logic
+
+With the 2021.4 release, Auto-Device selects the most suitable device using the following default logic:
+1. Check if dGPU, iGPU and CPU devices are available.
+2. Get the precision of the input model, for example, FP32.
+3. Following the priority of dGPU, iGPU and CPU (in this order), select the first device that supports the precision of the input network as the most suitable device.
+
+For example, CPU, dGPU and iGPU can support the following precisions and optimization capabilities:
+
+| Device | OPTIMIZATION_CAPABILITIES |
+| :--- | :--- |
+| CPU | WINOGRAD FP32 FP16 INT8 BIN |
+| dGPU | FP32 BIN BATCHED_BLOB FP16 INT8 |
+| iGPU | FP32 BIN BATCHED_BLOB FP16 INT8 |
+
+When the application uses Auto-device to run an FP16 IR on a system with CPU, dGPU and iGPU, Auto-device offloads the workload to the dGPU.
+
+When the application uses Auto-device to run an FP16 IR on a system with CPU and iGPU, Auto-device offloads the workload to the iGPU.
+
+When the application uses Auto-device to run a WINOGRAD-enabled IR on a system with CPU, dGPU and iGPU, Auto-device offloads the workload to the CPU.
+
+In any case, if loading the network to the dGPU or iGPU fails, the network falls back to the CPU as the last choice.
+
+### Limit Auto Target Devices Logic
+
+According to the Auto-device selection logic from the previous section,
+the most suitable device is selected from the available devices to load the model, as follows:
+
+@snippet snippets/AUTO2.cpp part2
+
+Another way to load the model to a device from a limited choice of devices is to use the Auto-device configuration:
+
+@snippet snippets/AUTO3.cpp part3
+
+## Configuring the Individual Devices and Creating the Auto-Device on Top
+
+As described in the first section, configure each individual device as usual and then just create the "AUTO" device on top:
+
+@snippet snippets/AUTO4.cpp part4
+
+Alternatively, you can combine all the individual device settings into a single config and load it,
+allowing the Auto-device plugin to parse and apply the settings to the right devices. See the code example below:
+
+@snippet snippets/AUTO5.cpp part5
+
+## Using the Auto-Device with OpenVINO Samples and Benchmark App
+
+Note that every OpenVINO sample that supports the "-d" (which stands for "device") command-line option transparently accepts the Auto-device.
+The Benchmark Application is the best example of the optimal usage of the Auto-device.
+You do not need to set the number of requests or CPU threads, as the application provides optimal out-of-the-box performance.
+Below is an example command line to evaluate AUTO performance:
+
+```sh
+./benchmark_app -d AUTO -m <model> -i <input> -niter 1000
+```
+You can also use the Auto-device with a limited device choice:
+
+```sh
+./benchmark_app -d AUTO:CPU,GPU -m <model> -i <input> -niter 1000
+```
+Note that the default number of CPU streams is 1 when using “-d AUTO”.
+
+Note that you can use FP16 IR with the Auto-device.
+Also note that no demos are (yet) fully optimized for the Auto-device, by means of selecting the most suitable device,
+using GPU streams/throttling, and so on.
diff --git a/docs/IE_DG/supported_plugins/MYRIAD.md b/docs/IE_DG/supported_plugins/MYRIAD.md
index 8983f20a925..6425cc5ed4b 100644
--- a/docs/IE_DG/supported_plugins/MYRIAD.md
+++ b/docs/IE_DG/supported_plugins/MYRIAD.md
@@ -66,10 +66,8 @@ In addition to common parameters, the MYRIAD plugin accepts the following option
| Parameter Name | Parameter Values | Default | Description |
| :--- | :--- | :--- | :--- |
-| `KEY_VPU_MYRIAD_PLATFORM` | empty string/`VPU_MYRIAD_2450`/`VPU_MYRIAD_2480` | empty string | If set, the plugin will use a device with specific platform to allocate a network. |
| `KEY_VPU_MYRIAD_PROTOCOL` | empty string/`VPU_MYRIAD_USB`/`VPU_MYRIAD_PCIE` | empty string | If set, the plugin will use a device with specific protocol to allocate a network. |
| `KEY_VPU_MYRIAD_FORCE_RESET` | `YES`/`NO` | `NO` | Enables force reset of all booted devices when new ExecutableNetwork is created. This is a plugin scope option and must be used with the plugin's SetConfig method only. See Device allocation section for details. |
-| `KEY_VPU_PLATFORM` | empty string/`VPU_2450`/`VPU_2480` | empty string | **Deprecated** Use `KEY_VPU_MYRIAD_PLATFORM` instead. If set, the plugin will use a device with specific platform to allocate a network. |
| `KEY_VPU_FORCE_RESET` | `YES`/`NO` | `NO` | **Deprecated** Use `KEY_VPU_MYRIAD_FORCE_RESET` instead. Enables force reset of all booted devices when new ExecutableNetwork is created. This is a plugin scope option and must be used with the plugin's SetConfig method only. See Device allocation section for details. |
## Device allocation
diff --git a/docs/IE_DG/supported_plugins/Supported_Devices.md b/docs/IE_DG/supported_plugins/Supported_Devices.md
index e1140ae4b74..5c003fc86bb 100644
--- a/docs/IE_DG/supported_plugins/Supported_Devices.md
+++ b/docs/IE_DG/supported_plugins/Supported_Devices.md
@@ -13,7 +13,8 @@ The Inference Engine provides unique capabilities to infer deep learning models
|[CPU plugin](CPU.md) |Intel® Xeon® with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® Streaming SIMD Extensions (Intel® SSE) |
|[VPU plugins](VPU.md) (available in the Intel® Distribution of OpenVINO™ toolkit) |Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X, Intel® Vision Accelerator Design with Intel® Movidius™ VPUs |
|[GNA plugin](GNA.md) (available in the Intel® Distribution of OpenVINO™ toolkit) |Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor|
-|[Multi-Device plugin](MULTI.md) |Multi-Device plugin enables simultaneous inference of the same network on several Intel® devices in parallel |
+|[Multi-Device plugin](MULTI.md) |Multi-Device plugin enables simultaneous inference of the same network on several Intel® devices in parallel |
+|[Auto-Device plugin](AUTO.md) |Auto-Device plugin enables automatic selection of an Intel® device for inference |
|[Heterogeneous plugin](HETERO.md) |Heterogeneous plugin enables automatic inference splitting between several Intel® devices (for example if a device doesn't [support certain layers](#supported-layers)). |
Devices similar to the ones we have used for benchmarking can be accessed using [Intel® DevCloud for the Edge](https://devcloud.intel.com/edge/), a remote development environment with access to Intel® hardware and the latest versions of the Intel® Distribution of the OpenVINO™ Toolkit. [Learn more](https://devcloud.intel.com/edge/get_started/devcloud/) or [Register here](https://inteliot.force.com/DevcloudForEdge/s/).
diff --git a/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md
new file mode 100644
index 00000000000..65f5c8fbbab
--- /dev/null
+++ b/docs/MO_DG/prepare_model/convert_model/Convert_Model_From_Paddle.md
@@ -0,0 +1,62 @@
+# Converting a Paddle* Model {#openvino_docs_MO_DG_prepare_model_convert_model_Convert_Model_From_Paddle}
+
+A summary of the steps for optimizing and deploying a model that was trained with Paddle\*:
+
+1. [Configure the Model Optimizer](../Config_Model_Optimizer.md) for Paddle\*.
+2. [Convert a Paddle\* Model](#Convert_From_Paddle) to produce an optimized [Intermediate Representation (IR)](../../IR_and_opsets.md) of the model based on the trained network topology, weights, and biases values.
+3. Test the model in the Intermediate Representation format using the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in the target environment via provided Inference Engine [sample applications](../../../IE_DG/Samples_Overview.md).
+4. [Integrate](../../../IE_DG/Samples_Overview.md) the [Inference Engine](../../../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) in your application to deploy the model in the target environment.
+
+## Supported Topologies
+
+| Model Name| Model Type| Description|
+| ------------- | ------------ | ------------- |
+|ppocr-det| optical character recognition| Models are exported from [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/). Refer to [READ.md](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/#pp-ocr-20-series-model-listupdate-on-dec-15).|
+|ppocr-rec| optical character recognition| Models are exported from [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/). Refer to [READ.md](https://github.com/PaddlePaddle/PaddleOCR/tree/release/2.1/#pp-ocr-20-series-model-listupdate-on-dec-15).|
+|ResNet-50| classification| Models are exported from [PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.1/). Refer to [getting_started_en.md](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/en/tutorials/getting_started_en.md#4-use-the-inference-model-to-predict)|
+|MobileNet v2| classification| Models are exported from [PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.1/). Refer to [getting_started_en.md](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/en/tutorials/getting_started_en.md#4-use-the-inference-model-to-predict)|
+|MobileNet v3| classification| Models are exported from [PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/release/2.1/). Refer to [getting_started_en.md](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.1/docs/en/tutorials/getting_started_en.md#4-use-the-inference-model-to-predict)|
+|BiSeNet v2| semantic segmentation| Models are exported from [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1). Refer to [model_export.md](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#)|
+|DeepLab v3 plus| semantic segmentation| Models are exported from [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1). Refer to [model_export.md](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#)|
+|Faster-SCNN| semantic segmentation| Models are exported from [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1). Refer to [model_export.md](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#)|
+|OCRNET| semantic segmentation| Models are exported from [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1). Refer to [model_export.md](https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.1/docs/model_export.md#)|
+|Yolo v3| detection| Models are exported from [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.1). Refer to [EXPORT_MODEL.md](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md#).|
+|ppyolo| detection| Models are exported from [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.1). Refer to [EXPORT_MODEL.md](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md#).|
+
+> **NOTE:** The verified models are exported from the repository of branch release/2.1.
+
+## Convert a Paddle* Model
+
+To convert a Paddle\* model:
+
+1. Go to the `$INTEL_OPENVINO_DIR/deployment_tools/model_optimizer` directory.
+2. Use the `mo.py` script to convert a model, specifying the framework, the path to the input `.pdmodel` file, and the path to an output directory with write permissions:
+```sh
+python3 mo.py --input_model <INPUT_MODEL>.pdmodel --output_dir <OUTPUT_MODEL_DIR> --framework=paddle
+```
+
+Parameters to convert your model:
+
+* [Framework-agnostic parameters](Converting_Model_General.md): These parameters are used to convert a model trained with any supported framework.
+> **NOTE:** `--scale`, `--scale_values`, `--mean_values`, `--mean_file` are unsupported in the current version of mo_paddle.
+
+### Example of Converting a Paddle* Model
+Below is an example command to convert a Yolo v3 Paddle\* network to an OpenVINO IR network with the Model Optimizer.
+```sh
+python3 mo.py --model_name yolov3_darknet53_270e_coco --output_dir <OUTPUT_MODEL_DIR> --framework=paddle --data_type=FP32 --reverse_input_channels --input_shape=[2,3,608,608],[1,2],[1,2] --input=image,im_shape,scale_factor --output=save_infer_model/scale_0.tmp_1,save_infer_model/scale_1.tmp_1 --input_model=yolov3.pdmodel
+```
+
+## Supported Paddle\* Layers
+Refer to [Supported Framework Layers](../Supported_Frameworks_Layers.md) for the list of supported standard layers.
+
+## Frequently Asked Questions (FAQ)
+
+The Model Optimizer provides explanatory messages if it is unable to run to completion due to issues like typographical errors, incorrectly used options, or other issues. The message describes the potential cause of the problem and gives a link to the [Model Optimizer FAQ](../Model_Optimizer_FAQ.md). The FAQ has instructions on how to resolve most issues. The FAQ also includes links to relevant sections in the Model Optimizer Developer Guide to help you understand what went wrong.
+
+## Summary
+
+In this document, you learned:
+
+* Basic information about how the Model Optimizer works with Paddle\* models
+* Which Paddle\* models are supported
+* How to convert a trained Paddle\* model using the Model Optimizer with framework-agnostic command-line options
diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md
index ed6451a7632..26ce1289b8c 100644
--- a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md
+++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md
@@ -16,7 +16,7 @@ The mo.py script is the universal entry point that can deduce the f
* `.onnx` - ONNX\* models
* `.nnet` - Kaldi\* models.
-If the model files do not have standard extensions, you can use the ``--framework {tf,caffe,kaldi,onnx,mxnet}`` option to specify the framework type explicitly.
+If the model files do not have standard extensions, you can use the ``--framework {tf,caffe,kaldi,onnx,mxnet,paddle}`` option to specify the framework type explicitly.
For example, the following commands are equivalent:
```sh
@@ -33,6 +33,7 @@ Framework-specific parameters for:
* [MXNet](Convert_Model_From_MxNet.md),
* [ONNX](Convert_Model_From_ONNX.md),
* [Kaldi](Convert_Model_From_Kaldi.md).
+* [Paddle](Convert_Model_From_Paddle.md).
## See Also
diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml
index f5ef147751f..ee07308a19a 100644
--- a/docs/doxygen/ie_docs.xml
+++ b/docs/doxygen/ie_docs.xml
@@ -326,6 +326,7 @@ limitations under the License.
+
diff --git a/docs/ops/comparison/Greater_1.md b/docs/ops/comparison/Greater_1.md
index 928eca83878..a1fe52e0364 100644
--- a/docs/ops/comparison/Greater_1.md
+++ b/docs/ops/comparison/Greater_1.md
@@ -4,7 +4,16 @@
**Category**: Comparison binary operation
-**Short description**: *Greater* performs element-wise comparison operation with two given tensors applying multi-directional broadcast rules.
+**Short description**: *Greater* performs element-wise comparison operation with two given tensors applying broadcast rules specified in the `auto_broadcast` attribute.
+
+**Detailed description**
+Before performing the comparison operation, input tensors *a* and *b* are broadcast if their shapes are different and the `auto_broadcast` attribute is not `none`. Broadcasting is performed according to the `auto_broadcast` value.
+
+After broadcasting, *Greater* does the following with the input tensors *a* and *b*:
+
+\f[
+o_{i} = a_{i} > b_{i}
+\f]
**Attributes**:
@@ -13,39 +22,33 @@
* **Description**: specifies rules used for auto-broadcasting of input tensors.
* **Range of values**:
* *none* - no auto-broadcasting is allowed, all input shapes should match
- * *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in ONNX docs.
+ * *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
+ * *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
* **Type**: string
* **Default value**: "numpy"
* **Required**: *no*
**Inputs**
-* **1**: A tensor of type *T*. **Required.**
-* **2**: A tensor of type *T*. **Required.**
+* **1**: A tensor of type *T* and arbitrary shape. **Required.**
+* **2**: A tensor of type *T* and arbitrary shape. **Required.**
**Outputs**
-* **1**: The result of element-wise comparison operation. A tensor of type boolean.
+* **1**: The result of the element-wise comparison operation applied to the input tensors. A tensor of type *T_BOOL* with shape equal to the broadcasted shape of the two inputs.
**Types**
* *T*: arbitrary supported type.
-
-**Detailed description**
-Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
-
-After broadcasting *Greater* does the following with the input tensors *a* and *b*:
-
-\f[
-o_{i} = a_{i} > b_{i}
-\f]
+* *T_BOOL*: `boolean`.
**Examples**
-*Example 1*
+*Example 1: no broadcast*
```xml
+
256
@@ -65,9 +68,10 @@ o_{i} = a_{i} > b_{i}
```
-*Example 2: broadcast*
+*Example 2: numpy broadcast*
```xml
+
8
diff --git a/docs/ops/comparison/NotEqual_1.md b/docs/ops/comparison/NotEqual_1.md
index 448f4bcb66a..691da41c175 100644
--- a/docs/ops/comparison/NotEqual_1.md
+++ b/docs/ops/comparison/NotEqual_1.md
@@ -4,7 +4,18 @@
**Category**: Comparison binary operation
-**Short description**: *NotEqual* performs element-wise comparison operation with two given tensors applying multi-directional broadcast rules.
+**Short description**: *NotEqual* performs element-wise comparison operation with two given tensors applying
+multi-directional broadcast rules specified in the `auto_broadcast` attribute.
+
+**Detailed description**
+Before performing the comparison operation, input tensors *a* and *b* are broadcast if their shapes are different and the `auto_broadcast` attribute is not `none`.
+Broadcasting is performed according to the `auto_broadcast` value.
+
+After broadcasting, *NotEqual* does the following with the input tensors *a* and *b*:
+
+\f[
+o_{i} = a_{i} != b_{i}
+\f]
**Attributes**:
@@ -13,7 +24,8 @@
* **Description**: specifies rules used for auto-broadcasting of input tensors.
* **Range of values**:
* *none* - no auto-broadcasting is allowed, all input shapes should match
- * *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in ONNX docs.
+ * *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
+ * *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
* **Type**: string
* **Default value**: "numpy"
* **Required**: *no*
@@ -31,15 +43,6 @@
* *T*: arbitrary supported type.
-**Detailed description**
-Before performing arithmetic operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
-
-After broadcasting *NotEqual* does the following with the input tensors *a* and *b*:
-
-\f[
-o_{i} = a_{i} \neq b_{i}
-\f]
-
**Examples**
*Example 1*
diff --git a/docs/ops/logical/LogicalXor_1.md b/docs/ops/logical/LogicalXor_1.md
index 16072f01183..a6a832308ae 100644
--- a/docs/ops/logical/LogicalXor_1.md
+++ b/docs/ops/logical/LogicalXor_1.md
@@ -6,33 +6,7 @@
**Short description**: *LogicalXor* performs element-wise logical XOR operation with two given tensors applying multi-directional broadcast rules.
-**Attributes**:
-
-* *auto_broadcast*
-
- * **Description**: specifies rules used for auto-broadcasting of input tensors.
- * **Range of values**:
- * *none* - no auto-broadcasting is allowed, all input shapes should match
- * *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in ONNX docs.
- * **Type**: string
- * **Default value**: "numpy"
- * **Required**: *no*
-
-**Inputs**
-
-* **1**: A tensor of type *T*. **Required.**
-* **2**: A tensor of type *T*. **Required.**
-
-**Outputs**
-
-* **1**: The result of element-wise logical XOR operation. A tensor of type *T*.
-
-**Types**
-
-* *T*: boolean type.
-
-**Detailed description**
-Before performing logical operation, input tensors *a* and *b* are broadcasted if their shapes are different and `auto_broadcast` attributes is not `none`. Broadcasting is performed according to `auto_broadcast` value.
+**Detailed description**: Before performing the logical operation, input tensors *a* and *b* are broadcast if their shapes are different and the `auto_broadcast` attribute is not `none`. Broadcasting is performed according to the `auto_broadcast` value.
After broadcasting *LogicalXor* does the following with the input tensors *a* and *b*:
@@ -40,9 +14,35 @@ After broadcasting *LogicalXor* does the following with the input tensors *a* an
o_{i} = a_{i} \oplus b_{i}
\f]
+**Attributes**:
+
+* *auto_broadcast*
+
+ * **Description**: specifies rules used for auto-broadcasting of input tensors.
+ * **Range of values**:
+ * *none* - no auto-broadcasting is allowed, all input shapes must match
+ * *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
+ * *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
+ * **Type**: string
+ * **Default value**: "numpy"
+ * **Required**: *no*
+
+**Inputs**
+
+* **1**: A tensor of type *T_BOOL* and arbitrary shape. **Required.**
+* **2**: A tensor of type *T_BOOL* and arbitrary shape. **Required.**
+
+**Outputs**
+
+* **1**: The result of element-wise *LogicalXor* operation. A tensor of type *T_BOOL* with shape equal to the broadcasted shape of the two inputs.
+
+**Types**
+
+* *T_BOOL*: `boolean`.
+
**Examples**
-*Example 1*
+*Example 1: no broadcast*
```xml
@@ -65,7 +65,7 @@ o_{i} = a_{i} \oplus b_{i}
```
-*Example 2: broadcast*
+*Example 2: numpy broadcast*
```xml
diff --git a/docs/ops/pooling/MaxPool_1.md b/docs/ops/pooling/MaxPool_1.md
index 182df220b52..9ea1be7380f 100644
--- a/docs/ops/pooling/MaxPool_1.md
+++ b/docs/ops/pooling/MaxPool_1.md
@@ -163,7 +163,7 @@ strides = [1, 1]
kernel = [2, 2]
rounding_type = "floor"
auto_pad = "same_upper"
-output = [[[[5, 5, -6],
+output = [[[[5, 5, 3],
[8, 9, 9]
[8, 9, 9]],
[[6, 5, 5],
diff --git a/docs/ops/pooling/MaxPool_8.md b/docs/ops/pooling/MaxPool_8.md
new file mode 100644
index 00000000000..808b2e616fc
--- /dev/null
+++ b/docs/ops/pooling/MaxPool_8.md
@@ -0,0 +1,360 @@
+## MaxPool {#openvino_docs_ops_pooling_MaxPool_8}
+
+**Versioned name**: *MaxPool-8*
+
+**Category**: *Pooling*
+
+**Short description**: Performs the max pooling operation on input.
+
+**Detailed description**: Input shape can be either 3D, 4D, or 5D. The max pooling operation is performed with respect to the input shape from the third dimension to the last dimension. If padding is applied, the padded values are treated as `-inf` during the pooling calculation. The max pooling operation involves sliding a filter over each channel of a feature map and downsampling by choosing the largest value within the region covered by the filter.
+
+**Attributes**: *Pooling* attributes are specified in the `data` node, which is a child of the layer node.
+
+* *strides*
+
+ * **Description**: *strides* is a distance (in pixels) to slide the window on the feature map over the (z, y, x) axes for 3D poolings and (y, x) axes for 2D poolings. For example, *strides* equal to "4,2,1" means sliding the window 4 pixels at a time over depth dimension, 2 over height dimension, and 1 over width dimension.
+ * **Range of values**: integer values starting from 0
+ * **Type**: int[]
+ * **Required**: *yes*
+
+* *dilations*
+
+ * **Description**: *dilations* specify the index of the next pixel to select when pooling. If not present, the dilation defaults to 1, meaning the adjacent pixel is chosen. A value of 2 indicates that one pixel is skipped and every other pixel is considered. Dilations specify one value for each spatial axis of the kernel: `(z, y, x)` for 3D poolings and `(y, x)` for 2D poolings.
+ * **Range of values**: integer values starting from 0
+ * **Type**: int[]
+ * **Default value**: `[1,1,...]`
+ * **Required**: *no*
+
+* *pads_begin*
+
+ * **Description**: *pads_begin* is a number of pixels to add to the beginning along each axis. For example, *pads_begin* equal to "1,2" means adding 1 pixel to the top of the input and 2 to the left of the input. All added padding values are equal to negative infinity.
+ * **Range of values**: integer values starting from 0
+ * **Type**: int[]
+ * **Required**: *yes*
+ * **Note**: the attribute is ignored when *auto_pad* attribute is specified.
+
+* *pads_end*
+
+ * **Description**: *pads_end* is a number of pixels to add to the ending along each axis. For example, *pads_end* equal to "1,2" means adding 1 pixel to the bottom of the input and 2 to the right of the input. All added padding values are equal to negative infinity.
+ * **Range of values**: integer values starting from 0
+ * **Type**: int[]
+ * **Required**: *yes*
+ * **Note**: the attribute is ignored when the *auto_pad* attribute is specified.
+
+* *kernel*
+
+ * **Description**: *kernel* is a size of each filter. For example, *kernel* equal to (2, 3) means that each filter has height equal to 2 and width equal to 3.
+ * **Range of values**: integer values starting from 1
+ * **Type**: int[]
+ * **Required**: *yes*
+
+* *rounding_type*
+
+ * **Description**: *rounding_type* is a type of rounding to be used to compute output shape.
+ * **Range of values**:
+ * *ceil*
+ * *floor*
+ * **Type**: string
+ * **Default value**: *floor*
+ * **Required**: *no*
+
+* *auto_pad*
+
+ * **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
+ * *explicit*: explicit padding values from `pads_begin` and `pads_end` are used.
+ * *same_upper (same_lower)* the input is padded to match the output size. In case of odd padding value, an extra padding is added at the end (at the beginning).
+ * *valid* padding is not used.
+ * **Type**: string
+ * **Default value**: *explicit*
+ * **Required**: *no*
+ * **Note**: *pads_begin* and *pads_end* attributes are ignored when *auto_pad* is not equal to explicit.
+
+* *index_element_type*
+
+ * **Description**: the type of output tensor with indices
+ * **Range of values**: "i64" or "i32"
+ * **Type**: string
+ * **Default value**: "i64"
+ * **Required**: *No*
+
+* *axis*
+
+ * **Description**: indicator of the first dimension in the input shape that should be used to calculate the upper bound of allowed index output values. The upper bound is the product of dimensions starting from the one pointed by the 'axis' attribute until the end of the input shape.
+ * **Range of values**: integer number. Negative value means counting dimension from the end. The range is `[-R, R - 1]`, where `R` is the rank of the input tensor.
+ * **Type**: int
+ * **Default value**: 0
+ * **Required**: *No*
+
+**Inputs**:
+
+* **1**: 3D, 4D, or 5D input tensor of type T. Required.
+
+**Outputs**:
+ * **1**: Input shape can be either `[N, C, H]`, `[N, C, H, W]`, or `[N, C, H, W, D]`. The corresponding output shape is `[N, C, H_out]`, `[N, C, H_out, W_out]` or `[N, C, H_out, W_out, D_out]`. Output tensor has the same data type as the input tensor.
+
+ * **2**: Output tensor of type *T_IND* with indices of values selected by the pooling operation.
+ Shape of this output matches the first output. The type of this output can be specified using the `index_element_type` attribute.
+ Values are computed as indices in a tensor flattened to 1D, not considering padding. Examples for a 5D input tensor:
+ * When `axis == 0`, the values are in the range `[0, N * C * H * W * D)`.
+ * When `axis == 2`, the values are in the range `[0, H * W * D)`.
+
+ Note: the values of this output can only be calculated correctly if `pads_value` is set to `-infinity`.
+
+
+**Types**
+
+* *T*: floating point or integer type.
+
+* *T_IND*: `int64` or `int32`.
+
+
+**Mathematical Formulation**
+Output shape calculation based on `auto_pad` and `rounding_type`:
+ * `auto_pad = explicit` and `rounding_type = floor`
+ `H_out = floor((H + pads_begin[0] + pads_end[0] - ((kernel[0] - 1) * dilations[0] + 1)) / strides[0] + 1)`
+ `W_out = floor((W + pads_begin[1] + pads_end[1] - ((kernel[1] - 1) * dilations[1] + 1)) / strides[1] + 1)`
+ `D_out = floor((D + pads_begin[2] + pads_end[2] - ((kernel[2] - 1) * dilations[2] + 1)) / strides[2] + 1)`
+
+ * `auto_pad = explicit` and `rounding_type = ceil`
+ `H_out = ceil((H + pads_begin[0] + pads_end[0] - ((kernel[0] - 1) * dilations[0] + 1)) / strides[0] + 1)`
+ `W_out = ceil((W + pads_begin[1] + pads_end[1] - ((kernel[1] - 1) * dilations[1] + 1)) / strides[1] + 1)`
+ `D_out = ceil((D + pads_begin[2] + pads_end[2] - ((kernel[2] - 1) * dilations[2] + 1)) / strides[2] + 1)`
+
+ * `auto_pad = valid`
+ `H_out = ceil((H - ((kernel[0] - 1) * dilations[0] + 1) + 1) / strides[0])`
+ `W_out = ceil((W - ((kernel[1] - 1) * dilations[1] + 1) + 1) / strides[1])`
+ `D_out = ceil((D - ((kernel[2] - 1) * dilations[2] + 1) + 1) / strides[2])`
+
+ * `auto_pad = same_upper / same_lower`
+ `H_out = H`
+ `W_out = W`
+ `D_out = D`
+
+
+If `H + pads_begin[i] + pads_end[i] - kernel[i]` is not divisible by `strides[i]` evenly, the result is rounded with respect to the `rounding_type` attribute.
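+
+As an illustration only (not part of the operation specification), a small helper that evaluates the explicit-padding output-shape formula above for one spatial axis:
+
+```cpp
+#include <cmath>
+#include <cstdio>
+
+// Output size along one spatial axis for auto_pad = "explicit".
+int pooled_dim(int input, int pad_begin, int pad_end, int kernel, int dilation, int stride, bool ceil_mode) {
+    const double effective_kernel = (kernel - 1) * dilation + 1;
+    const double out = (input + pad_begin + pad_end - effective_kernel) / stride + 1;
+    return static_cast<int>(ceil_mode ? std::ceil(out) : std::floor(out));
+}
+
+int main() {
+    // Example 1 below: H = 3, pads_begin = pads_end = 1, kernel = 2, dilations = 1, strides = 1, rounding_type = "floor"
+    std::printf("H_out = %d\n", pooled_dim(3, 1, 1, 2, 1, 1, false));  // prints "H_out = 4"
+    return 0;
+}
+```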
+
+Example 1 shows how *MaxPool* operates with 4D input using 2D kernel and `auto_pad = explicit`.
+
+```
+input = [[[[-1, 2, 3],
+ [4, 5, -6],
+ [-7, 8, 9]]]]
+strides = [1, 1]
+pads_begin = [1, 1]
+pads_end = [1, 1]
+kernel = [2, 2]
+rounding_type = "floor"
+auto_pad = "explicit"
+output0 = [[[[-1, 2, 3, 3],
+ [4, 5, 5, -6],
+ [4, 8, 9, 9],
+ [-7, 8, 9, 9]]]]
+output1 = [[[[0, 1, 2, 2],
+ [3, 4, 4, 5],
+ [3, 7, 8, 8],
+ [6, 7, 8, 8]]]]
+```
+
+Example 2 shows how *MaxPool* operates with 3D input using 1D kernel and `auto_pad = valid`.
+
+```
+input = [[[-1, 2, 3, 5, -7, 9, 1]]]
+strides = [1]
+kernel = [3]
+rounding_type = "floor"
+auto_pad = "valid"
+output0 = [[[3, 5, 5, 9, 9]]]
+output1 = [[[2, 3, 3, 5, 5]]]
+```
+
+Example 3 shows how *MaxPool* operates with 4D input using 2D kernel and `auto_pad = same_lower`.
+
+```
+input = [[[[-1, 2, 3],
+ [4, 5, -6],
+ [-7, 8, 9]]]]
+strides = [1, 1]
+kernel = [2, 2]
+rounding_type = "floor"
+auto_pad = "same_lower"
+output0 = [[[[-1, 2, 3],
+ [4, 5, 5]
+ [4, 8, 9]]]]
+output1 = [[[[0, 1, 2],
+ [3, 4, 4]
+ [3, 7, 8]]]]
+```
+
+Example 4 shows how *MaxPool* operates with 4D input using 2D kernel and `auto_pad = same_upper`.
+
+```
+input = [[[[-1, 2, 3],
+ [4, 5, -6],
+ [-7, 8, 9]],
+ [[2, -1, 5],
+ [6, -7, 1],
+ [8, 2, -3]]]]
+strides = [1, 1]
+kernel = [2, 2]
+rounding_type = "floor"
+auto_pad = "same_upper"
+output0 = [[[[5, 5, 3],
+ [8, 9, 9]
+ [8, 9, 9]],
+ [[6, 5, 5],
+ [8, 2, 1],
+ [8, 2, -3]]]]
+output1 = [[[[4, 4, 2],
+ [7, 8, 8]
+ [7, 8, 8]],
+ [[12, 11, 11],
+ [15, 16, 14],
+ [15, 16, 17]]]]
+```
+
+Example 5 shows how *MaxPool* operates with 4D input using 2D kernel, `auto_pad = valid` and `rounding_type = ceil`.
+
+```
+input = [[[[-1, 2, 3],
+ [4, 5, -6],
+ [-7, 8, 9]]]]
+strides = [2, 2]
+kernel = [2, 2]
+rounding_type = "ceil"
+auto_pad = "valid"
+output0 = [[[[5, 3],
+ [8, 9]]]]
+output1 = [[[[4, 2],
+ [7, 8]]]]
+```
+
+Example 6 shows how *MaxPool* operates on 4D input using dilated 2D kernel, `auto_pad = explicit` and `rounding_type = floor`.
+
+```
+input = [[[[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]]]
+strides = [1, 1]
+kernel = [2, 2]
+dilations = [2, 2]
+rounding_type = "floor"
+auto_pad = "explicit"
+pads_begin = [1, 1]
+pads_end = [1, 1]
+output0 = [[[[5, 6, 5],
+ [8, 9, 8],
+ [5, 6, 5]]]]
+output1 = [[[[4, 5, 4],
+ [7, 8, 7],
+ [4, 5, 4]]]]
+```
+
+Example 7 shows how *MaxPool* operates on 4D input using 2D kernel, with non-default `axis` value.
+
+```
+input = [[[[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]],
+ [[10, 11, 12],
+ [13, 14, 15],
+ [16, 17, 18]]
+ ]]
+strides = [1, 1]
+kernel = [2, 2]
+dilations = [1, 1]
+rounding_type = "floor"
+auto_pad = "explicit"
+pads_begin = [0, 0]
+pads_end = [0, 0]
+axis = 2
+output0 = [[[[5, 6],
+ [8, 9]],
+ [[14, 15],
+ [17, 18]]]]
+output1 = [[[[4, 5],
+ [7, 8]],
+ [[4, 5],
+ [7, 8]]]]
+```
+
+**Examples**
+
+```xml
+
+
+
+
+ 1
+ 3
+ 32
+ 32
+
+
+
+
+
+
+
+
+
+ 1
+ 3
+ 32
+ 32
+
+
+
+
+
+
+
+
+
+ 1
+ 3
+ 32
+ 32
+
+
+
+
+```
diff --git a/docs/snippets/AUTO0.cpp b/docs/snippets/AUTO0.cpp
new file mode 100644
index 00000000000..b546e61a1c6
--- /dev/null
+++ b/docs/snippets/AUTO0.cpp
@@ -0,0 +1,12 @@
+#include <inference_engine.hpp>
+
+int main() {
+//! [part0]
+ InferenceEngine::Core ie;
+ InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
+ // these 2 lines below are equivalent
+ InferenceEngine::ExecutableNetwork exec0 = ie.LoadNetwork(network, "AUTO");
+ InferenceEngine::ExecutableNetwork exec1 = ie.LoadNetwork(network, "");
+//! [part0]
+return 0;
+}
diff --git a/docs/snippets/AUTO1.cpp b/docs/snippets/AUTO1.cpp
new file mode 100644
index 00000000000..22487b5aeb0
--- /dev/null
+++ b/docs/snippets/AUTO1.cpp
@@ -0,0 +1,15 @@
+#include <inference_engine.hpp>
+
+int main() {
+//! [part1]
+ InferenceEngine::Core ie;
+ InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
+ // "AUTO" plugin is (globally) pre-configured with the explicit option:
+ ie.SetConfig({{"AUTO_DEVICE_LIST", "CPU,GPU"}}, "AUTO");
+ // the three lines below are equivalent (the first leverages the pre-configured AUTO, while the second and third explicitly pass the same settings)
+ InferenceEngine::ExecutableNetwork exec0 = ie.LoadNetwork(network, "AUTO", {});
+ InferenceEngine::ExecutableNetwork exec1 = ie.LoadNetwork(network, "AUTO", {{"AUTO_DEVICE_LIST", "CPU,GPU"}});
+ InferenceEngine::ExecutableNetwork exec2 = ie.LoadNetwork(network, "AUTO:CPU,GPU");
+//! [part1]
+return 0;
+}
diff --git a/docs/snippets/AUTO2.cpp b/docs/snippets/AUTO2.cpp
new file mode 100644
index 00000000000..c70e2923af7
--- /dev/null
+++ b/docs/snippets/AUTO2.cpp
@@ -0,0 +1,10 @@
+#include <inference_engine.hpp>
+
+int main() {
+//! [part2]
+ InferenceEngine::Core ie;
+ InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
+ InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "AUTO");
+//! [part2]
+return 0;
+}
diff --git a/docs/snippets/AUTO3.cpp b/docs/snippets/AUTO3.cpp
new file mode 100644
index 00000000000..37e8e350768
--- /dev/null
+++ b/docs/snippets/AUTO3.cpp
@@ -0,0 +1,10 @@
+#include <inference_engine.hpp>
+
+int main() {
+//! [part3]
+ InferenceEngine::Core ie;
+ InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
+ InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "AUTO:CPU,GPU");
+//! [part3]
+return 0;
+}
diff --git a/docs/snippets/AUTO4.cpp b/docs/snippets/AUTO4.cpp
new file mode 100644
index 00000000000..ee39e7103d7
--- /dev/null
+++ b/docs/snippets/AUTO4.cpp
@@ -0,0 +1,19 @@
+#include <inference_engine.hpp>
+
+int main() {
+ const std::map<std::string, std::string> cpu_config = { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } };
+ const std::map<std::string, std::string> gpu_config = { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } };
+ //! [part4]
+ InferenceEngine::Core ie;
+ InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
+ // configure the CPU device first
+ ie.SetConfig(cpu_config, "CPU");
+ // configure the GPU device
+ ie.SetConfig(gpu_config, "GPU");
+ // load the network to the auto-device
+ InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "AUTO");
+ // the new metric allows querying the optimization capabilities
+ std::vector<std::string> device_cap = exeNetwork.GetMetric(METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+ //! [part4]
+ return 0;
+}
diff --git a/docs/snippets/AUTO5.cpp b/docs/snippets/AUTO5.cpp
new file mode 100644
index 00000000000..e0678b4e0de
--- /dev/null
+++ b/docs/snippets/AUTO5.cpp
@@ -0,0 +1,15 @@
+#include <inference_engine.hpp>
+
+int main() {
+ std::string device_name = "AUTO:CPU,GPU";
+ const std::map< std::string, std::string > full_config = {};
+ //! [part5]
+ InferenceEngine::Core ie;
+ InferenceEngine::CNNNetwork network = ie.ReadNetwork("sample.xml");
+ // 'device_name' can be "AUTO:CPU,GPU" to configure the auto-device to use CPU and GPU
+ InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, device_name, full_config);
+ // the new metric allows querying the optimization capabilities
+ std::vector<std::string> device_cap = exeNetwork.GetMetric(METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+ //! [part5]
+ return 0;
+}
diff --git a/docs/template_plugin/tests/functional/op_reference/greater.cpp b/docs/template_plugin/tests/functional/op_reference/greater.cpp
new file mode 100644
index 00000000000..c08a46ccda8
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/greater.cpp
@@ -0,0 +1,84 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include
+#include
+#include
+#include
+
+#include "comparison.hpp"
+
+using namespace ngraph;
+using namespace InferenceEngine;
+using ComparisonTypes = ngraph::helpers::ComparisonTypes;
+
+namespace reference_tests {
+namespace ComparisonOpsRefTestDefinitions {
+namespace {
+TEST_P(ReferenceComparisonLayerTest, GreaterCompareWithHardcodedRefs) {
+ Exec();
+}
+
+template <element::Type_t IN_ET>
+std::vector<RefComparisonParams> generateComparisonParams(const element::Type& type) {
+ using T = typename element_type_traits<IN_ET>::value_type;
+ std::vector<RefComparisonParams> compParams {
+ // 1D // 2D // 3D // 4D
+ Builder {}
+ .compType(ComparisonTypes::GREATER)
+ .input1({{2, 2}, type, std::vector {0, 12, 23, 0}})
+ .input2({{2, 2}, type, std::vector {0, 12, 23, 0}})
+ .expected({{2, 2}, element::boolean, std::vector {0, 0, 0, 0}}),
+ Builder {}
+ .compType(ComparisonTypes::GREATER)
+ .input1({{2, 3}, type, std::vector {0, 6, 45, 1, 21, 21}})
+ .input2({{2, 3}, type, std::vector {1, 18, 23, 1, 19, 21}})
+ .expected({{2, 3}, element::boolean, std::vector {0, 0, 1, 0, 1, 0}}),
+ Builder {}
+ .compType(ComparisonTypes::GREATER)
+ .input1({{1}, type, std::vector {53}})
+ .input2({{1}, type, std::vector {53}})
+ .expected({{1}, element::boolean, std::vector {0}}),
+ Builder {}
+ .compType(ComparisonTypes::GREATER)
+ .input1({{2, 4}, type, std::vector {0, 12, 23, 0, 1, 5, 12, 8}})
+ .input2({{2, 4}, type, std::vector {0, 12, 23, 0, 10, 5, 11, 8}})
+ .expected({{2, 4}, element::boolean, std::vector {0, 0, 0, 0, 0, 0, 1, 0}}),
+ Builder {}
+ .compType(ComparisonTypes::GREATER)
+ .input1({{3, 1, 2}, type, std::vector {2, 1, 4, 1, 3, 1}})
+ .input2({{1, 2, 1}, type, std::vector {1, 1}})
+ .expected({{3, 2, 2}, element::boolean, std::vector {1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0}}),
+ Builder {}
+ .compType(ComparisonTypes::GREATER)
+ .input1({{2, 1, 2, 1}, type, std::vector {2, 1, 4, 1}})
+ .input2({{1, 2, 1}, type, std::vector {1, 1}})
+ .expected({{2, 1, 2, 1}, element::boolean, std::vector {1, 0, 1, 0}})};
+ return compParams;
+}
+
+std::vector<RefComparisonParams> generateComparisonCombinedParams() {
+ const std::vector<std::vector<RefComparisonParams>> compTypeParams {
+ generateComparisonParams<element::Type_t::f32>(element::f32),
+ generateComparisonParams<element::Type_t::f16>(element::f16),
+ generateComparisonParams<element::Type_t::i32>(element::i32),
+ generateComparisonParams<element::Type_t::i64>(element::i64),
+ generateComparisonParams<element::Type_t::u32>(element::u32),
+ generateComparisonParams<element::Type_t::u64>(element::u64),
+ generateComparisonParams<element::Type_t::boolean>(element::boolean)};
+ std::vector<RefComparisonParams> combinedParams;
+
+ for (const auto& params : compTypeParams) {
+ combinedParams.insert(combinedParams.end(), params.begin(), params.end());
+ }
+ return combinedParams;
+}
+
+} // namespace
+INSTANTIATE_TEST_SUITE_P(smoke_Comparison_With_Hardcoded_Refs, ReferenceComparisonLayerTest, ::testing::ValuesIn(generateComparisonCombinedParams()),
+ ReferenceComparisonLayerTest::getTestCaseName);
+} // namespace ComparisonOpsRefTestDefinitions
+} // namespace reference_tests
diff --git a/docs/template_plugin/tests/functional/op_reference/logical_xor.cpp b/docs/template_plugin/tests/functional/op_reference/logical_xor.cpp
new file mode 100644
index 00000000000..ac30a4c8352
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/logical_xor.cpp
@@ -0,0 +1,48 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include
+#include
+#include
+#include
+#include
+
+#include "logical.hpp"
+
+using namespace ngraph;
+using namespace InferenceEngine;
+using LogicalTypes = ngraph::helpers::LogicalTypes;
+
+namespace reference_tests {
+namespace LogicalOpsRefTestDefinitions {
+namespace {
+
+std::vector<RefLogicalParams> generateLogicalParams() {
+ std::vector<RefLogicalParams> logicalParams {
+ Builder {}
+ .opType(LogicalTypes::LOGICAL_XOR)
+ .input1({{2, 2}, element::boolean, std::vector {true, false, true, false}})
+ .input2({{2, 2}, element::boolean, std::vector {false, true, true, false}})
+ .expected({{2, 2}, element::boolean, std::vector {true, true, false, false}}),
+ Builder {}
+ .opType(LogicalTypes::LOGICAL_XOR)
+ .input1({{2, 1, 2, 1}, element::boolean, std::vector {true, false, true, false}})
+ .input2({{1, 1, 2, 1}, element::boolean, std::vector {true, false}})
+ .expected({{2, 1, 2, 1}, element::boolean, std::vector {false, false, false, false}}),
+ Builder {}
+ .opType(LogicalTypes::LOGICAL_XOR)
+ .input1({{3, 4}, element::boolean, std::vector {true, true, true, true, true, false, true, false, false, true, true, true}})
+ .input2({{3, 4}, element::boolean, std::vector {true, true, true, true, true, true, true, false, false, true, true, false}})
+ .expected({{3, 4}, element::boolean, std::vector {false, false, false, false, false, true, false, false, false, false, false, true}})};
+ return logicalParams;
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_LogicalXor_With_Hardcoded_Refs, ReferenceLogicalLayerTest, ::testing::ValuesIn(generateLogicalParams()),
+                         ReferenceLogicalLayerTest::getTestCaseName);
+
+} // namespace
+} // namespace LogicalOpsRefTestDefinitions
+} // namespace reference_tests
diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py
index 5d9ca16238a..6b233ff4284 100644
--- a/inference-engine/ie_bridges/python/wheel/setup.py
+++ b/inference-engine/ie_bridges/python/wheel/setup.py
@@ -7,10 +7,10 @@ import sys
import errno
import subprocess # nosec
import typing
+import multiprocessing
from fnmatch import fnmatchcase
from pathlib import Path
from shutil import copyfile, rmtree
-from distutils.command.install import install
from distutils.command.build import build
from distutils.command.clean import clean
from distutils.errors import DistutilsSetupError
@@ -27,11 +27,11 @@ PYTHON_VERSION = f'python{sys.version_info.major}.{sys.version_info.minor}'
# The following variables can be defined in environment or .env file
CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.')
-CORE_LIBS_DIR = config('CORE_LIBS_DIR', '')
-PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', '')
-NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', '')
-TBB_LIBS_DIR = config('TBB_LIBS_DIR', '')
-PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', '')
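+# The defaults below follow the standard OpenVINO package layout under deployment_tools/.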
+CORE_LIBS_DIR = config('CORE_LIBS_DIR', 'deployment_tools/inference_engine/lib/intel64')
+PLUGINS_LIBS_DIR = config('PLUGINS_LIBS_DIR', 'deployment_tools/inference_engine/lib/intel64')
+NGRAPH_LIBS_DIR = config('NGRAPH_LIBS_DIR', 'deployment_tools/ngraph/lib')
+TBB_LIBS_DIR = config('TBB_LIBS_DIR', 'deployment_tools/inference_engine/external/tbb/lib')
+PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}')
LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path'
LIB_INSTALL_CFG = {
@@ -118,7 +118,66 @@ class PrebuiltExtension(Extension):
class CustomBuild(build):
"""Custom implementation of build_clib"""
+ cmake_build_types = ['Release', 'Debug', 'RelWithDebInfo', 'MinSizeRel']
+ user_options = [
+ ('config=', None, 'Build configuration [{types}].'.format(types='|'.join(cmake_build_types))),
+ ('jobs=', None, 'Specifies the number of jobs to use with make.'),
+ ('cmake-args=', None, 'Additional options to be passed to CMake.'),
+ ]
+
+ def initialize_options(self):
+ """Set default values for all the options that this command supports."""
+ super().initialize_options()
+ self.build_base = 'build'
+ self.config = None
+ self.jobs = None
+ self.cmake_args = None
+
+ def finalize_options(self):
+ """Set final values for all the options that this command supports."""
+ super().finalize_options()
+
+ if not self.config:
+ if self.debug:
+ self.config = 'Debug'
+ else:
+ self.announce('Set default value for CMAKE_BUILD_TYPE = Release.', level=4)
+ self.config = 'Release'
+ else:
+ build_types = [item.lower() for item in self.cmake_build_types]
+ try:
+ i = build_types.index(str(self.config).lower())
+ self.config = self.cmake_build_types[i]
+ self.debug = self.config == 'Debug'
+ except ValueError:
+ self.announce('Unsupported CMAKE_BUILD_TYPE value: ' + self.config, level=4)
+ self.announce('Supported values: {types}'.format(types=', '.join(self.cmake_build_types)), level=4)
+ sys.exit(1)
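+ # Use --jobs when given, otherwise the MAX_JOBS environment variable, otherwise the number of available CPUs.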
+ if self.jobs is None and os.getenv('MAX_JOBS') is not None:
+ self.jobs = os.getenv('MAX_JOBS')
+ self.jobs = multiprocessing.cpu_count() if self.jobs is None else int(self.jobs)
+
def run(self):
+ global CMAKE_BUILD_DIR
+ self.jobs = self.jobs or multiprocessing.cpu_count()
+ plat_specifier = '.{0}-{1}.{2}'.format(self.plat_name, *sys.version_info[:2])
+ self.build_temp = os.path.join(self.build_base, 'temp' + plat_specifier, self.config)
+
+ # if setup.py is directly called use CMake to build product
+ if CMAKE_BUILD_DIR == '.':
+ openvino_root_dir = os.path.normpath(os.path.join(CMAKE_BUILD_DIR, '../../../../'))
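+ # When setup.py is invoked directly, CMAKE_BUILD_DIR is '.', so the OpenVINO repository root is expected four directories above the current working directory.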
+ self.announce('Configuring cmake project', level=3)
+
+ self.spawn(['cmake', '-H' + openvino_root_dir, '-B' + self.build_temp,
+ '-DCMAKE_BUILD_TYPE={type}'.format(type=self.config),
+ '-DENABLE_PYTHON=ON',
+ '-DNGRAPH_ONNX_FRONTEND_ENABLE=ON'])
+
+ self.announce('Building binaries', level=3)
+ self.spawn(['cmake', '--build', self.build_temp,
+ '--config', self.config, '-j', str(self.jobs)])
+ CMAKE_BUILD_DIR = self.build_temp
+
self.run_command('build_clib')
build.run(self)
# Copy extra package_data content filtered by find_packages
@@ -133,14 +192,6 @@ class CustomBuild(build):
copyfile(path, dst / path_rel)
-class CustomInstall(install):
- """Enable build_clib during the installation"""
-
- def run(self):
- self.run_command('build_clib')
- install.run(self)
-
-
class PrepareLibs(build_clib):
"""Prepare prebuilt libraries"""
@@ -369,6 +420,7 @@ if os.path.exists(package_license):
packages = find_namespace_packages(get_package_dir(PY_INSTALL_CFG))
package_data: typing.Dict[str, list] = {}
+
setup(
version=config('WHEEL_VERSION', '0.0.0'),
author_email=config('WHEEL_AUTHOR_EMAIL', 'openvino_pushbot@intel.com'),
@@ -376,14 +428,13 @@ setup(
license=config('WHEEL_LICENCE_TYPE', 'OSI Approved :: Apache Software License'),
author=config('WHEEL_AUTHOR', 'Intel Corporation'),
description=config('WHEEL_DESC', 'Inference Engine Python* API'),
- install_requires=get_dependencies(config('WHEEL_REQUIREMENTS', 'requirements.txt')),
- long_description=get_description(config('WHEEL_OVERVIEW', 'pypi_overview.md')),
+ install_requires=get_dependencies(config('WHEEL_REQUIREMENTS', 'meta/openvino.requirements.txt')),
+ long_description=get_description(config('WHEEL_OVERVIEW', 'meta/pypi_overview.md')),
long_description_content_type='text/markdown',
download_url=config('WHEEL_DOWNLOAD_URL', 'https://github.com/openvinotoolkit/openvino/tags'),
url=config('WHEEL_URL', 'https://docs.openvinotoolkit.org/latest/index.html'),
cmdclass={
'build': CustomBuild,
- 'install': CustomInstall,
'build_clib': PrepareLibs,
'build_ext': CopyExt,
'clean': CustomClean,
diff --git a/inference-engine/samples/benchmark_app/main.cpp b/inference-engine/samples/benchmark_app/main.cpp
index 18aa66e0a45..53aa0f1922d 100644
--- a/inference-engine/samples/benchmark_app/main.cpp
+++ b/inference-engine/samples/benchmark_app/main.cpp
@@ -212,6 +212,9 @@ int main(int argc, char* argv[]) {
bool perf_counts = false;
// Update config per device according to command line parameters
for (auto& device : devices) {
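+ // Per-device options from the command line are not applied to the AUTO plugin.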
+ if (device == "AUTO") {
+ continue;
+ }
if (!config.count(device))
config[device] = {};
std::map& device_config = config.at(device);
diff --git a/inference-engine/samples/speech_sample/main.cpp b/inference-engine/samples/speech_sample/main.cpp
index f2366ae7ab9..d236fc84833 100644
--- a/inference-engine/samples/speech_sample/main.cpp
+++ b/inference-engine/samples/speech_sample/main.cpp
@@ -627,24 +627,21 @@ int main(int argc, char* argv[]) {
if (FLAGS_q.compare("user") == 0) {
if (!FLAGS_rg.empty()) {
- slog::warn
- << "Custom scale factor will be ignored - using scale factor from provided imported gna model: "
- << FLAGS_rg << slog::endl;
- } else {
- auto scaleFactorInput = ParseScaleFactors(FLAGS_sf);
- if (numInputFiles != scaleFactorInput.size()) {
- std::string errMessage(
- "Incorrect command line for multiple inputs: " + std::to_string(scaleFactorInput.size()) +
- " scale factors provided for " + std::to_string(numInputFiles) + " input files.");
- throw std::logic_error(errMessage);
- }
+ slog::warn << "Custom scale factor will be used for imported gna model: " << FLAGS_rg << slog::endl;
+ }
- for (size_t i = 0; i < scaleFactorInput.size(); ++i) {
- slog::info << "For input " << i << " using scale factor of " << scaleFactorInput[i] << slog::endl;
- std::string scaleFactorConfigKey =
- GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i);
- gnaPluginConfig[scaleFactorConfigKey] = scaleFactorInput[i];
- }
+ auto scaleFactorInput = ParseScaleFactors(FLAGS_sf);
+ if (numInputFiles != scaleFactorInput.size()) {
+ std::string errMessage(
+ "Incorrect command line for multiple inputs: " + std::to_string(scaleFactorInput.size()) +
+ " scale factors provided for " + std::to_string(numInputFiles) + " input files.");
+ throw std::logic_error(errMessage);
+ }
+
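+ // Apply one user-supplied scale factor per input through per-input GNA scale-factor configuration keys.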
+ for (size_t i = 0; i < scaleFactorInput.size(); ++i) {
+ slog::info << "For input " << i << " using scale factor of " << scaleFactorInput[i] << slog::endl;
+ std::string scaleFactorConfigKey = GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i);
+ gnaPluginConfig[scaleFactorConfigKey] = scaleFactorInput[i];
}
} else {
// "static" quantization with calculated scale factor
diff --git a/inference-engine/src/auto_plugin/auto_exec_network.cpp b/inference-engine/src/auto_plugin/auto_exec_network.cpp
deleted file mode 100644
index 6bb3fb9ddfa..00000000000
--- a/inference-engine/src/auto_plugin/auto_exec_network.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-#include