diff --git a/docs/_static/download/operation_conformance_table_files/opset_report_omz_static.html b/docs/_static/download/operation_conformance_table_files/opset_report_omz_static.html
index 28fb6b75f68..a8c169ddf50 100644
--- a/docs/_static/download/operation_conformance_table_files/opset_report_omz_static.html
+++ b/docs/_static/download/operation_conformance_table_files/opset_report_omz_static.html
@@ -99,7 +99,7 @@
GPU |
NVIDIA |
TEMPLATE |
- VPUX |
+ VPU |
@@ -113,7 +113,7 @@
93 |
NOT RUN |
93 |
- NOT RUN |
+ NOT RUN |
@@ -125,7 +125,7 @@
| 75.3 % |
NOT RUN |
95.7 % |
- NOT RUN |
+ NOT RUN |
@@ -137,7 +137,7 @@
| 4030 |
NOT RUN |
4030 |
- NOT RUN |
+ NOT RUN |
@@ -149,7 +149,7 @@
| 89.7 % |
NOT RUN |
98.4 % |
- NOT RUN |
+ NOT RUN |
@@ -161,7 +161,7 @@
| 97.7 % |
NOT RUN |
99.4 % |
- NOT RUN |
+ NOT RUN |
@@ -260,7 +260,7 @@
- NOT RUN |
+ NOT RUN |
@@ -357,7 +357,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -454,7 +454,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -551,7 +551,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -648,7 +648,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -775,7 +775,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -872,7 +872,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -969,7 +969,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1066,7 +1066,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1163,7 +1163,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1260,7 +1260,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1357,7 +1357,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1484,7 +1484,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1581,7 +1581,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1708,7 +1708,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1835,7 +1835,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -1932,7 +1932,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2029,7 +2029,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2156,7 +2156,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2253,7 +2253,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2350,7 +2350,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2447,7 +2447,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2544,7 +2544,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2671,7 +2671,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2798,7 +2798,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -2925,7 +2925,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3022,7 +3022,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3149,7 +3149,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3246,7 +3246,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3373,7 +3373,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3500,7 +3500,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3627,7 +3627,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3724,7 +3724,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3851,7 +3851,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -3948,7 +3948,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4045,7 +4045,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4142,7 +4142,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4239,7 +4239,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4336,7 +4336,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4433,7 +4433,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4560,7 +4560,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4687,7 +4687,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4814,7 +4814,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -4941,7 +4941,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5038,7 +5038,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5135,7 +5135,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5232,7 +5232,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5359,7 +5359,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5486,7 +5486,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5613,7 +5613,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5710,7 +5710,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5807,7 +5807,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -5904,7 +5904,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6001,7 +6001,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6098,7 +6098,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6195,7 +6195,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6208,7 +6208,7 @@
| N/A |
NOT RUN |
N/A |
- NOT RUN |
+ NOT RUN |
@@ -6335,7 +6335,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6462,7 +6462,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6589,7 +6589,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6686,7 +6686,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6783,7 +6783,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -6910,7 +6910,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7037,7 +7037,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7134,7 +7134,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7261,7 +7261,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7358,7 +7358,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7455,7 +7455,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7582,7 +7582,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7679,7 +7679,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7776,7 +7776,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -7903,7 +7903,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8000,7 +8000,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8127,7 +8127,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8224,7 +8224,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8321,7 +8321,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8448,7 +8448,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8575,7 +8575,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8702,7 +8702,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8829,7 +8829,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -8956,7 +8956,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9053,7 +9053,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9150,7 +9150,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9277,7 +9277,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9374,7 +9374,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9501,7 +9501,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9598,7 +9598,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9725,7 +9725,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9822,7 +9822,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -9919,7 +9919,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10016,7 +10016,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10143,7 +10143,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10240,7 +10240,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10337,7 +10337,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10434,7 +10434,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10561,7 +10561,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10658,7 +10658,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10755,7 +10755,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -10882,7 +10882,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11009,7 +11009,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11106,7 +11106,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11233,7 +11233,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11330,7 +11330,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11427,7 +11427,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11524,7 +11524,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11621,7 +11621,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11748,7 +11748,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11875,7 +11875,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -11972,7 +11972,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12069,7 +12069,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12196,7 +12196,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12323,7 +12323,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12450,7 +12450,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12577,7 +12577,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12674,7 +12674,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12771,7 +12771,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12868,7 +12868,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -12995,7 +12995,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13092,7 +13092,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13189,7 +13189,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13316,7 +13316,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13413,7 +13413,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13510,7 +13510,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13607,7 +13607,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13704,7 +13704,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13801,7 +13801,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -13898,7 +13898,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14025,7 +14025,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14122,7 +14122,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14219,7 +14219,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14346,7 +14346,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14443,7 +14443,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14570,7 +14570,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14667,7 +14667,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14794,7 +14794,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14891,7 +14891,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -14988,7 +14988,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15085,7 +15085,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15182,7 +15182,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15309,7 +15309,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15406,7 +15406,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15503,7 +15503,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15600,7 +15600,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15697,7 +15697,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15794,7 +15794,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -15921,7 +15921,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16018,7 +16018,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16115,7 +16115,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16212,7 +16212,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16309,7 +16309,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16406,7 +16406,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16503,7 +16503,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16630,7 +16630,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16727,7 +16727,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16824,7 +16824,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -16951,7 +16951,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17078,7 +17078,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17205,7 +17205,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17332,7 +17332,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17459,7 +17459,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17586,7 +17586,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17713,7 +17713,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17810,7 +17810,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -17937,7 +17937,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18034,7 +18034,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18131,7 +18131,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18228,7 +18228,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18325,7 +18325,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18452,7 +18452,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18579,7 +18579,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18706,7 +18706,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18833,7 +18833,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -18960,7 +18960,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19057,7 +19057,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19184,7 +19184,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19311,7 +19311,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19438,7 +19438,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19565,7 +19565,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19662,7 +19662,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19789,7 +19789,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -19886,7 +19886,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20013,7 +20013,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20140,7 +20140,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20237,7 +20237,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20364,7 +20364,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20491,7 +20491,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20618,7 +20618,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20745,7 +20745,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20872,7 +20872,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -20999,7 +20999,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21096,7 +21096,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21223,7 +21223,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21350,7 +21350,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21477,7 +21477,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21604,7 +21604,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21701,7 +21701,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21828,7 +21828,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -21925,7 +21925,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22052,7 +22052,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22149,7 +22149,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22276,7 +22276,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22403,7 +22403,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22500,7 +22500,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22627,7 +22627,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22754,7 +22754,7 @@
- | NOT RUN |
+ NOT RUN |
@@ -22851,7 +22851,7 @@
- | NOT RUN |
+ NOT RUN |
diff --git a/samples/cpp/speech_sample/README.md b/samples/cpp/speech_sample/README.md
index 4b3b8a55b40..d7c2b5e85d9 100644
--- a/samples/cpp/speech_sample/README.md
+++ b/samples/cpp/speech_sample/README.md
@@ -86,7 +86,7 @@ Several execution modes are supported via the ``-d`` flag:
- ``CPU`` - All calculations are performed on CPU device using CPU Plugin.
- ``GPU`` - All calculations are performed on GPU device using GPU Plugin.
-- ``VPUX`` - All calculations are performed on VPUX device using VPUX Plugin.
+- ``VPU`` - All calculations are performed on VPU device using VPU Plugin.
- ``GNA_AUTO`` - GNA hardware is used if available and the driver is installed. Otherwise, the GNA device is emulated in fast-but-not-bit-exact mode.
- ``GNA_HW`` - GNA hardware is used if available and the driver is installed. Otherwise, an error will occur.
- ``GNA_SW`` - Deprecated. The GNA device is emulated in fast-but-not-bit-exact mode.
@@ -134,7 +134,7 @@ Usage message:
-i "" Required. Path(s) to input file(s). Usage for a single file/layer: or . Example of usage for several files/layers: :=,:=.
-m "" Required. Path to an .xml file with a trained model (required if -rg is missing).
-o "" Optional. Output file name(s) to save scores (inference results). Example of usage for a single file/layer: or . Example of usage for several files/layers: :=,:=.
- -d "" Optional. Specify a target device to infer on. CPU, GPU, VPUX, GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified.
+ -d "" Optional. Specify a target device to infer on. CPU, GPU, VPU, GNA_AUTO, GNA_HW, GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified.
-pc Optional. Enables per-layer performance report.
-q "" Optional. Input quantization mode for GNA: static (default) or user defined (use with -sf).
-qb "" Optional. Weight resolution in bits for GNA quantization: 8 or 16 (default)
@@ -152,7 +152,7 @@ Usage message:
-compile_target "" Optional. Specify GNA compile target generation. May be one of GNA_TARGET_2_0, GNA_TARGET_3_0. By default, generation corresponds to the GNA HW available in the system or the latest fully supported generation by the software. See the GNA Plugin's GNA_COMPILE_TARGET config option description.
-memory_reuse_off Optional. Disables memory optimizations for compiled model.
- Available target devices: CPU GNA GPU VPUX
+ Available target devices: CPU GNA GPU VPU
.. _model-preparation-speech:
diff --git a/samples/cpp/speech_sample/speech_sample.hpp b/samples/cpp/speech_sample/speech_sample.hpp
index 4445e7e3442..9ba602a55e3 100644
--- a/samples/cpp/speech_sample/speech_sample.hpp
+++ b/samples/cpp/speech_sample/speech_sample.hpp
@@ -24,7 +24,7 @@ static const char model_message[] = "Required. Path to an .xml file with a train
/// @brief message for assigning calculation to device
static const char target_device_message[] =
- "Optional. Specify a target device to infer on. CPU, GPU, VPUX, GNA_AUTO, GNA_HW, "
+ "Optional. Specify a target device to infer on. CPU, GPU, VPU, GNA_AUTO, GNA_HW, "
"GNA_HW_WITH_SW_FBACK, GNA_SW_FP32, "
"GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU"
" as a secondary (e.g. HETERO:GNA,CPU) are supported. "
@@ -274,7 +274,7 @@ bool parse_and_check_command_line(int argc, char* argv[]) {
"HETERO:GNA_HW,CPU",
"HETERO:GNA_SW_EXACT,CPU",
"HETERO:GNA_SW_FP32,CPU",
- "VPUX"};
+ "VPU"};
if (std::find(supportedDevices.begin(), supportedDevices.end(), FLAGS_d) == supportedDevices.end()) {
throw std::logic_error("Specified device is not supported.");
diff --git a/samples/python/speech_sample/README.md b/samples/python/speech_sample/README.md
index 1fa293bed19..5a2176060ff 100644
--- a/samples/python/speech_sample/README.md
+++ b/samples/python/speech_sample/README.md
@@ -83,7 +83,7 @@ Several execution modes are supported via the ``-d`` flag:
- ``CPU`` - All calculations are performed on CPU device using CPU Plugin.
- ``GPU`` - All calculations are performed on GPU device using GPU Plugin.
-- ``VPUX`` - All calculations are performed on VPUX device using VPUX Plugin.
+- ``VPU`` - All calculations are performed on VPU device using VPU Plugin.
- ``GNA_AUTO`` - GNA hardware is used if available and the driver is installed. Otherwise, the GNA device is emulated in fast-but-not-bit-exact mode.
- ``GNA_HW`` - GNA hardware is used if available and the driver is installed. Otherwise, an error will occur.
- ``GNA_SW`` - Deprecated. The GNA device is emulated in fast-but-not-bit-exact mode.
@@ -143,7 +143,7 @@ Usage message:
Usage for a single file/layer: or .
Example of usage for several files/layers: :=,:=.
-d DEVICE, --device DEVICE
- Optional. Specify a target device to infer on. CPU, GPU, VPUX, GNA_AUTO, GNA_HW, GNA_SW_FP32,
+ Optional. Specify a target device to infer on. CPU, GPU, VPU, GNA_AUTO, GNA_HW, GNA_SW_FP32,
GNA_SW_EXACT and HETERO with combination of GNA as the primary device and CPU as a secondary (e.g.
HETERO:GNA,CPU) are supported. The sample will look for a suitable plugin for device specified.
Default value is CPU.
diff --git a/samples/python/speech_sample/arg_parser.py b/samples/python/speech_sample/arg_parser.py
index 64da5cc3425..d6e8c41d834 100644
--- a/samples/python/speech_sample/arg_parser.py
+++ b/samples/python/speech_sample/arg_parser.py
@@ -32,7 +32,7 @@ def build_arg_parser() -> argparse.ArgumentParser:
'Example of usage for several files/layers: :=,:=.')
args.add_argument('-d', '--device', default='CPU', type=str,
help='Optional. Specify a target device to infer on. '
- 'CPU, GPU, VPUX, GNA_AUTO, GNA_HW, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA'
+ 'CPU, GPU, VPU, GNA_AUTO, GNA_HW, GNA_SW_FP32, GNA_SW_EXACT and HETERO with combination of GNA'
' as the primary device and CPU as a secondary (e.g. HETERO:GNA,CPU) are supported. '
'The sample will look for a suitable plugin for device specified. Default value is CPU.')
args.add_argument('-bs', '--batch_size', type=int, choices=range(1, 9), metavar='[1-8]',
diff --git a/src/plugins/auto/src/auto_compiled_model.cpp b/src/plugins/auto/src/auto_compiled_model.cpp
index 5d4e42c5404..e397d8ca236 100644
--- a/src/plugins/auto/src/auto_compiled_model.cpp
+++ b/src/plugins/auto/src/auto_compiled_model.cpp
@@ -176,7 +176,7 @@ ov::Any AutoCompiledModel::get_property(const std::string& name) const {
iie.what());
}
real = (std::max)(requests, optimal_batch_size);
- } else if (device_info.device_name.find("VPUX") != std::string::npos) {
+ } else if (device_info.device_name.find("VPU") != std::string::npos) {
real = 8u;
} else {
real = upper_bound_streams_num ? 2 * upper_bound_streams_num : default_num_for_tput;
diff --git a/src/plugins/auto/src/auto_schedule.cpp b/src/plugins/auto/src/auto_schedule.cpp
index 1022e0111de..4efcd6e0253 100644
--- a/src/plugins/auto/src/auto_schedule.cpp
+++ b/src/plugins/auto/src/auto_schedule.cpp
@@ -301,10 +301,10 @@ void AutoSchedule::try_to_compile_model(AutoCompileContext& context, const std::
}
// need to recompile model, unregister it's priority
// there maybe potential issue.
- // for example they are dGPU, VPUX, iGPU, customer want to compile model with
- // configure 0 dGPU, 1 VPUX, if dGPU compile failed,
- // the result will be not sure, maybe two models are compiled into VPUX,
- // maybe 0 is compiled to VPUX, 1 is compiled to iGPU
+ // for example they are dGPU, VPU, iGPU, customer want to compile model with
+ // configure 0 dGPU, 1 VPU, if dGPU compile failed,
+ // the result will be not sure, maybe two models are compiled into VPU,
+ // maybe 0 is compiled to VPU, 1 is compiled to iGPU
m_plugin->unregister_priority(m_context->m_model_priority, context.m_device_info.unique_name);
// remove the current device from device_list
auto erase_device = deviceChecker().check_and_return_if_device_in_list(device, device_list, true);
diff --git a/src/plugins/auto/src/plugin.cpp b/src/plugins/auto/src/plugin.cpp
index 9e3f0eeb2cc..210f55eaabe 100644
--- a/src/plugins/auto/src/plugin.cpp
+++ b/src/plugins/auto/src/plugin.cpp
@@ -580,7 +580,7 @@ std::list Plugin::get_valid_device(
std::list dGPU;
std::list iGPU;
std::list MYRIAD;
- std::list VPUX;
+ std::list VPU;
for (auto& item : meta_devices) {
if (item.device_name.find("CPU") == 0) {
@@ -591,8 +591,8 @@ std::list Plugin::get_valid_device(
MYRIAD.push_back(item);
continue;
}
- if (item.device_name.find("VPUX") == 0) {
- VPUX.push_back(item);
+ if (item.device_name.find("VPU") == 0) {
+ VPU.push_back(item);
continue;
}
if (item.device_name.find("GPU") == 0) {
@@ -614,14 +614,14 @@ std::list Plugin::get_valid_device(
}
}
- // Priority of selecting device: dGPU > VPUX > iGPU > MYRIAD > CPU
+ // Priority of selecting device: dGPU > VPU > iGPU > MYRIAD > CPU
std::list devices;
if (model_precision == "INT8") {
- devices.splice(devices.end(), VPUX);
+ devices.splice(devices.end(), VPU);
devices.splice(devices.end(), dGPU);
} else {
devices.splice(devices.end(), dGPU);
- devices.splice(devices.end(), VPUX);
+ devices.splice(devices.end(), VPU);
}
devices.splice(devices.end(), iGPU);
devices.splice(devices.end(), MYRIAD);
diff --git a/src/plugins/auto/src/plugin_config.cpp b/src/plugins/auto/src/plugin_config.cpp
index 65bf106d6af..4fcace66dd3 100644
--- a/src/plugins/auto/src/plugin_config.cpp
+++ b/src/plugins/auto/src/plugin_config.cpp
@@ -8,7 +8,7 @@ namespace auto_plugin {
// AUTO will enable the blocklist if
// 1.No device priority passed to AUTO/MULTI.(eg. core.compile_model(model, "AUTO", configs);)
// 2.No valid device parsed out from device priority (eg. core.compile_model(model, "AUTO:-CPU,-GPU", configs);).
-const std::set PluginConfig::device_block_list = {"VPUX", "GNA", "notIntelGPU"};
+const std::set PluginConfig::device_block_list = {"VPU", "GNA", "notIntelGPU"};
PluginConfig::PluginConfig() {
set_default();
diff --git a/src/plugins/auto/tests/unit/auto_unit_test.cpp b/src/plugins/auto/tests/unit/auto_unit_test.cpp
index 9b511c34788..33ccecab73b 100644
--- a/src/plugins/auto/tests/unit/auto_unit_test.cpp
+++ b/src/plugins/auto/tests/unit/auto_unit_test.cpp
@@ -67,7 +67,7 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() {
.WillByDefault(Return(12));
std::vector cpuCability = {"FP32", "FP16", "INT8", "BIN"};
std::vector gpuCability = {"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"};
- std::vector vpuxCability = {"INT8"};
+ std::vector vpuCability = {"INT8"};
std::vector myriadCability = {"FP16"};
std::string igpuArchitecture = "GPU: vendor=0x8086 arch=0";
std::string dgpuArchitecture = "GPU: vendor=0x8086 arch=1";
@@ -78,7 +78,7 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() {
ON_CALL(*core, get_property(HasSubstr("GPU"),
StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(gpuCability));
ON_CALL(*core, get_property(StrEq(CommonTestUtils::DEVICE_KEEMBAY),
- StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(vpuxCability));
+ StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(vpuCability));
ON_CALL(*core, get_property(StrEq("MYRIAD"),
StrEq(ov::device::capabilities.name()), _)).WillByDefault(RETURN_MOCK_VALUE(myriadCability));
ON_CALL(*core, get_property(StrEq("GPU"),
@@ -106,7 +106,7 @@ ov::mock_auto_plugin::tests::AutoTest::AutoTest() {
StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(igpuFullDeviceName));
ON_CALL(*core, get_property(StrEq("GPU.1"),
StrEq(ov::device::full_name.name()), _)).WillByDefault(RETURN_MOCK_VALUE(dgpuFullDeviceName));
- const std::vector availableDevs = {"CPU", "GPU.0", "GPU.1", "VPUX"};
+ const std::vector availableDevs = {"CPU", "GPU.0", "GPU.1", "VPU"};
ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
ON_CALL(*plugin, parse_meta_devices)
.WillByDefault(
diff --git a/src/plugins/auto/tests/unit/get_device_list.cpp b/src/plugins/auto/tests/unit/get_device_list.cpp
index c7eb8fa8e81..50837ae17f1 100644
--- a/src/plugins/auto/tests/unit/get_device_list.cpp
+++ b/src/plugins/auto/tests/unit/get_device_list.cpp
@@ -7,8 +7,8 @@
using Config = std::map;
using namespace ov::mock_auto_plugin;
-const std::vector availableDevs = {"CPU", "GPU", "VPUX"};
-const std::vector availableDevsWithId = {"CPU", "GPU.0", "GPU.1", "VPUX"};
+const std::vector availableDevs = {"CPU", "GPU", "VPU"};
+const std::vector availableDevsWithId = {"CPU", "GPU.0", "GPU.1", "VPU"};
using Params = std::tuple;
using ConfigParams = std::tuple<
std::vector, // Available devices retrieved from Core
@@ -96,8 +96,8 @@ const std::vector testConfigsWithId = {Params{" ", " "},
Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"},
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"},
Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"},
- Params{"CPU,GPU,VPUX,INVALID_DEVICE", "CPU,GPU.0,GPU.1,VPUX,INVALID_DEVICE"},
- Params{"VPUX,GPU,CPU,-GPU.0", "VPUX,GPU.1,CPU"},
+ Params{"CPU,GPU,VPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,VPU,INVALID_DEVICE"},
+ Params{"VPU,GPU,CPU,-GPU.0", "VPU,GPU.1,CPU"},
Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"},
Params{"-GPU.0,GPU", "GPU.1"},
Params{"-GPU,GPU.0", "GPU.0"},
@@ -131,13 +131,13 @@ const std::vector testConfigs = {Params{" ", " "},
Params{"CPU,GPU,GPU.0", "CPU,GPU"},
Params{"CPU,GPU,GPU.1", "CPU,GPU,GPU.1"},
Params{"CPU,GPU.1,GPU", "CPU,GPU.1,GPU"},
- Params{"CPU,VPUX", "CPU,VPUX"},
- Params{"CPU,-VPUX", "CPU"},
+ Params{"CPU,VPU", "CPU,VPU"},
+ Params{"CPU,-VPU", "CPU"},
Params{"INVALID_DEVICE", "INVALID_DEVICE"},
Params{"CPU,-INVALID_DEVICE", "CPU"},
Params{"CPU,INVALID_DEVICE", "CPU,INVALID_DEVICE"},
Params{"-CPU,INVALID_DEVICE", "INVALID_DEVICE"},
- Params{"CPU,GPU,VPUX", "CPU,GPU,VPUX"}};
+ Params{"CPU,GPU,VPU", "CPU,GPU,VPU"}};
const std::vector testConfigsWithIdNotInteldGPU = {Params{" ", " "},
Params{"", "CPU,GPU.0"},
@@ -147,8 +147,8 @@ const std::vector testConfigsWithIdNotInteldGPU = {Params{" ", " "},
Params{"CPU,,GPU", "CPU,GPU.0,GPU.1"},
Params{"CPU, ,GPU", "CPU, ,GPU.0,GPU.1"},
Params{"CPU,GPU,GPU.1", "CPU,GPU.0,GPU.1"},
- Params{"CPU,GPU,VPUX,INVALID_DEVICE", "CPU,GPU.0,GPU.1,VPUX,INVALID_DEVICE"},
- Params{"VPUX,GPU,CPU,-GPU.0", "VPUX,GPU.1,CPU"},
+ Params{"CPU,GPU,VPU,INVALID_DEVICE", "CPU,GPU.0,GPU.1,VPU,INVALID_DEVICE"},
+ Params{"VPU,GPU,CPU,-GPU.0", "VPU,GPU.1,CPU"},
Params{"-GPU.0,GPU,CPU", "GPU.1,CPU"},
Params{"-GPU.0,GPU", "GPU.1"},
Params{"-GPU,GPU.0", "GPU.0"},
diff --git a/src/plugins/auto/tests/unit/key_network_priority_test.cpp b/src/plugins/auto/tests/unit/key_network_priority_test.cpp
index b4790ee631c..c3ce9abca78 100644
--- a/src/plugins/auto/tests/unit/key_network_priority_test.cpp
+++ b/src/plugins/auto/tests/unit/key_network_priority_test.cpp
@@ -68,13 +68,13 @@ TEST_P(KeyNetworkPriorityTest, SelectDevice) {
{"GPU.0", {}, 2, "01", "iGPU_01", 1},
{"GPU.1", {}, 2, "01", "dGPU_01", 2},
{"MYRIAD", {}, 2, "01", "MYRIAD_01", 3},
- {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPUX_01", 4}};
+ {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPU_01", 4}};
} else {
metaDevices = {{CommonTestUtils::DEVICE_CPU, {}, 2, "", "CPU_01", 0},
{"GPU.0", {}, 2, "01", "iGPU_01", 0},
{"GPU.1", {}, 2, "01", "dGPU_01", 0},
{"MYRIAD", {}, 2, "01", "MYRIAD_01", 0},
- {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPUX_01", 0}};
+ {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPU_01", 0}};
}
EXPECT_CALL(*plugin, select_device(_, _, _)).Times(sizeOfConfigs);
@@ -97,13 +97,13 @@ TEST_P(KeyNetworkPriorityTest, MultiThreadsSelectDevice) {
{"GPU.0", {}, 2, "01", "iGPU_01", 1},
{"GPU.1", {}, 2, "01", "dGPU_01", 2},
{"MYRIAD", {}, 2, "01", "MYRIAD_01", 3},
- {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPUX_01", 4}};
+ {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPU_01", 4}};
} else {
metaDevices = {{CommonTestUtils::DEVICE_CPU, {}, 2, "", "CPU_01", 0},
{"GPU.0", {}, 2, "01", "iGPU_01", 0},
{"GPU.1", {}, 2, "01", "dGPU_01", 0},
{"MYRIAD", {}, 2, "01", "MYRIAD_01", 0},
- {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPUX_01", 0}};
+ {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPU_01", 0}};
}
EXPECT_CALL(*plugin, select_device(_, _, _)).Times(sizeOfConfigs * 2);
@@ -165,27 +165,27 @@ const std::vector testConfigs = {
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "MYRIAD_01"}}},
- ConfigParams {"INT8", false, {PriorityParams {0, "VPUX_01"},
+ ConfigParams {"INT8", false, {PriorityParams {0, "VPU_01"},
PriorityParams {1, "CPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {2, "CPU_01"}}},
- ConfigParams {"INT8", false, {PriorityParams {2, "VPUX_01"},
+ ConfigParams {"INT8", false, {PriorityParams {2, "VPU_01"},
PriorityParams {3, "CPU_01"},
PriorityParams {4, "CPU_01"},
PriorityParams {5, "CPU_01"}}},
- ConfigParams {"INT8", false, {PriorityParams {2, "VPUX_01"},
- PriorityParams {0, "VPUX_01"},
+ ConfigParams {"INT8", false, {PriorityParams {2, "VPU_01"},
+ PriorityParams {0, "VPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {2, "CPU_01"}}},
- ConfigParams {"INT8", false, {PriorityParams {2, "VPUX_01"},
- PriorityParams {0, "VPUX_01"},
+ ConfigParams {"INT8", false, {PriorityParams {2, "VPU_01"},
+ PriorityParams {0, "VPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"}}},
- ConfigParams {"INT8", false, {PriorityParams {0, "VPUX_01"},
+ ConfigParams {"INT8", false, {PriorityParams {0, "VPU_01"},
PriorityParams {1, "CPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"},
- PriorityParams {0, "VPUX_01"},
+ PriorityParams {0, "VPU_01"},
PriorityParams {1, "CPU_01"},
PriorityParams {2, "CPU_01"},
PriorityParams {3, "CPU_01"}}},
@@ -217,8 +217,8 @@ const std::vector testConfigs = {
// {CommonTestUtils::DEVICE_GPU, {}, 2, "01", "iGPU_01", 1},
// {CommonTestUtils::DEVICE_GPU, {}, 2, "01", "dGPU_01", 2},
// {"MYRIAD", {}, 2, "01", "MYRIAD_01", 3},
- // {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPUX_01", 4}};
- // cpu > igpu > dgpu > MYRIAD > VPUX
+ // {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPU_01", 4}};
+ // cpu > igpu > dgpu > MYRIAD > VPU
ConfigParams {"FP32", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"},
@@ -244,29 +244,29 @@ const std::vector testConfigs = {
PriorityParams {2, "dGPU_01"},
PriorityParams {3, "MYRIAD_01"}}},
ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"},
- PriorityParams {1, "VPUX_01"},
- PriorityParams {2, "VPUX_01"},
- PriorityParams {2, "VPUX_01"}}},
+ PriorityParams {1, "VPU_01"},
+ PriorityParams {2, "VPU_01"},
+ PriorityParams {2, "VPU_01"}}},
ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"},
- PriorityParams {3, "VPUX_01"},
- PriorityParams {4, "VPUX_01"},
- PriorityParams {5, "VPUX_01"}}},
+ PriorityParams {3, "VPU_01"},
+ PriorityParams {4, "VPU_01"},
+ PriorityParams {5, "VPU_01"}}},
ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
- PriorityParams {2, "VPUX_01"},
- PriorityParams {2, "VPUX_01"}}},
+ PriorityParams {2, "VPU_01"},
+ PriorityParams {2, "VPU_01"}}},
ConfigParams {"INT8", true, {PriorityParams {2, "CPU_01"},
PriorityParams {0, "CPU_01"},
- PriorityParams {2, "VPUX_01"},
- PriorityParams {3, "VPUX_01"}}},
+ PriorityParams {2, "VPU_01"},
+ PriorityParams {3, "VPU_01"}}},
ConfigParams {"INT8", true, {PriorityParams {0, "CPU_01"},
- PriorityParams {1, "VPUX_01"},
- PriorityParams {2, "VPUX_01"},
- PriorityParams {3, "VPUX_01"},
+ PriorityParams {1, "VPU_01"},
+ PriorityParams {2, "VPU_01"},
+ PriorityParams {3, "VPU_01"},
PriorityParams {0, "CPU_01"},
- PriorityParams {1, "VPUX_01"},
- PriorityParams {2, "VPUX_01"},
- PriorityParams {3, "VPUX_01"}}},
+ PriorityParams {1, "VPU_01"},
+ PriorityParams {2, "VPU_01"},
+ PriorityParams {3, "VPU_01"}}},
ConfigParams {"BIN", true, {PriorityParams {0, "CPU_01"},
PriorityParams {1, "iGPU_01"},
PriorityParams {2, "dGPU_01"},
diff --git a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp
index f79a1695ec0..83305daa578 100644
--- a/src/plugins/auto/tests/unit/parse_meta_device_test.cpp
+++ b/src/plugins/auto/tests/unit/parse_meta_device_test.cpp
@@ -10,7 +10,7 @@ using testing::Throw;
const char igpuFullDeviceName[] = "Intel(R) Gen9 HD Graphics (iGPU)";
const char dgpuFullDeviceName[] = "Intel(R) Iris(R) Xe MAX Graphics (dGPU)";
-const std::vector availableDevsNoID = {"CPU", "GPU", "VPUX"};
+const std::vector availableDevsNoID = {"CPU", "GPU", "VPU"};
using ConfigParams = std::tuple, // expect metaDevices
bool, // if throw exception
@@ -127,57 +127,57 @@ TEST_P(ParseMetaDeviceNoIDTest, ParseMetaDevices) {
// ConfigParams {devicePriority, expect metaDevices, ifThrowException}
const std::vector<ConfigParams> testConfigs = {
- ConfigParams{"CPU,GPU,VPUX",
+ ConfigParams{"CPU,GPU,VPU",
{{"CPU", {}, -1, "", "CPU_", 0},
{"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1},
- {"VPUX", {}, -1, "", "VPUX_", 2}},
+ {"VPU", {}, -1, "", "VPU_", 2}},
false,
4},
- ConfigParams{"VPUX,GPU,CPU",
- {{"VPUX", {}, -1, "", "VPUX_", 0},
+ ConfigParams{"VPU,GPU,CPU",
+ {{"VPU", {}, -1, "", "VPU_", 0},
{"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1},
{"CPU", {}, -1, "", "CPU_", 2}},
false,
4},
- ConfigParams{"VPUX,GPU,INVALID_DEVICE",
- {{"VPUX", {}, -1, "", "VPUX_", 0},
+ ConfigParams{"VPU,GPU,INVALID_DEVICE",
+ {{"VPU", {}, -1, "", "VPU_", 0},
{"GPU.0", {}, -1, "", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1}},
false,
3},
- ConfigParams{"CPU(1),GPU(2),VPUX(4)",
+ ConfigParams{"CPU(1),GPU(2),VPU(4)",
{{"CPU", {}, 1, "", "CPU_", 0},
{"GPU.0", {}, 2, "", std::string(igpuFullDeviceName) + "_0", 1},
{"GPU.1", {}, 2, "", std::string(dgpuFullDeviceName) + "_1", 1},
- {"VPUX", {}, 4, "", "VPUX_", 2}},
+ {"VPU", {}, 4, "", "VPU_", 2}},
false,
4},
- ConfigParams{"CPU(-1),GPU,VPUX", {}, true, 0},
- ConfigParams{"CPU(NA),GPU,VPUX", {}, true, 0},
+ ConfigParams{"CPU(-1),GPU,VPU", {}, true, 0},
+ ConfigParams{"CPU(NA),GPU,VPU", {}, true, 0},
ConfigParams{"INVALID_DEVICE", {}, false, 0},
ConfigParams{"INVALID_DEVICE,CPU", {{"CPU", {}, -1, "", "CPU_", 1}}, false, 2},
- ConfigParams{"CPU(3),GPU.1,VPUX",
+ ConfigParams{"CPU(3),GPU.1,VPU",
{{"CPU", {}, 3, "", "CPU_", 0},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1},
- {"VPUX", {}, -1, "", "VPUX_", 2}},
+ {"VPU", {}, -1, "", "VPU_", 2}},
false,
3},
- ConfigParams{"VPUX,GPU.1,CPU(3)",
- {{"VPUX", {}, -1, "", "VPUX_", 0},
+ ConfigParams{"VPU,GPU.1,CPU(3)",
+ {{"VPU", {}, -1, "", "VPU_", 0},
{"GPU.1", {}, -1, "", std::string(dgpuFullDeviceName) + "_1", 1},
{"CPU", {}, 3, "", "CPU_", 2}},
false,
3}};
const std::vector<ConfigParams> testConfigsNoID = {
- ConfigParams{"CPU,GPU,VPUX",
+ ConfigParams{"CPU,GPU,VPU",
{{"CPU", {}, -1, "", "CPU_", 0},
{"GPU", {}, -1, "0", std::string(igpuFullDeviceName) + "_0", 1},
- {"VPUX", {}, -1, "", "VPUX_", 2}},
+ {"VPU", {}, -1, "", "VPU_", 2}},
false,
3},
};
diff --git a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp
index a51dc54d6ca..d5925a6813c 100644
--- a/src/plugins/auto/tests/unit/runtime_fallback_test.cpp
+++ b/src/plugins/auto/tests/unit/runtime_fallback_test.cpp
@@ -13,23 +13,23 @@ class AutoRuntimeFallback : public tests::AutoTest,
public ::testing::TestWithParam<ConfigParams> {
public:
ov::SoPtr mockExeNetworkGPU_1;
- ov::SoPtr mockExeNetworkVPUX;
+ ov::SoPtr mockExeNetworkVPU;
std::shared_ptr> inferReqInternalGPU_1;
- std::shared_ptr> inferReqInternalVPUX;
+ std::shared_ptr> inferReqInternalVPU;
std::shared_ptr> mockIExeNetGPU_1;
- std::shared_ptr> mockIExeNetVPUX;
+ std::shared_ptr> mockIExeNetVPU;
std::shared_ptr mockInferrequest;
std::shared_ptr mockInferrequestGPU_0;
std::shared_ptr mockInferrequestGPU_1;
- std::shared_ptr mockInferrequestVPUX;
+ std::shared_ptr mockInferrequestVPU;
std::shared_ptr mockExecutor;
std::shared_ptr mockExecutorGPU_0;
std::shared_ptr mockExecutorGPU_1;
- std::shared_ptr mockExecutorVPUX;
+ std::shared_ptr mockExecutorVPU;
public:
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
@@ -66,15 +66,15 @@ public:
void TearDown() override {
mockExeNetworkGPU_1 = {};
inferReqInternalGPU_1.reset();
- inferReqInternalVPUX.reset();
+ inferReqInternalVPU.reset();
mockIExeNetGPU_1.reset();
- mockIExeNetVPUX.reset();
+ mockIExeNetVPU.reset();
mockIExeNetGPU_1.reset();
- mockIExeNetVPUX.reset();
+ mockIExeNetVPU.reset();
mockExecutor.reset();
mockExecutorGPU_0.reset();
mockExecutorGPU_1.reset();
- mockExecutorVPUX.reset();
+ mockExecutorVPU.reset();
}
void SetUp() override {
@@ -82,8 +82,8 @@ public:
mockIExeNetGPU_1 = std::make_shared>(model, plugin);
mockExeNetworkGPU_1 = {mockIExeNetGPU_1, {}};
- mockIExeNetVPUX = std::make_shared>(model, plugin);
- mockExeNetworkVPUX = {mockIExeNetVPUX, {}};
+ mockIExeNetVPU = std::make_shared>(model, plugin);
+ mockExeNetworkVPU = {mockIExeNetVPU, {}};
// prepare mockicore and cnnNetwork for loading
ON_CALL(*core, compile_model(::testing::Matcher&>(_),
@@ -97,7 +97,7 @@ public:
ON_CALL(*core, compile_model(::testing::Matcher&>(_),
::testing::Matcher(StrEq(CommonTestUtils::DEVICE_KEEMBAY)), _)).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(200));
- return mockExeNetworkVPUX; }));
+ return mockExeNetworkVPU; }));
ON_CALL(*core, compile_model(::testing::Matcher&>(_),
::testing::Matcher(StrEq(CommonTestUtils::DEVICE_CPU)),
@@ -112,9 +112,9 @@ public:
ON_CALL(*mockIExeNetGPU_1, get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(Return(optimalNum));
- inferReqInternalVPUX = std::make_shared>(mockIExeNetVPUX);
- mockExecutorVPUX = std::make_shared();
- ON_CALL(*mockIExeNetVPUX, get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
+ inferReqInternalVPU = std::make_shared>(mockIExeNetVPU);
+ mockExecutorVPU = std::make_shared();
+ ON_CALL(*mockIExeNetVPU, get_property(StrEq(ov::optimal_number_of_infer_requests.name())))
.WillByDefault(Return(optimalNum));
}
};
@@ -163,12 +163,12 @@ TEST_P(AutoRuntimeFallback, releaseResource) {
std::this_thread::sleep_for(std::chrono::milliseconds(0));
return mockInferrequestGPU_1; }));
}
- } else if (deviceName == "VPUX") {
- mockInferrequestVPUX = std::make_shared(
- inferReqInternalVPUX, mockExecutorVPUX, nullptr, ifThrow);
- ON_CALL(*mockIExeNetVPUX.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
+ } else if (deviceName == "VPU") {
+ mockInferrequestVPU = std::make_shared(
+ inferReqInternalVPU, mockExecutorVPU, nullptr, ifThrow);
+ ON_CALL(*mockIExeNetVPU.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
std::this_thread::sleep_for(std::chrono::milliseconds(0));
- return mockInferrequestVPUX; }));
+ return mockInferrequestVPU; }));
} else {
return;
}
@@ -209,10 +209,10 @@ const std::vector testConfigs = {
ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, true, true, false, false},
// 3 devices
- ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"VPUX", false}}, 1, true, false, false, false},
- ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPUX", false}}, 2, true, false, false, false},
- ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPUX", false}}, 3, true, false, false, false},
- ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPUX", true}}, 3, true, true, false, false},
+ ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"VPU", false}}, 1, true, false, false, false},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPU", false}}, 2, true, false, false, false},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPU", false}}, 3, true, false, false, false},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPU", true}}, 3, true, true, false, false},
//CPU_HELP does not throw
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, true, false, false, false},
@@ -233,10 +233,10 @@ const std::vector testConfigs = {
ConfigParams{{{"GPU.0", false}, {"CPU", true}}, 2, false, true, false, false},
ConfigParams{{{"GPU.0", true}, {"CPU", true}}, 2, false, true, false, false},
// 3 devices
- ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"VPUX", false}}, 1, false, false, false, false},
- ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPUX", false}}, 1, false, true, false, false},
- ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPUX", false}}, 1, false, true, false, false},
- ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPUX", true}}, 1, false, true, false, false},
+ ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"VPU", false}}, 1, false, false, false, false},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPU", false}}, 1, false, true, false, false},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPU", false}}, 1, false, true, false, false},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"VPU", true}}, 1, false, true, false, false},
//CPU_HELP does not throw
ConfigParams{{{"GPU.0", false}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", false}}, 2, false, false, false, false},
@@ -246,8 +246,8 @@ const std::vector testConfigs = {
ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"CPU", true}}, 2, false, true, false, false},
ConfigParams{{{"GPU.0", true}, {"GPU.1", true}, {"CPU", true}}, 2, false, true, false, false},
// loadFail and CreateInferRequestFail
- ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPUX", false}}, 3, true, false, true, false},
- ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPUX", false}}, 3, true, false, false, true},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPU", false}}, 3, true, false, true, false},
+ ConfigParams{{{"GPU.0", true}, {"GPU.1", false}, {"VPU", false}}, 3, true, false, false, true},
};
INSTANTIATE_TEST_SUITE_P(smoke_AutoRuntimeFallback, AutoRuntimeFallback,
diff --git a/src/plugins/auto/tests/unit/select_device_test.cpp b/src/plugins/auto/tests/unit/select_device_test.cpp
index a5107616ae7..4a75686bf74 100644
--- a/src/plugins/auto/tests/unit/select_device_test.cpp
+++ b/src/plugins/auto/tests/unit/select_device_test.cpp
@@ -18,7 +18,7 @@ const DeviceInformation CPU_INFO = {CommonTestUtils::DEVICE_CPU, {}, 2, "01", "C
const DeviceInformation IGPU_INFO = {"GPU.0", {}, 2, "01", "iGPU_01"};
const DeviceInformation DGPU_INFO = {"GPU.1", {}, 2, "01", "dGPU_01"};
const DeviceInformation MYRIAD_INFO = {"MYRIAD", {}, 2, "01", "MYRIAD_01" };
-const DeviceInformation KEEMBAY_INFO = {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPUX_01" };
+const DeviceInformation KEEMBAY_INFO = {CommonTestUtils::DEVICE_KEEMBAY, {}, 2, "01", "VPU_01" };
const std::vector<DeviceInformation> fp32DeviceVector = {DGPU_INFO, IGPU_INFO, CPU_INFO, MYRIAD_INFO};
const std::vector<DeviceInformation> fp16DeviceVector = {DGPU_INFO, IGPU_INFO, MYRIAD_INFO, CPU_INFO};
const std::vector<DeviceInformation> int8DeviceVector = {KEEMBAY_INFO, DGPU_INFO, IGPU_INFO, CPU_INFO};
diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/include/api_conformance_helpers.hpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/include/api_conformance_helpers.hpp
index f3d44dcbd23..0c3b344dc8b 100644
--- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/include/api_conformance_helpers.hpp
+++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/include/api_conformance_helpers.hpp
@@ -17,7 +17,7 @@ inline const std::string get_plugin_lib_name_by_device(const std::string& device
{ "HETERO", "openvino_hetero_plugin" },
{ "BATCH", "openvino_auto_batch_plugin" },
{ "MULTI", "openvino_auto_plugin" },
- { "VPUX", "openvino_intel_vpux_plugin" },
+ { "VPU", "openvino_intel_vpu_plugin" },
{ "CPU", "openvino_intel_cpu_plugin" },
{ "GNA", "openvino_intel_gna_plugin" },
{ "GPU", "openvino_intel_gpu_plugin" },
diff --git a/src/tests/test_utils/common_test_utils/src/test_constants.cpp b/src/tests/test_utils/common_test_utils/src/test_constants.cpp
index d601e9dec65..a4408818569 100644
--- a/src/tests/test_utils/common_test_utils/src/test_constants.cpp
+++ b/src/tests/test_utils/common_test_utils/src/test_constants.cpp
@@ -11,7 +11,7 @@ const char *DEVICE_CPU = "CPU";
const char *DEVICE_GNA = "GNA";
const char *DEVICE_GPU = "GPU";
const char *DEVICE_BATCH = "BATCH";
-const char *DEVICE_KEEMBAY = "VPUX";
+const char *DEVICE_KEEMBAY = "VPU";
const char *DEVICE_MULTI = "MULTI";
const char *DEVICE_TEMPLATE = "TEMPLATE";
const char *DEVICE_HETERO = "HETERO";
diff --git a/tests/time_tests/test_runner/test_timetest.py b/tests/time_tests/test_runner/test_timetest.py
index 28acb628545..a090161394f 100644
--- a/tests/time_tests/test_runner/test_timetest.py
+++ b/tests/time_tests/test_runner/test_timetest.py
@@ -43,7 +43,7 @@ def test_timetest(instance, executable, niter, cl_cache_dir, model_cache, model_
:param niter: number of times to run executable
:param cl_cache_dir: directory to store OpenCL cache
:param cpu_cache: flag to enable model CPU cache
- :param vpu_compiler: flag to change VPUX compiler type
+ :param vpu_compiler: flag to change VPU compiler type
:param perf_hint: performance hint (optimize device for latency or throughput settings)
:param model_cache_dir: directory to store IE model cache
:param test_info: custom `test_info` field of built-in `request` pytest fixture
diff --git a/thirdparty/dependencies.cmake b/thirdparty/dependencies.cmake
index e7035286d98..0be8a7cf30e 100644
--- a/thirdparty/dependencies.cmake
+++ b/thirdparty/dependencies.cmake
@@ -570,14 +570,14 @@ endif()
#
if(ENABLE_SAMPLES)
- # Note: VPUX requires 3.9.0 version, because it contains 'nlohmann::ordered_json'
+ # Note: VPU requires 3.9.0 version, because it contains 'nlohmann::ordered_json'
find_package(nlohmann_json 3.9.0 QUIET)
if(nlohmann_json_FOUND)
# conan and vcpkg create imported target nlohmann_json::nlohmann_json
else()
add_subdirectory(thirdparty/json EXCLUDE_FROM_ALL)
- # this is required only because of VPUX plugin reused this
+ # this is required only because of VPU plugin reused this
openvino_developer_export_targets(COMPONENT openvino_common TARGETS nlohmann_json)
# for nlohmann library versions older than v3.0.0
diff --git a/tools/benchmark_tool/openvino/tools/benchmark/utils/constants.py b/tools/benchmark_tool/openvino/tools/benchmark/utils/constants.py
index cafc3ec0911..d9e3ca2ae71 100644
--- a/tools/benchmark_tool/openvino/tools/benchmark/utils/constants.py
+++ b/tools/benchmark_tool/openvino/tools/benchmark/utils/constants.py
@@ -1,7 +1,7 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
-VPUX_DEVICE_NAME = 'VPUX'
+VPU_DEVICE_NAME = 'VPU'
CPU_DEVICE_NAME = 'CPU'
GPU_DEVICE_NAME = 'GPU'
HETERO_DEVICE_NAME = 'HETERO'
@@ -22,7 +22,7 @@ BINARY_EXTENSIONS = ['.bin']
DEVICE_DURATION_IN_SECS = {
CPU_DEVICE_NAME: 60,
GPU_DEVICE_NAME: 60,
- VPUX_DEVICE_NAME: 60,
+ VPU_DEVICE_NAME: 60,
GNA_DEVICE_NAME: 60,
UNKNOWN_DEVICE_TYPE: 120
}