Compare commits

...

72 Commits

Author SHA1 Message Date
Nikita Malinin
a090abbc92 Update remove_converts pass with shape inference (#10474) 2022-02-17 18:17:07 +03:00
Yegor Kruglov
6e5eb87340 Add note to YOLO-v3 conversion instructions (#10428)
* added note to yolo v3 conversion instructions

* fix typo

* Update docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md

style fix

Co-authored-by: Andrey Zaytsev <andrey.zaytsev@intel.com>

Co-authored-by: Andrey Zaytsev <andrey.zaytsev@intel.com>
2022-02-17 18:00:38 +03:00
Ivan Tikhonov
ade4c6c7f9 OpExtension: fix framework attributes handling (#10445)
* Fix attribute handling in OpExtension, add unit tests

* add missed file

* fix warning

* fix warning

* rename convert_from_py_object method to py_object_to_any, fix PEP8

* fix PEP8

* delete redundant include dir, fix includes
2022-02-17 17:42:12 +03:00
Anton Pankratov
61f657795c Streams property with special values (#10411)
* Streams  property with special values

* Fixed clang
2022-02-17 16:39:06 +03:00
Fedor Zharinov
198f44fdc7 Fix for missing throughput in case of Multi device (#10407)
* Fix for missing throughput in case of Multi device

* stylefix
2022-02-17 16:32:19 +03:00
Ilya Lavrenov
306b7611d9 repair TF FE tests after build (#10432)
* repair TF FE tests after build

* Small improvements

* Fixed static build
2022-02-17 16:28:24 +03:00
Maxim Gordeev
3144c5fab8 Added processing of layout for speech sample (#10254)
* Added processing of layout for speech sample

* fixed notes

* some improvements

* Code style format

* changed NCC value for NullStatement

* improved batch processing

* added loading batch for imported model

* fixed notes

* fixed notes

* added layout parameter to azure tests
2022-02-17 16:11:57 +03:00
Irina Efode
ccd7104108 [IE TESTS][CONFORMANCE] Add support of dynamic shapes in SubgraphDumper (#10380)
* Initial commit. Need to remove debug code

* Remove extra flags. Fix comparation in the matchers

* Fix small issue with the default args

* Update eltwise.hpp

* Update ov_subgraph.cpp
2022-02-17 15:52:37 +03:00
Nikolay Tyukaev
fc1157cf68 add api folder if enable python (#10477) 2022-02-17 15:24:29 +03:00
Egor Shulman
8ae4bc95fd [CPU] Coverity fixes (#10392) 2022-02-17 15:11:18 +03:00
Anton Pankratov
0882f863d6 Any compilation time optimization (#10335)
* Optimized any compilation time

* Fixed Any  compilation time

* any::addressof

* reverted

* Fixed read write

* format fix

* Fixed build

* format fix

* Moved any tests back

* removed inline

* fix format

* used static inline

* format fix

* removed inline static

* fixed merge confkicts
2022-02-17 14:55:37 +03:00
Anton Pankratov
7ce9801ec3 Added mkldnn ov properties test for compile_model (#10442)
* Added mkldnn ov properties test

* fixed  macos build
2022-02-17 13:54:02 +03:00
Anton Pankratov
d1378d94b8 Fixed default inference precision in benchmark app (#10443) 2022-02-17 13:53:50 +03:00
Vladislav Golubev
ff4e97ab09 [LPT] Security fixes (#10465) 2022-02-17 13:47:27 +03:00
Anton Chetverikov
e444715c8d [MO] Restore inputs order in IR Reader (#10403)
* Restore inputs order in IR Reader

* Add saving of outputs order
2022-02-17 13:07:34 +03:00
Tomasz Dołbniak
83a8ac800c ONNX model validator enhancements (#10456) 2022-02-17 11:01:47 +01:00
Anton Voronov
61f915b4f6 [CPU] changed checks with_cpu_x86...() to mayiuse() (#9911) 2022-02-17 12:56:55 +03:00
Pavel Esir
43784e2cec fix convert_nms_gather_path_to_unsigned: added opset8::Slice into patter_list (#10439) 2022-02-17 12:47:25 +03:00
Aleksandr Korolev
8abb949af9 [VPU] Coverity fixes (#10396)
Tickets:
-79244
-78866
2022-02-17 12:29:28 +03:00
Aleksandr Korolev
5ace7bb96f [MYX] Added missing supported properties in GetMetric method (#10440) 2022-02-17 12:23:41 +03:00
Anton Pankratov
a7b28953e2 Added Import export device capability into hetero plugin (#10455) 2022-02-17 12:15:45 +03:00
hyunback kim
8148921fa7 [GPU] Fix deconv b32 onednn regression in onednn (#10462)
After enabling deconv b32 onednn, colorization-siggraph f16 b32 has regresison,
Fix it. Add to check sum post ops in case deconv onednn.

Signed-off-by: hyunback <hyunback.kim@intel.com>
2022-02-17 18:09:51 +09:00
Irina Efode
68f523010e [IE TESTS][CONFORMANCE] Support dynamic shapes in Operation Conformance (#10400)
* emove namespeca unity

* [IE TESTS][IE CONFORMANCE] Suppot dynamic shapes in Operation Conformance runner

* Update CMakeLists.txt

* Fix dim generation
2022-02-17 11:27:45 +03:00
hyunback kim
ed323afc93 [GPU] Remove default bfyx quantize in get_preferred_format (#9654)
* [GPU] Remove default bfyx quantize in get_preferred_format

Default bfyx occurs redundant reorder in fsv-format network.
And remove onednn concat limitation for depdendency input should be
onednn impl.

Signed-off-by: hyunback <hyunback.kim@intel.com>
2022-02-17 17:25:55 +09:00
Taylor Yeonbok Lee
d35335193a [GPU] Adjust build batch size to 9 from 10 due to the compiler limitation w.r.t the entire module size (#10450) 2022-02-17 11:01:31 +03:00
Anastasia Kuporosova
861d43e06d [Python API] Fix benchmark hanging (#10457) 2022-02-17 10:59:55 +03:00
Liubov Talamanova
be6a3c34f1 [POT] Throw exception for IRv10 (#10345)
* [POT] Throw exception for IRv10

* Update reference models

* Updated AC framework name from dldt to openvino
2022-02-17 10:54:08 +03:00
Vladimir Dudnik
29883a152a fix 79520 (#10449) 2022-02-17 10:52:30 +03:00
Egor Shulman
ff293f5560 [CPU] Disable display of constant layers in PerfMap (#10307) 2022-02-17 10:51:07 +03:00
Egor Duplensky
541627d319 [CPU] [SANITIZER] Avoid possible stack-use-after-scope (#10377) 2022-02-17 10:27:58 +03:00
Ivan Tikhonov
3597ae61f9 Fix increased build time and memory consumption caused by multiple ov::Any instantiation (#10452)
* Fix increased build time and memory consumption caused by multiple instansion of ov::Any.

* delete unused method, correct exception message

* codestyle

* Resolve review comment

* fix exception: throw it in else branch
2022-02-17 09:08:55 +03:00
Gleb Kazantaev
926460e603 Fix Coverity issues (#10427) 2022-02-17 08:54:57 +03:00
Mateusz Tabaka
ab4a11b3bd Remove unnecessary AutoBroadcastSpec parameter in MatMulMultiplyFusion (#10005) 2022-02-17 08:51:32 +03:00
Julia Kamelina
1fc61299c8 update omz submodule (#10441) 2022-02-17 00:50:21 +03:00
Tomasz Dołbniak
90a100d5f6 Default opset bump in ONNX FE (#10437) 2022-02-17 00:47:07 +03:00
Fedor Zharinov
00abcbacc4 Fix for Layout and image_info related issues (#10258)
* bugfix78627

* stylefix

* fix
2022-02-17 00:42:51 +03:00
Maxim Vafin
5cadee20eb Fix issue with constants having inputs in TF FE (#10393) 2022-02-16 20:40:23 +03:00
Andrey Zaytsev
abeb910ce2 Removing the old Intel logo from docs (#10429)
* Added info on DockerHub CI Framework

* Feature/azaytsev/change layout (#3295)

* Changes according to feedback comments

* Replaced @ref's with html links

* Fixed links, added a title page for installing from repos and images, fixed formatting issues

* Added links

* minor fix

* Added DL Streamer to the list of components installed by default

* Link fixes

* Link fixes

* ovms doc fix (#2988)

* added OpenVINO Model Server

* ovms doc fixes

Co-authored-by: Trawinski, Dariusz <dariusz.trawinski@intel.com>

* Updated openvino_docs.xml

* Updated the link to software license agreements

* Revert "Updated the link to software license agreements"

This reverts commit 706dac500e.

* Removed the Intel logo

Co-authored-by: Trawinski, Dariusz <dariusz.trawinski@intel.com>
2022-02-16 17:26:26 +03:00
Yuan Xu
4f000b780d update pypi installation (#10217)
* Add Overview page

* update pypi installation

* Revert "Add Overview page"

* integrate review comments

* update formatting

* Update docs/install_guides/installing-openvino-pip.md

Co-authored-by: Andrey Zaytsev <andrey.zaytsev@intel.com>

* Update docs/install_guides/installing-openvino-pip.md

Co-authored-by: Andrey Zaytsev <andrey.zaytsev@intel.com>

* Update docs/install_guides/installing-openvino-pip.md

Co-authored-by: Andrey Zaytsev <andrey.zaytsev@intel.com>

Co-authored-by: Adrian Boguszewski <adekboguszewski@gmail.com>
Co-authored-by: Andrey Zaytsev <andrey.zaytsev@intel.com>
2022-02-16 17:09:56 +03:00
Egor Shulman
5bf9631073 Fixed ProfilingInfo layer status (#10342) 2022-02-16 16:10:19 +03:00
Anton Grishin
05650551b7 [GNA] Fix static analyzer issues (#10379)
* fix incorrect braces

* move pointer check

* add pointer check to VerifyConcat

* Prevent iterator invalidation
2022-02-16 15:46:32 +03:00
Ilya Churaev
434d7bbecc Fixed 4458 warning for Windows (#10418) 2022-02-16 11:39:43 +00:00
Anton Pankratov
5b8b698f88 Fixed ICore GetSupportedProperties (#10394)
* Added ICore::get_property

* Added tests

* Format fix

* All properties
2022-02-16 14:36:01 +03:00
Andrey Noskov
7a24f53b57 [GNA] Moved am_intel_dnn tests (#10294)
* [GNA] am_intel_dnn tests moved from deprecated tests

* fixed code style

* [GNA]fixed copyright date
2022-02-16 14:21:12 +03:00
Andrey Noskov
e2948a807c [GNA] Moved cpp_wrapper test (#10297)
* [GNA] Moved cpp_wrapper test

* [GNA] fixed copyright data
2022-02-16 14:19:29 +03:00
Nadezhda Ageeva
fc5a416423 [SAMPLES][GNA] Update C++ speech sample with new config API (#10357)
* [SAMPLES][GNA] Update speech sample with new cofig API

* Review comments

* Some additional checks
2022-02-16 13:23:50 +03:00
Alexander Zhogov
2e71fccd82 Azure CI: Disable tests on Mac due to long building 2022-02-16 13:12:06 +03:00
Anton Dudchenko
483b3828ca [VPU] Enable CheckTensorPrecision tests (#10390)
Enable CheckTensorPrecision tests for the myriad plugin.
-75944
2022-02-16 13:06:13 +03:00
Artyom Anokhov
ba69bae055 [Scripts] Remove MacOS install dependencies (#10397)
* OpenVINO scripts: Removed legacy install install_guide.html. Removed installation of scripts for MacOS

* scripts/CMakeLists: optimized if case
2022-02-16 12:52:57 +03:00
Chen Xu
4d954d0c13 [CPU] Fix the unnecessary calculation of blk_stride for dynamic shape (#10385) 2022-02-16 12:20:01 +03:00
Andrew Kwangwoong Park
2a1d8d7e99 [GPU] Minor fix for dump layer (#10291)
- Replace find with compare func to avoid dumping all layers that contain layer name

Signed-off-by: Andrew Kwangwoong Park <andrew.kwangwoong.park@intel.com>
2022-02-16 12:02:28 +03:00
Nikolay Tyukaev
0c4d50239a update requirements to fix tabs (#10409) 2022-02-16 11:47:11 +03:00
Gleb Kazantaev
709084888a Remove deprecated classes from openvino headers (#10371)
* Remove deprecated classes from openvino headers

* Fix tests
2022-02-16 11:41:16 +03:00
Ilya Churaev
0b27fb80b1 Fix for new coverity issues (#10378)
* Fix for new coverity issues

* Fixed cc coverity

* Fixed code style

* Revert some changes

* Fixed build
2022-02-16 11:12:24 +03:00
Nikita Malinin
c8ce93290e [POT] Sync mode only for gna_sample (#10355)
* Sync mode only for gna_sample

* Disable test
2022-02-16 11:00:13 +03:00
Vladimir Zinoviev
e22a2b3076 [CommonTransformations] Fix default output take from Split/VariadicSplit (#10395) 2022-02-16 10:59:11 +03:00
Mateusz Bencer
0a056857c5 fix handling stride_y (#10398) 2022-02-16 07:57:56 +00:00
Mingyu Kim
c0d54e48bb [GPU] Bugfix for onednn post op optimization (#10416)
When post-op has pattern like below, binary_mul was ignored previously.
1. binary_add
2. eltwise_linear
3. binary_mul
4. binary_add

It happens when prev_post_op_idx == 2, cur_post_op_idx == 4.
prev_post_op_idx was supposed to proceed to idx 3, but it did not.
2022-02-16 10:44:42 +03:00
Vladislav Golubev
fa4246d531 [LPT] Security fixes (#10381) 2022-02-16 10:31:17 +03:00
Taylor Yeonbok Lee
cbb5dff9c1 Fix coverity errors (#10384) 2022-02-16 10:10:10 +03:00
Ivan Tikhonov
06eb74b77f Fix MakeStateful transformation: use tensor names instead of friendly names (#8997)
* Use tensor names instead of friendly names, handle one output tensor to several Result ops case

* fix python tests

* fix python test

* fix incorrect merge

* remove redundant files

* fix variable names generation, fix python test

* Apply review comments

* fix python test
2022-02-16 09:26:31 +03:00
Jan Iwaszkiewicz
e71f23fc7e [PYTHON] Add __repr__ to main objects (#10365) 2022-02-15 21:30:33 +00:00
Evgenya Stepyreva
d14f1e54a5 MatMul Shape Inference (#10348)
* Proper dynamic dimension broadcasting

* make shape infer race condition reproducer

* Use ngraph only

* MatMul shape inference

* Style

* Dynamic rank case covered

* Build fix

Co-authored-by: Maksim Kutakov <maksim.kutakov@intel.com>
2022-02-16 00:22:46 +03:00
Vladimir Dudnik
eda4cbf30e [OMZ] rest of public models with layout (#10293)
* update OMZ submodule, rest of public models with layout

* sync with PR-10150

* ac fixes for WB

* fix CVS-78616
2022-02-15 23:42:41 +03:00
Maxim Shevtsov
317b956d2e fixed possible situation when auto-batching returns zero requests (#10388) 2022-02-15 15:13:25 +00:00
Mikhail Nosov
d5e8e0fb88 Fix coverity findings - add nullptr check before dereferencing (#10375)
Even though it is not possible to hit into this situation using existing plugins - there is theoretical possibility that some plugin may return 'nullptr' as it is allowed.
So this check shall remain in generic part which should not rely on plugin-specific behavior
2022-02-15 18:01:05 +03:00
Maxim Andronov
dc905f972a [CPU] AdaptivePooling child edges number check fix (#10372) 2022-02-15 17:59:51 +03:00
Ivan Novoselov
fa6865d569 [CPU] Disable MatMul+FQ(I8 out) if MatMul cant execute in I8 (#10316) 2022-02-15 17:59:06 +03:00
Maxim Vafin
0793a56260 Fix Conv3D translator in TF FE (#10387) 2022-02-15 17:53:13 +03:00
Mikhail Letavin
f150e2ad09 [GPU] Remove debug suffix from OpenCL.dll on Windows (#10361) 2022-02-15 16:43:40 +03:00
Sergey Lyubimtsev
498d865ea6 Correction for install guides: (#10373)
- OpenVINO installer path for macOS
- Default install pathnon macOS
- Red Hat Enterprise Linux 8.x, 64-bit is not part of IRC installer
2022-02-15 16:22:26 +03:00
Gleb Kazantaev
b837b7e32c Fix Coverity Isues (#10376) 2022-02-15 15:26:04 +03:00
216 changed files with 2754 additions and 1362 deletions

View File

@@ -99,7 +99,7 @@ jobs:
export PATH="/usr/local/opt/cython/bin:$PATH"
export CC=gcc
export CXX=g++
cmake -GNinja -DVERBOSE_BUILD=ON -DENABLE_REQUIREMENTS_INSTALL=OFF -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_C_COMPILER_LAUNCHER=ccache $(REPO_DIR)
cmake -GNinja -DVERBOSE_BUILD=ON -DENABLE_REQUIREMENTS_INSTALL=OFF -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=OFF -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_C_COMPILER_LAUNCHER=ccache $(REPO_DIR)
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
@@ -159,30 +159,37 @@ jobs:
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_filter=-MKLDNNGraphStructureTests.TestNoRedundantReordersBeforeDWConvolution:TestConvolution/MKLDNNGraphConvolutionTests.TestsConvolution/0:TestConvolutionDefaultPrimitivesPriority/MKLDNNGraphConvolutionTests.TestsConvolution/0 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml
displayName: 'IE UT old'
continueOnError: false
enabled: false
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml
displayName: 'IE UT'
continueOnError: false
enabled: false
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml
displayName: 'CPU UT'
continueOnError: false
enabled: false
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml
displayName: 'VPU UT'
continueOnError: false
enabled: false
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml
displayName: 'ONNX Importer UT'
continueOnError: false
enabled: false
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieMultiPluginUnitTests --gtest_output=xml:TEST-ieMultiPluginUnitTests.xml
displayName: 'MULTI UT'
continueOnError: false
enabled: false
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml
displayName: 'IE FuncTests'
continueOnError: false
enabled: false
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/cpuFuncTests --gtest_filter=*smoke*:-smoke_LPT/ReduceMinTransformation.CompareWithRefImpl/f32_Shape* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml
displayName: 'CPU FuncTests'
@@ -195,6 +202,7 @@ jobs:
. $(SETUPVARS) && $(INSTALL_TEST_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml
displayName: 'IE CAPITests'
continueOnError: false
enabled: false
- task: PublishTestResults@2
condition: always()

View File

@@ -99,7 +99,7 @@ CxxCatchStatement: '^.*$'
CxxTryStatement: '^.*$'
CxxForRangeStatement: '^.*$'
MsAsmStatement: 'XXXX'
NullStatement: 'XXXX'
NullStatement: '^.*$'
DeclarationStatement: '^.*$'
TranslationUnit: 'XXXX'
UnexposedAttribute: '^.*$'

View File

@@ -133,6 +133,16 @@ function(build_docs)
)
endif()
list(APPEND commands
COMMAND ${CMAKE_COMMAND} -E copy ${API_DOCS_IN}/api_reference.rst ${API_DOCS_OUT}/api_reference.rst
)
if(ENABLE_PYTHON)
list(APPEND commands
COMMAND ${CMAKE_COMMAND} -E copy_directory ${API_DOCS_IN}/ie_python_api ${API_DOCS_OUT}/ie_python_api
)
endif()
# omz doc files
if(EXISTS "${OMZ_DOCS_DIR}")
get_filename_component(OMZ_DOCS_DIR "${OMZ_DOCS_DIR}" ABSOLUTE)
@@ -190,7 +200,6 @@ function(build_docs)
COMMAND ${PYTHON_EXECUTABLE} ${COPY_IMAGES_SCRIPT} ${XML_OUTPUT} ${RST_OUTPUT}
COMMAND ${PYTHON_EXECUTABLE} ${DOXYGEN_MAPPING_SCRIPT} ${XML_OUTPUT} ${DOCS_BUILD_DIR} ${OpenVINO_SOURCE_DIR}/../
COMMAND ${CMAKE_COMMAND} -E copy ${SPHINX_INDEX_IN} ${SPHINX_INDEX_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${API_DOCS_IN} ${API_DOCS_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${SPHINX_TEMPLATES_IN} ${SPHINX_TEMPLATES_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${DOXYREST_IN} ${DOXYREST_OUT}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${DOXYREST_SPHINX_IN} ${DOXYREST_SPHINX_OUT}

View File

@@ -72,6 +72,7 @@ git checkout ed60b90
pip install pillow
```
6. Run a converter:
> **NOTE**: This converter works with TensorFlow 1.x and numpy 1.19 or lower.
- For YOLO-v3:
```sh
python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weights_file yolov3.weights

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d147adf801535e95d8b627a8a1d23f7b89dea1eabe06218235e756b0a9866fe
size 1636

View File

@@ -9,7 +9,6 @@
* Ubuntu 18.04.x long-term support (LTS), 64-bit
* Ubuntu 20.04.x long-term support (LTS), 64-bit
* Red Hat Enterprise Linux 8.x, 64-bit
.. note::
Since the OpenVINO™ 2022.1 release, CentOS 7.6, 64-bit is not longer supported.

View File

@@ -66,10 +66,9 @@ This guide provides step-by-step instructions on how to install the Intel® Dist
By default, the Intel® Distribution of OpenVINO™ is installed in the following directory, referred to as `<INSTALL_DIR>` elsewhere in the documentation:
* For root or administrator: `/opt/intel/openvino_<version>/`
* For regular users: `~/intel/openvino_<version>/`
`/opt/intel/openvino_<version>/`
For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2022/` or `~/intel/openvino_2022/`.
For simplicity, a symbolic link to the latest installation is also created: `/opt/intel/openvino_2022/`.
To check **Release Notes** please visit: [Release Notes](https://software.intel.com/en-us/articles/OpenVINO-RelNotes).

View File

@@ -1,11 +1,37 @@
# Install Intel® Distribution of OpenVINO™ Toolkit from PyPI Repository {#openvino_docs_install_guides_installing_openvino_pip}
OpenVINO™ toolkit is a comprehensive toolkit for quickly developing applications and solutions that solve a variety of tasks including emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, and many others. Based on the latest generations of artificial neural networks, including Convolutional Neural Networks (CNNs), recurrent and attention-based networks, the toolkit extends computer vision and non-vision workloads across Intel® hardware, maximizing performance. It accelerates applications with high-performance AI and deep learning inference deployed from edge to cloud.
You can install Intel® Distribution of OpenVINO™ toolkit through the PyPI repository, including both OpenVINO™ Runtime and OpenVINO™ Development Tools. Besides, from the 2022.1 release, OpenVINO Development Tools can only be installed via PyPI.
Intel® Distribution of OpenVINO™ Toolkit provides the following packages available for installation through the PyPI repository:
* Runtime package with the Inference Engine inside: [https://pypi.org/project/openvino/](https://pypi.org/project/openvino/)
* Developer package that includes the runtime package as a dependency, Model Optimizer, Accuracy Checker and Post-Training Optimization Tool: [https://pypi.org/project/openvino-dev](https://pypi.org/project/openvino-dev)
## Installing OpenVINO Runtime
The OpenVINO Runtime contains a set of libraries for an easy inference integration into your applications and supports heterogeneous execution across Intel® CPU and Intel® GPU hardware. To install OpenVINO Runtime, use the following command:
```
pip install openvino
```
For system requirements and more detailed steps, see <https://pypi.org/project/openvino>.
## Installing OpenVINO Development Tools
OpenVINO Development Tools include Model Optimizer, Benchmark Tool, Accuracy Checker, Post-Training Optimization Tool and Open Model Zoo tools including Model Downloader. While installing OpenVINO Development Tools, OpenVINO Runtime will also be installed as a dependency, so you don't need to install OpenVINO Runtime separately.
Use the following command to install OpenVINO Development Tools:
```
pip install openvino-dev[EXTRAS]
```
where the EXTRAS parameter specifies one or more deep learning frameworks via these values: `caffe`, `kaldi`, `mxnet`, `onnx`, `pytorch`, `tensorflow`, `tensorflow2`. Make sure that you install the corresponding frameworks for your models.
For example, to install and configure the components for working with TensorFlow 2.x, MXNet and Caffe, use the following command:
```
pip install openvino-dev[tensorflow2,mxnet,caffe]
```
> **NOTE**: For TensorFlow, use the `tensorflow2` value as much as possible. The `tensorflow` value is provided only for compatibility reasons.
For system requirements and more detailed steps, see <https://pypi.org/project/openvino-dev>.
## Additional Resources

View File

@@ -66,13 +66,13 @@
.. code-block:: sh
/home/<user>/intel/openvino/installer/installer
/home/<user>/intel/openvino_installer/installer
or in a case of administrative installation:
.. code-block:: sh
/opt/intel/openvino/installer/installer
/opt/intel/openvino_installer/installer
2. Follow the uninstallation wizard instructions.
@@ -82,7 +82,7 @@
.. code-block:: sh
open /opt/intel/openvino/installer/installer.app
open /opt/intel/openvino_installer/installer.app
2. Follow the uninstallation wizard instructions.

View File

@@ -1,47 +1,46 @@
alabaster==0.7.12
atomicwrites==1.4.0
attrs==21.4.0
Babel==2.9.1
beautifulsoup4==4.10.0
certifi==2021.10.8
charset-normalizer==2.0.10
attrs==20.3.0
Babel==2.9.0
beautifulsoup4==4.9.3
certifi==2020.12.5
colorama==0.4.4
Cython==0.29.26
docutils==0.14
idna==3.3
imagesize==1.3.0
importlib-metadata==4.10.1
Cython==0.29.23
docutils==0.16
idna==2.10
imagesize==1.2.0
importlib-metadata==3.10.0
iniconfig==1.1.1
ipython==8.0.1
Jinja2==3.0.3
lxml==4.7.1
MarkupSafe==2.0.1
mistune==2.0.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pydata-sphinx-theme==0.8.0
Pygments==2.11.2
pyparsing==3.0.6
pytest==6.2.5
ipython==7.22.0
Jinja2==2.11.3
lxml==4.6.3
MarkupSafe==1.1.1
mistune==2.0.0a6
packaging==20.9
pluggy==0.13.1
py==1.10.0
pydata-sphinx-theme==0.7.2
Pygments==2.8.1
pyparsing==2.4.7
pytest==6.2.3
pytest-html==3.1.1
pytest-metadata==1.11.0
pytz==2021.3
requests==2.27.1
six==1.16.0
snowballstemmer==2.2.0
soupsieve==2.3.1
Sphinx==4.4.0
sphinx-copybutton==0.4.0
sphinx-inline-tabs==2022.1.2b11
pytz==2021.1
requests==2.25.1
six==1.15.0
snowballstemmer==2.1.0
soupsieve==2.2.1
Sphinx==3.2.1
sphinx-copybutton==0.3.3
sphinx-inline-tabs==2021.8.17b10
sphinx-panels==0.6.0
sphinx-sitemap==2.2.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-htmlhelp==1.0.3
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-serializinghtml==1.1.4
toml==0.10.2
urllib3==1.26.8
zipp==3.7.0
urllib3==1.26.4
zipp==3.4.1

View File

@@ -0,0 +1,120 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "behavior/ov_executable_network/properties.hpp"
#include "openvino/runtime/properties.hpp"
using namespace ov::test::behavior;
namespace {
const std::vector<ov::AnyMap> inproperties = {
{ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")},
};
const std::vector<ov::AnyMap> hetero_inproperties = {
{ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")},
};
const std::vector<ov::AnyMap> multi_inproperties = {
{ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")},
};
const std::vector<ov::AnyMap> auto_inproperties = {
{ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")},
};
const std::vector<ov::AnyMap> auto_batch_inproperties = {
{ov::device::id("UNSUPPORTED_DEVICE_ID_STRING")},
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(hetero_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multi_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_AUTO),
::testing::ValuesIn(auto_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVCompiledModelPropertiesIncorrectTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_BATCH),
::testing::ValuesIn(auto_batch_inproperties)),
OVCompiledModelPropertiesIncorrectTests::getTestCaseName);
const std::vector<ov::AnyMap> default_properties = {
{ov::enable_profiling(true)},
{ov::device::id("0")},
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelPropertiesDefaultTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(default_properties)),
OVCompiledModelPropertiesDefaultTests::getTestCaseName);
const std::vector<ov::AnyMap> properties = {
{ov::enable_profiling(true)},
{ov::device::id("0")},
};
const std::vector<ov::AnyMap> hetero_properties = {
{ov::device::priorities(CommonTestUtils::DEVICE_TEMPLATE), ov::enable_profiling(true)},
{ov::device::priorities(CommonTestUtils::DEVICE_TEMPLATE), ov::device::id("0")},
};
const std::vector<ov::AnyMap> multi_properties = {
{ov::device::priorities(CommonTestUtils::DEVICE_TEMPLATE), ov::enable_profiling(true)},
{ov::device::priorities(CommonTestUtils::DEVICE_TEMPLATE), ov::device::id("0")},
};
const std::vector<ov::AnyMap> auto_batch_properties = {
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_TEMPLATE}},
{{CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_TEMPLATE},
{CONFIG_KEY(AUTO_BATCH_TIMEOUT) , "1"}},
};
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_TEMPLATE),
::testing::ValuesIn(properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_HETERO),
::testing::ValuesIn(hetero_properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multi_properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AutoBatch_BehaviorTests, OVCompiledModelPropertiesTests,
::testing::Combine(
::testing::Values(CommonTestUtils::DEVICE_BATCH),
::testing::ValuesIn(auto_batch_properties)),
OVCompiledModelPropertiesTests::getTestCaseName);
} // namespace

View File

@@ -24,7 +24,10 @@ static const char input_message[] =
" of files for each input (except cases with single file for any input):"
"\"input1:1.jpg input2:1.bin\", \"input1:1.bin,2.bin input2:3.bin input3:4.bin,5.bin \"."
" Also you can pass specific keys for inputs: \"random\" - for fillling input with random data,"
" \"image_info\" - for filling input with image size.";
" \"image_info\" - for filling input with image size.\n"
" You should specify either one files set to be used for all inputs (without "
"providing "
"input names) or separate files sets for every input of model (providing inputs names).";
/// @brief message for model argument
static const char model_message[] =
@@ -329,7 +332,7 @@ DEFINE_string(data_shape, "", data_shape_message);
DEFINE_string(layout, "", layout_message);
/// @brief Define flag for inference precision
DEFINE_string(infer_precision, "f32", inference_precision_message);
DEFINE_string(infer_precision, "", inference_precision_message);
/// @brief Specify precision for all input layers of the network
DEFINE_string(ip, "", inputs_precision_message);

View File

@@ -329,7 +329,7 @@ int main(int argc, char* argv[]) {
} else if (supported(ov::num_streams.name())) {
// Use API 2.0 key for streams
key = ov::num_streams.name();
device_config[key] = ov::NumStreams::AUTO;
device_config[key] = ov::streams::AUTO;
}
}
}
@@ -550,7 +550,9 @@ int main(int argc, char* argv[]) {
info.at(name).type = type_to_set;
}
}
// Explicitly set inputs layout.
}
// Explicitly set inputs layout.
if (!name.empty() && !app_inputs_info[0].at(name).layout.empty()) {
in.model().set_layout(app_inputs_info[0].at(name).layout);
}
}
@@ -1059,8 +1061,7 @@ int main(int argc, char* argv[]) {
StatisticsVariant("Percentile boundary", "percentile_boundary", FLAGS_latency_percentile),
StatisticsVariant("Average latency (ms)", "latency_avg", generalLatency.avg),
StatisticsVariant("Min latency (ms)", "latency_min", generalLatency.min),
StatisticsVariant("Max latency (ms)", "latency_max", generalLatency.max),
StatisticsVariant("throughput", "throughput", fps)});
StatisticsVariant("Max latency (ms)", "latency_max", generalLatency.max)});
if (FLAGS_pcseq && app_inputs_info.size() > 1) {
for (size_t i = 0; i < groupLatencies.size(); ++i) {
@@ -1070,6 +1071,8 @@ int main(int argc, char* argv[]) {
}
}
}
statistics->add_parameters(StatisticsReport::Category::EXECUTION_RESULTS,
{StatisticsVariant("throughput", "throughput", fps)});
}
progressBar.finish();

View File

@@ -317,7 +317,11 @@ std::map<std::string, std::vector<std::string>> parse_input_arguments(const std:
}
for (auto& file : files.second) {
readInputFilesArguments(mapped_files[files.first], file);
if (file == "image_info" || file == "random") {
mapped_files[files.first].push_back(file);
} else {
readInputFilesArguments(mapped_files[files.first], file);
}
}
}
args_it = files_end;

View File

@@ -53,7 +53,7 @@ int main(int argc, char* argv[]) {
// try to find it.
ov::NodeVector ops = model->get_ops();
auto it = std::find_if(ops.begin(), ops.end(), [](std::shared_ptr<ov::Node> node) {
return node->get_type_info() == ngraph::op::DetectionOutput::get_type_info_static();
return node->get_type_info().name == ngraph::op::DetectionOutput::get_type_info_static().name;
});
if (it == ops.end()) {
throw std::logic_error("model does not contain DetectionOutput layer");

View File

@@ -107,7 +107,8 @@ Options:
-q "<mode>" Optional. Input quantization mode: static (default), dynamic, or user (use with -sf).
-qb "<integer>" Optional. Weight bits for quantization: 8 or 16 (default)
-sf "<double>" Optional. User-specified input scale factor for quantization (use with -q user). If the network contains multiple inputs, provide scale factors by separating them with commas.
-bs "<integer>" Optional. Batch size 1-8 (default 1)
-bs "<integer>" Optional. Batch size 1-8
-layout "<string>" Optional. Prompts how network layouts should be treated by application.For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size.
-r "<path>" Optional. Read reference score file and compare scores. Example of usage: <reference.ark> or <reference.npz>
-rg "<path>" Read GNA model from file using path/filename provided (required if -m is missing).
-wg "<path>" Optional. Write GNA model to file using path/filename provided.

View File

@@ -19,7 +19,7 @@
// clang-format off
#include <openvino/openvino.hpp>
#include <gna/gna_config.hpp>
#include <openvino/runtime/intel_gna/properties.hpp>
#include <samples/args_helper.hpp>
#include <samples/slog.hpp>
@@ -83,7 +83,7 @@ int main(int argc, char* argv[]) {
// -------------------------------------
ov::Core core;
slog::info << "Loading model files:" << slog::endl << FLAGS_m << slog::endl;
uint32_t batchSize = (FLAGS_cw_r > 0 || FLAGS_cw_l > 0) ? 1 : (uint32_t)FLAGS_bs;
uint32_t batchSize = (FLAGS_cw_r > 0 || FLAGS_cw_l > 0 || !FLAGS_bs) ? 1 : (uint32_t)FLAGS_bs;
std::shared_ptr<ov::Model> model;
std::vector<std::string> outputs;
std::vector<size_t> ports;
@@ -115,16 +115,38 @@ int main(int argc, char* argv[]) {
}
}
check_number_of_inputs(model->inputs().size(), numInputFiles);
const ov::Layout tensor_layout{"NC"};
ov::preprocess::PrePostProcessor proc(model);
for (int i = 0; i < model->inputs().size(); i++) {
proc.input(i).tensor().set_element_type(ov::element::f32).set_layout(tensor_layout);
const auto& inputs = model->inputs();
std::map<std::string, std::string> custom_layouts;
if (!FLAGS_layout.empty()) {
custom_layouts = parse_input_layouts(FLAGS_layout, inputs);
}
for (const auto& input : inputs) {
const auto& item_name = input.get_any_name();
auto& in = proc.input(item_name);
in.tensor().set_element_type(ov::element::f32);
// Explicitly set inputs layout
if (custom_layouts.count(item_name) > 0) {
in.model().set_layout(ov::Layout(custom_layouts.at(item_name)));
}
}
for (int i = 0; i < model->outputs().size(); i++) {
proc.output(i).tensor().set_element_type(ov::element::f32);
}
model = proc.build();
ov::set_batch(model, batchSize);
if (FLAGS_bs) {
if (FLAGS_layout.empty() &&
std::any_of(inputs.begin(), inputs.end(), [](const ov::Output<ov::Node>& i) {
return ov::layout::get_layout(i).empty();
})) {
throw std::logic_error(
"-bs option is set to " + std::to_string(FLAGS_bs) +
" but model does not contain layout information for any input. Please "
"specify it explicitly using -layout option. For example, input1[NCHW], input2[NC] or [NC]");
} else {
ov::set_batch(model, batchSize);
}
}
}
// ------------------------------ Get Available Devices ------------------------------------------------------
auto isFeature = [&](const std::string xFeature) {
@@ -141,8 +163,15 @@ int main(int argc, char* argv[]) {
if (useGna) {
std::string gnaDevice =
useHetero ? FLAGS_d.substr(FLAGS_d.find("GNA"), FLAGS_d.find(",") - FLAGS_d.find("GNA")) : FLAGS_d;
gnaPluginConfig[InferenceEngine::GNAConfigParams::KEY_GNA_DEVICE_MODE] =
gnaDevice.find("_") == std::string::npos ? "GNA_AUTO" : gnaDevice;
auto parse_gna_device = [&](const std::string& device) -> ov::intel_gna::ExecutionMode {
ov::intel_gna::ExecutionMode mode;
std::stringstream ss(device);
ss >> mode;
return mode;
};
gnaPluginConfig[ov::intel_gna::execution_mode.name()] = gnaDevice.find("_") == std::string::npos
? ov::intel_gna::ExecutionMode::AUTO
: parse_gna_device(gnaDevice);
}
if (FLAGS_pc) {
genericPluginConfig.emplace(ov::enable_profiling(true));
@@ -151,23 +180,23 @@ int main(int argc, char* argv[]) {
if (!FLAGS_rg.empty()) {
slog::warn << "Custom scale factor will be used for imported gna model: " << FLAGS_rg << slog::endl;
}
auto scaleFactorInput = parse_scale_factors(FLAGS_sf);
if (numInputFiles != scaleFactorInput.size()) {
auto scale_factors_per_input = parse_scale_factors(model->inputs(), FLAGS_sf);
if (numInputFiles != scale_factors_per_input.size()) {
std::string errMessage(
"Incorrect command line for multiple inputs: " + std::to_string(scaleFactorInput.size()) +
"Incorrect command line for multiple inputs: " + std::to_string(scale_factors_per_input.size()) +
" scale factors provided for " + std::to_string(numInputFiles) + " input files.");
throw std::logic_error(errMessage);
}
for (size_t i = 0; i < scaleFactorInput.size(); ++i) {
slog::info << "For input " << i << " using scale factor of " << scaleFactorInput[i] << slog::endl;
std::string scaleFactorConfigKey = GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i);
gnaPluginConfig[scaleFactorConfigKey] = scaleFactorInput[i];
for (auto&& sf : scale_factors_per_input) {
slog::info << "For input " << sf.first << " using scale factor of " << sf.second << slog::endl;
}
gnaPluginConfig[ov::intel_gna::scale_factors_per_input.name()] = scale_factors_per_input;
} else {
// "static" quantization with calculated scale factor
if (!FLAGS_rg.empty()) {
slog::info << "Using scale factor from provided imported gna model: " << FLAGS_rg << slog::endl;
} else {
std::map<std::string, float> scale_factors_per_input;
for (size_t i = 0; i < numInputFiles; i++) {
auto inputFileName = inputFiles[i].c_str();
std::string name;
@@ -187,30 +216,26 @@ int main(int argc, char* argv[]) {
numFrames * numFrameElements);
slog::info << "Using scale factor of " << floatScaleFactor << " calculated from first utterance."
<< slog::endl;
std::string scaleFactorConfigKey =
GNA_CONFIG_KEY(SCALE_FACTOR) + std::string("_") + std::to_string(i);
gnaPluginConfig[scaleFactorConfigKey] = std::to_string(floatScaleFactor);
scale_factors_per_input[model->input(i).get_any_name()] = floatScaleFactor;
}
gnaPluginConfig[ov::intel_gna::scale_factors_per_input.name()] = scale_factors_per_input;
}
}
if (FLAGS_qb == 8) {
gnaPluginConfig[InferenceEngine::GNAConfigParams::KEY_GNA_PRECISION] = "I8";
} else {
gnaPluginConfig[InferenceEngine::GNAConfigParams::KEY_GNA_PRECISION] = "I16";
}
gnaPluginConfig[InferenceEngine::GNAConfigParams::KEY_GNA_EXEC_TARGET] = FLAGS_exec_target;
gnaPluginConfig[InferenceEngine::GNAConfigParams::KEY_GNA_COMPILE_TARGET] = FLAGS_compile_target;
gnaPluginConfig[GNA_CONFIG_KEY(COMPACT_MODE)] = CONFIG_VALUE(NO);
IE_SUPPRESS_DEPRECATED_START
gnaPluginConfig[GNA_CONFIG_KEY(PWL_MAX_ERROR_PERCENT)] = std::to_string(FLAGS_pwl_me);
IE_SUPPRESS_DEPRECATED_END
gnaPluginConfig[ov::hint::inference_precision.name()] = (FLAGS_qb == 8) ? ov::element::i8 : ov::element::i16;
auto parse_target = [&](const std::string& target) -> ov::intel_gna::HWGeneration {
return (target == "GNA_TARGET_2_0") ? ov::intel_gna::HWGeneration::GNA_2_0
: (target == "GNA_TARGET_3_0") ? ov::intel_gna::HWGeneration::GNA_3_0
: ov::intel_gna::HWGeneration::UNDEFINED;
};
gnaPluginConfig[ov::intel_gna::execution_target.name()] = parse_target(FLAGS_exec_target);
gnaPluginConfig[ov::intel_gna::compile_target.name()] = parse_target(FLAGS_compile_target);
gnaPluginConfig[ov::intel_gna::memory_reuse.name()] = false;
gnaPluginConfig[ov::intel_gna::pwl_max_error_percent.name()] = FLAGS_pwl_me;
// -----------------------------------------------------------------------------------------------------
// --------------------------- Write model to file --------------------------------------------------
// Embedded GNA model dumping (for Intel(R) Speech Enabling Developer Kit)
if (!FLAGS_we.empty()) {
IE_SUPPRESS_DEPRECATED_START
gnaPluginConfig[InferenceEngine::GNAConfigParams::KEY_GNA_FIRMWARE_MODEL_IMAGE] = FLAGS_we;
IE_SUPPRESS_DEPRECATED_END
gnaPluginConfig[ov::intel_gna::firmware_model_image_path.name()] = FLAGS_we;
}
// -----------------------------------------------------------------------------------------------------
// --------------------------- Step 2. Loading model to the device ------------------------------------------
@@ -232,6 +257,22 @@ int main(int argc, char* argv[]) {
throw std::runtime_error("Cannot open model file " + FLAGS_rg);
}
executableNet = core.import_model(streamrq, deviceStr, genericPluginConfig);
// loading batch from exported model
const auto& imported_inputs = executableNet.inputs();
if (std::any_of(imported_inputs.begin(), imported_inputs.end(), [](const ov::Output<const ov::Node>& i) {
return ov::layout::get_layout(i).empty();
})) {
slog::warn << "No batch dimension was found at any input, assuming batch to be 1." << slog::endl;
batchSize = 1;
} else {
for (auto& info : imported_inputs) {
auto imported_layout = ov::layout::get_layout(info);
if (ov::layout::has_batch(imported_layout)) {
batchSize = (uint32_t)info.get_shape()[ov::layout::batch_idx(imported_layout)];
break;
}
}
}
}
// --------------------------- Exporting gna model using InferenceEngine AOT API---------------------
if (!FLAGS_wg.empty()) {
@@ -248,7 +289,8 @@ int main(int argc, char* argv[]) {
return 0;
}
// ---------------------------------------------------------------------------------------------------------
// --------------------------- Step 3. Create infer request --------------------------------------------------
// --------------------------- Step 3. Create infer request
// --------------------------------------------------
std::vector<InferRequestStruct> inferRequests(1);
for (auto& inferRequest : inferRequests) {
@@ -430,7 +472,8 @@ int main(int argc, char* argv[]) {
outputBlob =
inferRequest.inferRequest.get_tensor(executableNet.output(FLAGS_oname));
}
// locked memory holder should be alive all time while access to its buffer happens
// locked memory holder should be alive all time while access to its buffer
// happens
auto byteSize = numScoresPerFrame * sizeof(float);
std::memcpy(outputFrame, outputBlob.data<float>(), byteSize);
}

View File

@@ -90,7 +90,8 @@ static const char quantization_bits_message[] = "Optional. Weight bits for quant
static const char scale_factor_message[] =
"Optional. User-specified input scale factor for quantization (use with -q user). "
"If the network contains multiple inputs, provide scale factors by separating them with "
"commas.";
"commas. "
"For example: <input_name1>:<sf1>,<input_name2>:<sf2> or just <sf> to be applied to all inputs";
/// @brief message for batch size argument
static const char batch_size_message[] = "Optional. Batch size 1-8 (default 1)";
@@ -120,6 +121,11 @@ static const char output_layer_names_message[] = "Optional. Layer names for outp
static const char input_layer_names_message[] = "Optional. Layer names for input blobs. "
"The names are separated with \",\" "
"Example: Input1,Input2 ";
/// @brief message for inputs layer names
static const char layout_message[] =
"Optional. Prompts how network layouts should be treated by application. "
"For example, \"input1[NCHW],input2[NC]\" or \"[NCHW]\" in case of one input size.";
;
/// @brief message for PWL max error percent
static const char pwl_max_error_percent_message[] = "Optional. The maximum percent of error for PWL function."
@@ -175,8 +181,8 @@ DEFINE_int32(qb, 16, quantization_bits_message);
/// @brief Scale factor for quantization
DEFINE_string(sf, "", scale_factor_message);
/// @brief Batch size (default 1)
DEFINE_int32(bs, 1, batch_size_message);
/// @brief Batch size (default 0)
DEFINE_int32(bs, 0, batch_size_message);
/// @brief Number of threads to use for inference on the CPU (also affects Hetero cases)
DEFINE_int32(nthreads, 1, infer_num_threads_message);
@@ -193,6 +199,9 @@ DEFINE_string(oname, "", output_layer_names_message);
/// @brief Input layer name
DEFINE_string(iname, "", input_layer_names_message);
/// @brief Input layer name
DEFINE_string(layout, "", layout_message);
/// @brief PWL max error percent
DEFINE_double(pwl_me, 1.0, pwl_max_error_percent_message);
@@ -222,6 +231,7 @@ static void show_usage() {
std::cout << " -cw_r \"<integer>\" " << context_window_message_r << std::endl;
std::cout << " -oname \"<string>\" " << output_layer_names_message << std::endl;
std::cout << " -iname \"<string>\" " << input_layer_names_message << std::endl;
std::cout << " -layout \"<string>\" " << layout_message << std::endl;
std::cout << " -pwl_me \"<double>\" " << pwl_max_error_percent_message << std::endl;
std::cout << " -exec_target \"<string>\" " << execution_target_message << std::endl;
std::cout << " -compile_target \"<string>\" " << compile_target_message << std::endl;
@@ -281,7 +291,7 @@ bool parse_and_check_command_line(int argc, char* argv[]) {
}
uint32_t batchSize = (uint32_t)FLAGS_bs;
if ((batchSize < 1) || (batchSize > 8)) {
if (batchSize && ((batchSize < 1) || (batchSize > 8))) {
throw std::logic_error("Batch size out of range (1..8).");
}

View File

@@ -361,30 +361,113 @@ void sum_performance_counters(std::map<std::string, ov::ProfilingInfo> const& pe
}
/**
* @brief Parse scale factors
* @param str reference to user-specified input scale factor for quantization, can be separated by comma
* @return vector scale factors
* @brief Split string by delimeter
* @param s input string
* @param delim delimeter
* @return vector of chunks
*/
std::vector<std::string> parse_scale_factors(const std::string& str) {
std::vector<std::string> scaleFactorInput;
std::vector<std::string> split(const std::string& s, char delim) {
std::vector<std::string> result;
std::stringstream ss(s);
std::string item;
if (!str.empty()) {
std::string outStr;
std::istringstream stream(str);
int i = 0;
while (getline(stream, outStr, ',')) {
auto floatScaleFactor = std::stof(outStr);
if (floatScaleFactor <= 0.0f) {
throw std::logic_error("Scale factor for input #" + std::to_string(i) +
" (counting from zero) is out of range (must be positive).");
}
scaleFactorInput.push_back(outStr);
i++;
}
} else {
throw std::logic_error("Scale factor need to be specified via -sf option if you are using -q user");
while (getline(ss, item, delim)) {
result.push_back(item);
}
return scaleFactorInput;
return result;
}
/**
* @brief Concat strings using delimeter
* @param chunks input chunks
* @param delim delimeter
* @return concatenated string
*/
std::string concat(const std::vector<std::string>& chunks, char delim) {
std::stringstream ss;
for (auto&& chunk : chunks) {
if (!ss.str().empty()) {
ss << delim;
}
ss << chunk;
}
return ss.str();
}
/**
* @brief Check whether name is present in node vector
* @param nodes nodes
* @param node_name name
* @return false or true
*/
bool check_name(const ov::OutputVector& nodes, const std::string& node_name) {
std::vector<std::string> any_names;
bool count = false;
for (auto& node : nodes) {
any_names.push_back(node.get_any_name());
auto names = node.get_names();
count = std::count(names.begin(), names.end(), node_name);
if (count)
break;
}
if (!count) {
std::stringstream ss;
ss << "Incorrect node name '" + node_name << "'! ";
ss << "Try one of the following names: [ ";
for (auto&& name : any_names) {
ss << name << " ";
}
ss << "]";
throw std::logic_error(ss.str());
}
return count;
}
/**
* @brief Parse scale factors per input
* Format : <input_name1>:<sf1>,<input2>:<sf2> or just <sf>
* @param inputs model inputs
* @param values_string values_string input string
* @return map of scale factors per input
*/
std::map<std::string, float> parse_scale_factors(const ov::OutputVector& inputs, const std::string& values_string) {
auto get_sf = [&](const std::string& sf_string, const std::string& input_name = "") -> float {
float sf;
try {
sf = std::stof(sf_string);
} catch (...) {
throw std::logic_error("Can't get float scale factor from: " + sf_string);
}
if (sf <= 0.0f) {
throw std::logic_error("Scale factor for input '" + input_name +
"' (counting from zero) is out of range (must be positive).");
}
return sf;
};
std::map<std::string, float> result;
auto scale_factor_strings = split(values_string, ',');
for (auto& scale_factor_string : scale_factor_strings) {
auto values = split(scale_factor_string, ':');
if (values.size() == 1) {
if (scale_factor_strings.size() != 1) {
throw std::logic_error("Unrecognized scale factor format! "
"Please specify <input_name1>:<sf1>,<input_name2>:<sf2> or "
"just <sf> to be applied to all inputs");
}
auto scale_factor = get_sf(values.at(0));
for (auto& input : inputs) {
result[input.get_any_name()] = scale_factor;
}
} else if (values.size() > 0) {
auto sf_sting = values.back();
values.pop_back();
// input name can contain port, concat back
auto input_name = concat(values, ':');
check_name(inputs, input_name);
result[input_name] = get_sf(sf_sting, input_name);
}
}
return result;
}
/**
@@ -405,3 +488,43 @@ std::vector<std::string> convert_str_to_vector(std::string str) {
}
return blobName;
}
/**
* @brief Parse layout string like "input0[value0],input1[value1]" or "[value]" (applied to all inputs)
* @param layout_string input names with layout values
* @param input_info reference to vector of inputs
* @return map of inputs with layout values
*/
std::map<std::string, std::string> parse_input_layouts(const std::string& layout_string,
const std::vector<ov::Output<ov::Node>>& input_info) {
// Parse parameter string like "input0[value0],input1[value1]" or "[value]" (applied to all
// inputs)
std::map<std::string, std::string> return_value;
std::string search_string = layout_string;
auto start_pos = search_string.find_first_of('[');
auto input_name = search_string.substr(0, start_pos);
while (start_pos != std::string::npos) {
auto end_pos = search_string.find_first_of(']');
if (end_pos == std::string::npos)
break;
if (start_pos)
input_name = search_string.substr(0, start_pos);
auto input_value = search_string.substr(start_pos + 1, end_pos - start_pos - 1);
if (!input_name.empty()) {
return_value[input_name] = input_value;
} else {
for (auto& item : input_info) {
return_value[item.get_any_name()] = input_value;
}
}
search_string = search_string.substr(end_pos + 1);
if (search_string.empty() || (search_string.front() != ',' && search_string.front() != '['))
break;
if (search_string.front() == ',')
search_string = search_string.substr(1);
start_pos = search_string.find_first_of('[');
}
if (!search_string.empty())
throw std::logic_error("Can't parse input parameter string: " + layout_string);
return return_value;
}

View File

@@ -49,7 +49,7 @@ install(PROGRAMS "${_setupvars_file}"
# install install_dependencies
if(UNIX)
if(LINUX)
ie_cpack_add_component(install_dependencies REQUIRED)
install(DIRECTORY install_dependencies/
DESTINATION install_dependencies

View File

@@ -1 +0,0 @@
<meta http-equiv="REFRESH" content="0;URL=http://docs.openvinotoolkit.org/2019_R1/_docs_install_guides_installing_openvino_linux.html#set-the-environment-variables">

View File

@@ -183,7 +183,10 @@ void regclass_AsyncInferQueue(py::module m) {
// getIdleRequestId function has an intention to block InferQueue
// until there is at least one idle (free to use) InferRequest
auto handle = self.get_idle_request_id();
self._idle_handles.pop();
{
std::lock_guard<std::mutex> lock(self._mutex);
self._idle_handles.pop();
}
// Set new inputs label/id from user
self._user_ids[handle] = userdata;
// Update inputs if there are any
@@ -314,4 +317,8 @@ void regclass_AsyncInferQueue(py::module m) {
:return: List of all passed userdata. None if the data wasn't passed yet.
:rtype: List[Any]
)");
cls.def("__repr__", [](const AsyncInferQueue& self) {
return "<AsyncInferQueue: " + std::to_string(self._requests.size()) + " jobs>";
});
}

View File

@@ -7,9 +7,11 @@
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/iostream.h>
#include <openvino/core/type/element_type.hpp>
#include <string>
#include <iterator>
#include "Python.h"
#include "ie_common.h"
@@ -59,4 +61,39 @@ public:
return &impl.get();
}
};
namespace docs {
template<typename Container, typename std::enable_if<std::is_same<typename Container::value_type, std::string>::value, bool>::type = true>
std::string container_to_string(const Container& c, const std::string& delimiter) {
if (c.size() == 0) {
return std::string{};
}
std::string buffer;
for (const auto& elem : c) {
buffer += elem + delimiter;
}
buffer.erase(buffer.end() - delimiter.size(), buffer.end());
return buffer;
}
template<typename Container, typename std::enable_if<!std::is_same<typename Container::value_type, std::string>::value, bool>::type = true>
std::string container_to_string(const Container& c, const std::string& delimiter) {
if (c.size() == 0) {
return std::string{};
}
std::string buffer;
for (const auto& elem : c) {
buffer += py::cast<std::string>(py::cast(elem).attr("__repr__")()) + delimiter;
}
buffer.erase(buffer.end() - delimiter.size(), buffer.end());
return buffer;
}
}; // namespace docs
}; // namespace Common

View File

@@ -284,4 +284,11 @@ void regclass_CompiledModel(py::module m) {
:return: A compiled model output.
:rtype: openvino.runtime.ConstOutput
)");
cls.def("__repr__", [](const ov::CompiledModel& self) {
auto inputs_str = Common::docs::container_to_string(self.inputs(), ",\n");
auto outputs_str = Common::docs::container_to_string(self.outputs(), ",\n");
return "<CompiledModel:\ninputs[\n" + inputs_str + "\n]\noutputs[\n" + outputs_str + "\n]>";
});
}

View File

@@ -634,4 +634,11 @@ void regclass_InferRequest(py::module m) {
:return: Dictionary of results from output tensors with ports as keys.
:rtype: Dict[openvino.runtime.ConstOutput, numpy.array]
)");
cls.def("__repr__", [](const InferRequestWrapper& self) {
auto inputs_str = Common::docs::container_to_string(self._inputs, ",\n");
auto outputs_str = Common::docs::container_to_string(self._outputs, ",\n");
return "<InferRequest:\ninputs[\n" + inputs_str + "\n]\noutputs[\n" + outputs_str + "\n]>";
});
}

View File

@@ -228,4 +228,12 @@ void regclass_Tensor(py::module m) {
R"(
Tensor's shape get/set.
)");
cls.def("__repr__", [](const ov::Tensor& self) {
std::stringstream ss;
ss << "shape" << self.get_shape() << " type: " << self.get_element_type();
return "<Tensor: " + ss.str() + ">";
});
}

View File

@@ -8,14 +8,13 @@
#include <pybind11/stl_bind.h>
#include "extension/json_config.hpp"
#include "manager.hpp"
#include "openvino/frontend/exception.hpp"
#include "openvino/frontend/extension/conversion.hpp"
#include "openvino/frontend/extension/decoder_transformation.hpp"
#include "openvino/frontend/extension/op.hpp"
#include "openvino/frontend/extension/progress_reporter.hpp"
#include "openvino/frontend/extension/telemetry.hpp"
#include "pyopenvino/graph/model.hpp"
#include "pyopenvino/utils/utils.hpp"
namespace py = pybind11;
@@ -130,7 +129,7 @@ void regclass_frontend_OpExtension(py::module m) {
const std::map<std::string, py::object>& attr_values_map) {
std::map<std::string, ov::Any> any_map;
for (const auto& it : attr_values_map) {
any_map[it.first] = it.second;
any_map[it.first] = py_object_to_any(it.second);
}
return std::make_shared<OpExtension<void>>(fw_type_name, attr_names_map, any_map);
}),
@@ -144,8 +143,9 @@ void regclass_frontend_OpExtension(py::module m) {
const std::map<std::string, py::object>& attr_values_map) {
std::map<std::string, ov::Any> any_map;
for (const auto& it : attr_values_map) {
any_map[it.first] = it.second;
any_map[it.first] = py_object_to_any(it.second);
}
return std::make_shared<OpExtension<void>>(ov_type_name, fw_type_name, attr_names_map, any_map);
}),
py::arg("ov_type_name"),

View File

@@ -1,4 +1,4 @@
# Copyright (C) 2021 Intel Corporation
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@@ -19,7 +19,8 @@ function(frontend_module TARGET FRAMEWORK INSTALL_COMPONENT)
add_dependencies(${TARGET_NAME} pyopenvino)
target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}"
"${PYTHON_SOURCE_DIR}/pyopenvino/utils/")
target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime openvino::frontend::${FRAMEWORK})
# Compatibility with python 2.7 which has deprecated "register" specifier

View File

@@ -72,7 +72,7 @@ void regclass_frontend_NodeContext(py::module m) {
CAST_TO_PY(any, dtype, int64_t);
CAST_TO_PY(any, dtype, bool);
CAST_TO_PY(any, dtype, std::string);
CAST_TO_PY(any, dtype, float);
CAST_TO_PY(any, dtype, double);
CAST_TO_PY(any, dtype, ov::element::Type);
CAST_TO_PY(any, dtype, ov::PartialShape);
@@ -83,7 +83,7 @@ void regclass_frontend_NodeContext(py::module m) {
CAST_VEC_TO_PY(any, dtype, std::vector<bool>);
#endif
CAST_VEC_TO_PY(any, dtype, std::vector<std::string>);
CAST_VEC_TO_PY(any, dtype, std::vector<float>);
CAST_VEC_TO_PY(any, dtype, std::vector<double>);
CAST_VEC_TO_PY(any, dtype, std::vector<ov::element::Type>);
CAST_VEC_TO_PY(any, dtype, std::vector<ov::PartialShape>);

View File

@@ -3,6 +3,7 @@
//
#include "extension.hpp"
#include "utils.hpp"
#include <pybind11/functional.h>
#include <pybind11/pybind11.h>
@@ -52,9 +53,10 @@ void regclass_frontend_onnx_OpExtension(py::module m) {
ext.def(py::init([](const std::string& fw_type_name,
const std::map<std::string, std::string>& attr_names_map,
const std::map<std::string, py::object>& attr_values_map) {
std::map<std::string, ov::Any> any_map;
for (const auto& it : attr_values_map) {
any_map[it.first] = it.second;
any_map[it.first] = py_object_to_any(it.second);
}
return std::make_shared<OpExtension<void>>(fw_type_name, attr_names_map, any_map);
}), py::arg("fw_type_name"),
@@ -65,9 +67,10 @@ void regclass_frontend_onnx_OpExtension(py::module m) {
const std::string& fw_type_name,
const std::map<std::string, std::string>& attr_names_map,
const std::map<std::string, py::object>& attr_values_map) {
std::map<std::string, ov::Any> any_map;
for (const auto& it : attr_values_map) {
any_map[it.first] = it.second;
any_map[it.first] = py_object_to_any(it.second);
}
return std::make_shared<OpExtension<void>>(ov_type_name, fw_type_name, attr_names_map, any_map);
}),

View File

@@ -3,6 +3,7 @@
//
#include "extension.hpp"
#include "utils.hpp"
#include <pybind11/functional.h>
#include <pybind11/pybind11.h>
@@ -52,7 +53,7 @@ void regclass_frontend_tensorflow_OpExtension(py::module m) {
const std::map<std::string, py::object>& attr_values_map) {
std::map<std::string, ov::Any> any_map;
for (const auto& it : attr_values_map) {
any_map[it.first] = it.second;
any_map[it.first] = py_object_to_any(it.second);
}
return std::make_shared<OpExtension<void>>(fw_type_name, attr_names_map, any_map);
}), py::arg("fw_type_name"),
@@ -65,7 +66,7 @@ void regclass_frontend_tensorflow_OpExtension(py::module m) {
const std::map<std::string, py::object>& attr_values_map) {
std::map<std::string, ov::Any> any_map;
for (const auto& it : attr_values_map) {
any_map[it.first] = it.second;
any_map[it.first] = py_object_to_any(it.second);
}
return std::make_shared<OpExtension<void>>(ov_type_name, fw_type_name, attr_names_map, any_map);
}),

View File

@@ -11,6 +11,7 @@
#include "openvino/core/partial_shape.hpp"
#include "openvino/op/parameter.hpp" // ov::op::v0::Parameter
#include "openvino/op/sink.hpp"
#include "pyopenvino/core/common.hpp"
#include "pyopenvino/core/tensor.hpp"
#include "pyopenvino/graph/ops/result.hpp"
#include "pyopenvino/graph/ops/util/variable.hpp"
@@ -595,17 +596,17 @@ void regclass_graph_Model(py::module m) {
:type evaluation_context: PyRTMap
:rtype: bool
)");
function.def("__repr__", [](const ov::Model& self) {
std::string class_name = py::cast(self).get_type().attr("__name__").cast<std::string>();
std::stringstream shapes_ss;
for (size_t i = 0; i < self.get_output_size(); ++i) {
if (i > 0) {
shapes_ss << ", ";
}
shapes_ss << self.get_output_partial_shape(i);
}
return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>";
auto inputs_str = Common::docs::container_to_string(self.inputs(), ",\n");
auto outputs_str = Common::docs::container_to_string(self.outputs(), ",\n");
return "<" + class_name + ": '" + self.get_friendly_name() + "'\ninputs[\n" + inputs_str + "\n]\noutputs[\n" +
outputs_str + "\n]>";
});
function.def_static("from_capsule", [](py::object* capsule) {
// get the underlying PyObject* which is a PyCapsule pointer
auto* pybind_capsule_ptr = capsule->ptr();

View File

@@ -9,6 +9,7 @@
#include <pybind11/stl.h>
#include "openvino/core/node_output.hpp"
#include "pyopenvino/core/common.hpp"
namespace py = pybind11;
@@ -34,8 +35,8 @@ void regclass_graph_Output(py::module m, std::string typestring)
output.def(py::self == py::self);
output.def(py::self != py::self);
output.def("__hash__", [](ov::Output<VT>& port) {
return std::hash<VT*>()(port.get_node()) + port.get_index();
output.def("__hash__", [](ov::Output<VT>& self) {
return std::hash<VT*>()(self.get_node()) + self.get_index();
});
output.def("get_node",
@@ -128,6 +129,14 @@ void regclass_graph_Output(py::module m, std::string typestring)
:rtype: openvino.runtime.RTMap
)");
output.def("__repr__", [typestring](const ov::Output<VT>& self) {
std::stringstream shape_type_ss;
auto names_str = Common::docs::container_to_string(self.get_names(), ", ");
shape_type_ss << " shape" << self.get_partial_shape() << " type: " << self.get_element_type();
return "<" + typestring + "Output: names[" + names_str + "]" + shape_type_ss.str() + ">";
});
output.def_property_readonly("node", &ov::Output<VT>::get_node_shared_ptr);
output.def_property_readonly("index", &ov::Output<VT>::get_index);

View File

@@ -0,0 +1,62 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
#include <openvino/core/any.hpp>
ov::Any py_object_to_any(const pybind11::object& py_obj) {
if (pybind11::isinstance<pybind11::str>(py_obj)) {
return py_obj.cast<std::string>();
} else if (pybind11::isinstance<pybind11::bool_>(py_obj)) {
return py_obj.cast<bool>();
} else if (pybind11::isinstance<pybind11::float_>(py_obj)) {
return py_obj.cast<double>();
} else if (pybind11::isinstance<pybind11::int_>(py_obj)) {
return py_obj.cast<int64_t>();
} else if (pybind11::isinstance<pybind11::list>(py_obj)) {
auto _list = py_obj.cast<pybind11::list>();
enum class PY_TYPE : int {
UNKNOWN = 0,
STR,
INT,
FLOAT,
BOOL
};
PY_TYPE detected_type = PY_TYPE::UNKNOWN;
for (const auto &it: _list) {
auto check_type = [&](PY_TYPE type) {
if (detected_type == PY_TYPE::UNKNOWN || detected_type == type) {
detected_type = type;
return;
}
OPENVINO_ASSERT("Incorrect attribute. Mixed types in the list are not allowed.");
};
if (pybind11::isinstance<pybind11::str>(it)) {
check_type(PY_TYPE::STR);
} else if (pybind11::isinstance<pybind11::int_>(it)) {
check_type(PY_TYPE::INT);
} else if (pybind11::isinstance<pybind11::float_>(it)) {
check_type(PY_TYPE::FLOAT);
} else if (pybind11::isinstance<pybind11::bool_>(it)) {
check_type(PY_TYPE::BOOL);
}
}
switch (detected_type) {
case PY_TYPE::STR:
return _list.cast<std::vector<std::string>>();
case PY_TYPE::FLOAT:
return _list.cast<std::vector<double>>();
case PY_TYPE::INT:
return _list.cast<std::vector<int64_t>>();
case PY_TYPE::BOOL:
return _list.cast<std::vector<bool>>();
default:
OPENVINO_ASSERT(false, "Unsupported attribute type.");
}
}
OPENVINO_ASSERT(false, "Unsupported attribute type.");
}

View File

@@ -127,5 +127,4 @@ xfail_issue_63137 = xfail_test(reason="Unsupported operations: OptionalHasElemen
xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support")
xfail_issue_68212 = xfail_test(reason="Unsupported reading model with bytes streams")
xfail_issue_77668 = xfail_test(reason="Accuracy issue related to Gather-8.")
xfail_issue_78843 = xfail_test(reason="Missing reference output files for ssd mobilenet models")

View File

@@ -91,6 +91,42 @@ def create_onnx_model_with_custom_attributes():
return make_model(graph, producer_name="ngraph ONNX Importer")
def create_onnx_model_for_op_extension():
    """Build an ONNX model whose nodes cover the attribute kinds exercised
    by the OpExtension tests: double, vector<size_t>, enum-like string,
    no-attribute, int64_t and type attributes."""
    # Elu: carries a double attribute (alpha).
    node_elu = onnx.helper.make_node("Elu", alpha=1.0, inputs=["x"], outputs=["elu"])
    # AveragePool: vector<size_t> (kernel_shape, strides) and string (auto_pad) attributes.
    node_pool = onnx.helper.make_node("AveragePool",
                                      kernel_shape=[2, 2],
                                      auto_pad="SAME_LOWER",
                                      strides=[2, 2],
                                      inputs=["elu"],
                                      outputs=["avg_pool"])
    # Floor: no attributes at all.
    node_floor = onnx.helper.make_node("Floor", inputs=["avg_pool"], outputs=["floor"])
    # Concat: int64_t attribute (axis).
    node_concat = onnx.helper.make_node("Concat", axis=0,
                                        inputs=["floor", "avg_pool"], outputs=["concat"])
    half_tensor = onnx.helper.make_tensor("const_tensor",
                                          onnx.TensorProto.FLOAT,
                                          [1],
                                          [0.5])
    node_const = onnx.helper.make_node("Constant", [], outputs=["const_node"],
                                       value=half_tensor, name="const_node")
    # Mul: consumed by tests that map it to Multiply.
    node_mul = onnx.helper.make_node("Mul", inputs=["concat", "const_node"], outputs=["mul"])
    # Cast: element::type (class) attribute (to).
    node_cast = onnx.helper.make_node("Cast", to=int(onnx.TensorProto.FLOAT),
                                      inputs=["mul"], outputs=["out"])
    graph_inputs = [
        make_tensor_value_info("x", onnx.TensorProto.FLOAT, (1, 3, 32, 32)),
    ]
    graph_outputs = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (3, 3, 32, 32))]
    graph = make_graph([node_const, node_elu, node_pool, node_floor, node_concat,
                        node_mul, node_cast], "graph",
                       graph_inputs, graph_outputs)
    return make_model(graph, producer_name="ngraph ONNX Importer")
def run_function(function, *inputs, expected):
runtime = get_runtime()
computation = runtime.computation(function)
@@ -106,6 +142,7 @@ fem = FrontEndManager()
onnx_model_filename = "model.onnx"
onnx_model_with_custom_attributes_filename = "model_custom_attributes.onnx"
onnx_model_with_subgraphs_filename = "model_subgraphs.onnx"
onnx_model_for_op_extension_test = "model_op_extension.onnx"
ONNX_FRONTEND_NAME = "onnx"
@@ -114,12 +151,14 @@ def setup_module():
onnx.save_model(create_onnx_model_with_custom_attributes(),
onnx_model_with_custom_attributes_filename)
onnx.save_model(create_onnx_model_with_subgraphs(), onnx_model_with_subgraphs_filename)
onnx.save_model(create_onnx_model_for_op_extension(), onnx_model_for_op_extension_test)
def teardown_module():
    """Delete every ONNX model file created by setup_module."""
    generated_models = (
        onnx_model_filename,
        onnx_model_with_custom_attributes_filename,
        onnx_model_with_subgraphs_filename,
        onnx_model_for_op_extension_test,
    )
    for model_path in generated_models:
        os.remove(model_path)
def skip_if_onnx_frontend_is_disabled():
@@ -425,7 +464,8 @@ def test_onnx_conversion_extension():
assert invoked
def test_op_extension_via_onnx_extension():
@pytest.mark.parametrize("opset_prefix", ["opset1.", "opset1::", "opset8.", "opset8::", ""])
def test_op_extension_specify_opset(opset_prefix):
skip_if_onnx_frontend_is_disabled()
# use specific (openvino.frontend.onnx) import here
@@ -433,47 +473,123 @@ def test_op_extension_via_onnx_extension():
from openvino.runtime import Core
ie = Core()
ie.add_extension(OpExtension("FW_OV_OP"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_1"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_2", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"}))
ie.add_extension(OpExtension("OV_OP", "FW_OP_3", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"},
{"ov_attribute_str": "string",
"ov_attribute_int": 4,
"ov_attribute_bool": True,
"ov_attribute_float": 4.,
"ov_attribute_vec_string": ["str1", "str2", "str3"],
"ov_attribute_vec_int": [1, 2, 3, 4, 5, 6, 7],
"ov_attribute_vec_bool": [True, False, True],
"ov_attribute_vec_float": [1., 2., 3., 4., 5., 6., 7.]}))
model = ie.read_model(onnx_model_filename)
# check the model is valid
model = ie.read_model(onnx_model_for_op_extension_test)
assert model
# add extensions
fw_operation = "Floor"
ov_operation = opset_prefix + fw_operation
ie.add_extension(OpExtension(ov_operation, fw_operation))
model = ie.read_model(onnx_model_for_op_extension_test)
assert model
def test_op_extension_via_frontend_extension():
@pytest.mark.parametrize("opset_prefix", ["opset1..", "opset1:::", "opset.", "opset::", "wrong"])
def test_op_extension_specify_wrong_opset(opset_prefix):
skip_if_onnx_frontend_is_disabled()
# use specific (openvino.frontend) import here
# use specific (openvino.frontend.onnx) import here
from openvino.frontend.onnx import OpExtension
from openvino.runtime import Core
ie = Core()
# add extensions
fw_operation = "Floor"
ov_operation = opset_prefix + fw_operation
ie.add_extension(OpExtension(ov_operation, fw_operation))
with pytest.raises(Exception):
ie.read_model(onnx_model_for_op_extension_test)
def test_op_extension_via_onnx_extension_set_attrs_values():
    # Checks that OpExtension objects created via the ONNX-frontend-specific
    # import accept hard-coded attribute values (the trailing dict argument)
    # and that the model containing all of the targeted ops still reads back.
    skip_if_onnx_frontend_is_disabled()
    # use specific (openvino.frontend.onnx) import here
    from openvino.frontend.onnx import OpExtension
    from openvino.runtime import Core
    ie = Core()
    # check the model is valid before any extensions are registered
    model = ie.read_model(onnx_model_for_op_extension_test)
    assert model
    # add extensions: empty attribute map ({}) plus fixed attribute values,
    # covering renamed ops (Mul->Multiply, Cast->Convert, AveragePool->AvgPool)
    # and same-named ops (Elu, Floor, Concat)
    ie.add_extension(OpExtension("Multiply", "Mul", {}, {"auto_broadcast": "numpy"}))
    ie.add_extension(OpExtension("Elu", {}, {"alpha": 1.}))
    ie.add_extension(OpExtension("Floor"))
    ie.add_extension(OpExtension("Concat", {}, {"axis": 0}))
    ie.add_extension(OpExtension("Convert", "Cast", {}, {"destination_type": "i64"}))
    ie.add_extension(OpExtension("AvgPool", "AveragePool", {}, {"kernel": [2, 2],
                                                                "strides": [2, 2],
                                                                "pads_begin": [0, 0],
                                                                "pads_end": [1, 1],
                                                                "exclude-pad": True,
                                                                "auto_pad": "same_upper",
                                                                "rounding_type": "floor"}))
    # reading again must succeed with the extensions applied
    model = ie.read_model(onnx_model_for_op_extension_test)
    assert model
def test_op_extension_via_frontend_extension_set_attrs_values():
skip_if_onnx_frontend_is_disabled()
# use common (openvino.frontend) import here
from openvino.frontend import OpExtension
from openvino.runtime import Core
ie = Core()
ie.add_extension(OpExtension("FW_OV_OP"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_1"))
ie.add_extension(OpExtension("OV_OP", "FW_OP_2", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"}))
ie.add_extension(OpExtension("OV_OP", "FW_OP_3", {"ov_attribute_1": "fw_attribute_1",
"ov_attribute_2": "fw_attribute_2"},
{"ov_attribute_str": "string",
"ov_attribute_int": 4,
"ov_attribute_bool": True,
"ov_attribute_float": 4.,
"ov_attribute_vec_string": ["str1", "str2", "str3"],
"ov_attribute_vec_int": [1, 2, 3, 4, 5, 6, 7],
"ov_attribute_vec_bool": [True, False, True],
"ov_attribute_vec_float": [1., 2., 3., 4., 5., 6., 7.]}))
model = ie.read_model(onnx_model_filename)
# check the model is valid
model = ie.read_model(onnx_model_for_op_extension_test)
assert model
# add extensions
ie.add_extension(OpExtension("Multiply", "Mul", {}, {"auto_broadcast": "numpy"}))
ie.add_extension(OpExtension("Elu", "Elu", {}, {"alpha": 1.}))
ie.add_extension(OpExtension("Floor"))
ie.add_extension(OpExtension("Concat", {}, {"axis": 0}))
ie.add_extension(OpExtension("Convert", "Cast", {}, {"destination_type": "i64"}))
ie.add_extension(OpExtension("AvgPool", "AveragePool", {}, {"kernel": [2, 2],
"strides": [2, 2],
"pads_begin": [0, 0],
"pads_end": [1, 1],
"exclude-pad": True,
"auto_pad": "same_upper",
"rounding_type": "floor"}))
model = ie.read_model(onnx_model_for_op_extension_test)
assert model
def test_op_extension_via_frontend_extension_map_attributes():
    # Checks the attribute-*mapping* form of OpExtension (fw-attr-name ->
    # ov-attr-name dict), optionally combined with fixed attribute values,
    # using the frontend-agnostic openvino.frontend import.
    skip_if_onnx_frontend_is_disabled()
    # use common (openvino.frontend) import here
    from openvino.frontend import OpExtension
    from openvino.runtime import Core
    ie = Core()
    # check the model is valid before any extensions are registered
    model = ie.read_model(onnx_model_for_op_extension_test)
    assert model
    # add extensions: map framework attribute names to OV attribute names;
    # AvgPool additionally fixes the attributes ONNX does not provide
    ie.add_extension(OpExtension("Elu", "Elu", {"alpha": "alpha"}))
    ie.add_extension(OpExtension("Concat", {"axis": "axis"}, {"axis": 0}))
    ie.add_extension(OpExtension("AvgPool", "AveragePool", {"kernel": "kernel_shape",
                                                            "strides": "strides",
                                                            "auto_pad": "auto_pad"},
                                 {"pads_begin": [0, 0],
                                  "pads_end": [1, 1],
                                  "exclude-pad": True,
                                  "rounding_type": "floor"}))
    # reading again must succeed with the mapped extensions applied
    model = ie.read_model(onnx_model_for_op_extension_test)
    assert model

View File

@@ -58,7 +58,7 @@ def test_get_profiling_info(device):
prof_info = request.get_profiling_info()
soft_max_node = next(node for node in prof_info if node.node_name == "fc_out")
assert soft_max_node.node_type == "Softmax"
assert soft_max_node.status == ProfilingInfo.Status.OPTIMIZED_OUT
assert soft_max_node.status == ProfilingInfo.Status.EXECUTED
assert isinstance(soft_max_node.real_time, datetime.timedelta)
assert isinstance(soft_max_node.cpu_time, datetime.timedelta)
assert isinstance(soft_max_node.exec_type, str)

View File

@@ -239,7 +239,10 @@ def test_repr_dynamic_shape():
model = parameter_a + parameter_b
function = Model(model, [parameter_a, parameter_b], "simple_dyn_shapes_graph")
assert repr(function) == "<Model: 'simple_dyn_shapes_graph' ({?,2})>"
assert repr(function) == "<Model: 'simple_dyn_shapes_graph'\ninputs[" + \
"\n<ConstOutput: names[A] shape{?,2} type: f32>," +\
"\n<ConstOutput: names[B] shape{?,2} type: f32>\n]" + \
"\noutputs[\n<ConstOutput: names[] shape{?,2} type: f32>\n]>"
ops = function.get_ordered_ops()
for op in ops:

View File

@@ -22,8 +22,7 @@ from tests import (
xfail_issue_48190,
xfail_issue_58676,
xfail_issue_78843,
xfail_issue_onnx_models_140,
xfail_issue_77668)
xfail_issue_onnx_models_140)
MODELS_ROOT_DIR = tests.MODEL_ZOO_DIR
@@ -180,8 +179,6 @@ if len(zoo_models) > 0:
(xfail_issue_48190, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"),
(xfail_issue_onnx_models_140, "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_ResNet101_DUC_HDC_cpu"),
(xfail_issue_78843, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"),
(xfail_issue_77668, "test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"),
(xfail_issue_77668, "test_onnx_model_zoo_vision_object_detection_segmentation_mask_rcnn_model_MaskRCNN_10_mask_rcnn_R_50_FPN_1x_cpu"),
# Model MSFT
(xfail_issue_37973, "test_MSFT_opset7_tf_inception_v2_model_cpu"),
@@ -197,8 +194,6 @@ if len(zoo_models) > 0:
(xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"),
(xfail_issue_78843, "test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"),
(xfail_issue_77668, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"),
(xfail_issue_77668, "test_MSFT_opset10_mask_rcnn_mask_rcnn_R_50_FPN_1x_cpu"),
]
for test_case in import_xfail_list + execution_xfail_list:
xfail, test_name = test_case

View File

@@ -13,8 +13,10 @@ import openvino.runtime as ov
def get_test_function():
param = ov.opset8.parameter(PartialShape([1, 3, 22, 22]), name="parameter")
param.get_output_tensor(0).set_names({"parameter"})
relu = ov.opset8.relu(param)
res = ov.opset8.result(relu, name="result")
res.get_output_tensor(0).set_names({"result"})
return Model([res], [param], "test")

View File

@@ -136,6 +136,5 @@ xfail_issue_63136 = xfail_test(reason="Unsupported operation: CastLike")
xfail_issue_63137 = xfail_test(reason="Unsupported operations: OptionalHasElement, OptionalGetElement")
xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support")
xfail_issue_77668 = xfail_test(reason="Accuracy issue related to Gather-8.")
xfail_issue_78843 = xfail_test(reason="Missing reference output files for ssd mobilenet models")
xfail_issue_78741 = xfail_test(reason="Cannot get dims for non static shape")

View File

@@ -23,8 +23,7 @@ from tests_compatibility import (
xfail_issue_48190,
xfail_issue_58676,
xfail_issue_78843,
xfail_issue_onnx_models_140,
xfail_issue_77668)
xfail_issue_onnx_models_140)
MODELS_ROOT_DIR = tests_compatibility.MODEL_ZOO_DIR
@@ -168,7 +167,6 @@ if len(zoo_models) > 0:
(xfail_issue_48190, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"),
(xfail_issue_onnx_models_140, "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_ResNet101_DUC_HDC_cpu"),
(xfail_issue_78843, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"),
(xfail_issue_77668, "test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"),
# Model MSFT
(xfail_issue_37973, "test_MSFT_opset7_tf_inception_v2_model_cpu"),
@@ -185,8 +183,6 @@ if len(zoo_models) > 0:
(xfail_issue_39669, "test_MSFT_opset9_cgan_cgan_cpu"),
(xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"),
(xfail_issue_78843, "test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"),
(xfail_issue_77668, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"),
]
for test_case in import_xfail_list + execution_xfail_list:
xfail, test_name = test_case

View File

@@ -92,16 +92,16 @@ case_wrapper<C, T> make_case_wrapper(C && val) {
}
template<template<typename...> class Fn, typename Ctx, typename T, typename Case>
bool match(Ctx && ctx, T && val, Case && cs) {
bool match(Ctx&& ctx, T&& val, Case && cs) {
const bool is_matched = val == cs.value;
if (is_matched)
Fn<typename Case::type>()(std::forward<Ctx>(ctx));
Fn<typename Case::type>()(ctx);
return is_matched;
}
template<template<typename...> class Fn, typename Ctx, typename T, typename Case, typename ...Cases>
bool match(Ctx && ctx, T && val, Case && cs, Cases&&... cases) {
if (match<Fn>(std::forward<Ctx>(ctx), std::forward<T>(val), std::forward<Case>(cs)))
bool match(Ctx&& ctx, T&& val, Case && cs, Cases&&... cases) {
if (match<Fn>(ctx, val, std::forward<Case>(cs)))
return true;
return match<Fn>(std::forward<Ctx>(ctx), std::forward<T>(val), std::forward<Cases>(cases)...);
}

View File

@@ -317,7 +317,7 @@ std::shared_ptr<Node> foldConvert(const Output<Node>& node, const element::Type
template <typename T, typename... Args>
std::shared_ptr<Node> fold_reshape(Args&&... args) {
std::shared_ptr<Node> node = std::make_shared<T>(std::forward<Args>(args)...);
std::shared_ptr<Node> node = std::make_shared<T>(args...);
if (node->get_output_size() == 1) {
// issue #57985: remove fold_reshape & reuse nGraph implementation
const auto values = ov::as_type_ptr<opset1::Constant>(node->input_value(1).get_node_shared_ptr())->template cast_vector<int64_t>();
@@ -325,7 +325,6 @@ std::shared_ptr<Node> fold_reshape(Args&&... args) {
return fold<opset1::Reshape>(std::forward<Args>(args)...);
}
OutputVector folded;
if (ov::is_type<opset1::Constant>(node->input_value(0).get_node_shared_ptr()) &&
ov::is_type<opset1::Constant>(node->input_value(1).get_node_shared_ptr())) {
return std::make_shared<opset1::Constant>(

View File

@@ -34,8 +34,8 @@ FakeQuantizeTransformation::FakeQuantizeTransformation(const Params& params) : L
}
bool FakeQuantizeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
std::shared_ptr<opset1::FakeQuantize> layer = std::dynamic_pointer_cast<opset1::FakeQuantize>(m.get_match_root());
if (!QuantizationDetails::outputLayoutIsSupported(layer)) {
const auto layer = ov::as_type_ptr<opset1::FakeQuantize>(m.get_match_root());
if (!layer || !QuantizationDetails::outputLayoutIsSupported(layer)) {
return false;
}

View File

@@ -273,7 +273,7 @@ std::tuple<std::shared_ptr<Node>, std::shared_ptr<Node>> decomposeFakeQuantize(
bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher& m) {
auto layer = ov::as_type_ptr<opset1::FakeQuantize>(m.get_match_root());
if (!NetworkHelper::isQuantizeSupported(layer)) {
if (!layer || !NetworkHelper::isQuantizeSupported(layer)) {
return false;
}

View File

@@ -31,7 +31,10 @@ FuseFakeQuantizeTransformation::FuseFakeQuantizeTransformation(const Params& par
}
bool FuseFakeQuantizeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) {
std::shared_ptr<opset1::FakeQuantize> fakeQuantize = ov::as_type_ptr<ngraph::opset1::FakeQuantize>(m.get_match_root());
auto fakeQuantize = ov::as_type_ptr<ngraph::opset1::FakeQuantize>(m.get_match_root());
if (!fakeQuantize)
return false;
do {
fakeQuantize = handle(context, fakeQuantize);
} while (fakeQuantize != nullptr);

View File

@@ -104,12 +104,12 @@ void make_matcher_type_relaxed(ngraph::pass::GraphRewrite* transformation) {
ngraph::graph_rewrite_callback callback = [](ngraph::pattern::Matcher& m) {
auto l_node = std::dynamic_pointer_cast<BaseOp>(m.get_match_root());
if (!l_node) {
THROW_TRANSFORMATION_EXCEPTION << "unexpected operation type for type relaxed conversion";
}
if (std::dynamic_pointer_cast<ngraph::op::TypeRelaxedBase>(l_node)) {
return false;
}
if (!l_node) {
THROW_IE_LPT_EXCEPTION(*l_node) << "unexpected operation type";
}
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "LowPrecisionTypeRelaxedMatcher");

View File

@@ -116,7 +116,7 @@ std::shared_ptr<opset1::Constant> NetworkHelper::foldDequantizationConstant(
const auto result = ov::as_type_ptr<opset1::Constant>(outputs[outIdx].get_node_shared_ptr());
if (result == nullptr) {
THROW_IE_LPT_EXCEPTION(*result) << "result of constant folding is not constant";
THROW_TRANSFORMATION_EXCEPTION << "result of constant folding is not constant";
}
return result;
@@ -441,9 +441,9 @@ std::vector<size_t> NetworkHelper::updateReshapeValues(
}
std::shared_ptr<ngraph::opset1::Multiply> NetworkHelper::optimizeMultipliesAfter(std::shared_ptr<Node> node) {
std::shared_ptr<ngraph::opset1::Multiply> multiply = ov::as_type_ptr<opset1::Multiply>(std::move(node));
const auto multiply = ov::as_type_ptr<opset1::Multiply>(std::move(node));
if (!multiply) {
THROW_IE_LPT_EXCEPTION(*multiply) << "Unexpected operation type";
THROW_TRANSFORMATION_EXCEPTION << "Unexpected operation type in the optimizeMultipliesAfter method";
}
if (multiply->output(0).get_target_inputs().size() == 1) {
@@ -586,7 +586,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
std::shared_ptr<ngraph::Node> NetworkHelper::separateInStandaloneBranch(std::shared_ptr<ngraph::Node> node,
const std::vector<ngraph::element::Type>& defaultPrecisions) {
FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(node, defaultPrecisions);
if (dequantization.isShared()) {
if (dequantization.isShared() && !dequantization.empty()) {
Output<Node> parent = dequantization.data;
if (dequantization.convert != nullptr) {
auto convert = dequantization.convert->clone_with_new_inputs({ parent });
@@ -1708,6 +1708,10 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationBefor
dequantization.convert->get_output_element_type(0) :
deqPrecision;
parent = std::make_shared<opset1::Convert>(parent, convertOutputPrecision);
if (dequantization.convert == nullptr) {
THROW_TRANSFORMATION_EXCEPTION << "dequantization convert is absent";
}
parent->set_friendly_name(dequantization.convert->get_friendly_name() + "_" + std::to_string(i + 1));
ngraph::copy_runtime_info(dequantization.convert, parent);
}
@@ -1761,6 +1765,9 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationBefor
}
auto newOperation = operation->clone_with_new_inputs(ngraph::OutputVector(newNodes.begin(), newNodes.end()));
NetworkHelper::copyInfo(operation, newOperation);
if (dequantization.multiply == nullptr) {
THROW_TRANSFORMATION_EXCEPTION << "dequantization operations must end with multiply";
}
replace_node(dequantization.multiply, newOperation);
if (const auto op = std::dynamic_pointer_cast<ngraph::op::TypeRelaxedBase>(newOperation)) {

View File

@@ -112,7 +112,7 @@ ngraph::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroug
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher & m) -> bool {
const auto& opsMap = m.get_pattern_value_map();
auto reshape = opsMap.find(reshapeWrapper)->second.get_node()->shared_from_this();
auto reshape = opsMap.at(reshapeWrapper).get_node_shared_ptr();
auto child = reshape->get_output_target_inputs(0).begin()->get_node();
if (ov::is_type<opset1::GroupConvolution>(child)) {

View File

@@ -29,8 +29,8 @@ class Mask : public std::vector<std::set<uint64_t>>,
public std::enable_shared_from_this<Mask> {
public:
static const ::ov::DiscreteTypeInfo& get_type_info_static() {
static const ::ov::DiscreteTypeInfo type_info{"Mask", 0, "0"};
return type_info;
static const ::ov::DiscreteTypeInfo type_info_static{"Mask", 0, "0"};
return type_info_static;
}
using Ptr = std::shared_ptr<Mask>;

View File

@@ -64,17 +64,18 @@ public:
InitMatMulMask() {
auto a = pattern::any_input();
auto b = pattern::any_input();
auto matmul = pattern::wrap_type<opset6::MatMul>({a, b});
auto matmul_pattern = pattern::wrap_type<opset6::MatMul>({a, b});
ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
const auto & pattern_map = m.get_pattern_value_map();
const auto & m_output = pattern_map.at(matmul);
const auto & matmul = std::dynamic_pointer_cast<opset6::MatMul>(pattern_map.at(matmul_pattern).get_node_shared_ptr());
if (!matmul) return false;
// Assume constant always in the first input port.
// Initializing weights mask:
// 1. Looking for Const node with weights
NodeVector weights_calculation_nodes;
auto cur_node = m_output.get_node()->get_input_node_shared_ptr(1);
auto cur_node = matmul->get_input_node_shared_ptr(1);
while (!ngraph::is_type<opset6::Constant>(cur_node) && cur_node->inputs().size()) {
weights_calculation_nodes.push_back(cur_node);
@@ -82,17 +83,16 @@ public:
}
if (!ngraph::is_type<opset6::Constant>(cur_node)) {
NGRAPH_DEBUG << "Can't find Constant weights for MatMul: " <<
m_output.get_node()->get_friendly_name() << std::endl;
matmul->get_friendly_name() << std::endl;
return false;
}
// 2. Get constant rank to set mask on last dimension
const auto const_op = std::dynamic_pointer_cast<opset6::Constant>(cur_node);
const auto shape_rank = const_op->get_shape().size();
const auto matmul = std::dynamic_pointer_cast<opset6::MatMul>(m_output.get_node_shared_ptr());
const auto shift = (matmul->get_transpose_b())? 2 : 1;
if (shape_rank < shift) {
NGRAPH_DEBUG << "Can't init mask for MatMul: " <<
m_output.get_node()->get_friendly_name() << std::endl;
matmul->get_friendly_name() << std::endl;
return false;
}
const size_t outer_dim = shape_rank - shift;
@@ -101,7 +101,7 @@ public:
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(matmul, "MatMulInitMask");
auto m = std::make_shared<ngraph::pattern::Matcher>(matmul_pattern, "MatMulInitMask");
register_matcher(m, callback);
}
};

View File

@@ -511,6 +511,7 @@ public:
m_output_low.get_node_shared_ptr(),
m_output_high.get_node_shared_ptr()};
auto fq_node = std::dynamic_pointer_cast<op::FakeQuantize>(m_output.get_node_shared_ptr());
if (!fq_node) return false;
size_t idx = 0;
if (fq_node->get_auto_broadcast() != ngraph::op::AutoBroadcastType::NONE) {
for (auto node : fq_params_nodes) {

View File

@@ -98,8 +98,7 @@ ngraph::pass::AddFakeQuantizeFusion::AddFakeQuantizeFusion() {
auto fq_users = fq->get_users();
// Concat LPT transformation supports per tensor quantization only
bool fq_user_is_concat = std::any_of(fq_users.begin(), fq_users.end(),
[] (const Output<Node>& node) -> bool {
auto node_ptr = node.get_node();
[] (const std::shared_ptr<Node> node_ptr) -> bool {
return is_type<opset5::Concat>(node_ptr);
});
if (fq_user_is_concat)

View File

@@ -49,6 +49,7 @@ class PropagateNMSPath: public pass::MatcherPass {
opset8::Reshape,
op::util::BroadcastBase,
opset8::StridedSlice,
opset8::Slice,
opset8::VariadicSplit,
op::util::GatherBase,
opset8::Concat,

View File

@@ -16,8 +16,7 @@ NGRAPH_RTTI_DEFINITION(pass::MatMulMultiplyFusion, "MatMulMultiplyFusion", 0);
static std::shared_ptr<Node> fuse_const_to_weights(const std::shared_ptr<Node>& matmul,
const Output<Node>& weights,
std::shared_ptr<opset8::Constant> mul_const,
const op::AutoBroadcastSpec& autob) {
std::shared_ptr<opset8::Constant> mul_const) {
auto const_shape = mul_const->get_shape();
auto const_rank = static_cast<int64_t>(const_shape.size());
const auto& weights_shape = weights.get_partial_shape();
@@ -149,15 +148,13 @@ pass::MatMulMultiplyFusion::MatMulMultiplyFusion() {
matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
const auto& weights = pattern_map.at(weights_pattern);
auto mul = std::dynamic_pointer_cast<opset8::Multiply>(pattern_map.at(mul_pattern).get_node_shared_ptr());
if (!mul)
return false;
auto mul = pattern_map.at(mul_pattern).get_node_shared_ptr();
auto mul_const = std::dynamic_pointer_cast<opset8::Constant>(pattern_map.at(mul_const_pattern).get_node_shared_ptr());
if (!mul_const)
return false;
auto matmul = pattern_map.at(matmul_pattern).get_node_shared_ptr();
auto new_weights = fuse_const_to_weights(matmul, weights, mul_const, mul->get_autob());
auto new_weights = fuse_const_to_weights(matmul, weights, mul_const);
if (!new_weights)
return false;

View File

@@ -88,8 +88,7 @@ ngraph::pass::MulFakeQuantizeFusion::MulFakeQuantizeFusion() {
auto fq_users = fq->get_users();
// Concat LPT transformation supports per tensor quantization only
bool fq_user_is_concat = std::any_of(fq_users.begin(), fq_users.end(),
[] (const Output<Node>& node) -> bool {
auto node_ptr = node.get_node();
[] (const std::shared_ptr<Node> node_ptr) -> bool {
return is_type<opset5::Concat>(node_ptr);
});
if (fq_user_is_concat)

View File

@@ -37,9 +37,8 @@ bool has_valid_pattern(const ov::Output<ov::Node>& node_out) {
// Upper bound of the value
auto ub = ngraph::evaluate_upper_bound(node_out);
if (!ub) return false;
const auto ub_const_node = std::make_shared<ngraph::opset8::Constant>(ub);
if (!ub_const_node) return false;
const auto ub_const_node = std::make_shared<ngraph::opset8::Constant>(ub);
const auto & ub_values = ub_const_node->cast_vector<int64_t>();
if (lb_values.size() != ub_values.size()) return false;

View File

@@ -153,10 +153,12 @@ ngraph::pass::TransposeReshapeEliminationForMatmul::TransposeReshapeEliminationF
// check transpose order before and after matmul
auto transpose_before = std::dynamic_pointer_cast<opset1::Transpose>(pattern_value_map.at(transpose_before_pattern).get_node_shared_ptr());
auto transpose_after = std::dynamic_pointer_cast<opset1::Transpose>(pattern_value_map.at(transpose_after_pattern).get_node_shared_ptr());
if (!transpose_before || !transpose_after) return false;
auto transpose_before_constant = std::dynamic_pointer_cast<ngraph::opset1::Constant>(transpose_before->get_input_node_shared_ptr(1));
auto transpose_after_constant = std::dynamic_pointer_cast<ngraph::opset1::Constant>(transpose_after->get_input_node_shared_ptr(1));
if (!transpose_before || !transpose_after || !transpose_before_constant || !transpose_after_constant)
return false;
if (!transpose_before_constant || !transpose_after_constant) return false;
auto transpose_before_order = transpose_before_constant->cast_vector<int64_t>();
auto transpose_after_order = transpose_after_constant->cast_vector<int64_t>();
// need to check that input shape is correctly contracted and output shape is correctly unpacked using transposes

View File

@@ -210,10 +210,11 @@ ngraph::pass::TransposeFQReduction::TransposeFQReduction() {
auto &pattern_to_output = m.get_pattern_value_map();
auto transpose = pattern_to_output.at(transpose_label).get_node_shared_ptr();
if (!transpose) return false;
auto transpose_order = std::dynamic_pointer_cast<opset6::Constant>(transpose->get_input_node_shared_ptr(1));
auto fq = pattern_to_output.at(fq_label).get_node_shared_ptr();
if (!transpose || !transpose_order || !fq)
return false;
if (!transpose_order || !fq) return false;
ngraph::NodeVector new_ops;

View File

@@ -152,6 +152,8 @@ namespace {
gnn_sequence->get_activations_beta(),
gnn_sequence->get_clip(),
gnn_sequence->get_linear_before_reset());
} else {
return false;
}
ngraph::ParameterVector body_params;

View File

@@ -125,11 +125,11 @@ ngraph::pass::SqueezeStridedSlice::SqueezeStridedSlice() {
matcher_pass_callback callback = [](pattern::Matcher& m) -> bool {
auto slice = std::dynamic_pointer_cast<ngraph::opset5::StridedSlice>(m.get_match_root());
if (!slice) return false;
auto squeeze = slice->get_input_node_shared_ptr(0);
const auto& const_axes =
std::dynamic_pointer_cast<ngraph::opset5::Constant>(squeeze->get_input_node_shared_ptr(1));
if (!const_axes || !slice)
return false;
if (!const_axes) return false;
auto begin = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(1).get_node_shared_ptr());
auto end = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(2).get_node_shared_ptr());

View File

@@ -13,40 +13,6 @@
namespace ov {
namespace util {
template <class T>
struct ValueTyped {
template <class U>
static auto test(U*) -> decltype(std::declval<typename U::value_type&>(), std::true_type()) {
return {};
}
template <typename>
static auto test(...) -> std::false_type {
return {};
}
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <typename, typename>
struct Read;
template <typename T, typename std::enable_if<ValueTyped<T>::value, bool>::type = true>
inline typename T::value_type from_string(const std::string& val, const T&) {
std::stringstream ss(val);
typename T::value_type value;
Read<typename T::value_type, void>{}(ss, value);
return value;
}
template <typename>
struct Write;
template <typename T>
inline std::string to_string(const T& value) {
std::stringstream ss;
Write<T>{}(ss, value);
return ss.str();
}
template <typename T>
std::string join(const T& v, const std::string& sep = ", ") {
std::ostringstream ss;

View File

@@ -6,6 +6,7 @@
#include <functional>
#include <memory>
#include <ngraph/log.hpp>
#include <set>
#include "ngraph/pass/pass.hpp"
@@ -21,6 +22,69 @@ namespace pass {
using ov::pass::BackwardGraphRewrite;
using ov::pass::GraphRewrite;
using ov::pass::MatcherPass;
using ov::pass::RecurrentGraphRewrite;
// Deprecated compatibility shim: repeatedly applies registered recurrent
// matchers to every op of a model until no matcher fires or m_num_iters
// passes have run. Kept only for legacy nGraph passes; new code should use
// MatcherPass or FunctionPass (see the deprecation message).
class NGRAPH_DEPRECATED("Use MatcherPass or FunctionPass instead.") NGRAPH_API RecurrentGraphRewrite
    : public FunctionPass {
public:
    RecurrentGraphRewrite(size_t num_iters = 10) : ModelPass(), m_num_iters(num_iters) {}

    // Wraps a RecurrentMatcher + callback into a MatcherPass and stores it.
    // The callback is invoked only when the matcher matches output(0) of the
    // visited node.
    void add_matcher(const std::shared_ptr<pattern::RecurrentMatcher>& m,
                     const ov::recurrent_graph_rewrite_callback& callback,
                     const PassPropertyMask& property) {
        NGRAPH_SUPPRESS_DEPRECATED_START
        m_matchers.push_back(std::make_shared<MatcherPass>(
            "Recurrent matcher",
            nullptr,
            [m, callback](const std::shared_ptr<Node>& node) {
                NGRAPH_DEBUG << "Running recurrent matcher on " << node;
                if (m->match(node->output(0))) {
                    NGRAPH_DEBUG << "Recurrent matcher matched " << m.get();
                    return callback(*m.get());
                }
                return false;
            },
            property));
        NGRAPH_SUPPRESS_DEPRECATED_END
    }

    // TODO: This interface may deprecate after all passes are refactored.
    // Convenience overload: registers with REQUIRE_STATIC_SHAPE by default.
    void add_matcher(const std::shared_ptr<pattern::RecurrentMatcher>& m,
                     const ov::recurrent_graph_rewrite_callback& callback) {
        NGRAPH_SUPPRESS_DEPRECATED_START
        // TODO: before deprecate this function, by default expect the
        // callback require static shape.
        add_matcher(m, callback, {PassProperty::REQUIRE_STATIC_SHAPE});
        NGRAPH_SUPPRESS_DEPRECATED_END
    }

    // Runs all matchers over all ops; restarts the scan after every
    // successful apply, up to m_num_iters iterations. Returns whether any
    // matcher modified the model.
    bool run_on_model(const std::shared_ptr<ov::Model>& m) override {
        NGRAPH_SUPPRESS_DEPRECATED_START
        bool changed = false;
        size_t i = 0;

        // Returns true (and stops) at the first matcher that applies, so the
        // outer loop re-walks the (possibly modified) op list from scratch.
        auto run_matchers = [&]() -> bool {
            for (const auto& node : m->get_ops()) {
                for (auto& m_pass : m_matchers) {
                    if (m_pass->apply(node)) {
                        return true;
                    }
                }
            }
            return false;
        };

        do {
            changed = run_matchers();
            i++;
        } while (changed && i < m_num_iters);
        return changed;
        NGRAPH_SUPPRESS_DEPRECATED_END
    }

private:
    size_t m_num_iters;  // max full passes over the model per run_on_model call

    std::vector<std::shared_ptr<ov::pass::MatcherPass>> m_matchers;
};
} // namespace pass
} // namespace ngraph

View File

@@ -25,8 +25,6 @@ class Manager;
namespace ngraph {
namespace pass {
using FunctionPass = ov::pass::ModelPass;
using ov::pass::FusionType;
using ov::pass::FusionTypeMask;
using ov::pass::Manager;
using ov::pass::PassBase;
using ov::pass::PassProperty;
@@ -40,5 +38,20 @@ public:
~NodePass() override;
virtual bool run_on_node(std::shared_ptr<ngraph::Node>) = 0;
};
/// \brief Deprecated classification of fusion passes; kept only for backward
/// compatibility. The deprecation attribute states it is no longer used anywhere.
// NOTE: fixed grammar in the deprecation message ("do no use" -> "do not use").
enum class NGRAPH_DEPRECATED("FusionType is no longer used anywhere. Please do not use it.") FusionType : uint32_t {
    //`DIFFERENTIABLE_FUSIONS` produce ops that support autodiff
    // i.e. implement `generate_adjoints`
    DIFFERENTIABLE_FUSIONS = 0x1,
    REGULAR_FUSIONS = 0x2,
    //`FOP_FUSIONS` produce ops in the FusedOps category that might
    // not be supported by all backends
    FOP_FUSIONS = 0x4,
    ALL_FUSIONS = 0xFFFFFFFF
};
NGRAPH_SUPPRESS_DEPRECATED_START
/// Bitmask over FusionType values; deprecated together with the enum.
using FusionTypeMask = ov::EnumMask<FusionType>;
NGRAPH_SUPPRESS_DEPRECATED_END
} // namespace pass
} // namespace ngraph

View File

@@ -32,6 +32,19 @@ namespace util {
template <typename T, typename = void>
struct Read;
/// SFINAE detector: Readable<T>::value is true when a Read<T> functor is
/// invocable as Read<T>{}(std::istream&, T&).
template <class T>
struct Readable {
    // Preferred overload: well-formed only if Read<U>{}(istream&, U&) compiles.
    template <class U>
    static auto test(U*)
        -> decltype(std::declval<Read<U>>()(std::declval<std::istream&>(), std::declval<U&>()), std::true_type()) {
        return {};
    }
    // Fallback overload selected when the probe expression is ill-formed.
    template <typename>
    static auto test(...) -> std::false_type {
        return {};
    }
    constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <class T>
struct Istreamable {
template <class U>
@@ -45,35 +58,6 @@ struct Istreamable {
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
/// SFINAE detector (pre-refactor variant): Readable<T>::value is true when a
/// free function read(std::istream&, T&) is found for T (e.g. via ADL).
// NOTE(review): this duplicates the Read-functor-based Readable above; the two
// appear to come from different revisions of the same header — confirm which
// one the build actually uses.
template <class T>
struct Readable {
    // Well-formed only when read(istream&, U&) is a valid expression.
    template <class U>
    static auto test(U*) -> decltype(read(std::declval<std::istream&>(), std::declval<U&>()), std::true_type()) {
        return {};
    }
    // Fallback when the probe expression does not compile.
    template <typename>
    static auto test(...) -> std::false_type {
        return {};
    }
    constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
// Primary Read functor. The first operator() overload is chosen for types with
// neither operator>> nor Read support and always fails at runtime with a
// diagnostic naming T; the second delegates to stream extraction.
template <typename T, typename>
struct Read {
    // Hard failure path: T can be neither extracted from a stream nor Read.
    template <typename U>
    auto operator()(std::istream&, U&) const ->
        typename std::enable_if<std::is_same<T, U>::value && !Istreamable<U>::value && !Readable<U>::value>::type {
        OPENVINO_UNREACHABLE("Could read type without std::istream& operator>>(std::istream&, T)",
                             " defined or ov::util::Read<T> class specialization, T: ",
                             typeid(T).name());
    }
    // Default path: delegate to operator>>.
    template <typename U>
    auto operator()(std::istream& is, U& value) const ->
        typename std::enable_if<std::is_same<T, U>::value && Istreamable<U>::value && !Readable<U>::value>::type {
        is >> value;
    }
};
template <>
struct OPENVINO_API Read<bool> {
void operator()(std::istream& is, bool& value) const;
@@ -139,12 +123,80 @@ struct OPENVINO_API Read<std::tuple<unsigned int, unsigned int>> {
void operator()(std::istream& is, std::tuple<unsigned int, unsigned int>& tuple) const;
};
/// Identity conversion: requesting a std::string from a string is a no-op,
/// so the input is returned by const reference without copying or parsing.
template <typename T>
auto from_string(const std::string& text) -> const
    typename std::enable_if<std::is_same<T, std::string>::value, T>::type& {
    return text;
}
/// Parses `val` into a T using the dedicated Read<T> functor specialization.
/// Participates in overload resolution only for non-string Readable types.
template <typename T>
auto from_string(const std::string& val) ->
    typename std::enable_if<Readable<T>::value && !std::is_same<T, std::string>::value, T>::type {
    std::stringstream source{val};
    T parsed;
    Read<T>{}(source, parsed);
    return parsed;
}
/// Parses `val` into a T via the stream extraction operator (operator>>).
/// Chosen only when no Read<T> specialization exists but T is istreamable.
template <typename T>
auto from_string(const std::string& val) ->
    typename std::enable_if<!Readable<T>::value && Istreamable<T>::value && !std::is_same<T, std::string>::value,
                            T>::type {
    std::stringstream source{val};
    T parsed;
    source >> parsed;
    return parsed;
}
/// SFINAE detector: ValueTyped<T>::value is true when T exposes a nested
/// `value_type` typedef.
template <class T>
struct ValueTyped {
    // Well-formed only when `typename U::value_type` names a type.
    template <class U>
    static auto test(U*) -> decltype(std::declval<typename U::value_type&>(), std::true_type()) {
        return {};
    }
    // Fallback when U has no value_type.
    template <typename>
    static auto test(...) -> std::false_type {
        return {};
    }
    constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
// Parses `val` as T::value_type using the Read functor; the unnamed T argument
// only carries the wrapper type for deduction. Selected when T has a
// value_type with a Read specialization.
template <typename T,
          typename std::enable_if<ValueTyped<T>::value && Readable<typename T::value_type>::value, bool>::type = true>
typename T::value_type from_string(const std::string& val, const T&) {
    std::stringstream ss(val);
    typename T::value_type value;
    Read<typename T::value_type, void>{}(ss, value);
    return value;
}
// Parses `val` as T::value_type via operator>>; selected when the value_type
// has no Read specialization but is istreamable. The unnamed T argument only
// carries the wrapper type for deduction.
template <typename T,
          typename std::enable_if<ValueTyped<T>::value && !Readable<typename T::value_type>::value &&
                                      Istreamable<typename T::value_type>::value,
                                  bool>::type = true>
typename T::value_type from_string(const std::string& val, const T&) {
    std::stringstream ss(val);
    typename T::value_type value;
    ss >> value;
    return value;
}
// Fallback overload: selected when T is neither std::string, nor Readable,
// nor istreamable; always fails at runtime with a diagnostic naming T.
template <typename T>
auto from_string(const std::string& val) ->
    typename std::enable_if<!Readable<T>::value && !Istreamable<T>::value && !std::is_same<T, std::string>::value,
                            T>::type {
    OPENVINO_UNREACHABLE("Could read type without std::istream& operator>>(std::istream&, T)",
                         " defined or ov::util::Read<T> class specialization, T: ",
                         typeid(T).name());
}
template <typename T, typename A>
struct Read<std::vector<T, A>, typename std::enable_if<std::is_default_constructible<T>::value>::type> {
void operator()(std::istream& is, std::vector<T, A>& vec) const {
while (is.good()) {
T v;
Read<T>{}(is, v);
std::string str;
is >> str;
auto v = from_string<T>(str);
vec.push_back(std::move(v));
}
}
@@ -156,10 +208,11 @@ struct Read<
typename std::enable_if<std::is_default_constructible<K>::value && std::is_default_constructible<T>::value>::type> {
void operator()(std::istream& is, std::map<K, T, C, A>& map) const {
while (is.good()) {
K k;
T v;
Read<K>{}(is, k);
Read<T>{}(is, v);
std::string str;
is >> str;
auto k = from_string<K>(str);
is >> str;
auto v = from_string<T>(str);
map.emplace(std::move(k), std::move(v));
}
}
@@ -184,7 +237,8 @@ struct Ostreamable {
template <class T>
struct Writable {
template <class U>
static auto test(U*) -> decltype(write(std::declval<std::ostream&>(), std::declval<const U&>()), std::true_type()) {
static auto test(U*) -> decltype(std::declval<Write<U>>()(std::declval<std::ostream&>(), std::declval<const U&>()),
std::true_type()) {
return {};
}
template <typename>
@@ -194,18 +248,6 @@ struct Writable {
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <typename T>
struct Write {
template <typename U>
auto operator()(std::ostream& os, const U&) const ->
typename std::enable_if<std::is_same<T, U>::value && !Ostreamable<U>::value && !Writable<U>::value>::type {}
template <typename U>
auto operator()(std::ostream& os, const U& value) const ->
typename std::enable_if<std::is_same<T, U>::value && Ostreamable<U>::value && !Writable<U>::value>::type {
os << value;
}
};
template <>
struct OPENVINO_API Write<bool> {
void operator()(std::ostream& is, const bool& b) const;
@@ -226,13 +268,44 @@ struct OPENVINO_API Write<std::tuple<unsigned int, unsigned int>> {
void operator()(std::ostream& os, const std::tuple<unsigned int, unsigned int>& tuple) const;
};
/// Identity conversion: a std::string is already its own textual form,
/// so it is returned by const reference without any formatting.
template <typename T>
auto to_string(const T& text) -> const typename std::enable_if<std::is_same<T, std::string>::value, T>::type& {
    return text;
}
// Formats `value` through the dedicated Write<T> functor specialization.
// Selected for non-string Writable types.
template <typename T>
auto to_string(const T& value) ->
    typename std::enable_if<Writable<T>::value && !std::is_same<T, std::string>::value, std::string>::type {
    std::stringstream ss;
    Write<T>{}(ss, value);
    return ss.str();
}
// Formats `value` with the stream insertion operator (operator<<).
// Selected when no Write<T> specialization exists but T is ostreamable.
template <typename T>
auto to_string(const T& value) ->
    typename std::enable_if<!Writable<T>::value && Ostreamable<T>::value && !std::is_same<T, std::string>::value,
                            std::string>::type {
    std::stringstream ss;
    ss << value;
    return ss.str();
}
// Fallback overload: selected when T is neither std::string, nor Writable,
// nor ostreamable; always fails at runtime with a diagnostic naming T.
template <typename T>
auto to_string(const T&) ->
    typename std::enable_if<!Writable<T>::value && !Ostreamable<T>::value && !std::is_same<T, std::string>::value,
                            std::string>::type {
    // Fixed diagnostic: stringification relies on the insertion operator
    // (operator<<), not the extraction operator (operator>>) the old message named.
    OPENVINO_UNREACHABLE("Could convert to string from type without std::ostream& operator<<(std::ostream&, const T&)",
                         " defined or ov::util::Write<T> class specialization, T: ",
                         typeid(T).name());
}
template <typename T, typename A>
struct Write<std::vector<T, A>> {
void operator()(std::ostream& os, const std::vector<T, A>& vec) const {
if (!vec.empty()) {
std::size_t i = 0;
for (auto&& v : vec) {
Write<T>{}(os, v);
os << to_string(v);
if (i < (vec.size() - 1))
os << ' ';
++i;
@@ -247,9 +320,9 @@ struct Write<std::map<K, T, C, A>> {
if (!map.empty()) {
std::size_t i = 0;
for (auto&& v : map) {
Write<K>{}(os, v.first);
os << to_string(v.first);
os << ' ';
Write<T>{}(os, v.second);
os << to_string(v.second);
if (i < (map.size() - 1))
os << ' ';
++i;
@@ -346,9 +419,6 @@ class OPENVINO_API Any {
static bool equal(std::type_index lhs, std::type_index rhs);
/**
* @brief Base API of erased type
*/
class OPENVINO_API Base : public std::enable_shared_from_this<Base> {
public:
void type_check(const std::type_info&) const;
@@ -364,6 +434,7 @@ class OPENVINO_API Any {
virtual bool equal(const Base& rhs) const = 0;
virtual void print(std::ostream& os) const = 0;
virtual void read(std::istream& os) = 0;
void read_to(Base& other) const;
virtual const DiscreteTypeInfo& get_type_info() const = 0;
virtual std::shared_ptr<RuntimeAttribute> as_runtime_attribute() const;
@@ -504,12 +575,50 @@ class OPENVINO_API Any {
return false;
}
template <typename U>
static typename std::enable_if<util::Writable<U>::value>::type print_impl(std::ostream& os, const U& value) {
util::Write<U>{}(os, value);
}
template <typename U>
static typename std::enable_if<!util::Writable<U>::value && util::Ostreamable<U>::value>::type print_impl(
std::ostream& os,
const U& value) {
os << value;
}
template <typename U>
static typename std::enable_if<!util::Writable<U>::value && !util::Ostreamable<U>::value>::type print_impl(
std::ostream&,
const U&) {}
void print(std::ostream& os) const override {
util::Write<T>{}(os, value);
print_impl(os, value);
}
template <typename U>
static typename std::enable_if<util::Readable<U>::value>::type read_impl(std::istream& is, U& value) {
util::Read<U>{}(is, value);
}
template <typename U>
static typename std::enable_if<!util::Readable<U>::value && util::Istreamable<U>::value>::type read_impl(
std::istream& is,
U& value) {
is >> value;
}
template <typename U>
static typename std::enable_if<!util::Readable<U>::value && !util::Istreamable<U>::value>::type read_impl(
std::istream&,
U&) {
OPENVINO_UNREACHABLE("Could read type without std::istream& operator>>(std::istream&, T)",
" defined or ov::util::Read<T> class specialization, T: ",
typeid(T).name());
}
void read(std::istream& is) override {
util::Read<T>{}(is, value);
read_impl(is, value);
}
T value;
@@ -527,9 +636,7 @@ class OPENVINO_API Any {
void impl_check() const;
mutable Base::Ptr _temp_impl;
mutable std::string _str;
mutable Base::Ptr _temp;
Base::Ptr _impl;
@@ -612,9 +719,10 @@ public:
bool empty() const;
/**
* @brief check the type of value in any
* @brief Check that stored type can be casted to specified type.
* If internal type supports Base
* @tparam T Type of value
* @return true if type of value is correct
* @return true if type of value is correct. Return false if any is empty
*/
template <class T>
bool is() const {
@@ -637,10 +745,10 @@ public:
* @return casted object
*/
template <class T>
typename std::enable_if<std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value, T>::type& as() & {
typename std::enable_if<std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value, T>::type& as() {
if (_impl == nullptr) {
_temp_impl = std::make_shared<Impl<decay_t<T>>>(T{});
return _temp_impl->as<T>();
_temp = std::make_shared<Impl<decay_t<T>>>(T{});
return *static_cast<decay_t<T>*>(_temp->addressof());
} else {
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<decay_t<T>*>(_impl->addressof());
@@ -662,48 +770,9 @@ public:
" to ",
static_cast<std::string>(T::element_type::get_type_info_static()));
}
vptr = std::static_pointer_cast<typename T::element_type>(runtime_attribute);
_temp_impl = std::make_shared<Impl<decay_t<T>>>(vptr);
return _temp_impl->as<T>();
}
}
}
/**
* Dynamic cast to specified type
* @tparam T type
* @return casted object
*/
template <class T>
const typename std::enable_if<std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value, T>::type& as()
const& {
if (_impl == nullptr) {
_temp_impl = std::make_shared<Impl<decay_t<T>>>(T{});
return _temp_impl->as<T>();
} else {
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<const decay_t<T>*>(_impl->addressof());
} else {
auto runtime_attribute = _impl->as_runtime_attribute();
if (runtime_attribute == nullptr) {
OPENVINO_UNREACHABLE("Any does not contains pointer to runtime_attribute. It contains ",
_impl->type_info().name());
}
auto vptr = std::dynamic_pointer_cast<typename T::element_type>(runtime_attribute);
if (vptr == nullptr && T::element_type::get_type_info_static() != runtime_attribute->get_type_info() &&
T::element_type::get_type_info_static() != RuntimeAttribute::get_type_info_static()) {
OPENVINO_UNREACHABLE("Could not cast Any runtime_attribute to ",
typeid(T).name(),
" from ",
_impl->type_info().name(),
"; from ",
static_cast<std::string>(runtime_attribute->get_type_info()),
" to ",
static_cast<std::string>(T::element_type::get_type_info_static()));
}
vptr = std::static_pointer_cast<typename T::element_type>(runtime_attribute);
_temp_impl = std::make_shared<Impl<decay_t<T>>>(vptr);
return _temp_impl->as<T>();
_temp = std::make_shared<Impl<decay_t<T>>>(
std::static_pointer_cast<typename T::element_type>(runtime_attribute));
return *static_cast<decay_t<T>*>(_temp->addressof());
}
}
}
@@ -715,17 +784,17 @@ public:
*/
template <class T>
typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
!std::is_same<T, std::string>::value && std::is_default_constructible<T>::value,
!std::is_same<T, std::string>::value && std::is_default_constructible<T>::value &&
(util::Istreamable<T>::value || util::Readable<T>::value),
T>::type&
as() & {
as() {
impl_check();
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<decay_t<T>*>(_impl->addressof());
} else if (_impl->is(typeid(std::string))) {
_temp_impl = std::make_shared<Impl<decay_t<T>>>();
std::stringstream strm{as<std::string>()};
_temp_impl->read(strm);
return *static_cast<decay_t<T>*>(_temp_impl->addressof());
_temp = std::make_shared<Impl<decay_t<T>>>();
_impl->read_to(*_temp);
return *static_cast<decay_t<T>*>(_temp->addressof());
}
for (const auto& type_index : _impl->base_type_info()) {
if (equal(type_index, typeid(decay_t<T>))) {
@@ -741,37 +810,11 @@ public:
* @return casted object
*/
template <class T>
const typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
!std::is_same<T, std::string>::value && std::is_default_constructible<T>::value,
T>::type&
as() const& {
impl_check();
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<const decay_t<T>*>(_impl->addressof());
} else if (_impl->is(typeid(std::string))) {
_temp_impl = std::make_shared<Impl<decay_t<T>>>();
std::stringstream strm{as<std::string>()};
_temp_impl->read(strm);
return *static_cast<const decay_t<T>*>(_temp_impl->addressof());
}
for (const auto& type_index : _impl->base_type_info()) {
if (equal(type_index, typeid(decay_t<T>))) {
return *static_cast<const decay_t<T>*>(_impl->addressof());
}
}
OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
}
/**
* Dynamic cast to specified type
* @tparam T type
* @return casted object
*/
template <class T>
typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
!std::is_same<T, std::string>::value && !std::is_default_constructible<T>::value,
T>::type&
as() & {
typename std::enable_if<
!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value && !std::is_same<T, std::string>::value &&
(!std::is_default_constructible<T>::value || (!util::Istreamable<T>::value && !util::Readable<T>::value)),
T>::type&
as() {
impl_check();
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<decay_t<T>*>(_impl->addressof());
@@ -790,64 +833,29 @@ public:
* @return casted object
*/
template <class T>
const typename std::enable_if<!std::is_convertible<T, std::shared_ptr<RuntimeAttribute>>::value &&
!std::is_same<T, std::string>::value && !std::is_default_constructible<T>::value,
T>::type&
as() const& {
impl_check();
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<const decay_t<T>*>(_impl->addressof());
}
for (const auto& type_index : _impl->base_type_info()) {
if (equal(type_index, typeid(decay_t<T>))) {
return *static_cast<const decay_t<T>*>(_impl->addressof());
}
}
OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
}
/**
* Dynamic cast to specified type
* @tparam T type
* @return casted object
*/
template <class T>
typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() & {
typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() {
if (_impl != nullptr) {
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<decay_t<T>*>(_impl->addressof());
} else {
std::stringstream strm;
print(strm);
_str = strm.str();
return _str;
_temp = std::make_shared<Impl<std::string>>();
_impl->read_to(*_temp);
return *static_cast<std::string*>(_temp->addressof());
}
} else {
_str = {};
return _str;
_temp = std::make_shared<Impl<std::string>>();
return *static_cast<std::string*>(_temp->addressof());
}
}
/**
* Dynamic cast to specified type
* @tparam T type
* @return casted object
* @return const reference to caster object
*/
template <class T>
const typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() const& {
if (_impl != nullptr) {
if (_impl->is(typeid(decay_t<T>))) {
return *static_cast<const decay_t<T>*>(_impl->addressof());
} else {
std::stringstream strm;
print(strm);
_str = strm.str();
return _str;
}
} else {
_str = {};
return _str;
}
const T& as() const {
return const_cast<Any*>(this)->as<T>();
}
/**
@@ -958,6 +966,18 @@ public:
impl_check();
return _impl.get();
}
/**
* @brief Returns address to internal value if any is not empty and `nullptr` instead
* @return address to internal stored value
*/
void* addressof();
/**
* @brief Returns address to internal value if any is not empty and `nullptr` instead
* @return address to internal stored value
*/
const void* addressof() const;
};
/** @cond INTERNAL */

View File

@@ -35,9 +35,6 @@ public:
/// as_type.
virtual const DiscreteTypeInfo& get_type_info() const = 0;
virtual ~ValueAccessor() = default;
virtual ov::Any get_as_any() {
throw ov::Exception("get_as_any is not implemented");
}
virtual void set_as_any(const ov::Any& x) {
throw ov::Exception("set_as_any is not implemented");
}
@@ -59,11 +56,14 @@ public:
virtual const VAT& get() = 0;
/// Sets the value
virtual void set(const VAT& value) = 0;
ov::Any get_as_any() override {
return get();
}
void set_as_any(const ov::Any& x) override {
set(x.as<VAT>());
const auto* data = x.addressof();
OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
if (x.is<VAT>()) {
set(*static_cast<const VAT*>(data));
} else {
OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(VAT).name());
}
}
};
@@ -108,14 +108,18 @@ public:
}
void set_as_any(const ov::Any& x) override {
const auto* data = x.addressof();
OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
// Try to represent x as VAT or AT
if (x.is<VAT>()) {
set(x.as<VAT>());
} else {
set(*static_cast<const VAT*>(data));
} else if (x.is<AT>()) {
// Don't call set here avoiding unnecessary casts AT -> VAT -> AT,
// instead reimplement logic from set.
m_ref = x.as<AT>();
m_ref = *static_cast<const AT*>(data);
m_buffer_valid = false;
} else {
OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(AT).name());
}
}
@@ -153,14 +157,18 @@ public:
}
void set_as_any(const ov::Any& x) override {
const auto* data = x.addressof();
OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
// Try to represent x as VAT or AT
if (x.is<VAT>()) {
set(x.as<VAT>());
} else {
set(*static_cast<const VAT*>(data));
} else if (x.is<AT>()) {
// Don't call set here avoiding unnecessary casts AT -> VAT -> AT,
// instead reimplement logic from set.
m_ref = x.as<AT>();
m_ref = *static_cast<const AT*>(data);
m_buffer_valid = false;
} else {
OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(AT).name());
}
}
operator AT&() {
@@ -196,13 +204,17 @@ public:
}
void set_as_any(const ov::Any& x) override {
const auto* data = x.addressof();
OPENVINO_ASSERT(data != nullptr, "Data conversion is not possible. Empty data is provided.");
// Try to represent x as std::string or AT
if (x.is<std::string>()) {
set(x.as<std::string>());
} else {
} else if (x.is<AT>()) {
// Don't call set here avoiding unnecessary casts AT -> std::string -> AT,
// instead reimplement logic from set.
m_ref = x.as<AT>();
m_ref = *static_cast<const AT*>(data);
} else {
OPENVINO_UNREACHABLE("Bad cast from: ", x.type_info().name(), " to: ", typeid(AT).name());
}
}

View File

@@ -168,6 +168,13 @@ public:
Dimension operator&(const Dimension& dim) const;
/// \brief Intersection of dimensions
Dimension& operator&=(const Dimension& dim);
/// \brief Swap of dimensions.
/// Exchanges the interval, the tracking label, and the equivalence table of
/// the two dimensions; declared as a friend so it is found via ADL by
/// unqualified swap(a, b).
friend void swap(Dimension& a, Dimension& b) {
    using std::swap;  // keep std::swap available as the fallback overload
    swap(a.m_dimension, b.m_dimension);
    swap(a.m_label, b.m_label);
    swap(a.m_table_of_equivalence, b.m_table_of_equivalence);
}
private:
Dimension(const Interval& interval) : m_dimension(interval) {}

View File

@@ -43,8 +43,8 @@ class OPENVINO_API Model : public std::enable_shared_from_this<Model> {
public:
static const ::ov::DiscreteTypeInfo& get_type_info_static() {
static const ::ov::DiscreteTypeInfo type_info{"Model", 0};
return type_info;
static const ::ov::DiscreteTypeInfo type_info_static{"Model", 0};
return type_info_static;
}
const ::ov::DiscreteTypeInfo& get_type_info() const {
return get_type_info_static();

View File

@@ -11,14 +11,14 @@
#define _OPENVINO_RTTI_WITH_TYPE(TYPE_NAME) _OPENVINO_RTTI_WITH_TYPE_VERSION(TYPE_NAME, "util")
#define _OPENVINO_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME) \
static const ::ov::DiscreteTypeInfo& get_type_info_static() { \
static ::ov::DiscreteTypeInfo type_info{TYPE_NAME, 0, VERSION_NAME}; \
type_info.hash(); \
return type_info; \
} \
const ::ov::DiscreteTypeInfo& get_type_info() const override { \
return get_type_info_static(); \
#define _OPENVINO_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME) \
static const ::ov::DiscreteTypeInfo& get_type_info_static() { \
static ::ov::DiscreteTypeInfo type_info_static{TYPE_NAME, 0, VERSION_NAME}; \
type_info_static.hash(); \
return type_info_static; \
} \
const ::ov::DiscreteTypeInfo& get_type_info() const override { \
return get_type_info_static(); \
}
#define _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT(TYPE_NAME, VERSION_NAME, PARENT_CLASS) \
@@ -26,12 +26,12 @@
#define _OPENVINO_RTTI_WITH_TYPE_VERSIONS_PARENT(TYPE_NAME, VERSION_NAME, PARENT_CLASS, OLD_VERSION) \
static const ::ov::DiscreteTypeInfo& get_type_info_static() { \
static ::ov::DiscreteTypeInfo type_info{TYPE_NAME, \
OLD_VERSION, \
VERSION_NAME, \
&PARENT_CLASS::get_type_info_static()}; \
type_info.hash(); \
return type_info; \
static ::ov::DiscreteTypeInfo type_info_static{TYPE_NAME, \
OLD_VERSION, \
VERSION_NAME, \
&PARENT_CLASS::get_type_info_static()}; \
type_info_static.hash(); \
return type_info_static; \
} \
const ::ov::DiscreteTypeInfo& get_type_info() const override { \
return get_type_info_static(); \

View File

@@ -20,8 +20,8 @@ class Any;
class OPENVINO_API RuntimeAttribute {
public:
static const DiscreteTypeInfo& get_type_info_static() {
static const ::ov::DiscreteTypeInfo type_info{"RuntimeAttribute", 0};
return type_info;
static const ::ov::DiscreteTypeInfo type_info_static{"RuntimeAttribute", 0};
return type_info_static;
}
virtual const DiscreteTypeInfo& get_type_info() const {
return get_type_info_static();

View File

@@ -225,25 +225,5 @@ public:
bool run_on_model(const std::shared_ptr<ov::Model>& m) override;
};
class OPENVINO_API RecurrentGraphRewrite : public ModelPass {
public:
RecurrentGraphRewrite(size_t num_iters = 10) : ModelPass(), m_num_iters(num_iters) {}
void add_matcher(const std::shared_ptr<pattern::RecurrentMatcher>& m,
const ov::recurrent_graph_rewrite_callback& callback,
const PassPropertyMask& property);
// TODO: This interface may deprecate after all passes are refactored.
void add_matcher(const std::shared_ptr<pattern::RecurrentMatcher>& m,
const ov::recurrent_graph_rewrite_callback& callback);
bool run_on_model(const std::shared_ptr<ov::Model>& m) override;
private:
size_t m_num_iters;
std::vector<std::shared_ptr<ov::pass::MatcherPass>> m_matchers;
};
} // namespace pass
} // namespace ov

View File

@@ -100,17 +100,5 @@ private:
bool call_on_model{false};
};
class Manager;
enum class FusionType : uint32_t {
//`DIFFERENTIABLE_FUSIONS` produce ops that support autodiff
// i.e. implement `generate_adjoints`
DIFFERENTIABLE_FUSIONS = 0x1,
REGULAR_FUSIONS = 0x2,
//`FOP_FUSIONS` produce ops in the FusedOps category that might
// not be supported by all backends
FOP_FUSIONS = 0x4,
ALL_FUSIONS = 0xFFFFFFFF
};
using FusionTypeMask = ov::EnumMask<FusionType>;
} // namespace pass
} // namespace ov

View File

@@ -0,0 +1,123 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <openvino/core/validation_util.hpp>
#include <openvino/op/matmul.hpp>
#include "utils.hpp"
namespace ov {
namespace op {
namespace v0 {
/// \brief Shape inference for MatMul (ov::op::v0::MatMul).
/// Applies the numpy-style matmul rules: optional transposition of the two
/// right-most axes, unsqueezing of 1D inputs, broadcasting of batch
/// dimensions, and removal of the temporary axes for originally-1D inputs.
/// \tparam T             Shape type (dimension container; must expose rank(),
///                       size(), begin()/end(), insert(), and an iterator whose
///                       value_type provides merge()/broadcast_merge()).
/// \param op             Validated MatMul node (source of transpose_a/transpose_b).
/// \param input_shapes   Exactly two input shapes (A and B).
/// \param output_shapes  Exactly one element; receives the inferred shape.
template<class T>
void shape_infer(const ov::op::v0::MatMul *op,
                 const std::vector<T> &input_shapes,
                 std::vector<T> &output_shapes) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1);
    auto arg0_shape = input_shapes[0], arg1_shape = input_shapes[1];
    // With a dynamic rank on either side nothing can be inferred.
    if (arg0_shape.rank().is_dynamic() || arg1_shape.rank().is_dynamic()) {
        output_shapes[0] = ov::PartialShape::dynamic();
        return;
    }
    // ranks are known
    const bool transpose_a = op->get_transpose_a();
    const bool transpose_b = op->get_transpose_b();
    size_t arg0_rank = arg0_shape.size(), arg1_rank = arg1_shape.size();
    NODE_VALIDATION_CHECK(op, (arg0_rank != 0 && arg1_rank != 0), "Scalars are not supported as MatMul inputs.");
    // Temporary Dimension vectors to calculate output shape
    T arg0_shape_tmp(arg0_shape), arg1_shape_tmp(arg1_shape);
    // 1. Applying transpositions specified by optional `transpose_a` and `transpose_b`
    // Only two right-most dimensions are swapped, other dimensions remain the same.
    // Transpose attributes are ignored for 1D tensors.
    if (transpose_a && arg0_rank > 1) {
        swap(arg0_shape_tmp[arg0_rank - 2], arg0_shape_tmp[arg0_rank - 1]);
    }
    if (transpose_b && arg1_rank > 1) {
        swap(arg1_shape_tmp[arg1_rank - 2], arg1_shape_tmp[arg1_rank - 1]);
    }
    // 2. One-dimensional tensors unsqueezing is applied to each input independently.
    if (arg0_rank == 1) {
        // If the first input is 1D tensor, it is unsqueezed to 2D tensor (row vector)
        // by adding axes with size 1 at ROW_INDEX_DIM, to the left of the shape.
        // For example {S} will be reshaped to {1, S}.
        arg0_shape_tmp.insert(arg0_shape_tmp.begin(), 1);
        arg0_rank = arg0_shape_tmp.size();
    }
    if (arg1_rank == 1) {
        // If the second input is 1D tensor, it is unsqueezed to 2D tensor (column vector)
        // by adding axes with size 1 at COL_INDEX_DIM, to the right of the shape.
        // For example {S} will be reshaped to {S, 1}.
        arg1_shape_tmp.insert(arg1_shape_tmp.end(), 1);
        arg1_rank = arg1_shape_tmp.size();
    }
    // Check matrices dimensions compatibility,
    // COL_INDEX_DIM of the first matrix has to match ROW_INDEX_DIM of the second matrix.
    // Error is not thrown for dynamic dimensions bounds without intersection
    // to ensure MatMul backward compatibility.
    using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
    auto merged_dimension = DimType();
    auto arg0_col_dim = arg0_shape_tmp[arg0_rank - 1];
    auto arg1_row_dim = arg1_shape_tmp[arg1_rank - 2];
    NODE_VALIDATION_CHECK(op, DimType::merge(merged_dimension, arg0_col_dim, arg1_row_dim) || arg0_col_dim.is_dynamic() ||
                          arg1_row_dim.is_dynamic(),
                          "Incompatible MatMul matrix dimension. ",
                          "First input dimension=",
                          arg0_col_dim,
                          " at COL_INDEX_DIM=",
                          (arg0_rank - 1),
                          " doesn't match the second input dimension=",
                          arg1_row_dim,
                          " at ROW_INDEX_DIM=",
                          (arg1_rank - 2));
    // 3. If ranks of input arguments are different after steps 1 and 2,
    // the smaller tensor is unsqueezed from the left side of the shape
    // by necessary number of axes to make both shapes of the same rank.
    if (arg0_rank < arg1_rank)
        arg0_shape_tmp.insert(arg0_shape_tmp.begin(), arg1_rank - arg0_rank, 1);
    else if (arg0_rank > arg1_rank)
        arg1_shape_tmp.insert(arg1_shape_tmp.begin(), arg0_rank - arg1_rank, 1);
    // Both arg0_shape_tmp and arg1_shape_tmp have identical size now
    size_t max_rank = arg0_shape_tmp.size();
    std::vector<DimType> output_shape(max_rank);
    // 4. Usual rules of the broadcasting are applied for batch dimensions.
    // Broadcast all batches (last two dimensions represent matrix),
    // expand dim with value 1 to bigger dim if dimensions are not equal.
    for (size_t i = 0; i < max_rank - 2; ++i) {
        NODE_VALIDATION_CHECK(op, DimType::broadcast_merge(output_shape[i], arg0_shape_tmp[i], arg1_shape_tmp[i]) ||
                              arg0_shape_tmp[i].is_dynamic() || arg1_shape_tmp[i].is_dynamic(),
                              "Incompatible MatMul batch dimension. ",
                              "Can't merge first input dimension=",
                              arg0_shape_tmp[i],
                              " with second input dimension=",
                              arg1_shape_tmp[i],
                              " at index=",
                              i);
    }
    // In output_shape replace 2 last axes with ROW_INDEX_DIM from arg0 matrix
    // and COL_INDEX_DIM from arg1 matrix.
    output_shape[output_shape.size() - 2] = arg0_shape_tmp[arg0_shape_tmp.size() - 2];
    output_shape[output_shape.size() - 1] = arg1_shape_tmp[arg1_shape_tmp.size() - 1];
    // 5. Removing the temporary axes from originally 1D tensors.
    // Output shape of two 1D tensors multiplication will be a 0D tensor (scalar).
    if (arg0_shape.rank().get_length() == 1) {
        // arg0 input temporary axis inserted at ROW_INDEX_DIM is removed
        output_shape.erase(output_shape.begin() + output_shape.size() - 2);
    }
    if (arg1_shape.rank().get_length() == 1) {
        // arg1 input temporary axis inserted at COL_INDEX_DIM is removed
        output_shape.erase(output_shape.begin() + output_shape.size() - 1);
    }
    output_shapes[0] = output_shape;
}
}
}
}

View File

@@ -57,8 +57,18 @@ bool Any::Base::visit_attributes(AttributeVisitor& visitor) const {
return const_cast<Any::Base*>(this)->visit_attributes(visitor);
}
// Transfers this value into `other`: the value is first rendered to text via
// print(); if `other` stores a std::string the text is assigned directly,
// otherwise `other` re-parses the text through its read() implementation.
void Any::Base::read_to(Base& other) const {
    std::stringstream strm;
    print(strm);
    if (other.is<std::string>()) {
        *static_cast<std::string*>(other.addressof()) = strm.str();
    } else {
        other.read(strm);
    }
}
Any::~Any() {
_temp_impl = {};
_temp = {};
_impl = {};
}
@@ -96,7 +106,7 @@ void Any::read(std::istream& istream) {
}
bool Any::operator==(const Any& other) const {
if (_impl == nullptr && other._impl == nullptr) {
if (_impl == nullptr || other._impl == nullptr) {
return false;
}
if (_impl == other._impl) {
@@ -121,6 +131,13 @@ const Any::Base* Any::operator->() const {
return _impl.get();
}
/// Returns the address of the internally stored value, or nullptr when this
/// Any is empty.
void* Any::addressof() {
    if (_impl == nullptr) {
        return nullptr;
    }
    return _impl->addressof();
}

/// Const overload; identical semantics to the mutable addressof().
const void* Any::addressof() const {
    if (_impl == nullptr) {
        return nullptr;
    }
    return _impl->addressof();
}
namespace util {
void Read<bool>::operator()(std::istream& is, bool& value) const {
@@ -138,7 +155,7 @@ void Read<bool>::operator()(std::istream& is, bool& value) const {
template <typename F>
static auto stream_to(std::istream& is, F&& f) -> decltype(f(std::declval<const std::string&>())) {
std::string str;
Read<std::string>{}(is, str);
is >> str;
try {
return f(str);
} catch (std::exception& e) {

View File

@@ -110,15 +110,27 @@ bool Dimension::merge(Dimension& dst, const Dimension& d1, const Dimension& d2)
}
bool Dimension::broadcast_merge(Dimension& dst, const Dimension& d1, const Dimension& d2) {
if (d1 == 1) {
bool d1_has_1 = d1.m_dimension.contains(1);
bool d2_has_1 = d2.m_dimension.contains(1);
if (d1_has_1 && d2_has_1) {
auto result = ov::Interval(std::min(d1.m_dimension.get_min_val(), d2.m_dimension.get_min_val()),
std::max(d1.m_dimension.get_max_val(), d2.m_dimension.get_max_val()));
if (result.empty())
return false;
dst = Dimension(result);
if (d1.m_label == d2.m_label || d2.m_label == 0)
dst.m_label = d1.m_label;
else if (d1.m_label == 0)
dst.m_label = d2.m_label;
return true;
} else if (d1_has_1) {
dst = d2;
return true;
}
if (d2 == 1) {
} else if (d2_has_1) {
dst = d1;
return true;
} else {
return merge(dst, d1, d2);
}
return merge(dst, d1, d2);
return true;
}
Dimension::value_type Dimension::get_length() const {

View File

@@ -4,13 +4,11 @@
#include "ngraph/op/matmul.hpp"
#include <dimension_tracker.hpp>
#include <memory>
#include <numeric>
#include "itt.hpp"
#include "matmul_shape_inference.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/runtime/reference/matmul.hpp"
using namespace std;
@@ -40,167 +38,18 @@ shared_ptr<Node> op::MatMul::clone_with_new_inputs(const OutputVector& new_args)
namespace matmul {
namespace {
ov::PartialShape validate_matmul_output_shape(const ov::PartialShape& arg0_shape,
const ov::PartialShape& arg1_shape,
bool transpose_a,
bool transpose_b) {
auto arg0_rank = arg0_shape.rank().get_length();
auto arg1_rank = arg1_shape.rank().get_length();
NGRAPH_CHECK((arg0_rank != 0 && arg1_rank != 0), "Scalars are not supported as MatMul inputs.");
// Temporary Dimension vectors to calculate output shape
std::vector<Dimension> arg0_shape_tmp(arg0_shape);
std::vector<Dimension> arg1_shape_tmp(arg1_shape);
// 1. Applying transpositions specified by optional `transpose_a` and `transpose_b`
// Only two right-most dimensions are swapped, other dimensions remain the same.
// Transpose attributes are ignored for 1D tensors.
if (transpose_a && arg0_rank > 1) {
swap(arg0_shape_tmp[arg0_rank - 2], arg0_shape_tmp[arg0_rank - 1]);
}
if (transpose_b && arg1_rank > 1) {
swap(arg1_shape_tmp[arg1_rank - 2], arg1_shape_tmp[arg1_rank - 1]);
}
// 2. One-dimensional tensors unsqueezing is applied to each input independently.
if (arg0_rank == 1) {
// If the first input is 1D tensor, it is unsqueezed to 2D tensor (row vector)
// by adding axes with size 1 at ROW_INDEX_DIM, to the left of the shape.
// For example {S} will be reshaped to {1, S}.
arg0_shape_tmp.insert(arg0_shape_tmp.begin(), 1);
arg0_rank = arg0_shape_tmp.size();
}
if (arg1_rank == 1) {
// If the second input is 1D tensor, it is unsqueezed to 2D tensor (column vector)
// by adding axes with size 1 at COL_INDEX_DIM, to the right of the shape.
// For example {S} will be reshaped to {S, 1}.
arg1_shape_tmp.insert(arg1_shape_tmp.end(), 1);
arg1_rank = arg1_shape_tmp.size();
}
// Check matrices dimensions compatibility,
// COL_INDEX_DIM of the first matrix has to match ROW_INDEX_DIM of the second matrix.
// Error is not thrown for dynamic dimensions bounds without intersection
// to ensure MatMul backward compatibility.
auto merged_dimension = Dimension::dynamic();
auto arg0_col_dim = arg0_shape_tmp[arg0_rank - 1];
auto arg1_row_dim = arg1_shape_tmp[arg1_rank - 2];
NGRAPH_CHECK(Dimension::merge(merged_dimension, arg0_col_dim, arg1_row_dim) || arg0_col_dim.is_dynamic() ||
arg1_row_dim.is_dynamic(),
"Incompatible MatMul matrix dimension. ",
"First input dimension=",
arg0_col_dim,
" at COL_INDEX_DIM=",
(arg0_rank - 1),
" doesn't match the second input dimension=",
arg1_row_dim,
" at ROW_INDEX_DIM=",
(arg1_rank - 2));
// 3. If ranks of input arguments are different after steps 1 and 2,
// the smaller tensor is unsqueezed from the left side of the shape
// by necessary number of axes to make both shapes of the same rank.
if (arg0_rank < arg1_rank)
arg0_shape_tmp.insert(arg0_shape_tmp.begin(), arg1_rank - arg0_rank, 1);
else if (arg0_rank > arg1_rank)
arg1_shape_tmp.insert(arg1_shape_tmp.begin(), arg0_rank - arg1_rank, 1);
// Both arg0_shape_tmp and arg1_shape_tmp have identical size now
auto max_rank = arg0_shape_tmp.size();
std::vector<Dimension> output_shape(max_rank);
// 4. Usual rules of the broadcasting are applied for batch dimensions.
// Broadcast all batches (last two dimensions represent matrix),
// expand dim with value 1 to bigger dim if dimensions are not equal.
for (uint64_t i = 0; i < max_rank - 2; i++) {
auto min_dim_val = std::min(arg0_shape_tmp[i].get_min_length(), arg1_shape_tmp[i].get_min_length());
// If both dimensions don't have 1 in range, usual merge is enough.
if (min_dim_val > 1) {
// Error is not thrown for dynamic dimensions bounds without intersection
// to ensure MatMul backward compatibility.
// Instead fully dynamic dimension is set as default for such a case.
auto merged_dimension = Dimension::dynamic();
NGRAPH_CHECK(Dimension::merge(merged_dimension, arg0_shape_tmp[i], arg1_shape_tmp[i]) ||
arg0_shape_tmp[i].is_dynamic() || arg1_shape_tmp[i].is_dynamic(),
"Incompatible MatMul batch dimension. ",
"Can't merge first input dimension=",
arg0_shape_tmp[i],
" with second input dimension=",
arg1_shape_tmp[i],
" at index=",
i);
output_shape[i] = merged_dimension;
} else {
// Dimension with value 1 can be expanded to any bigger.
Dimension::value_type lower_bound; // The lowest possible value of output dimension
Dimension::value_type upper_bound; // The highest possible value of output dimension
// Output dimension lower_bound is a maximum of
// corresponding input dimensions lower bounds.
lower_bound = std::max(arg0_shape_tmp[i].get_min_length(), arg1_shape_tmp[i].get_min_length());
if (lower_bound <= 1) {
// If both of the dimensions have 1 in range, output dimension upper_bound
// is a maximum of corresponding input dimensions upper bounds.
upper_bound = std::max(arg0_shape_tmp[i].get_interval().get_max_val(),
arg1_shape_tmp[i].get_interval().get_max_val());
} else {
// Otherwise output dimension upper_bound is same as upper bound of
// the dimension without 1 in range.
upper_bound = arg0_shape_tmp[i].get_min_length() <= 1 ? arg1_shape_tmp[i].get_max_length()
: arg0_shape_tmp[i].get_max_length();
}
output_shape[i] = Dimension(lower_bound, upper_bound);
// label setting
size_t out_label = 0;
size_t label_0 = ov::DimensionTracker::get_label(arg0_shape_tmp[i]);
size_t label_1 = ov::DimensionTracker::get_label(arg1_shape_tmp[i]);
if (label_0 == label_1 || label_1 == 0)
out_label = label_0;
else if (label_0 == 0)
out_label = label_1;
output_shape[i] = Dimension(lower_bound, upper_bound);
if (out_label)
ov::DimensionTracker::set_label(output_shape[i], out_label);
}
}
// In output_shape replace 2 last axes with ROW_INDEX_DIM from arg0 matrix
// and COL_INDEX_DIM from arg1 matrix.
output_shape.at(output_shape.size() - 2) = arg0_shape_tmp.at(arg0_shape_tmp.size() - 2);
output_shape.at(output_shape.size() - 1) = arg1_shape_tmp.at(arg1_shape_tmp.size() - 1);
// 5. Removing the temporary axes from originally 1D tensors.
// Output shape of two 1D tensors multiplication will be a 0D tensor (scalar).
if (arg0_shape.rank().get_length() == 1) {
// arg0 input temporary axis inserted at ROW_INDEX_DIM is removed
output_shape.erase(output_shape.begin() + output_shape.size() - 2);
}
if (arg1_shape.rank().get_length() == 1) {
// arg1 input temporary axis inserted at COL_INDEX_DIM is removed
output_shape.erase(output_shape.begin() + output_shape.size() - 1);
}
return ov::PartialShape(output_shape);
}
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& output,
bool transpose_a,
bool transpose_b) {
bool evaluate(const op::MatMul* op, const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& output) {
using T = typename element_type_traits<ET>::value_type;
ov::Shape arg0_shape = arg0->get_shape();
ov::Shape arg1_shape = arg1->get_shape();
ov::PartialShape output_partial_shape = validate_matmul_output_shape(ov::PartialShape(arg0_shape),
ov::PartialShape(arg1_shape),
transpose_a,
transpose_b);
ov::Shape output_shape = output_partial_shape.to_shape();
std::vector<ov::PartialShape> input_shapes = {arg0_shape, arg1_shape};
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape{}};
shape_infer(op, input_shapes, output_shapes);
ov::Shape output_shape = output_shapes[0].to_shape();
output->set_element_type(arg0->get_element_type());
output->set_shape(output_shape);
@@ -210,25 +59,24 @@ bool evaluate(const HostTensorPtr& arg0,
arg0_shape,
arg1_shape,
output_shape,
transpose_a,
transpose_b);
op->get_transpose_a(),
op->get_transpose_b());
return true;
}
bool evaluate_matmul(const HostTensorPtr& arg0,
bool evaluate_matmul(const op::MatMul* op,
const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& output,
bool transpose_a,
bool transpose_b) {
const HostTensorPtr& output) {
bool rc = true;
switch (arg0->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_matmul, i32, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, i64, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, u32, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, u64, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, f16, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, f32, arg0, arg1, output, transpose_a, transpose_b);
NGRAPH_TYPE_CASE(evaluate_matmul, i32, op, arg0, arg1, output);
NGRAPH_TYPE_CASE(evaluate_matmul, i64, op, arg0, arg1, output);
NGRAPH_TYPE_CASE(evaluate_matmul, u32, op, arg0, arg1, output);
NGRAPH_TYPE_CASE(evaluate_matmul, u64, op, arg0, arg1, output);
NGRAPH_TYPE_CASE(evaluate_matmul, f16, op, arg0, arg1, output);
NGRAPH_TYPE_CASE(evaluate_matmul, f32, op, arg0, arg1, output);
default:
rc = false;
break;
@@ -240,7 +88,7 @@ bool evaluate_matmul(const HostTensorPtr& arg0,
bool op::MatMul::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
NGRAPH_OP_SCOPE(v0_MatMul_evaluate);
return matmul::evaluate_matmul(inputs[0], inputs[1], outputs[0], get_transpose_a(), get_transpose_b());
return matmul::evaluate_matmul(this, inputs[0], inputs[1], outputs[0]);
}
bool op::MatMul::has_evaluate() const {
@@ -271,19 +119,9 @@ void ngraph::op::v0::MatMul::validate_and_infer_types() {
get_input_element_type(1),
").");
const auto& A_partial_shape = get_input_partial_shape(0);
const auto& B_partial_shape = get_input_partial_shape(1);
if (A_partial_shape.rank().is_static() && B_partial_shape.rank().is_static()) {
ov::PartialShape output_shape;
const bool transpose_a = get_transpose_a();
const bool transpose_b = get_transpose_b();
output_shape = matmul::validate_matmul_output_shape(A_partial_shape, B_partial_shape, transpose_a, transpose_b);
set_output_type(0, result_et, output_shape);
} else {
set_output_type(0, result_et, ov::PartialShape::dynamic());
}
const auto &A_shape = get_input_partial_shape(0), B_shape = get_input_partial_shape(1);
std::vector<ov::PartialShape> input_shapes = {A_shape, B_shape};
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape{}};
shape_infer(this, input_shapes, output_shapes);
set_output_type(0, result_et, output_shapes[0]);
}

View File

@@ -288,52 +288,6 @@ void ov::pass::GraphRewrite::set_pass_config(const std::shared_ptr<PassConfig>&
}
}
void ov::pass::RecurrentGraphRewrite::add_matcher(const std::shared_ptr<pattern::RecurrentMatcher>& m,
const ov::recurrent_graph_rewrite_callback& callback,
const PassPropertyMask& property) {
m_matchers.push_back(std::make_shared<MatcherPass>(
"Recurrent matcher",
nullptr,
[m, callback](const std::shared_ptr<Node>& node) {
NGRAPH_DEBUG << "Running recurrent matcher on " << node;
if (m->match(node->output(0))) {
NGRAPH_DEBUG << "Recurrent matcher matched " << m.get();
return callback(*m.get());
}
return false;
},
property));
}
void ov::pass::RecurrentGraphRewrite::add_matcher(const std::shared_ptr<pattern::RecurrentMatcher>& m,
const ov::recurrent_graph_rewrite_callback& callback) {
// TODO: before deprecate this function, by default expect the
// callback require static shape.
add_matcher(m, callback, {PassProperty::REQUIRE_STATIC_SHAPE});
}
bool ov::pass::RecurrentGraphRewrite::run_on_model(const std::shared_ptr<Model>& f) {
bool changed = false;
size_t i = 0;
auto run_matchers = [&]() -> bool {
for (const auto& node : f->get_ops()) {
for (auto& m_pass : m_matchers) {
if (m_pass->apply(node)) {
return true;
}
}
}
return false;
};
do {
changed = run_matchers();
i++;
} while (changed && i < m_num_iters);
return changed;
}
void ov::pass::MatcherPass::register_matcher(const std::shared_ptr<ov::pass::pattern::Matcher>& m,
const ov::graph_rewrite_callback& callback,
const PassPropertyMask& property) {

View File

@@ -15,48 +15,91 @@ using namespace opset8;
using namespace op::util;
namespace {
string generate_variable_name(const shared_ptr<Parameter>& param, const shared_ptr<Result>& res) {
return param->get_friendly_name() + res->get_friendly_name();
}
ov::pass::MakeStateful::ParamResPairs find_param_results_by_names(
std::tuple<ov::pass::MakeStateful::ParamResPairs, std::vector<std::string>> find_param_results_by_names(
const shared_ptr<ngraph::Function>& func,
const std::map<std::string, std::string>& param_res_names) {
ov::pass::MakeStateful::ParamResPairs pairs_to_replace;
std::vector<std::string> variable_names;
const auto& params = func->get_parameters();
const auto& results = func->get_results();
std::set<Node*> uniq_params;
std::set<Node*> uniq_res;
// find corresponding param and result by name and add to the list
for (const auto& param_res : param_res_names) {
const auto& param_name = param_res.first;
const auto& res_name = param_res.second;
auto param = std::find_if(params.begin(), params.end(), [&](const std::shared_ptr<ngraph::Node>& node) {
return node->get_friendly_name() == param_name;
const auto& possible_names = node->output(0).get_names();
return possible_names.find(param_name) != possible_names.end();
});
NGRAPH_CHECK(param != params.end(), "Parameter node with name = ", param_name, "doesn't exist in the function");
NGRAPH_CHECK(param != params.end(),
"The tensor name ",
param_name,
" is not associated with any of "
"Parameters in the network.");
uniq_params.insert(param->get());
auto res = std::find_if(results.begin(), results.end(), [&](const std::shared_ptr<ngraph::Node>& node) {
return node->get_friendly_name() == res_name;
const auto& possible_names = node->output(0).get_names();
return possible_names.find(res_name) != possible_names.end();
});
NGRAPH_CHECK(res != results.end(), "Result node with name = ", res_name, " doesn't exist in the function");
pairs_to_replace.emplace_back(*param, *res);
NGRAPH_CHECK(res != results.end(),
"The tensor name ",
res_name,
" is not associated with any of "
"Results in the network.");
// In case of several Results connected to one output tensor,
// We can't determine what result we need to take exactly.
// But we can take first unused, the order is not important, data is the same.
opset8::Result* unused_res = nullptr;
for (const auto& target_in : (*res)->input_value(0).get_target_inputs()) {
auto is_target_res = ov::as_type<opset8::Result>(target_in.get_node());
if (!is_target_res) {
continue;
}
if (uniq_res.find(is_target_res) == uniq_res.end()) {
unused_res = is_target_res;
break;
}
}
NGRAPH_CHECK(unused_res != nullptr,
"All Result operations associated with the tensor ",
res_name,
" are already involved in the transformation.");
uniq_res.insert(unused_res);
if (auto casted = std::dynamic_pointer_cast<opset8::Result>(unused_res->shared_from_this()))
pairs_to_replace.emplace_back(*param, casted);
variable_names.push_back(param_name + res_name);
}
return pairs_to_replace;
return std::make_tuple(pairs_to_replace, variable_names);
}
} // namespace
bool ov::pass::MakeStateful::run_on_model(const std::shared_ptr<ov::Model>& f) {
// in case of user passes the tensor names to find Parameter/Result nodes, we use these tensor names
// to generate variable names. In case of user passes Parameter/Result nodes directly, we use friendly
// names of these nodes to generate variable names.
std::vector<std::string> variable_names;
if (m_param_res_pairs.empty()) {
m_param_res_pairs = find_param_results_by_names(f, m_param_res_names);
std::tie(m_param_res_pairs, variable_names) = find_param_results_by_names(f, m_param_res_names);
} else {
for (const auto& pair : m_param_res_pairs) {
variable_names.push_back(pair.first->get_friendly_name() + pair.second->get_friendly_name());
}
}
VariableVector variables;
SinkVector sinks;
for (const auto& pair : m_param_res_pairs) {
const auto& param = pair.first;
const auto& res = pair.second;
for (size_t i = 0; i < m_param_res_pairs.size(); ++i) {
const auto& param = m_param_res_pairs[i].first;
const auto& res = m_param_res_pairs[i].second;
NGRAPH_CHECK(param->get_partial_shape().is_static(),
"Shape of Parameter ",
@@ -64,7 +107,7 @@ bool ov::pass::MakeStateful::run_on_model(const std::shared_ptr<ov::Model>& f) {
" must be static. MakeStateful transformation doesn't support dynamic shapes.");
// Create Variable
std::string var_name = generate_variable_name(param, res);
std::string var_name = variable_names[i];
auto variable =
std::make_shared<Variable>(VariableInfo{param->get_shape(), param->get_element_type(), var_name});
variables.push_back(variable);

View File

@@ -49,6 +49,7 @@ set(SRC
coordinate_range.cpp
copy.cpp
copy_runtime_info.cpp
dimension.cpp
element_type.cpp
eval.cpp
extension.cpp

View File

@@ -129,7 +129,6 @@ TEST_F(AnyTests, AnyAsStringInLine) {
Any p = "test";
ASSERT_TRUE(p.is<std::string>());
std::string test = p.as<std::string>();
;
ASSERT_EQ("test", test);
}
@@ -370,14 +369,13 @@ TEST_F(AnyTests, PrintToVectorOfUInts) {
TEST_F(AnyTests, PrintToVectorOfFloats) {
auto ref_vec = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
std::stringstream stream;
{
Any p = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
ASSERT_NO_THROW(p.print(stream));
ASSERT_EQ(stream.str(), std::string{"0 1.1 2.2 3.3 4.4 5.5"});
ASSERT_EQ(p.as<std::string>(), std::string{"0 1.1 2.2 3.3 4.4 5.5"});
}
{
Any p = stream.str();
Any p = "0 1.1 2.2 3.3 4.4 5.5";
ASSERT_EQ((p.as<std::vector<float>>()), ref_vec);
}
}
@@ -534,4 +532,11 @@ TEST_F(AnyTests, NotIntFromStringThrow) {
Any p = "not42";
ASSERT_TRUE(p.is<std::string>());
ASSERT_THROW(p.as<int>(), ov::Exception);
}
TEST_F(AnyTests, AddressofNoThrow) {
Any p;
ASSERT_EQ(nullptr, p.addressof());
p = 42;
ASSERT_NE(nullptr, p.addressof());
}

View File

@@ -0,0 +1,55 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/dimension.hpp"
#include "gtest/gtest.h"
using namespace std;
using namespace ngraph;
TEST(dimension, broadcast_merge_static_1_and_10) {
Dimension result;
Dimension one(1), ten(10);
bool success = Dimension::broadcast_merge(result, one, ten);
EXPECT_TRUE(success);
EXPECT_EQ(result, ten);
}
TEST(dimension, broadcast_merge_static_1_5_and_10_15) {
Dimension result;
Dimension one(1, 5), ten(10, 15);
bool success = Dimension::broadcast_merge(result, one, ten);
EXPECT_TRUE(success);
EXPECT_EQ(result, ten);
}
TEST(dimension, broadcast_merge_static_1_12_and_10_15) {
Dimension result;
Dimension one(1, 12), ten(10, 15);
bool success = Dimension::broadcast_merge(result, one, ten);
EXPECT_TRUE(success);
EXPECT_EQ(result, ten);
}
TEST(dimension, broadcast_merge_static_7_12_and_10_15) {
Dimension result;
Dimension one(7, 12), ten(10, 15);
bool success = Dimension::broadcast_merge(result, one, ten);
EXPECT_TRUE(success);
EXPECT_EQ(result, Dimension(10, 12));
}
TEST(dimension, broadcast_merge_static_0_12_and_1_15) {
Dimension result;
Dimension one(0, 12), ten(1, 15);
bool success = Dimension::broadcast_merge(result, one, ten);
EXPECT_TRUE(success);
EXPECT_EQ(result, Dimension(0, 15));
}

View File

@@ -3,8 +3,14 @@
<layers>
<layer id="0" name="Parameter_69" type="Parameter" version="opset1">
<data shape="490, 608, 1, 1" element_type="f32" />
<rt_info>
<attribute name="custom_attr" version="0"/>
</rt_info>
<output>
<port id="0" precision="FP32">
<rt_info>
<attribute name="another_custom_attr" version="0"/>
</rt_info>
<dim>490</dim>
<dim>608</dim>
<dim>1</dim>

View File

@@ -26,6 +26,7 @@ from google.protobuf import text_format
import onnx
from onnx.external_data_helper import convert_model_to_external_data
import os
import sys
ONNX_SUFFX = '.onnx'
PROTOTXT_SUFFX = '.prototxt'

View File

@@ -107,7 +107,7 @@ TEST(type_prop, matmul_dynamic_5D_transpose_b) {
auto matmul = make_shared<op::MatMul>(A, B, 0, 1);
ASSERT_EQ(matmul->get_element_type(), element::f32);
ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{Dimension(1, -1), 4, dynamic, dynamic, 4}));
ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{Dimension(-1), 4, dynamic, dynamic, 4}));
}
TEST(type_prop, matmul_dynamic_2D_transpose_a) {
@@ -422,7 +422,7 @@ TEST(type_prop, matmul_batch_dynamic_bounds) {
Dimension(5, 7), // 3
Dimension(5, 7), // 4
Dimension(5, 7), // 5
Dimension(1, -1), // 6
Dimension(-1), // 6
Dimension(0, 1), // 7
Dimension(2, 5), // 8
Dimension(5, 10), // 9
@@ -432,7 +432,7 @@ TEST(type_prop, matmul_batch_dynamic_bounds) {
Dimension(2, -1), // 13
Dimension(2, -1), // 14
Dimension(1, -1), // 15
Dimension(1, -1), // 16
Dimension(-1), // 16
3, // 17
5, // 18
4}; // 19

View File

@@ -20,6 +20,16 @@ TEST(type_prop, select_deduce) {
ASSERT_EQ(bc->get_shape(), (Shape{2, 4}));
}
TEST(type_prop, select_dynamic) {
auto param_0 =
make_shared<op::Parameter>(element::boolean, PartialShape({{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}}));
auto param_1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic(5));
auto param_2 = make_shared<op::Parameter>(element::f32, PartialShape({{1, 5}, {1, 11}, 5, {1, 8}}));
auto bc = make_shared<op::v1::Select>(param_0, param_1, param_2);
ASSERT_EQ(bc->get_element_type(), element::f32);
ASSERT_EQ(bc->get_output_partial_shape(0), PartialShape({{2, 8}, {3, 7}, -1, 5, -1}));
}
TEST(type_prop, select_shape_mismatch_a) {
auto tv0_2_4_param_0 = make_shared<op::Parameter>(element::boolean, Shape{3, 5});
auto tv0_2_4_param_1 = make_shared<op::Parameter>(element::f32, Shape{2, 4});

View File

@@ -91,12 +91,22 @@ public:
void on_adapter(const std::string& name, ValueAccessor<void>& adapter) override {
auto p_value = m_attr_values_map.find(name);
if (p_value != m_attr_values_map.end()) {
adapter.set_as_any(p_value->second);
} else {
auto p_name = m_attr_names_map.find(name);
const std::string& target_name = p_name != m_attr_names_map.end() ? p_name->second : name;
adapter.set_as_any(m_context.get_attribute_as_any(target_name));
try {
adapter.set_as_any(m_context.get_attribute_as_any(target_name));
} catch (::ov::AssertFailure ex) {
OPENVINO_ASSERT(false,
ex.what(),
"\nValue for attribute \"",
target_name,
"\" is not set or mapping between "
"framework and openvino node attributes is incorrect.");
}
}
}
@@ -142,7 +152,7 @@ OpExtensionBase<BaseConversionType, void>::OpExtensionBase(const std::string& ov
const std::map<std::string, ov::Any>& attr_values_map)
: BaseConversionType(fw_type_name,
OpConversionFunction(
[&]() -> std::shared_ptr<ov::Node> {
[=]() -> std::shared_ptr<ov::Node> {
auto split = [](const std::string& s, const std::string& delimiter) {
size_t pos_start = 0, pos_end, delim_len = delimiter.length();
std::string token;
@@ -194,7 +204,7 @@ OpExtensionBase<BaseConversionType, void>::OpExtensionBase(const std::string& ov
} else {
FRONT_END_GENERAL_CHECK(
false,
"Invalid OpenVINO operation format, one of the next is expected:"
"Invalid OpenVINO operation format, one of the next is expected: \n"
"opsetN::OpName or opsetN.OpName or OpName. Provided operation format: ",
ov_type_name);
}
@@ -206,7 +216,8 @@ OpExtensionBase<BaseConversionType, void>::OpExtensionBase(const std::string& ov
"name ",
op_name);
}
return opset.create(op_name)->shared_from_this();
return std::shared_ptr<ngraph::Node>(opset.create(op_name));
},
attr_names_map,
attr_values_map)) {}

View File

@@ -755,7 +755,8 @@ std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(
IE_THROW() << "Attribute: " << item.name() << " is not recognized as runtime attribute";
}
} else {
IE_THROW() << "Attribute: " << item.name() << " is not recognized";
// As runtime attributes are optional, so we skip attribute if it is unknown to avoid exception
// when loading new IR with new attribute in old IE version.
}
}
};

View File

@@ -28,13 +28,31 @@ Subgraph Attribute::get_subgraph(const Graph* parent_graph) const {
ov::Any Attribute::get_any() const {
switch (get_type()) {
case Type::float_point:
return get_float();
// OV has automatic downcasting of node attributes:
// double -> float
// but upcasting is not supported:
// float -> double
// so float value from protobuf leads to the issue
// when we are trying to get an attribute of double type in ov::Node
return static_cast<double>(get_float());
case Type::integer:
return get_integer();
case Type::string:
return get_string();
case Type::float_point_array:
return get_float_array();
case Type::float_point_array: {
auto float_array = get_float_array();
// OV has automatic downcasting of node attributes:
// double -> float
// but upcasting is not supported:
// float -> double
// so float value from protobuf leads to the issue
// when we are trying to get an attribute of double type in ov::Node
std::vector<double> double_array(float_array.size());
for (size_t i = 0; i < float_array.size(); ++i) {
double_array[i] = static_cast<double>(float_array[i]);
}
return double_array;
}
case Type::integer_array:
return get_integer_array();
case Type::string_array:

View File

@@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/opsets/opset7.hpp"
#include "ngraph/opsets/opset8.hpp"
namespace ngraph {
namespace onnx_import {
namespace default_opset = ngraph::opset7;
namespace default_opset = ngraph::opset8;
}
} // namespace ngraph

View File

@@ -25,7 +25,7 @@ OutputVector experimental_detectron_prior_grid_generator(const Node& node) {
attrs.h = node.get_attribute_value<int64_t>("h", 0);
attrs.w = node.get_attribute_value<int64_t>("w", 0);
attrs.stride_x = node.get_attribute_value<float>("stride_x", 0.0f);
attrs.stride_x = node.get_attribute_value<float>("stride_y", 0.0f);
attrs.stride_y = node.get_attribute_value<float>("stride_y", 0.0f);
return {std::make_shared<PriorGridGenerator>(priors, feature_map, im_data, attrs)};
}

View File

@@ -59,13 +59,13 @@ OutputVector PoolingFactory::make_avg_pool() const {
}
OutputVector PoolingFactory::make_max_pool() const {
return {std::make_shared<default_opset::MaxPool>(m_inputs.at(0),
m_strides,
m_padding_below,
m_padding_above,
m_kernel_shape,
m_rounding_type,
m_auto_pad)};
return {std::make_shared<op::v1::MaxPool>(m_inputs.at(0),
m_strides,
m_padding_below,
m_padding_above,
m_kernel_shape,
m_rounding_type,
m_auto_pad)};
}
OutputVector PoolingFactory::make_max_pool_with_indices() const {

View File

@@ -8,6 +8,7 @@
#include <array>
#include <exception>
#include <map>
#include <unordered_set>
#include <vector>
namespace {
@@ -128,7 +129,7 @@ ONNXField decode_next_field(std::istream& model) {
switch (decoded_key.second) {
case VARINT: {
// the decoded varint is the payload in this case but its value does not matter
// in the fast check process so you can discard it
// in the fast check process so it can be discarded
decode_varint(model);
return {onnx_field, 0};
}
@@ -198,21 +199,23 @@ namespace ngraph {
namespace onnx_common {
bool is_valid_model(std::istream& model) {
// the model usually starts with a 0x08 byte indicating the ir_version value
// so this checker expects at least 2 valid ONNX keys to be found in the validated model
const unsigned int EXPECTED_FIELDS_FOUND = 2u;
unsigned int valid_fields_found = 0u;
// so this checker expects at least 3 valid ONNX keys to be found in the validated model
const size_t EXPECTED_FIELDS_FOUND = 3u;
std::unordered_set<onnx::Field, std::hash<int>> onnx_fields_found = {};
try {
while (!model.eof() && valid_fields_found < EXPECTED_FIELDS_FOUND) {
while (!model.eof() && onnx_fields_found.size() < EXPECTED_FIELDS_FOUND) {
const auto field = ::onnx::decode_next_field(model);
++valid_fields_found;
if (field.second > 0) {
::onnx::skip_payload(model, field.second);
if (onnx_fields_found.count(field.first) > 0) {
// if the same field is found twice, this is not a valid ONNX model
return false;
} else {
onnx_fields_found.insert(field.first);
onnx::skip_payload(model, field.second);
}
}
return valid_fields_found == EXPECTED_FIELDS_FOUND;
return onnx_fields_found.size() == EXPECTED_FIELDS_FOUND;
} catch (...) {
return false;
}

View File

@@ -5,24 +5,61 @@
ov_add_frontend(NAME tensorflow
LINKABLE_FRONTEND
SHUTDOWN_PROTOBUF
SKIP_INSTALL
FILEDESCRIPTION "FrontEnd to load and convert TensorFlow file format"
LINK_LIBRARIES openvino::util openvino::runtime::dev)
# give a different name during installation to OpenVINO package
set_target_properties(openvino_tensorflow_frontend PROPERTIES OUTPUT_NAME openvino_tensorflow_fe)
#
# Temporary install steps
#
function(ov_frontend_get_file_name target_name library_name)
set(LIB_PREFIX "${CMAKE_SHARED_LIBRARY_PREFIX}")
set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}")
set(TARGET_NAME openvino_tensorflow_frontend)
set(TARGET_NAME_IRC openvino_tensorflow_fe)
set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE)
endfunction()
ov_frontend_get_file_name(openvino_tensorflow_frontend output_name)
set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME frontend::tensorflow)
export(TARGETS ${TARGET_NAME} NAMESPACE openvino::
APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake")
# install with original name for tests component
install(FILES $<TARGET_FILE:openvino_tensorflow_frontend>
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT tests
RENAME ${output_name}
EXCLUDE_FROM_ALL)
install(TARGETS ${TARGET_NAME}
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests EXCLUDE_FROM_ALL
LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests EXCLUDE_FROM_ALL)
if(BUILD_SHARED_LIBS)
function(ov_shared_library_name target_name library_name)
set(LIB_PREFIX "${CMAKE_SHARED_LIBRARY_PREFIX}")
set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}")
set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE)
endfunction()
function(ov_lib_file_name target_name library_name)
set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")
set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}")
set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE)
endfunction()
ov_shared_library_name(${TARGET_NAME_IRC} shared_library_name)
# rename targets files to avoid auto-loading by FEM
install(FILES $<TARGET_FILE:${TARGET_NAME}>
DESTINATION ${IE_CPACK_RUNTIME_PATH}
COMPONENT core
RENAME ${shared_library_name})
if(WIN32)
ov_lib_file_name(${TARGET_NAME_IRC} lib_file_name)
# need to install renamed .lib file as well
install(FILES $<TARGET_LINKER_FILE:${TARGET_NAME}>
DESTINATION ${IE_CPACK_LIBRARY_PATH}
COMPONENT core
RENAME ${lib_file_name})
endif()
endif()
# install -dev part
install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/openvino
DESTINATION ${FRONTEND_INSTALL_INCLUDE}/
COMPONENT core_dev
FILES_MATCHING PATTERN "*.hpp")

View File

@@ -108,6 +108,10 @@ void FrontEnd::translate_graph(const ov::frontend::InputModel::Ptr& model,
// prepare a list of OV node inputs for each node
ov::OutputVector ng_inputs;
for (size_t input_port_idx = 0; input_port_idx < operation_decoder->get_input_size(); ++input_port_idx) {
// TODO: Implement more general approach. Skipping Constants that have input edges
if (operation_decoder->get_op_type() == "Const") {
break;
}
std::string producer_name;
size_t producer_port_idx;
try {
@@ -143,7 +147,7 @@ void FrontEnd::translate_graph(const ov::frontend::InputModel::Ptr& model,
ng_inputs.push_back(input_outputs_vector.at(producer_port_idx));
} else {
FRONT_END_GENERAL_CHECK(false,
"No input is found for node \"" + operation_name + "\" by port" +
"No input is found for node \"" + operation_name + "\" by port " +
std::to_string(producer_port_idx));
}
}

View File

@@ -23,8 +23,8 @@ OutputVector translate_conv_3d_op(const NodeContext& node) {
auto tf_data_format = node.get_attribute<std::string>("data_format");
TENSORFLOW_OP_VALIDATION(node,
tf_data_format == "NHWC" || tf_data_format == "NCHW",
"Conv3D data format is neither NHWC nor NCHW");
tf_data_format == "NDHWC" || tf_data_format == "NCDHW",
"Conv3D data format is neither NDHWC nor NCDHW");
bool is_ndhwc = (tf_data_format == "NDHWC");

View File

@@ -24,10 +24,10 @@
namespace ov {
class Function;
class ICore;
} // namespace ov
namespace InferenceEngine {
class ICore;
class ExecutorManager;
class IExecutableNetworkInternal;
class RemoteContext;
@@ -256,13 +256,13 @@ public:
* @brief Sets pointer to ICore interface
* @param core Pointer to Core interface
*/
virtual void SetCore(std::weak_ptr<ICore> core);
virtual void SetCore(std::weak_ptr<ov::ICore> core);
/**
* @brief Gets reference to ICore interface
* @return Reference to ICore interface
*/
virtual std::shared_ptr<ICore> GetCore() const noexcept;
virtual std::shared_ptr<ov::ICore> GetCore() const noexcept;
/**
* @brief Gets reference to tasks execution manager
@@ -337,7 +337,7 @@ protected:
std::string _pluginName; //!< A device name that plugins enables
std::map<std::string, std::string> _config; //!< A map config keys -> values
std::weak_ptr<ICore> _core; //!< A pointer to ICore interface
std::weak_ptr<ov::ICore> _core; //!< A pointer to ICore interface
std::shared_ptr<ExecutorManager> _executorManager; //!< A tasks execution manager
};

View File

@@ -17,8 +17,9 @@
#include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp"
#include "ie_parameter.hpp"
#include "ie_remote_context.hpp"
#include "openvino/runtime/properties.hpp"
namespace InferenceEngine {
namespace ov {
/**
* @interface ICore
@@ -33,7 +34,7 @@ public:
* @param weights shared pointer to constant blob with weights
* @return CNNNetwork
*/
virtual CNNNetwork ReadNetwork(const std::string& model, const Blob::CPtr& weights) const = 0;
virtual ie::CNNNetwork ReadNetwork(const std::string& model, const ie::Blob::CPtr& weights) const = 0;
/**
* @brief Reads IR xml and bin files
@@ -42,7 +43,7 @@ public:
* if bin file with the same name was not found, will load IR without weights.
* @return CNNNetwork
*/
virtual CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath) const = 0;
virtual ie::CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath) const = 0;
/**
* @brief Creates an executable network from a network object.
@@ -56,9 +57,9 @@ public:
* operation
* @return An executable network reference
*/
virtual SoExecutableNetworkInternal LoadNetwork(const CNNNetwork& network,
const std::string& deviceName,
const std::map<std::string, std::string>& config = {}) = 0;
virtual ie::SoExecutableNetworkInternal LoadNetwork(const ie::CNNNetwork& network,
const std::string& deviceName,
const std::map<std::string, std::string>& config = {}) = 0;
/**
* @brief Creates an executable network from a network object.
@@ -72,9 +73,9 @@ public:
* operation
* @return An executable network reference
*/
virtual SoExecutableNetworkInternal LoadNetwork(const CNNNetwork& network,
const RemoteContext::Ptr& remoteCtx,
const std::map<std::string, std::string>& config = {}) = 0;
virtual ie::SoExecutableNetworkInternal LoadNetwork(const ie::CNNNetwork& network,
const ie::RemoteContext::Ptr& remoteCtx,
const std::map<std::string, std::string>& config = {}) = 0;
/**
* @brief Creates an executable network from a model file.
@@ -89,10 +90,11 @@ public:
* @param val Optional callback to perform validation of loaded CNNNetwork, if ReadNetwork is triggered
* @return An executable network reference
*/
virtual SoExecutableNetworkInternal LoadNetwork(const std::string& modelPath,
const std::string& deviceName,
const std::map<std::string, std::string>& config,
const std::function<void(const CNNNetwork&)>& val = nullptr) = 0;
virtual ie::SoExecutableNetworkInternal LoadNetwork(
const std::string& modelPath,
const std::string& deviceName,
const std::map<std::string, std::string>& config,
const std::function<void(const ie::CNNNetwork&)>& val = nullptr) = 0;
/**
* @brief Creates an executable network from a previously exported network
@@ -102,9 +104,9 @@ public:
* operation*
* @return An executable network reference
*/
virtual SoExecutableNetworkInternal ImportNetwork(std::istream& networkModel,
const std::string& deviceName = {},
const std::map<std::string, std::string>& config = {}) = 0;
virtual ie::SoExecutableNetworkInternal ImportNetwork(std::istream& networkModel,
const std::string& deviceName = {},
const std::map<std::string, std::string>& config = {}) = 0;
/**
* @brief Query device if it supports specified network with specified configuration
@@ -114,9 +116,9 @@ public:
* @param config Optional map of pairs: (config parameter name, config parameter value)
* @return An object containing a map of pairs a layer name -> a device name supporting this layer.
*/
virtual QueryNetworkResult QueryNetwork(const CNNNetwork& network,
const std::string& deviceName,
const std::map<std::string, std::string>& config) const = 0;
virtual ie::QueryNetworkResult QueryNetwork(const ie::CNNNetwork& network,
const std::string& deviceName,
const std::map<std::string, std::string>& config) const = 0;
/**
* @brief Gets general runtime metric for dedicated hardware.
@@ -128,9 +130,7 @@ public:
* @param name - metric name to request.
* @return Metric value corresponding to metric key.
*/
virtual Parameter GetMetric(const std::string& deviceName,
const std::string& name,
const ParamMap& options = {}) const = 0;
virtual Any GetMetric(const std::string& deviceName, const std::string& name, const AnyMap& options = {}) const = 0;
/**
* @brief Gets configuration dedicated to device behaviour.
@@ -141,7 +141,7 @@ public:
* @param name - config key.
* @return Value of config corresponding to config key.
*/
virtual Parameter GetConfig(const std::string& deviceName, const std::string& name) const = 0;
virtual Any GetConfig(const std::string& deviceName, const std::string& name) const = 0;
/**
* @brief Returns devices available for neural networks inference
@@ -167,8 +167,7 @@ public:
* @param params Map of device-specific shared context parameters.
* @return A shared pointer to a created remote context.
*/
virtual InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName,
const InferenceEngine::ParamMap&) = 0;
virtual InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName, const AnyMap&) = 0;
/**
* @brief Get only configs that are suppored by device
@@ -186,14 +185,81 @@ public:
* @param deviceName - A name of a device to get create shared context from.
* @return A shared pointer to a default remote context.
*/
virtual RemoteContext::Ptr GetDefaultContext(const std::string& deviceName) = 0;
virtual ie::RemoteContext::Ptr GetDefaultContext(const std::string& deviceName) = 0;
/**
* @brief Sets properties for a device, acceptable keys can be found in openvino/runtime/properties.hpp.
*
* @param device_name Name of a device.
*
* @param properties Map of pairs: (property name, property value).
*/
virtual void set_property(const std::string& device_name, const AnyMap& properties) = 0;
/**
* @brief Sets properties for a device, acceptable keys can be found in openvino/runtime/properties.hpp.
*
* @tparam Properties Should be the pack of `std::pair<std::string, Any>` types.
* @param device_name Name of a device.
* @param properties Optional pack of pairs: (property name, property value).
*/
template <typename... Properties>
util::EnableIfAllStringAny<void, Properties...> set_property(const std::string& device_name,
Properties&&... properties) {
set_property(device_name, AnyMap{std::forward<Properties>(properties)...});
}
/**
* @brief Gets properties related to device behaviour.
*
*
* @param device_name Name of a device to get a property value.
* @param name Property name.
* @param arguments Additional arguments to get a property.
* @return Value of a property corresponding to the property name.
*/
virtual Any get_property(const std::string& device_name,
const std::string& name,
const AnyMap& arguments) const = 0;
/**
* @brief Gets properties related to device behaviour.
*
* @tparam T Type of a returned value.
* @tparam M Property mutability.
* @param deviceName Name of a device to get a property value.
* @param property Property object.
* @return Property value.
*/
template <typename T, PropertyMutability M>
T get_property(const std::string& device_name, const Property<T, M>& property) const {
return get_property(device_name, property.name(), {}).template as<T>();
}
/**
* @brief Gets properties related to device behaviour.
*
* @tparam T Type of a returned value.
* @tparam M Property mutability.
* @param deviceName Name of a device to get a property value.
* @param property Property object.
* @param arguments Additional arguments to get a property.
* @return Property value.
*/
template <typename T, PropertyMutability M>
T get_property(const std::string& device_name, const Property<T, M>& property, const AnyMap& arguments) const {
return get_property(device_name, property.name(), arguments).template as<T>();
}
/**
* @brief Default virtual destructor
*/
virtual ~ICore() = default;
};
} // namespace ov
namespace InferenceEngine {
using ICore = ov::ICore;
/**
* @private
*/

View File

@@ -628,10 +628,11 @@ constexpr static const auto EXPORT_IMPORT = "EXPORT_IMPORT"; //!< Device suppor
} // namespace capability
} // namespace device
namespace streams {
/**
* @brief Class to represent number of streams in streams executor
*/
struct NumStreams {
struct Num {
using Base = std::tuple<int32_t>; //!< NumStreams is representable as int32_t
/**
@@ -642,9 +643,9 @@ struct NumStreams {
NUMA = -2, //!< Creates as many streams as needed to accommodate NUMA and avoid associated penalties
};
NumStreams() : num{AUTO} {};
constexpr Num() : num{AUTO} {};
NumStreams(const int32_t num_) : num{num_} {}
constexpr Num(const int32_t num_) : num{num_} {}
operator int32_t() {
return num;
@@ -657,28 +658,37 @@ struct NumStreams {
int32_t num = 0;
};
/**
* @brief The number of executor logical partitions
*/
static constexpr Property<Num, PropertyMutability::RW> num{"NUM_STREAMS"};
static constexpr Num AUTO{Num::AUTO}; //!< Creates bare minimum of streams to improve the performance
static constexpr Num NUMA{
Num::NUMA}; //!< Creates as many streams as needed to accommodate NUMA and avoid associated penalties
/** @cond INTERNAL */
inline std::ostream& operator<<(std::ostream& os, const NumStreams& num_streams) {
switch (num_streams.num) {
case NumStreams::AUTO:
inline std::ostream& operator<<(std::ostream& os, const Num& num) {
switch (num.num) {
case Num::AUTO:
return os << "AUTO";
case NumStreams::NUMA:
case Num::NUMA:
return os << "NUMA";
default:
return os << num_streams.num;
return os << num.num;
}
}
inline std::istream& operator>>(std::istream& is, NumStreams& num_streams) {
inline std::istream& operator>>(std::istream& is, Num& num) {
std::string str;
is >> str;
if (str == "AUTO") {
num_streams = {NumStreams::AUTO};
num = AUTO;
} else if (str == "NUMA") {
num_streams = {NumStreams::NUMA};
num = NUMA;
} else {
try {
num_streams = {std::stoi(str)};
num = {std::stoi(str)};
} catch (const std::exception& e) {
throw ov::Exception{std::string{"Could not read number of streams from str: "} + str + "; " + e.what()};
}
@@ -686,11 +696,17 @@ inline std::istream& operator>>(std::istream& is, NumStreams& num_streams) {
return is;
}
/** @endcond */
} // namespace streams
/**
* @brief Class to represent number of streams in streams executor
*/
using NumStreams = streams::Num;
/**
* @brief The number of executor logical partitions
*/
static constexpr Property<NumStreams, PropertyMutability::RW> num_streams{"NUM_STREAMS"};
static constexpr Property<streams::Num, PropertyMutability::RW> num_streams{"NUM_STREAMS"};
/**
* @brief Maximum number of threads that can be used for inference tasks

View File

@@ -79,9 +79,9 @@ Blob::Ptr InferRequest::GetBlob(const std::string& name) {
Blob::Ptr blobPtr;
INFER_REQ_CALL_STATEMENT(blobPtr = _impl->GetBlob(name);)
std::string error = "Internal error: blob with name `" + name + "` is not allocated!";
const bool remoteBlobPassed = blobPtr->is<RemoteBlob>();
if (blobPtr == nullptr)
IE_THROW() << error;
const bool remoteBlobPassed = blobPtr->is<RemoteBlob>();
if (!remoteBlobPassed && blobPtr->buffer() == nullptr)
IE_THROW() << error;
return blobPtr;
@@ -442,7 +442,7 @@ std::vector<ProfilingInfo> InferRequest::get_profiling_info() const {
info.status = ProfilingInfo::Status::OPTIMIZED_OUT;
break;
case ie::InferenceEngineProfileInfo::EXECUTED:
info.status = ProfilingInfo::Status::OPTIMIZED_OUT;
info.status = ProfilingInfo::Status::EXECUTED;
break;
}
info.real_time = std::chrono::microseconds{ieInfo.realTime_uSec};

Some files were not shown because too many files have changed in this diff Show More