From 0668ea07795585b95f4d7f513f86533df80d43d6 Mon Sep 17 00:00:00 2001 From: Alexander Zhogov Date: Thu, 16 Dec 2021 18:54:08 +0300 Subject: [PATCH 01/10] Azure CI: Fix "access denied" issue with certutil on Windows (#9262) * Azure CI: Fix "access denied" issue with certutil on Windows * Use Invoke-WebRequest for GitHub --- .ci/azure/windows.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index b9738b6a7bb..81e2b47a622 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -83,7 +83,7 @@ jobs: displayName: 'Make dir' - script: | - certutil -urlcache -split -f https://openvinoweb.z5.web.core.windows.net/incredibuild/install_ib_console.bat install_ib_console.bat + curl -O https://openvinoweb.z5.web.core.windows.net/incredibuild/install_ib_console.bat call install_ib_console.bat workingDirectory: $(WORK_DIR) displayName: 'Install IncrediBuild' @@ -117,9 +117,9 @@ jobs: python -m pip install -r $(REPO_DIR)\tools\mo\requirements.txt python -m pip install -r $(REPO_DIR)\tools\mo\requirements_dev.txt rem Speed up build - certutil -urlcache -split -f https://github.com/Kitware/CMake/releases/download/v$(CMAKE_VERSION)/cmake-$(CMAKE_VERSION)-windows-x86_64.zip cmake-$(CMAKE_VERSION)-windows-x86_64.zip + powershell -command "Invoke-WebRequest https://github.com/Kitware/CMake/releases/download/v$(CMAKE_VERSION)/cmake-$(CMAKE_VERSION)-windows-x86_64.zip -OutFile cmake-$(CMAKE_VERSION)-windows-x86_64.zip" powershell -command "Expand-Archive -Force cmake-$(CMAKE_VERSION)-windows-x86_64.zip" - certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-win.zip ninja-win.zip + powershell -command "Invoke-WebRequest https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-win.zip -OutFile ninja-win.zip" powershell -command "Expand-Archive -Force ninja-win.zip" git clone https://github.com/google/gtest-parallel.git workingDirectory: $(WORK_DIR) From 012e513f750679c939158e783b94670cd7ee9f28 Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Thu, 16 Dec 2021 19:05:36 +0300 Subject: [PATCH 02/10] update POT submodule (#8740) * update POT submodule * update tests --- tools/pot/tests/utils/open_model_zoo.py | 4 ++-- tools/pot/thirdparty/open_model_zoo | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/pot/tests/utils/open_model_zoo.py b/tools/pot/tests/utils/open_model_zoo.py index 02582e9d145..e49b7ec17c8 100644 --- a/tools/pot/tests/utils/open_model_zoo.py +++ b/tools/pot/tests/utils/open_model_zoo.py @@ -28,7 +28,7 @@ OMZ_DEFINITIONS_PATH = LIBS_ROOT/'open_model_zoo'/'data'/'dataset_definitions.ym sys.path.append(str(OMZ_DOWNLOADER_PATH / 'src')) # pylint: disable=E0611,C0413,C0411,E0401 importlib.reload(openvino) -from openvino.model_zoo._configuration import load_models +from openvino.model_zoo._configuration import load_models, ModelLoadingMode from openvino.model_zoo._common import MODEL_ROOT is_platform_windows = sys.platform.startswith('win') @@ -83,7 +83,7 @@ def convert(config): def get_models_list(): - return load_models(MODEL_ROOT, Dict(config=None)) + return load_models(MODEL_ROOT, Dict(config=None), mode=ModelLoadingMode.ignore_composite) def download_engine_config(model_name): diff --git a/tools/pot/thirdparty/open_model_zoo b/tools/pot/thirdparty/open_model_zoo index b7c83530a47..a04512d8553 160000 --- a/tools/pot/thirdparty/open_model_zoo +++ b/tools/pot/thirdparty/open_model_zoo @@ -1 +1 @@ -Subproject commit 
b7c83530a470734e3e4bde8fe0d80dcc2d9e9b2c +Subproject commit a04512d8553aed4b7bde5032141f28bfd26f6f5c From 58f0c75b91587dd8d3855b6ba5966d086d06f4b4 Mon Sep 17 00:00:00 2001 From: Yury Gaydaychuk Date: Thu, 16 Dec 2021 22:02:09 +0300 Subject: [PATCH 03/10] [CPU] RoiAlign: case of integer tensor handled (#8069) --- .../src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp | 9 ++++++--- .../plugin/cpu/single_layer_tests/roialign.cpp | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp index bbd4d12ac5e..91ee9cf58fd 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp @@ -108,9 +108,12 @@ void MKLDNNROIAlignNode::initSupportedPrimitiveDescriptors() { Precision inputPrec0 = getOriginalInputPrecisionAtPort(0); Precision outputPrec = getOriginalOutputPrecisionAtPort(0); - if (!mayiuse(avx512_core)) { - if (outputPrec == Precision::BF16 || inputPrec0 == Precision::BF16) + if (inputPrec0 != Precision::FP32 || outputPrec != Precision::FP32) { + if ((outputPrec == Precision::BF16 || inputPrec0 == Precision::BF16) && mayiuse(avx512_core)) { + outputPrec = inputPrec0 = Precision::BF16; + } else { outputPrec = inputPrec0 = Precision::FP32; + } } NodeConfig config; @@ -130,7 +133,7 @@ void MKLDNNROIAlignNode::initSupportedPrimitiveDescriptors() { {LayoutType::ncsp, Precision::FP32}, {LayoutType::ncsp, Precision::I32}}, {{fmts.second, outputPrec}}, - impl_desc_type::unknown); + impl_desc_type::ref); } } diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/roialign.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/roialign.cpp index a8587e46f1e..4492a458519 100644 --- a/src/tests/functional/plugin/cpu/single_layer_tests/roialign.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/roialign.cpp @@ -149,7 +149,7 @@ protected: auto roialign = std::make_shared(float_params[0], float_params[1], int_params[0], pooledH, pooledW, samplingRatio, spatialScale, mode); - selectedType = makeSelectedTypeStr("unknown", inputPrecision); + selectedType = makeSelectedTypeStr("ref", inputPrecision); if (inputPrecision == ElementType::bf16) { rel_threshold = 1e-2; } @@ -182,7 +182,7 @@ std::vector filterCPUInfoForDevice() { const std::vector netPrecisions = { ElementType::f32, - ElementType::bf16, + ElementType::bf16 }; const std::vector spatialBinXVector = { 2 }; From d421bc6c4f0e058e2d938fc424082978a0dfd1aa Mon Sep 17 00:00:00 2001 From: Alexander Zhogov Date: Thu, 16 Dec 2021 22:09:26 +0300 Subject: [PATCH 04/10] Azure CI: Fix "access denied" issue with certutil on Windows CC (#9269) --- .ci/azure/windows_conditional_compilation.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/azure/windows_conditional_compilation.yml b/.ci/azure/windows_conditional_compilation.yml index cc310068d11..3f1e152a4d6 100644 --- a/.ci/azure/windows_conditional_compilation.yml +++ b/.ci/azure/windows_conditional_compilation.yml @@ -59,7 +59,7 @@ jobs: - script: | rem Speed up build - certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-win.zip ninja-win.zip + powershell -command "Invoke-WebRequest https://github.com/ninja-build/ninja/releases/download/v1.10.2/ninja-win.zip -OutFile ninja-win.zip" powershell -command "Expand-Archive -Force ninja-win.zip" workingDirectory: $(WORK_DIR) displayName: 'Install 
dependencies' From 4fea4024b4ce6c2ea66902f6aa6191a0929d5cdb Mon Sep 17 00:00:00 2001 From: Mikhail Nosov Date: Fri, 17 Dec 2021 00:55:47 +0300 Subject: [PATCH 05/10] Overwrite 'model' layout with preprocessing when layout is already set (#9258) --- src/core/src/preprocess/pre_post_process.cpp | 6 ++-- src/core/tests/preprocess.cpp | 34 ++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/src/core/src/preprocess/pre_post_process.cpp b/src/core/src/preprocess/pre_post_process.cpp index db9395534e6..090708274d2 100644 --- a/src/core/src/preprocess/pre_post_process.cpp +++ b/src/core/src/preprocess/pre_post_process.cpp @@ -367,7 +367,8 @@ std::shared_ptr PrePostProcessor::build() { for (const auto& input_info : m_impl->m_inputs) { auto& input = input_info.m_impl; // Set parameter layout from 'model' information - if (input->get_model()->is_layout_set() && input->m_resolved_param->get_layout().empty()) { + if (input->get_model()->is_layout_set()) { + // Overwrite existing model's layout here (fix 74065) input->m_resolved_param->set_layout(input->get_model()->get_layout()); } } @@ -563,7 +564,8 @@ std::shared_ptr PrePostProcessor::build() { node.get_tensor().set_names({}); result = std::dynamic_pointer_cast(node.get_node_shared_ptr()); // Set result layout from 'model' information - if (output->get_model_data()->is_layout_set() && result->get_layout().empty()) { + if (output->get_model_data()->is_layout_set()) { + // Overwrite existing model's layout here (fix 74065) result->set_layout(output->get_model_data()->get_layout()); } auto parent = result->get_input_source_output(0); diff --git a/src/core/tests/preprocess.cpp b/src/core/tests/preprocess.cpp index 856dd597dce..532f1427de7 100644 --- a/src/core/tests/preprocess.cpp +++ b/src/core/tests/preprocess.cpp @@ -534,6 +534,23 @@ TEST(pre_post_process, reuse_model_layout_no_tensor_info) { EXPECT_EQ(f->get_parameters().front()->get_layout(), "NC??"); } +TEST(pre_post_process, set_model_layout_when_already_exists) { + auto m = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 3, 2, 1}); + { + auto p = PrePostProcessor(m); + p.input().model().set_layout("N???"); + m = p.build(); + } + EXPECT_EQ(m->input().get_partial_shape(), (PartialShape{Dimension::dynamic(), 3, 2, 1})); + { + auto p = PrePostProcessor(m); + p.input().tensor().set_layout("NHWC"); + p.input().model().set_layout("NCHW"); // Expect "N???" will be overwritten by "NCHW" + m = p.build(); + } + EXPECT_EQ(m->input().get_partial_shape(), (PartialShape{Dimension::dynamic(), 2, 1, 3})); +} + TEST(pre_post_process, set_layout_out_of_bounds) { auto shape = PartialShape{1, 2}; std::stringstream shape_str; @@ -1146,6 +1163,23 @@ TEST(pre_post_process, postprocess_convert_layout_implicit) { EXPECT_EQ(f->get_results()[0]->get_output_tensor(0).get_partial_shape(), (PartialShape{1, 2, 2, 3})); } +TEST(pre_post_process, postprocess_set_model_layout_when_already_exists) { + auto m = create_simple_function(element::f32, PartialShape{Dimension::dynamic(), 3, 2, 1}); + { + auto p = PrePostProcessor(m); + p.output().model().set_layout("N???"); + m = p.build(); + } + EXPECT_EQ(m->output().get_partial_shape(), (PartialShape{Dimension::dynamic(), 3, 2, 1})); + { + auto p = PrePostProcessor(m); + p.output().model().set_layout("NCHW"); // Expect "N???" 
will be overwritten by "NCHW" + p.output().tensor().set_layout("NHWC"); + m = p.build(); + } + EXPECT_EQ(m->output().get_partial_shape(), (PartialShape{Dimension::dynamic(), 2, 1, 3})); +} + TEST(pre_post_process, postprocess_convert_layout_explicit_no_target) { auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2}); auto p = PrePostProcessor(f); From dbd2b5dc08796a25dcd78a659a915e3dbd563029 Mon Sep 17 00:00:00 2001 From: Ivan Tikhonov Date: Fri, 17 Dec 2021 08:05:30 +0300 Subject: [PATCH 06/10] ONNX/IR/Common FrontEnds Refactoring (#9174) * Refactor ONNX,IR,Common FrontEnds * Rename frontends dir to frontend * Rename frontend_manager, frontend_defs -> manager, defs; move Place, InputModel to src for Paddle FE * remove exports for ir/input_model * fix unit tests * Fix unit tests * revert ONNX_API/IR_API defines * move manager.hpp to frontend dir * delete common folder; rename defs to visibility; fix includes * revert local changes * codestyle * fix build: set correct includes * fix includes * fix build; resolve review comments * fix install paths * correct installation folder for onnx_import * use IE_THROW in ir frontend --- .../frontends/frontends.cmake | 4 +- .../frontends/ov_frontends.hpp.in | 2 +- .../{manager.cpp => frontend_manager.cpp} | 3 +- .../src/pyopenvino/frontend/frontend.cpp | 6 +- .../src/pyopenvino/frontend/inputmodel.cpp | 4 +- .../{frontend_manager.cpp => manager.cpp} | 5 +- .../{frontend_manager.hpp => manager.hpp} | 0 .../python/src/pyopenvino/frontend/place.cpp | 5 +- .../python/src/pyopenvino/pyopenvino.cpp | 2 +- .../mock_py_ov_frontend/mock_py_frontend.cpp | 4 +- .../mock_py_ov_frontend/mock_py_frontend.hpp | 4 +- src/core/tests/frontend/frontend_manager.cpp | 22 +- src/core/tests/frontend/mock_frontend.cpp | 4 +- .../paddlepaddle/convert_unsupported.cpp | 4 +- .../paddlepaddle/incorrect_cut_model.cpp | 4 +- .../tests/frontend/paddlepaddle/places.cpp | 2 +- .../test_models/gen_scripts/generate_slice.py | 2 +- .../paddlepaddle/throw_in_conversion.cpp | 4 +- .../frontend/shared/include/basic_api.hpp | 2 +- .../frontend/shared/include/convert_model.hpp | 2 +- .../shared/include/cut_specific_model.hpp | 2 +- .../frontend/shared/include/load_from.hpp | 2 +- .../frontend/shared/include/op_fuzzy.hpp | 2 +- .../frontend/shared/include/partial_shape.hpp | 2 +- .../shared/include/set_element_type.hpp | 2 +- .../frontend/shared/include/telemetry.hpp | 2 +- .../tests/frontend/shared/include/utils.hpp | 2 +- .../tests/frontend/shared/src/telemetry.cpp | 2 +- .../tensorflow/convert_unsupported.cpp | 4 +- .../frontend/tensorflow/transpose_sinking.cpp | 2 +- src/core/tests/pass/serialization/read_ir.hpp | 2 +- src/frontends/common/CMakeLists.txt | 23 +- .../frontend/exception.hpp} | 2 +- .../frontend}/frontend.hpp | 26 +- .../frontend}/input_model.hpp | 10 +- .../{ => openvino/frontend}/manager.hpp | 7 +- .../{common => openvino/frontend}/place.hpp | 4 +- .../frontend}/telemetry_extension.hpp | 2 +- .../frontend/visibility.hpp} | 7 +- src/frontends/common/src/frontend.cpp | 72 +++++ src/frontends/common/src/input_model.cpp | 114 +++++++ .../src/{frontend_manager.cpp => manager.cpp} | 293 +----------------- src/frontends/common/src/place.cpp | 139 +++++++++ src/frontends/common/src/plugin_loader.cpp | 16 +- src/frontends/common/src/plugin_loader.hpp | 2 +- .../common/src/telemetry_extension.cpp | 2 +- src/frontends/common/src/utils.cpp | 2 +- src/frontends/common/src/utils.hpp | 2 +- .../ir/include/ir_frontend/model.hpp | 31 -- .../frontend/ir}/frontend.hpp | 14 +- 
.../frontend/ir/visibility.hpp} | 10 - src/frontends/ir/src/frontend.cpp | 32 +- .../ir/src/{model.cpp => input_model.cpp} | 18 +- src/frontends/ir/src/input_model.hpp | 33 ++ src/frontends/ir/src/ir_deserializer.cpp | 1 - src/frontends/ir/src/ir_deserializer.hpp | 2 +- src/frontends/ir/src/rt_info_deserializer.cpp | 5 +- src/frontends/ir/src/rt_info_deserializer.hpp | 1 - src/frontends/onnx/frontend/CMakeLists.txt | 2 +- .../frontend/onnx}/frontend.hpp | 8 +- .../onnx/frontend/src/core/graph.hpp | 2 +- src/frontends/onnx/frontend/src/editor.hpp | 2 +- src/frontends/onnx/frontend/src/frontend.cpp | 46 +-- .../onnx/frontend/src/input_model.cpp | 99 +++--- .../onnx/frontend/src/input_model.hpp | 26 +- src/frontends/onnx/frontend/src/place.cpp | 184 ++++++----- src/frontends/onnx/frontend/src/place.hpp | 30 +- .../onnx/frontend/src/utils/onnx_internal.hpp | 2 +- src/frontends/paddlepaddle/CMakeLists.txt | 2 +- .../paddlepaddle_frontend/frontend.hpp | 4 +- .../include/paddlepaddle_frontend/model.hpp | 4 +- .../include/paddlepaddle_frontend/place.hpp | 4 +- .../include/paddlepaddle_frontend/utility.hpp | 2 +- .../src/{exceptions.cpp => exception.cpp} | 3 +- .../exceptions.hpp | 2 +- src/frontends/paddlepaddle/src/frontend.cpp | 2 +- src/frontends/paddlepaddle/src/model.cpp | 2 +- .../paddlepaddle/src/node_context.hpp | 2 +- src/frontends/paddlepaddle/src/pdpd_utils.hpp | 2 +- src/frontends/tensorflow/CMakeLists.txt | 2 +- .../include/tensorflow_frontend/frontend.hpp | 6 +- .../include/tensorflow_frontend/utility.hpp | 2 +- src/frontends/tensorflow/src/exceptions.hpp | 3 +- src/frontends/tensorflow/src/model.cpp | 2 +- src/frontends/tensorflow/src/model.hpp | 6 +- src/frontends/tensorflow/src/place.cpp | 2 +- src/frontends/tensorflow/src/place.hpp | 2 +- src/frontends/tensorflow/src/tensorflow.cpp | 2 +- src/inference/src/ie_network_reader.cpp | 2 +- .../partial_shape_deserialization.cpp | 2 +- .../partial_shape_serialization.cpp | 2 +- .../rt_info_deserialization.cpp | 2 +- .../rt_info_serialization.cpp | 2 +- tests/fuzz/src/import_pdpd-fuzzer.cc | 2 +- .../mock_mo_ov_frontend/mock_mo_frontend.cpp | 4 +- .../mock_mo_ov_frontend/mock_mo_frontend.hpp | 4 +- 96 files changed, 746 insertions(+), 710 deletions(-) rename src/bindings/python/src/compatibility/pyngraph/passes/{manager.cpp => frontend_manager.cpp} (99%) rename src/bindings/python/src/pyopenvino/frontend/{frontend_manager.cpp => manager.cpp} (98%) rename src/bindings/python/src/pyopenvino/frontend/{frontend_manager.hpp => manager.hpp} (100%) rename src/frontends/common/include/{common/frontend_exceptions.hpp => openvino/frontend/exception.hpp} (99%) rename src/frontends/common/include/{common => openvino/frontend}/frontend.hpp (86%) rename src/frontends/common/include/{common => openvino/frontend}/input_model.hpp (97%) rename src/frontends/common/include/{ => openvino/frontend}/manager.hpp (94%) rename src/frontends/common/include/{common => openvino/frontend}/place.hpp (99%) rename src/frontends/common/include/{common => openvino/frontend}/telemetry_extension.hpp (97%) rename src/frontends/common/include/{common/frontend_defs.hpp => openvino/frontend/visibility.hpp} (68%) create mode 100644 src/frontends/common/src/frontend.cpp create mode 100644 src/frontends/common/src/input_model.cpp rename src/frontends/common/src/{frontend_manager.cpp => manager.cpp} (51%) create mode 100644 src/frontends/common/src/place.cpp delete mode 100644 src/frontends/ir/include/ir_frontend/model.hpp rename src/frontends/ir/include/{ir_frontend => 
openvino/frontend/ir}/frontend.hpp (82%) rename src/frontends/ir/include/{ir_frontend/utility.hpp => openvino/frontend/ir/visibility.hpp} (60%) rename src/frontends/ir/src/{model.cpp => input_model.cpp} (95%) create mode 100644 src/frontends/ir/src/input_model.hpp rename src/frontends/onnx/frontend/include/{onnx_frontend => openvino/frontend/onnx}/frontend.hpp (86%) rename src/frontends/paddlepaddle/src/{exceptions.cpp => exception.cpp} (90%) rename src/frontends/paddlepaddle/{include/paddlepaddle_frontend => src}/exceptions.hpp (96%) diff --git a/cmake/developer_package/frontends/frontends.cmake b/cmake/developer_package/frontends/frontends.cmake index b36db7b0d78..b5b07559bea 100644 --- a/cmake/developer_package/frontends/frontends.cmake +++ b/cmake/developer_package/frontends/frontends.cmake @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -set(FRONTEND_INSTALL_INCLUDE "runtime/include/ngraph/frontend") +set(FRONTEND_INSTALL_INCLUDE "runtime/include/") set(FRONTEND_NAME_SUFFIX "_ov_frontend") set(FRONTEND_NAMES "" CACHE INTERNAL "") @@ -225,7 +225,7 @@ macro(ov_add_frontend) if(OV_FRONTEND_LINKABLE_FRONTEND) # install -dev part - install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/${OV_FRONTEND_NAME}_frontend + install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/ DESTINATION ${FRONTEND_INSTALL_INCLUDE} COMPONENT core_dev FILES_MATCHING PATTERN "*.hpp") diff --git a/cmake/developer_package/frontends/ov_frontends.hpp.in b/cmake/developer_package/frontends/ov_frontends.hpp.in index 0293054160d..82c1f1d010f 100644 --- a/cmake/developer_package/frontends/ov_frontends.hpp.in +++ b/cmake/developer_package/frontends/ov_frontends.hpp.in @@ -4,7 +4,7 @@ #pragma once -#include "common/frontend.hpp" +#include "openvino/frontend/frontend.hpp" @OV_FRONTEND_DECLARATIONS@ diff --git a/src/bindings/python/src/compatibility/pyngraph/passes/manager.cpp b/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp similarity index 99% rename from src/bindings/python/src/compatibility/pyngraph/passes/manager.cpp rename to src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp index dd3ae2b6ad6..a85c4be5645 100644 --- a/src/bindings/python/src/compatibility/pyngraph/passes/manager.cpp +++ b/src/bindings/python/src/compatibility/pyngraph/passes/frontend_manager.cpp @@ -2,12 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/pass/manager.hpp" - #include #include #include "ngraph/pass/constant_folding.hpp" +#include "ngraph/pass/manager.hpp" #include "ngraph/pass/pass.hpp" #include "ngraph/pass/validate.hpp" #include "pyngraph/passes/manager.hpp" diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp index 096bc3e5bf6..5adeed0ce1c 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp @@ -7,9 +7,9 @@ #include #include -#include "common/frontend_exceptions.hpp" -#include "common/telemetry_extension.hpp" -#include "manager.hpp" +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/telemetry_extension.hpp" #include "pyopenvino/graph/function.hpp" namespace py = pybind11; diff --git a/src/bindings/python/src/pyopenvino/frontend/inputmodel.cpp b/src/bindings/python/src/pyopenvino/frontend/inputmodel.cpp index 45ba73c7fbf..db6c9c55c74 100644 --- a/src/bindings/python/src/pyopenvino/frontend/inputmodel.cpp +++ 
b/src/bindings/python/src/pyopenvino/frontend/inputmodel.cpp @@ -6,8 +6,8 @@ #include #include -#include "common/frontend_exceptions.hpp" -#include "manager.hpp" +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/manager.hpp" namespace py = pybind11; diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend_manager.cpp b/src/bindings/python/src/pyopenvino/frontend/manager.cpp similarity index 98% rename from src/bindings/python/src/pyopenvino/frontend/frontend_manager.cpp rename to src/bindings/python/src/pyopenvino/frontend/manager.cpp index 2f0a40903e3..38c1bf64ef6 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend_manager.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/manager.cpp @@ -2,14 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "frontend_manager.hpp" +#include "openvino/frontend/manager.hpp" #include #include #include -#include "common/frontend_exceptions.hpp" -#include "manager.hpp" +#include "openvino/frontend/exception.hpp" namespace py = pybind11; diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend_manager.hpp b/src/bindings/python/src/pyopenvino/frontend/manager.hpp similarity index 100% rename from src/bindings/python/src/pyopenvino/frontend/frontend_manager.hpp rename to src/bindings/python/src/pyopenvino/frontend/manager.hpp diff --git a/src/bindings/python/src/pyopenvino/frontend/place.cpp b/src/bindings/python/src/pyopenvino/frontend/place.cpp index f0f00972c0b..69744140a2a 100644 --- a/src/bindings/python/src/pyopenvino/frontend/place.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/place.cpp @@ -6,9 +6,8 @@ #include #include -#include "common/frontend_exceptions.hpp" -#include "frontend_manager.hpp" -#include "manager.hpp" +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/manager.hpp" #include "pyopenvino/graph/function.hpp" namespace py = pybind11; diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index e86d64a88f9..46bc6e1eb8f 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -32,8 +32,8 @@ #include "pyopenvino/core/variable_state.hpp" #include "pyopenvino/core/version.hpp" #include "pyopenvino/frontend/frontend.hpp" -#include "pyopenvino/frontend/frontend_manager.hpp" #include "pyopenvino/frontend/inputmodel.hpp" +#include "pyopenvino/frontend/manager.hpp" #include "pyopenvino/frontend/place.hpp" #include "pyopenvino/graph/any.hpp" #include "pyopenvino/graph/descriptors/tensor.hpp" diff --git a/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.cpp b/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.cpp index 6491ac6a43e..fd48b138bff 100644 --- a/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.cpp +++ b/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.cpp @@ -4,8 +4,8 @@ #include "mock_py_frontend.hpp" -#include "common/frontend_defs.hpp" -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/visibility.hpp" using namespace ngraph; using namespace ov::frontend; diff --git a/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.hpp b/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.hpp index 32f9794bec6..3bc8d8f423a 100644 --- a/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.hpp +++ b/src/bindings/python/tests/mock/mock_py_ov_frontend/mock_py_frontend.hpp @@ 
-4,9 +4,9 @@ #pragma once -#include "common/frontend_defs.hpp" -#include "manager.hpp" #include "ngraph/visibility.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/visibility.hpp" // Defined if we are building the plugin DLL (instead of using it) #ifdef mock_py_ov_frontend_EXPORTS diff --git a/src/core/tests/frontend/frontend_manager.cpp b/src/core/tests/frontend/frontend_manager.cpp index 12c8748f35a..b4255de3085 100644 --- a/src/core/tests/frontend/frontend_manager.cpp +++ b/src/core/tests/frontend/frontend_manager.cpp @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include #include +#include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -66,8 +66,9 @@ static int set_test_env(const char* name, const char* value) { TEST(FrontEndManagerTest, testAvailableFrontEnds) { FrontEndManager fem; + class MockFrontEnd : public FrontEnd {}; ASSERT_NO_THROW(fem.register_front_end("mock", []() { - return std::make_shared(); + return std::make_shared(); })); auto frontends = fem.get_available_front_ends(); ASSERT_NE(std::find(frontends.begin(), frontends.end(), "mock"), frontends.end()); @@ -105,8 +106,9 @@ TEST(FrontEndManagerTest, testDefaultFrontEnd) { ASSERT_NO_THROW(fe = fem.load_by_model("")); ASSERT_FALSE(fe); - std::unique_ptr fePtr(new FrontEnd()); // to verify base destructor - fe = std::make_shared(); + class MockFrontEnd : public FrontEnd {}; + std::unique_ptr fePtr(new MockFrontEnd()); // to verify base destructor + fe = std::make_shared(); ASSERT_ANY_THROW(fe->load("")); ASSERT_ANY_THROW(fe->convert(std::shared_ptr(nullptr))); ASSERT_ANY_THROW(fe->convert(InputModel::Ptr(nullptr))); @@ -117,8 +119,9 @@ TEST(FrontEndManagerTest, testDefaultFrontEnd) { } TEST(FrontEndManagerTest, testDefaultInputModel) { - std::unique_ptr imPtr(new InputModel()); // to verify base destructor - InputModel::Ptr im = std::make_shared(); + class MockInputModel : public InputModel {}; + std::unique_ptr imPtr(new MockInputModel()); // to verify base destructor + InputModel::Ptr im = std::make_shared(); ASSERT_EQ(im->get_inputs(), std::vector{}); ASSERT_EQ(im->get_outputs(), std::vector{}); ASSERT_ANY_THROW(im->override_all_inputs({nullptr})); @@ -146,8 +149,9 @@ TEST(FrontEndManagerTest, testDefaultInputModel) { } TEST(FrontEndManagerTest, testDefaultPlace) { - std::unique_ptr placePtr(new Place()); // to verify base destructor - Place::Ptr place = std::make_shared(); + class MockPlace : public Place {}; + std::unique_ptr placePtr(new MockPlace()); // to verify base destructor + Place::Ptr place = std::make_shared(); ASSERT_ANY_THROW(place->get_names()); ASSERT_EQ(place->get_consuming_operations(), std::vector{}); ASSERT_EQ(place->get_consuming_operations(0), std::vector{}); diff --git a/src/core/tests/frontend/mock_frontend.cpp b/src/core/tests/frontend/mock_frontend.cpp index ad7a205f3a7..7d4e24290be 100644 --- a/src/core/tests/frontend/mock_frontend.cpp +++ b/src/core/tests/frontend/mock_frontend.cpp @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "common/frontend_defs.hpp" -#include "manager.hpp" #include "ngraph/visibility.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/visibility.hpp" // Defined if we are building the plugin DLL (instead of using it) #ifdef mock1_ov_frontend_EXPORTS diff --git a/src/core/tests/frontend/paddlepaddle/convert_unsupported.cpp b/src/core/tests/frontend/paddlepaddle/convert_unsupported.cpp index 940bb3e72f2..764e752ed06 100644 --- 
a/src/core/tests/frontend/paddlepaddle/convert_unsupported.cpp +++ b/src/core/tests/frontend/paddlepaddle/convert_unsupported.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include +#include #include "common_test_utils/ngraph_test_utils.hpp" #include "paddle_utils.hpp" diff --git a/src/core/tests/frontend/paddlepaddle/incorrect_cut_model.cpp b/src/core/tests/frontend/paddlepaddle/incorrect_cut_model.cpp index 595181f3edc..6005ad3ffea 100644 --- a/src/core/tests/frontend/paddlepaddle/incorrect_cut_model.cpp +++ b/src/core/tests/frontend/paddlepaddle/incorrect_cut_model.cpp @@ -4,8 +4,8 @@ #include -#include -#include +#include +#include #include "paddle_utils.hpp" #include "utils.hpp" diff --git a/src/core/tests/frontend/paddlepaddle/places.cpp b/src/core/tests/frontend/paddlepaddle/places.cpp index 925fb068eae..2092920cbee 100644 --- a/src/core/tests/frontend/paddlepaddle/places.cpp +++ b/src/core/tests/frontend/paddlepaddle/places.cpp @@ -3,7 +3,7 @@ // #include -#include +#include #include "gtest/gtest.h" #include "paddle_utils.hpp" diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py index 8c6b5fd1eeb..75328e48fca 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py +++ b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py @@ -26,7 +26,7 @@ def slice(name : str, x, axes : list, start : list, end : list): outs = exe.run( feed={'x': x}, - fetch_list=[out]) + fetch_list=[out]) saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) diff --git a/src/core/tests/frontend/paddlepaddle/throw_in_conversion.cpp b/src/core/tests/frontend/paddlepaddle/throw_in_conversion.cpp index a67950514c6..88c8128baab 100644 --- a/src/core/tests/frontend/paddlepaddle/throw_in_conversion.cpp +++ b/src/core/tests/frontend/paddlepaddle/throw_in_conversion.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include +#include #include "common_test_utils/ngraph_test_utils.hpp" #include "paddle_utils.hpp" diff --git a/src/core/tests/frontend/shared/include/basic_api.hpp b/src/core/tests/frontend/shared/include/basic_api.hpp index 40c11081c33..045c302d908 100644 --- a/src/core/tests/frontend/shared/include/basic_api.hpp +++ b/src/core/tests/frontend/shared/include/basic_api.hpp @@ -6,7 +6,7 @@ #include -#include +#include using BasicTestParam = std::tuple -#include +#include using ConvertParam = std::tuple -#include +#include struct CutModelParam { std::string m_frontEndName; diff --git a/src/core/tests/frontend/shared/include/load_from.hpp b/src/core/tests/frontend/shared/include/load_from.hpp index 77d89a65e11..db037d27953 100644 --- a/src/core/tests/frontend/shared/include/load_from.hpp +++ b/src/core/tests/frontend/shared/include/load_from.hpp @@ -6,7 +6,7 @@ #include -#include +#include struct LoadFromFEParam { std::string m_frontEndName; diff --git a/src/core/tests/frontend/shared/include/op_fuzzy.hpp b/src/core/tests/frontend/shared/include/op_fuzzy.hpp index fae7726a266..0b29df5f901 100644 --- a/src/core/tests/frontend/shared/include/op_fuzzy.hpp +++ b/src/core/tests/frontend/shared/include/op_fuzzy.hpp @@ -6,7 +6,7 @@ #include -#include +#include using FuzzyOpTestParam = std::tuple -#include +#include struct BaseFEParam { BaseFEParam() {} diff --git 
a/src/core/tests/frontend/shared/include/set_element_type.hpp b/src/core/tests/frontend/shared/include/set_element_type.hpp index 3ae4fce68cf..7e345420d9d 100644 --- a/src/core/tests/frontend/shared/include/set_element_type.hpp +++ b/src/core/tests/frontend/shared/include/set_element_type.hpp @@ -6,7 +6,7 @@ #include -#include +#include struct SetTypeFEParam { std::string m_frontEndName; diff --git a/src/core/tests/frontend/shared/include/telemetry.hpp b/src/core/tests/frontend/shared/include/telemetry.hpp index 770967455f2..747af456b3d 100644 --- a/src/core/tests/frontend/shared/include/telemetry.hpp +++ b/src/core/tests/frontend/shared/include/telemetry.hpp @@ -6,7 +6,7 @@ #include -#include +#include class TelemetryMock { public: diff --git a/src/core/tests/frontend/shared/include/utils.hpp b/src/core/tests/frontend/shared/include/utils.hpp index 6c9c9d6072e..4a82ca1356d 100644 --- a/src/core/tests/frontend/shared/include/utils.hpp +++ b/src/core/tests/frontend/shared/include/utils.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include #include #include "common_test_utils/file_utils.hpp" diff --git a/src/core/tests/frontend/shared/src/telemetry.cpp b/src/core/tests/frontend/shared/src/telemetry.cpp index 088a3074b27..c96b05b990e 100644 --- a/src/core/tests/frontend/shared/src/telemetry.cpp +++ b/src/core/tests/frontend/shared/src/telemetry.cpp @@ -4,7 +4,7 @@ #include "telemetry.hpp" -#include +#include #include "utils.hpp" diff --git a/src/core/tests/frontend/tensorflow/convert_unsupported.cpp b/src/core/tests/frontend/tensorflow/convert_unsupported.cpp index 6374912e74d..391305ea0ed 100644 --- a/src/core/tests/frontend/tensorflow/convert_unsupported.cpp +++ b/src/core/tests/frontend/tensorflow/convert_unsupported.cpp @@ -2,8 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include +#include #include "common_test_utils/ngraph_test_utils.hpp" #include "tf_utils.hpp" diff --git a/src/core/tests/frontend/tensorflow/transpose_sinking.cpp b/src/core/tests/frontend/tensorflow/transpose_sinking.cpp index b1f08b28aa9..44f50d2c80b 100644 --- a/src/core/tests/frontend/tensorflow/transpose_sinking.cpp +++ b/src/core/tests/frontend/tensorflow/transpose_sinking.cpp @@ -5,7 +5,7 @@ #include "transpose_sinking.hpp" #include -#include +#include #include #include #include diff --git a/src/core/tests/pass/serialization/read_ir.hpp b/src/core/tests/pass/serialization/read_ir.hpp index 31a81999d84..e763c93ebc8 100644 --- a/src/core/tests/pass/serialization/read_ir.hpp +++ b/src/core/tests/pass/serialization/read_ir.hpp @@ -4,7 +4,7 @@ #pragma once -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" #include "openvino/core/except.hpp" #include "openvino/core/model.hpp" diff --git a/src/frontends/common/CMakeLists.txt b/src/frontends/common/CMakeLists.txt index 869db527261..3750dc92e92 100644 --- a/src/frontends/common/CMakeLists.txt +++ b/src/frontends/common/CMakeLists.txt @@ -13,7 +13,7 @@ file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp) file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) # Add include path to so_extension.hpp -set_property(SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src/frontend_manager.cpp +set_property(SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src/frontend.cpp APPEND PROPERTY INCLUDE_DIRECTORIES "${OpenVINO_SOURCE_DIR}/src/core/src/") set(FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) @@ -25,7 +25,6 @@ source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) # create library 
add_library(${TARGET_NAME} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) -add_library(ngraph::${TARGET_NAME} ALIAS ${TARGET_NAME}) add_library(openvino::frontend::common ALIAS ${TARGET_NAME}) target_include_directories(${TARGET_NAME} @@ -37,10 +36,10 @@ target_include_directories(${TARGET_NAME} "${CMAKE_CURRENT_BINARY_DIR}/src") target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) -target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_DL_LIBS} openvino::util PUBLIC ngraph) +target_link_libraries(${TARGET_NAME} PRIVATE ${CMAKE_DL_LIBS} openvino::util PUBLIC openvino::core) set_property(SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src/plugin_loader.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/src/frontend_manager.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/manager.cpp APPEND PROPERTY COMPILE_DEFINITIONS FRONTEND_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}" FRONTEND_LIB_SUFFIX="${FRONTEND_NAME_SUFFIX}${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") @@ -48,9 +47,7 @@ set_property(SOURCE add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} - INCLUDE_DIRECTORY "${FRONTEND_INCLUDE_DIR}" - ADDITIONAL_INCLUDE_DIRECTORIES - $) + INCLUDE_DIRECTORY "${FRONTEND_INCLUDE_DIR}") ie_add_vs_version_file(NAME ${TARGET_NAME} FILEDESCRIPTION "Manager of OpenVINO Frontends") @@ -61,7 +58,7 @@ ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) if(BUILD_SHARED_LIBS) add_library(${TARGET_NAME}_static STATIC ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) add_library(${TARGET_NAME}::static ALIAS ${TARGET_NAME}_static) - target_link_libraries(${TARGET_NAME}_static PRIVATE ${CMAKE_DL_LIBS} openvino::util PUBLIC ngraph) + target_link_libraries(${TARGET_NAME}_static PRIVATE ${CMAKE_DL_LIBS} openvino::util PUBLIC openvino::core) target_include_directories(${TARGET_NAME}_static PUBLIC $) target_include_directories(${TARGET_NAME}_static PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) @@ -82,15 +79,9 @@ install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) -install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/common +install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/openvino DESTINATION ${FRONTEND_INSTALL_INCLUDE} - COMPONENT core_dev - FILES_MATCHING PATTERN "*.hpp") - -install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/ - DESTINATION ${FRONTEND_INSTALL_INCLUDE} - COMPONENT core_dev - FILES_MATCHING PATTERN "manager.hpp") + COMPONENT core_dev) export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") diff --git a/src/frontends/common/include/common/frontend_exceptions.hpp b/src/frontends/common/include/openvino/frontend/exception.hpp similarity index 99% rename from src/frontends/common/include/common/frontend_exceptions.hpp rename to src/frontends/common/include/openvino/frontend/exception.hpp index 67dfe85d4f1..71d14238d81 100644 --- a/src/frontends/common/include/common/frontend_exceptions.hpp +++ b/src/frontends/common/include/openvino/frontend/exception.hpp @@ -7,8 +7,8 @@ #include #include -#include "frontend_defs.hpp" #include "openvino/core/except.hpp" +#include "visibility.hpp" namespace ov { namespace frontend { diff --git a/src/frontends/common/include/common/frontend.hpp b/src/frontends/common/include/openvino/frontend/frontend.hpp similarity index 86% rename from src/frontends/common/include/common/frontend.hpp rename to 
src/frontends/common/include/openvino/frontend/frontend.hpp index 1ba63e6f593..13b30254a39 100644 --- a/src/frontends/common/include/common/frontend.hpp +++ b/src/frontends/common/include/openvino/frontend/frontend.hpp @@ -8,12 +8,12 @@ #include #include -#include "frontend_defs.hpp" #include "input_model.hpp" #include "openvino/core/any.hpp" #include "openvino/core/extension.hpp" #include "openvino/core/model.hpp" #include "openvino/core/op_extension.hpp" +#include "visibility.hpp" namespace ov { namespace frontend { @@ -25,7 +25,7 @@ public: FrontEnd(); - virtual ~FrontEnd(); + virtual ~FrontEnd() = 0; /// \brief Validates if FrontEnd can recognize model with parameters specified. /// Same parameters should be used to load model. @@ -55,34 +55,34 @@ public: return load_impl(vars); } - /// \brief Completely convert and normalize entire function, throws if it is not + /// \brief Completely convert and normalize entire Model, throws if it is not /// possible /// \param model Input model - /// \return fully converted nGraph function + /// \return fully converted OV Model virtual std::shared_ptr convert(InputModel::Ptr model) const; - /// \brief Completely convert the remaining, not converted part of a function. - /// \param partiallyConverted partially converted nGraph function + /// \brief Completely convert the remaining, not converted part of a Model. + /// \param partiallyConverted partially converted OV Model virtual void convert(std::shared_ptr partially_converted) const; /// \brief Convert only those parts of the model that can be converted leaving others /// as-is. Converted parts are not normalized by additional transformations; normalize - /// function or another form of convert function should be called to finalize the + /// Model or another form of convert Model should be called to finalize the /// conversion process. /// \param model Input model - /// \return partially converted nGraph function + /// \return partially converted OV Model virtual std::shared_ptr convert_partially(InputModel::Ptr model) const; /// \brief Convert operations with one-to-one mapping with decoding nodes. - /// Each decoding node is an nGraph node representing a single FW operation node with + /// Each decoding node is an OV node representing a single FW operation node with /// all attributes represented in FW-independent way. /// \param model Input model - /// \return nGraph function after decoding + /// \return OV Model after decoding virtual std::shared_ptr decode(InputModel::Ptr model) const; - /// \brief Runs normalization passes on function that was loaded with partial conversion - /// \param function partially converted nGraph function - virtual void normalize(std::shared_ptr function) const; + /// \brief Runs normalization passes on Model that was loaded with partial conversion + /// \param Model partially converted OV Model + virtual void normalize(std::shared_ptr model) const; /// \brief Gets name of this FrontEnd. 
Can be used by clients /// if frontend is selected automatically by FrontEndManager::load_by_model diff --git a/src/frontends/common/include/common/input_model.hpp b/src/frontends/common/include/openvino/frontend/input_model.hpp similarity index 97% rename from src/frontends/common/include/common/input_model.hpp rename to src/frontends/common/include/openvino/frontend/input_model.hpp index 1642e3b10f2..c8327b0252c 100644 --- a/src/frontends/common/include/common/input_model.hpp +++ b/src/frontends/common/include/openvino/frontend/input_model.hpp @@ -8,10 +8,10 @@ #include #include -#include "frontend_defs.hpp" #include "openvino/core/partial_shape.hpp" #include "openvino/core/type/element_type.hpp" #include "place.hpp" +#include "visibility.hpp" namespace ov { namespace frontend { @@ -22,7 +22,7 @@ namespace frontend { /// \note Class methods are divided into several groups: searching for places, naming and /// annotation, topology editing, setting tensor properties. /// -/// Editing requests may affect ability to convert the original model to nGraph function. +/// Editing requests may affect ability to convert the original model to OV Model. /// Aim to provide these editing capabilities is to unlock conversion for models that /// are not natively supported "as-is" because of undefined shapes, types or operations. /// @@ -39,7 +39,7 @@ class FRONTEND_API InputModel { public: typedef std::shared_ptr Ptr; -    virtual ~InputModel() = default; +    virtual ~InputModel() = 0; ///// Searching for places ///// @@ -169,8 +169,8 @@ public: /// \brief Defines all possible shape that may be used for this place; place should be /// uniquely refer to some data. This partial shape will be converted to corresponding -    /// shape of results ngraph nodes and will define shape inference when the model is -    /// converted to ngraph. +    /// shape of results OV nodes and will define shape inference when the model is +    /// converted to OV. /// \param place Model place /// \param shape Partial shape for this place virtual void set_partial_shape(Place::Ptr place, const ov::PartialShape& shape); diff --git a/src/frontends/common/include/manager.hpp b/src/frontends/common/include/openvino/frontend/manager.hpp similarity index 94% rename from src/frontends/common/include/manager.hpp rename to src/frontends/common/include/openvino/frontend/manager.hpp index b79fc89ba2c..af6a3778aac 100644 --- a/src/frontends/common/include/manager.hpp +++ b/src/frontends/common/include/openvino/frontend/manager.hpp @@ -8,9 +8,9 @@ #include #include -#include "common/frontend.hpp" -#include "common/frontend_defs.hpp" #include "openvino/core/any.hpp" +#include "openvino/frontend/frontend.hpp" +#include "openvino/frontend/visibility.hpp" namespace ov { namespace frontend { @@ -22,8 +22,7 @@ using FrontEndFactory = std::function; /// frontends This is a main frontend entry point for client applications class FRONTEND_API FrontEndManager final { public: -    /// \brief Default constructor. Searches and loads of available frontends
-    FrontEndManager(); +    FrontEndManager(); /// \brief Default move constructor FrontEndManager(FrontEndManager&&) noexcept; diff --git a/src/frontends/common/include/common/place.hpp b/src/frontends/common/include/openvino/frontend/place.hpp similarity index 99% rename from src/frontends/common/include/common/place.hpp rename to src/frontends/common/include/openvino/frontend/place.hpp index f5c00263764..7fdab781996 100644 --- a/src/frontends/common/include/common/place.hpp +++ b/src/frontends/common/include/openvino/frontend/place.hpp @@ -8,7 +8,7 @@ #include #include -#include "frontend_defs.hpp" +#include "visibility.hpp" namespace ov { namespace frontend { @@ -61,7 +61,7 @@ class FRONTEND_API Place { public: typedef std::shared_ptr Ptr; -    virtual ~Place() = default; +    virtual ~Place() = 0; /// \brief All associated names (synonyms) that identify this place in the graph in a /// framework specific way diff --git a/src/frontends/common/include/common/telemetry_extension.hpp b/src/frontends/common/include/openvino/frontend/telemetry_extension.hpp similarity index 97% rename from src/frontends/common/include/common/telemetry_extension.hpp rename to src/frontends/common/include/openvino/frontend/telemetry_extension.hpp index 914ad41c4bb..b1454414b3e 100644 --- a/src/frontends/common/include/common/telemetry_extension.hpp +++ b/src/frontends/common/include/openvino/frontend/telemetry_extension.hpp @@ -8,11 +8,11 @@ #include #include -#include "frontend_defs.hpp" #include "openvino/core/extension.hpp" #include "openvino/pass/graph_rewrite.hpp" #include "openvino/pass/manager.hpp" #include "openvino/pass/pass.hpp" +#include "visibility.hpp" namespace ov { namespace frontend { diff --git a/src/frontends/common/include/common/frontend_defs.hpp b/src/frontends/common/include/openvino/frontend/visibility.hpp similarity index 68% rename from src/frontends/common/include/common/frontend_defs.hpp rename to src/frontends/common/include/openvino/frontend/visibility.hpp index 23d2fa86998..b578a596673 100644 --- a/src/frontends/common/include/common/frontend_defs.hpp +++ b/src/frontends/common/include/openvino/frontend/visibility.hpp @@ -11,11 +11,14 @@ #if defined(USE_STATIC_FRONTEND_COMMON) || defined(OPENVINO_STATIC_LIBRARY) # define FRONTEND_API +# define FRONTEND_C_API #else // Defined if cmake is building the frontend_common DLL (instead of using it) # ifdef frontend_common_EXPORTS -# define FRONTEND_API OPENVINO_CORE_EXPORTS +# define FRONTEND_API OPENVINO_CORE_EXPORTS +# define FRONTEND_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS # else -# define FRONTEND_API OPENVINO_CORE_IMPORTS +# define FRONTEND_API OPENVINO_CORE_IMPORTS +# define FRONTEND_C_API OPENVINO_EXTERN_C OPENVINO_CORE_IMPORTS # endif // frontend_common_EXPORTS #endif // USE_STATIC_FRONTEND_COMMON || OPENVINO_STATIC_LIBRARY diff --git a/src/frontends/common/src/frontend.cpp b/src/frontends/common/src/frontend.cpp new file mode 100644 index 00000000000..2ab96691ee9 --- /dev/null +++ b/src/frontends/common/src/frontend.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/place.hpp" +#include "openvino/util/env_util.hpp" +#include "plugin_loader.hpp" +#include "so_extension.hpp" +#include "utils.hpp" + +using namespace ov; +using namespace ov::frontend; + +FrontEnd::FrontEnd() = default; + +FrontEnd::~FrontEnd() = default; 
+ +bool FrontEnd::supported_impl(const std::vector& variants) const { + return false; +} + +InputModel::Ptr FrontEnd::load_impl(const std::vector& params) const { + FRONT_END_NOT_IMPLEMENTED(load_impl); +} +std::shared_ptr FrontEnd::convert(InputModel::Ptr model) const { + FRONT_END_NOT_IMPLEMENTED(convert); +} + +void FrontEnd::convert(std::shared_ptr) const { + FRONT_END_NOT_IMPLEMENTED(convert); +} + +std::shared_ptr FrontEnd::convert_partially(InputModel::Ptr model) const { + FRONT_END_NOT_IMPLEMENTED(convert_partially); +} + +std::shared_ptr FrontEnd::decode(InputModel::Ptr model) const { + FRONT_END_NOT_IMPLEMENTED(decode); +} + +void FrontEnd::normalize(std::shared_ptr model) const { + FRONT_END_NOT_IMPLEMENTED(normalize); +} + +void FrontEnd::add_extension(const std::shared_ptr& extension) { + // Left unimplemented intentionally. + // Each frontend can support own set of extensions, so this method should be implemented on the frontend side +} + +void FrontEnd::add_extension(const std::vector>& extensions) { + for (const auto& ext : extensions) + add_extension(ext); +} + +void FrontEnd::add_extension(const std::string& library_path) { + add_extension(ov::detail::load_extensions(library_path)); +} + +#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +void FrontEnd::add_extension(const std::wstring& library_path) { + add_extension(ov::detail::load_extensions(library_path)); +} +#endif + +std::string FrontEnd::get_name() const { + return std::string(); +} diff --git a/src/frontends/common/src/input_model.cpp b/src/frontends/common/src/input_model.cpp new file mode 100644 index 00000000000..9c1cc7a3629 --- /dev/null +++ b/src/frontends/common/src/input_model.cpp @@ -0,0 +1,114 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/place.hpp" +#include "utils.hpp" + +using namespace ov; +using namespace ov::frontend; + +InputModel::~InputModel() = default; + +std::vector InputModel::get_inputs() const { + return {}; +} + +std::vector InputModel::get_outputs() const { + return {}; +} + +Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensor_name) const { + return nullptr; +} + +Place::Ptr InputModel::get_place_by_operation_name(const std::string& operation_name) const { + return nullptr; +} + +Place::Ptr InputModel::get_place_by_operation_name_and_input_port(const std::string& operation_name, + int input_port_index) { + return nullptr; +} + +Place::Ptr InputModel::get_place_by_operation_name_and_output_port(const std::string& operation_name, + int output_port_index) { + return nullptr; +} + +void InputModel::set_name_for_tensor(Place::Ptr tensor, const std::string& new_name) { + FRONT_END_NOT_IMPLEMENTED(set_name_for_tensor); +} + +void InputModel::add_name_for_tensor(Place::Ptr tensor, const std::string& new_name) { + FRONT_END_NOT_IMPLEMENTED(add_name_for_tensor); +} + +void InputModel::set_name_for_operation(Place::Ptr operation, const std::string& new_name) { + FRONT_END_NOT_IMPLEMENTED(set_name_for_operation); +} + +void InputModel::free_name_for_tensor(const std::string& name) { + FRONT_END_NOT_IMPLEMENTED(free_name_for_tensor); +} + +void InputModel::free_name_for_operation(const std::string& name) { + FRONT_END_NOT_IMPLEMENTED(free_name_for_operation); +} + +void InputModel::set_name_for_dimension(Place::Ptr place, size_t shape_dim_index, const std::string& dim_name) { + 
FRONT_END_NOT_IMPLEMENTED(set_name_for_dimension); +} + +void InputModel::cut_and_add_new_input(Place::Ptr place, const std::string& new_name_optional) { + FRONT_END_NOT_IMPLEMENTED(cut_and_add_new_input); +} + +void InputModel::cut_and_add_new_output(Place::Ptr place, const std::string& new_name_optional) { + FRONT_END_NOT_IMPLEMENTED(cut_and_add_new_output); +} + +Place::Ptr InputModel::add_output(Place::Ptr place) { + FRONT_END_NOT_IMPLEMENTED(add_output); +} + +void InputModel::remove_output(Place::Ptr place) { + FRONT_END_NOT_IMPLEMENTED(remove_output); +} + +void InputModel::override_all_outputs(const std::vector& outputs) { + FRONT_END_NOT_IMPLEMENTED(override_all_outputs); +} + +void InputModel::override_all_inputs(const std::vector& inputs) { + FRONT_END_NOT_IMPLEMENTED(override_all_inputs); +} + +void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { + FRONT_END_NOT_IMPLEMENTED(extract_subgraph); +} + +// Setting tensor properties +void InputModel::set_partial_shape(Place::Ptr place, const PartialShape&) { + FRONT_END_NOT_IMPLEMENTED(set_partial_shape); +} + +PartialShape InputModel::get_partial_shape(Place::Ptr place) const { + FRONT_END_NOT_IMPLEMENTED(get_partial_shape); +} + +void InputModel::set_element_type(Place::Ptr place, const element::Type&) { + FRONT_END_NOT_IMPLEMENTED(set_element_type); +} + +void InputModel::set_tensor_value(Place::Ptr place, const void* value) { + FRONT_END_NOT_IMPLEMENTED(set_tensor_value); +} + +void InputModel::set_tensor_partial_value(Place::Ptr place, const void* min_value, const void* max_value) { + FRONT_END_NOT_IMPLEMENTED(set_tensor_partial_value); +} diff --git a/src/frontends/common/src/frontend_manager.cpp b/src/frontends/common/src/manager.cpp similarity index 51% rename from src/frontends/common/src/frontend_manager.cpp rename to src/frontends/common/src/manager.cpp index f50781418a4..7b79721603a 100644 --- a/src/frontends/common/src/frontend_manager.cpp +++ b/src/frontends/common/src/manager.cpp @@ -2,22 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/frontend/manager.hpp" + #include #include -#include "common/frontend_exceptions.hpp" -#include "common/place.hpp" -#include "manager.hpp" -#include "ngraph/except.hpp" -#include "openvino/util/env_util.hpp" +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/place.hpp" #include "plugin_loader.hpp" -#include "so_extension.hpp" #include "utils.hpp" using namespace ov; using namespace ov::frontend; -//----------- FrontEndManager --------------------------- class FrontEndManager::Impl { std::mutex m_loading_mutex; std::vector m_plugins; @@ -232,285 +229,3 @@ template <> FrontEnd::Ptr FrontEndManager::load_by_model(const std::vector& variants) { return load_by_model_impl(variants); } - -//----------- FrontEnd --------------------------- - -FrontEnd::FrontEnd() = default; - -FrontEnd::~FrontEnd() = default; - -bool FrontEnd::supported_impl(const std::vector& variants) const { - return false; -} - -InputModel::Ptr FrontEnd::load_impl(const std::vector& params) const { - FRONT_END_NOT_IMPLEMENTED(load_impl); -} -std::shared_ptr FrontEnd::convert(InputModel::Ptr model) const { - FRONT_END_NOT_IMPLEMENTED(convert); -} - -void FrontEnd::convert(std::shared_ptr) const { - FRONT_END_NOT_IMPLEMENTED(convert); -} - -std::shared_ptr FrontEnd::convert_partially(InputModel::Ptr model) const { - FRONT_END_NOT_IMPLEMENTED(convert_partially); -} - -std::shared_ptr FrontEnd::decode(InputModel::Ptr model) const { - 
FRONT_END_NOT_IMPLEMENTED(decode); -} - -void FrontEnd::normalize(std::shared_ptr model) const { - FRONT_END_NOT_IMPLEMENTED(normalize); -} - -void FrontEnd::add_extension(const std::shared_ptr& extension) { - // Left unimplemented intentionally. - // Each frontend can support own set of extensions, so this method should be implemented on the frontend side -} - -void FrontEnd::add_extension(const std::vector>& extensions) { - for (const auto& ext : extensions) - add_extension(ext); -} - -void FrontEnd::add_extension(const std::string& library_path) { - add_extension(ov::detail::load_extensions(library_path)); -} - -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -void FrontEnd::add_extension(const std::wstring& library_path) { - add_extension(ov::detail::load_extensions(library_path)); -} -#endif - -std::string FrontEnd::get_name() const { - return std::string(); -} - -//----------- InputModel --------------------------- -std::vector InputModel::get_inputs() const { - return {}; -} - -std::vector InputModel::get_outputs() const { - return {}; -} - -Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensor_name) const { - return nullptr; -} - -Place::Ptr InputModel::get_place_by_operation_name(const std::string& operation_name) const { - return nullptr; -} - -Place::Ptr InputModel::get_place_by_operation_name_and_input_port(const std::string& operation_name, - int input_port_index) { - return nullptr; -} - -Place::Ptr InputModel::get_place_by_operation_name_and_output_port(const std::string& operation_name, - int output_port_index) { - return nullptr; -} - -void InputModel::set_name_for_tensor(Place::Ptr tensor, const std::string& new_name) { - FRONT_END_NOT_IMPLEMENTED(set_name_for_tensor); -} - -void InputModel::add_name_for_tensor(Place::Ptr tensor, const std::string& new_name) { - FRONT_END_NOT_IMPLEMENTED(add_name_for_tensor); -} - -void InputModel::set_name_for_operation(Place::Ptr operation, const std::string& new_name) { - FRONT_END_NOT_IMPLEMENTED(set_name_for_operation); -} - -void InputModel::free_name_for_tensor(const std::string& name) { - FRONT_END_NOT_IMPLEMENTED(free_name_for_tensor); -} - -void InputModel::free_name_for_operation(const std::string& name) { - FRONT_END_NOT_IMPLEMENTED(free_name_for_operation); -} - -void InputModel::set_name_for_dimension(Place::Ptr place, size_t shape_dim_index, const std::string& dim_name) { - FRONT_END_NOT_IMPLEMENTED(set_name_for_dimension); -} - -void InputModel::cut_and_add_new_input(Place::Ptr place, const std::string& new_name_optional) { - FRONT_END_NOT_IMPLEMENTED(cut_and_add_new_input); -} - -void InputModel::cut_and_add_new_output(Place::Ptr place, const std::string& new_name_optional) { - FRONT_END_NOT_IMPLEMENTED(cut_and_add_new_output); -} - -Place::Ptr InputModel::add_output(Place::Ptr place) { - FRONT_END_NOT_IMPLEMENTED(add_output); -} - -void InputModel::remove_output(Place::Ptr place) { - FRONT_END_NOT_IMPLEMENTED(remove_output); -} - -void InputModel::override_all_outputs(const std::vector& outputs) { - FRONT_END_NOT_IMPLEMENTED(override_all_outputs); -} - -void InputModel::override_all_inputs(const std::vector& inputs) { - FRONT_END_NOT_IMPLEMENTED(override_all_inputs); -} - -void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { - FRONT_END_NOT_IMPLEMENTED(extract_subgraph); -} - -// Setting tensor properties -void InputModel::set_partial_shape(Place::Ptr place, const PartialShape&) { - FRONT_END_NOT_IMPLEMENTED(set_partial_shape); -} - -PartialShape 
InputModel::get_partial_shape(Place::Ptr place) const {
-    FRONT_END_NOT_IMPLEMENTED(get_partial_shape);
-}
-
-void InputModel::set_element_type(Place::Ptr place, const element::Type&) {
-    FRONT_END_NOT_IMPLEMENTED(set_element_type);
-}
-
-void InputModel::set_tensor_value(Place::Ptr place, const void* value) {
-    FRONT_END_NOT_IMPLEMENTED(set_tensor_value);
-}
-
-void InputModel::set_tensor_partial_value(Place::Ptr place, const void* min_value, const void* max_value) {
-    FRONT_END_NOT_IMPLEMENTED(set_tensor_partial_value);
-}
-
-//----------- Place ---------------------------
-std::vector<std::string> Place::get_names() const {
-    FRONT_END_NOT_IMPLEMENTED(get_names);
-}
-
-std::vector<Place::Ptr> Place::get_consuming_operations() const {
-    return {};
-}
-
-std::vector<Place::Ptr> Place::get_consuming_operations(int output_port_index) const {
-    return {};
-}
-
-std::vector<Place::Ptr> Place::get_consuming_operations(const std::string& outputPortName, int outputPortIndex) const {
-    return {};
-}
-
-Place::Ptr Place::get_target_tensor() const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_target_tensor(int output_port_index) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_producing_operation() const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_producing_operation(int input_port_index) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_producing_port() const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_input_port() const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_input_port(int input_port_index) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_input_port(const std::string& input_name) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_input_port(const std::string& input_name, int input_port_index) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_output_port() const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_output_port(int output_port_index) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_output_port(const std::string& output_name) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_output_port(const std::string& output_name, int output_port_index) const {
-    return nullptr;
-}
-
-std::vector<Place::Ptr> Place::get_consuming_ports() const {
-    return {};
-}
-
-bool Place::is_input() const {
-    FRONT_END_NOT_IMPLEMENTED(is_input);
-}
-
-bool Place::is_output() const {
-    FRONT_END_NOT_IMPLEMENTED(is_output);
-}
-
-bool Place::is_equal(Ptr another) const {
-    FRONT_END_NOT_IMPLEMENTED(is_equal);
-}
-
-bool Place::is_equal_data(Ptr another) const {
-    FRONT_END_NOT_IMPLEMENTED(is_equal_data);
-}
-
-Place::Ptr Place::get_source_tensor() const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_source_tensor(int input_port_index) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_source_tensor(const std::string& inputName, int inputPortIndex) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_source_tensor(const std::string& inputName) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_target_tensor(const std::string& outputPortName) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_target_tensor(const std::string& outputPortName, int outputPortIndex) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_producing_operation(const std::string& inputName) const {
-    return nullptr;
-}
-
-Place::Ptr Place::get_producing_operation(const std::string& inputName, int inputPortIndex) const {
-    return nullptr;
-}
-
-std::vector<Place::Ptr> Place::get_consuming_operations(const std::string& outputPortName) const {
-    return {};
-}
diff --git a/src/frontends/common/src/place.cpp b/src/frontends/common/src/place.cpp
new file mode 100644
index 00000000000..9b8f611f9ce
--- /dev/null
+++ b/src/frontends/common/src/place.cpp
@@ -0,0 +1,139 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/place.hpp"
+
+#include
+
+#include "openvino/frontend/exception.hpp"
+#include "utils.hpp"
+
+using namespace ov;
+using namespace ov::frontend;
+
+Place::~Place() = default;
+
+std::vector<std::string> Place::get_names() const {
+    FRONT_END_NOT_IMPLEMENTED(get_names);
+}
+
+std::vector<Place::Ptr> Place::get_consuming_operations() const {
+    return {};
+}
+
+std::vector<Place::Ptr> Place::get_consuming_operations(int output_port_index) const {
+    return {};
+}
+
+std::vector<Place::Ptr> Place::get_consuming_operations(const std::string& outputPortName, int outputPortIndex) const {
+    return {};
+}
+
+Place::Ptr Place::get_target_tensor() const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_target_tensor(int output_port_index) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_producing_operation() const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_producing_operation(int input_port_index) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_producing_port() const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_input_port() const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_input_port(int input_port_index) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_input_port(const std::string& input_name) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_input_port(const std::string& input_name, int input_port_index) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_output_port() const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_output_port(int output_port_index) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_output_port(const std::string& output_name) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_output_port(const std::string& output_name, int output_port_index) const {
+    return nullptr;
+}
+
+std::vector<Place::Ptr> Place::get_consuming_ports() const {
+    return {};
+}
+
+bool Place::is_input() const {
+    FRONT_END_NOT_IMPLEMENTED(is_input);
+}
+
+bool Place::is_output() const {
+    FRONT_END_NOT_IMPLEMENTED(is_output);
+}
+
+bool Place::is_equal(Ptr another) const {
+    FRONT_END_NOT_IMPLEMENTED(is_equal);
+}
+
+bool Place::is_equal_data(Ptr another) const {
+    FRONT_END_NOT_IMPLEMENTED(is_equal_data);
+}
+
+Place::Ptr Place::get_source_tensor() const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_source_tensor(int input_port_index) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_source_tensor(const std::string& inputName, int inputPortIndex) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_source_tensor(const std::string& inputName) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_target_tensor(const std::string& outputPortName) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_target_tensor(const std::string& outputPortName, int outputPortIndex) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_producing_operation(const std::string& inputName) const {
+    return nullptr;
+}
+
+Place::Ptr Place::get_producing_operation(const std::string& inputName, int inputPortIndex) const {
+    return nullptr;
+}
+
+std::vector<Place::Ptr> Place::get_consuming_operations(const std::string& outputPortName) const {
+    return {};
+}
diff --git a/src/frontends/common/src/plugin_loader.cpp b/src/frontends/common/src/plugin_loader.cpp
index e3d3cb1c2f4..e7233ec2542 100644
--- a/src/frontends/common/src/plugin_loader.cpp
+++ b/src/frontends/common/src/plugin_loader.cpp
@@ -16,7 +16,7 @@
 #include
-#include
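// Illustrative sketch, not part of this patch: the Place defaults above return
// nullptr or an empty vector for unsupported navigation queries rather than
// throwing, so callers null-check every hop. The model variable and the
// "data" tensor name below are hypothetical.
//
// auto place = input_model->get_place_by_tensor_name("data");
// if (place) {
//     if (auto producer = place->get_producing_operation()) {
//         // inspect the producing operation
//     }
// }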
+#include #include #include @@ -61,7 +61,6 @@ void load_static_plugins(std::vector& res) { // TODO: change to std::filesystem for C++17 static std::vector list_files(const std::string& path) { - NGRAPH_SUPPRESS_DEPRECATED_START std::vector res; try { const auto prefix = std::string(FRONTEND_LIB_PREFIX); @@ -82,7 +81,6 @@ static std::vector list_files(const std::string& path) { // Ignore exceptions } return res; - NGRAPH_SUPPRESS_DEPRECATED_END } void ov::frontend::find_plugins(const std::string& dir_name, std::vector& res) { @@ -99,7 +97,7 @@ void ov::frontend::find_plugins(const std::string& dir_name, std::vector(ov::util::get_symbol(so, "GetAPIVersion")); if (!info_addr) { - NGRAPH_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have API version" << std::endl; + OPENVINO_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have API version" << std::endl; return false; } FrontEndVersion plug_info{reinterpret_cast(info_addr())}; if (plug_info != OV_FRONTEND_API_VERSION) { // Plugin has incompatible API version, do not load it - NGRAPH_DEBUG << "Loaded FrontEnd [" << m_file_path << "] has incompatible API version" << plug_info - << std::endl; + OPENVINO_DEBUG << "Loaded FrontEnd [" << m_file_path << "] has incompatible API version" << plug_info + << std::endl; return false; } auto creator_addr = reinterpret_cast(ov::util::get_symbol(so, "GetFrontEndData")); if (!creator_addr) { - NGRAPH_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have Frontend Data" << std::endl; + OPENVINO_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have Frontend Data" << std::endl; return false; } diff --git a/src/frontends/common/src/plugin_loader.hpp b/src/frontends/common/src/plugin_loader.hpp index 32340d05729..481b6dd434a 100644 --- a/src/frontends/common/src/plugin_loader.hpp +++ b/src/frontends/common/src/plugin_loader.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include #ifdef _WIN32 static const char PathSeparator[] = ";"; diff --git a/src/frontends/common/src/telemetry_extension.cpp b/src/frontends/common/src/telemetry_extension.cpp index 134836ac701..8dfa0c3725d 100644 --- a/src/frontends/common/src/telemetry_extension.cpp +++ b/src/frontends/common/src/telemetry_extension.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "common/telemetry_extension.hpp" +#include "openvino/frontend/telemetry_extension.hpp" using namespace ov::frontend; diff --git a/src/frontends/common/src/utils.cpp b/src/frontends/common/src/utils.cpp index 8fceccf2718..35a54723e6e 100644 --- a/src/frontends/common/src/utils.cpp +++ b/src/frontends/common/src/utils.cpp @@ -4,7 +4,7 @@ #include "utils.hpp" -#include "common/frontend_exceptions.hpp" +#include "openvino/frontend/exception.hpp" #include "openvino/util/file_util.hpp" #include "plugin_loader.hpp" diff --git a/src/frontends/common/src/utils.hpp b/src/frontends/common/src/utils.hpp index c5a4f30475f..100e4abb207 100644 --- a/src/frontends/common/src/utils.hpp +++ b/src/frontends/common/src/utils.hpp @@ -4,7 +4,7 @@ #include -#include "common/frontend_defs.hpp" +#include "openvino/frontend/visibility.hpp" namespace ov { namespace frontend { diff --git a/src/frontends/ir/include/ir_frontend/model.hpp b/src/frontends/ir/include/ir_frontend/model.hpp deleted file mode 100644 index 243bb327d9b..00000000000 --- a/src/frontends/ir/include/ir_frontend/model.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - 
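// Illustrative sketch, not part of this patch: the loader above resolves two
// C symbols from each candidate library and rejects mismatched API versions.
// A plugin's side of that contract, following the IR and ONNX frontends shown
// later in this patch ("my_format" and MyFrontEnd are hypothetical; real
// frontends use their export macros, e.g. IR_C_API, instead of bare extern "C"):
//
// extern "C" ov::frontend::FrontEndVersion GetAPIVersion() {
//     return OV_FRONTEND_API_VERSION;
// }
// extern "C" void* GetFrontEndData() {
//     auto* res = new ov::frontend::FrontEndPluginInfo();
//     res->m_name = "my_format";
//     res->m_creator = []() { return std::make_shared<MyFrontEnd>(); };
//     return res;
// }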
-#include "ir_frontend/utility.hpp" -#include "manager.hpp" -#include "ngraph/runtime/aligned_buffer.hpp" - -namespace ov { -namespace frontend { - -class IR_API InputModelIR : public InputModel { - friend class FrontEndIR; - class InputModelIRImpl; - std::shared_ptr _impl; - -public: - InputModelIR(std::istream& stream, - const std::shared_ptr& weights, - const std::unordered_map& extensions); - - std::shared_ptr convert(); -}; - -} // namespace frontend -} // namespace ov diff --git a/src/frontends/ir/include/ir_frontend/frontend.hpp b/src/frontends/ir/include/openvino/frontend/ir/frontend.hpp similarity index 82% rename from src/frontends/ir/include/ir_frontend/frontend.hpp rename to src/frontends/ir/include/openvino/frontend/ir/frontend.hpp index ac79bca3ff1..4139e37b929 100644 --- a/src/frontends/ir/include/ir_frontend/frontend.hpp +++ b/src/frontends/ir/include/openvino/frontend/ir/frontend.hpp @@ -4,16 +4,19 @@ #pragma once -#include "common/frontend.hpp" -#include "common/telemetry_extension.hpp" -#include "utility.hpp" +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/frontend.hpp" +#include "openvino/frontend/ir/visibility.hpp" +#include "openvino/frontend/telemetry_extension.hpp" +#include "openvino/openvino.hpp" namespace ov { namespace frontend { +namespace ir { -class IR_API FrontEndIR : public FrontEnd { +class IR_API FrontEnd : public ov::frontend::FrontEnd { public: - FrontEndIR() = default; + FrontEnd() = default; /// \brief Completely convert the remaining, not converted part of a function. /// \param partiallyConverted partially converted nGraph function @@ -47,5 +50,6 @@ private: std::shared_ptr m_telemetry; }; +} // namespace ir } // namespace frontend } // namespace ov diff --git a/src/frontends/ir/include/ir_frontend/utility.hpp b/src/frontends/ir/include/openvino/frontend/ir/visibility.hpp similarity index 60% rename from src/frontends/ir/include/ir_frontend/utility.hpp rename to src/frontends/ir/include/openvino/frontend/ir/visibility.hpp index 5e9555fc50d..a90f0e1d529 100644 --- a/src/frontends/ir/include/ir_frontend/utility.hpp +++ b/src/frontends/ir/include/openvino/frontend/ir/visibility.hpp @@ -18,13 +18,3 @@ # define IR_C_API OPENVINO_EXTERN_C OPENVINO_CORE_IMPORTS # endif // ir_ov_frontend_EXPORTS #endif // OPENVINO_STATIC_LIBRARY - -#define IR_ASSERT(ex, msg) \ - { \ - if (!(ex)) \ - throw std::runtime_error(msg); \ - } - -#define IR_THROW(msg) throw std::runtime_error(std::string("ERROR: ") + msg) - -#define NOT_IMPLEMENTED(msg) throw std::runtime_error(std::string(msg) + " is not implemented") diff --git a/src/frontends/ir/src/frontend.cpp b/src/frontends/ir/src/frontend.cpp index ff32cde6dee..4ce3da25c5a 100644 --- a/src/frontends/ir/src/frontend.cpp +++ b/src/frontends/ir/src/frontend.cpp @@ -2,13 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ir_frontend/frontend.hpp" +#include "openvino/frontend/ir/frontend.hpp" #include #include -#include "ir_frontend/model.hpp" -#include "ir_frontend/utility.hpp" +#include "input_model.hpp" #include "ngraph/runtime/aligned_buffer.hpp" #include "ngraph/runtime/shared_buffer.hpp" #include "openvino/core/any.hpp" @@ -20,6 +19,7 @@ using namespace ov; namespace ov { namespace frontend { +namespace ir { namespace { inline size_t GetIRVersion(pugi::xml_node& root) { @@ -59,7 +59,7 @@ size_t GetIRVersion(std::istream& model) { } // namespace -bool FrontEndIR::supported_impl(const std::vector& variants) const { +bool FrontEnd::supported_impl(const std::vector& variants) const { 
std::ifstream local_model_stream; std::istream* provided_model_stream = nullptr; @@ -99,7 +99,7 @@ bool FrontEndIR::supported_impl(const std::vector& variants) const { return version >= 10 && version <= 11; } -void FrontEndIR::add_extension(const ov::Extension::Ptr& ext) { +void FrontEnd::add_extension(const ov::Extension::Ptr& ext) { if (auto telemetry = std::dynamic_pointer_cast(ext)) { m_telemetry = telemetry; } else if (auto so_ext = std::dynamic_pointer_cast(ext)) { @@ -111,7 +111,7 @@ void FrontEndIR::add_extension(const ov::Extension::Ptr& ext) { extensions.emplace_back(ext); } -InputModel::Ptr FrontEndIR::load_impl(const std::vector& variants) const { +InputModel::Ptr FrontEnd::load_impl(const std::vector& variants) const { std::ifstream local_model_stream; std::istream* provided_model_stream = nullptr; std::shared_ptr weights; @@ -125,11 +125,11 @@ InputModel::Ptr FrontEndIR::load_impl(const std::vector& variants) cons return exts; }; - auto create_input_model = [&]() -> std::shared_ptr { + auto create_input_model = [&]() -> std::shared_ptr { if (provided_model_stream) { - return std::make_shared(*provided_model_stream, weights, create_extensions_map()); + return std::make_shared(*provided_model_stream, weights, create_extensions_map()); } else if (local_model_stream.is_open()) { - auto input_model = std::make_shared(local_model_stream, weights, create_extensions_map()); + auto input_model = std::make_shared(local_model_stream, weights, create_extensions_map()); local_model_stream.close(); return input_model; } @@ -203,9 +203,9 @@ InputModel::Ptr FrontEndIR::load_impl(const std::vector& variants) cons bin_stream.open(weights_path, std::ios::binary); if (!bin_stream.is_open()) #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - IR_THROW("Weights file " + ov::util::wstring_to_string(weights_path) + " cannot be opened!"); + IE_THROW() << "Weights file " + ov::util::wstring_to_string(weights_path) + " cannot be opened!"; #else - IR_THROW("Weights file " + weights_path + " cannot be opened!"); + IE_THROW() << "Weights file " + weights_path + " cannot be opened!"; #endif bin_stream.seekg(0, std::ios::end); @@ -225,15 +225,17 @@ InputModel::Ptr FrontEndIR::load_impl(const std::vector& variants) cons return create_input_model(); } -std::shared_ptr FrontEndIR::convert(InputModel::Ptr model) const { - auto ir_model = std::dynamic_pointer_cast(model); +std::shared_ptr FrontEnd::convert(InputModel::Ptr model) const { + auto ir_model = std::dynamic_pointer_cast(model); OPENVINO_ASSERT(ir_model != nullptr); return ir_model->convert(); } -std::string FrontEndIR::get_name() const { +std::string FrontEnd::get_name() const { return "ir"; } + +} // namespace ir } // namespace frontend } // namespace ov @@ -245,7 +247,7 @@ IR_C_API void* GetFrontEndData() { frontend::FrontEndPluginInfo* res = new frontend::FrontEndPluginInfo(); res->m_name = "ir"; res->m_creator = []() { - return std::make_shared(); + return std::make_shared(); }; return res; } diff --git a/src/frontends/ir/src/model.cpp b/src/frontends/ir/src/input_model.cpp similarity index 95% rename from src/frontends/ir/src/model.cpp rename to src/frontends/ir/src/input_model.cpp index 341cb6ea54a..96b38ce906c 100644 --- a/src/frontends/ir/src/model.cpp +++ b/src/frontends/ir/src/input_model.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ir_frontend/model.hpp" +#include "input_model.hpp" #include @@ -186,7 +186,9 @@ void ParsePreProcess(pugi::xml_node& root, namespace ov { namespace frontend { -class 
InputModelIR::InputModelIRImpl { +namespace ir { + +class InputModel::InputModelIRImpl { std::shared_ptr m_weights; std::unordered_map m_extensions; std::unordered_map m_opsets; @@ -217,17 +219,17 @@ public: std::shared_ptr convert(); }; -InputModelIR::InputModelIR(std::istream& stream, - const std::shared_ptr& weights, - const std::unordered_map& extensions) { +InputModel::InputModel(std::istream& stream, + const std::shared_ptr& weights, + const std::unordered_map& extensions) { _impl = std::make_shared(stream, weights, extensions); } -std::shared_ptr InputModelIR::convert() { +std::shared_ptr InputModel::convert() { return _impl->convert(); } -std::shared_ptr InputModelIR::InputModelIRImpl::convert() { +std::shared_ptr InputModel::InputModelIRImpl::convert() { std::unordered_map> variables; // Load default opsets @@ -240,5 +242,7 @@ std::shared_ptr InputModelIR::InputModelIRImpl::convert() { return function; } + +} // namespace ir } // namespace frontend } // namespace ov diff --git a/src/frontends/ir/src/input_model.hpp b/src/frontends/ir/src/input_model.hpp new file mode 100644 index 00000000000..6fae50ea6cd --- /dev/null +++ b/src/frontends/ir/src/input_model.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "ngraph/runtime/aligned_buffer.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/visibility.hpp" + +namespace ov { +namespace frontend { +namespace ir { + +class InputModel : public ov::frontend::InputModel { + friend class FrontEnd; + class InputModelIRImpl; + std::shared_ptr _impl; + +public: + InputModel(std::istream& stream, + const std::shared_ptr& weights, + const std::unordered_map& extensions); + + std::shared_ptr convert(); +}; + +} // namespace ir +} // namespace frontend +} // namespace ov diff --git a/src/frontends/ir/src/ir_deserializer.cpp b/src/frontends/ir/src/ir_deserializer.cpp index 555d32997a7..166c9a92808 100644 --- a/src/frontends/ir/src/ir_deserializer.cpp +++ b/src/frontends/ir/src/ir_deserializer.cpp @@ -7,7 +7,6 @@ #include #include "ie_ngraph_utils.hpp" -#include "ir_frontend/model.hpp" #include "ngraph/op/util/framework_node.hpp" #include "ngraph/opsets/opset1.hpp" #include "rt_info_deserializer.hpp" diff --git a/src/frontends/ir/src/ir_deserializer.hpp b/src/frontends/ir/src/ir_deserializer.hpp index 869a980fed2..27836bb5220 100644 --- a/src/frontends/ir/src/ir_deserializer.hpp +++ b/src/frontends/ir/src/ir_deserializer.hpp @@ -9,7 +9,7 @@ #include #include "ie_ngraph_utils.hpp" -#include "ir_frontend/model.hpp" +#include "input_model.hpp" #include "ngraph/opsets/opset.hpp" #include "openvino/core/attribute_visitor.hpp" #include "openvino/core/op_extension.hpp" diff --git a/src/frontends/ir/src/rt_info_deserializer.cpp b/src/frontends/ir/src/rt_info_deserializer.cpp index 0b7bee7c329..1c39be259ba 100644 --- a/src/frontends/ir/src/rt_info_deserializer.cpp +++ b/src/frontends/ir/src/rt_info_deserializer.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include #include +#include "openvino/frontend/exception.hpp" + using namespace ov; void RTInfoDeserializer::on_adapter(const std::string& name, ValueAccessor& adapter) { @@ -19,6 +20,6 @@ void RTInfoDeserializer::on_adapter(const std::string& name, ValueAccessor str_to_container(val, ss); a->set(ss); } else { - IR_THROW("Not implemented"); + IE_THROW() << "Not implemented"; } } diff --git a/src/frontends/ir/src/rt_info_deserializer.hpp 
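// Illustrative usage sketch, not part of this patch: with the rename to
// ov::frontend::ir::InputModel, a caller goes through FrontEndManager and
// never names the IR classes directly. The "model.xml" path is hypothetical,
// and the weights file is resolved next to it by the frontend.
//
// #include "openvino/frontend/manager.hpp"
//
// ov::frontend::FrontEndManager manager;
// auto fe = manager.load_by_model("model.xml");  // selects the "ir" frontend
// auto input_model = fe->load("model.xml");
// auto function = fe->convert(input_model);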
b/src/frontends/ir/src/rt_info_deserializer.hpp index f706fbc7c7f..22d5550ac63 100644 --- a/src/frontends/ir/src/rt_info_deserializer.hpp +++ b/src/frontends/ir/src/rt_info_deserializer.hpp @@ -7,7 +7,6 @@ #include #include -#include "ir_frontend/utility.hpp" #include "openvino/core/attribute_visitor.hpp" #include "utils.hpp" diff --git a/src/frontends/onnx/frontend/CMakeLists.txt b/src/frontends/onnx/frontend/CMakeLists.txt index 5fe802a7f3f..aaf2b473682 100644 --- a/src/frontends/onnx/frontend/CMakeLists.txt +++ b/src/frontends/onnx/frontend/CMakeLists.txt @@ -20,6 +20,6 @@ ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} $) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/onnx_import - DESTINATION ${FRONTEND_INSTALL_INCLUDE} + DESTINATION ${FRONTEND_INSTALL_INCLUDE}/ngraph/frontend COMPONENT core_dev FILES_MATCHING PATTERN "*.hpp") diff --git a/src/frontends/onnx/frontend/include/onnx_frontend/frontend.hpp b/src/frontends/onnx/frontend/include/openvino/frontend/onnx/frontend.hpp similarity index 86% rename from src/frontends/onnx/frontend/include/onnx_frontend/frontend.hpp rename to src/frontends/onnx/frontend/include/openvino/frontend/onnx/frontend.hpp index a82d7056a70..131bff7aa79 100644 --- a/src/frontends/onnx/frontend/include/onnx_frontend/frontend.hpp +++ b/src/frontends/onnx/frontend/include/openvino/frontend/onnx/frontend.hpp @@ -4,7 +4,8 @@ #pragma once -#include +#include +#include #ifdef OPENVINO_STATIC_LIBRARY # define ONNX_FRONTEND_API @@ -21,7 +22,9 @@ namespace ov { namespace frontend { -class ONNX_FRONTEND_API FrontEndONNX : public FrontEnd { +namespace onnx { + +class ONNX_FRONTEND_API FrontEnd : public ov::frontend::FrontEnd { public: std::shared_ptr convert(InputModel::Ptr model) const override; void convert(std::shared_ptr partially_converted) const override; @@ -37,5 +40,6 @@ private: std::shared_ptr m_telemetry; }; +} // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/graph.hpp b/src/frontends/onnx/frontend/src/core/graph.hpp index 0e5c2378d32..49991b29754 100644 --- a/src/frontends/onnx/frontend/src/core/graph.hpp +++ b/src/frontends/onnx/frontend/src/core/graph.hpp @@ -10,12 +10,12 @@ #include #include -#include "common/telemetry_extension.hpp" #include "core/graph_cache.hpp" #include "core/model.hpp" #include "ngraph/function.hpp" #include "ngraph/op/parameter.hpp" #include "onnx_import/core/operator_set.hpp" +#include "openvino/frontend/telemetry_extension.hpp" namespace ngraph { namespace onnx_import { diff --git a/src/frontends/onnx/frontend/src/editor.hpp b/src/frontends/onnx/frontend/src/editor.hpp index 133a6dcc743..d72d03a64ad 100644 --- a/src/frontends/onnx/frontend/src/editor.hpp +++ b/src/frontends/onnx/frontend/src/editor.hpp @@ -4,10 +4,10 @@ #pragma once -#include #include #include #include +#include #include "editor_types.hpp" #include "ngraph/function.hpp" diff --git a/src/frontends/onnx/frontend/src/frontend.cpp b/src/frontends/onnx/frontend/src/frontend.cpp index 6acec3bd5f3..38aca686dde 100644 --- a/src/frontends/onnx/frontend/src/frontend.cpp +++ b/src/frontends/onnx/frontend/src/frontend.cpp @@ -2,82 +2,82 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include #include #include -#include -#include #include +#include +#include +#include +#include #include #include #include "onnx_common/onnx_model_validator.hpp" using namespace ov; -using namespace ov::frontend; +using namespace ov::frontend::onnx; -ONNX_FRONTEND_C_API FrontEndVersion GetAPIVersion() { +ONNX_FRONTEND_C_API 
ov::frontend::FrontEndVersion GetAPIVersion() { return OV_FRONTEND_API_VERSION; } ONNX_FRONTEND_C_API void* GetFrontEndData() { - FrontEndPluginInfo* res = new FrontEndPluginInfo(); + ov::frontend::FrontEndPluginInfo* res = new ov::frontend::FrontEndPluginInfo(); res->m_name = "onnx"; res->m_creator = []() { - return std::make_shared(); + return std::make_shared(); }; return res; } -InputModel::Ptr FrontEndONNX::load_impl(const std::vector& variants) const { - if (variants.size() == 0) { +InputModel::Ptr FrontEnd::load_impl(const std::vector& variants) const { + if (variants.empty()) { return nullptr; } if (variants[0].is()) { const auto path = variants[0].as(); - return std::make_shared(path, m_telemetry); + return std::make_shared(path, m_telemetry); } #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) if (variants[0].is()) { const auto path = variants[0].as(); - return std::make_shared(path, m_telemetry); + return std::make_shared(path, m_telemetry); } #endif if (variants[0].is()) { const auto stream = variants[0].as(); if (variants.size() > 1 && variants[1].is()) { const auto path = variants[0].as(); - return std::make_shared(*stream, path, m_telemetry); + return std::make_shared(*stream, path, m_telemetry); } #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) if (variants.size() > 1 && variants[1].is()) { const auto path = variants[1].as(); - return std::make_shared(*stream, path, m_telemetry); + return std::make_shared(*stream, path, m_telemetry); } #endif - return std::make_shared(*stream, m_telemetry); + return std::make_shared(*stream, m_telemetry); } return nullptr; } -std::shared_ptr FrontEndONNX::convert(InputModel::Ptr model) const { - auto model_onnx = std::dynamic_pointer_cast(model); +std::shared_ptr FrontEnd::convert(InputModel::Ptr model) const { + auto model_onnx = std::dynamic_pointer_cast(model); NGRAPH_CHECK(model_onnx != nullptr, "Invalid input model"); return model_onnx->convert(); } -void FrontEndONNX::convert(std::shared_ptr partially_converted) const { +void FrontEnd::convert(std::shared_ptr partially_converted) const { ngraph::onnx_import::detail::convert_decoded_function(partially_converted); } -std::shared_ptr FrontEndONNX::decode(InputModel::Ptr model) const { - auto model_onnx = std::dynamic_pointer_cast(model); +std::shared_ptr FrontEnd::decode(InputModel::Ptr model) const { + auto model_onnx = std::dynamic_pointer_cast(model); NGRAPH_CHECK(model_onnx != nullptr, "Invalid input model"); return model_onnx->decode(); } -std::string FrontEndONNX::get_name() const { +std::string FrontEnd::get_name() const { return "onnx"; } @@ -103,7 +103,7 @@ private: }; } // namespace -bool FrontEndONNX::supported_impl(const std::vector& variants) const { +bool FrontEnd::supported_impl(const std::vector& variants) const { if (variants.size() == 0) { return false; } @@ -133,7 +133,7 @@ bool FrontEndONNX::supported_impl(const std::vector& variants) const { return false; } -void FrontEndONNX::add_extension(const std::shared_ptr& extension) { +void FrontEnd::add_extension(const std::shared_ptr& extension) { if (auto telemetry = std::dynamic_pointer_cast(extension)) { m_telemetry = telemetry; } diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp index 38fda9ae8a7..8a3fe3ee286 100644 --- a/src/frontends/onnx/frontend/src/input_model.cpp +++ b/src/frontends/onnx/frontend/src/input_model.cpp @@ -4,79 +4,76 @@ #include "input_model.hpp" -#include +#include #include #include "place.hpp" using 
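// Illustrative sketch, not part of this patch: load_impl() above dispatches on
// the variant type, accepting either a filesystem path or an std::istream*
// (optionally followed by a path for ONNX external data). Both entry points
// through the public load() wrapper, with a hypothetical file name:
//
// auto model_from_path = fe->load("network.onnx");
//
// std::ifstream stream("network.onnx", std::ios::binary);
// auto model_from_stream = fe->load(&stream);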
namespace ov; -using namespace ov::frontend; +using namespace ov::frontend::onnx; NGRAPH_SUPPRESS_DEPRECATED_START -InputModelONNX::InputModelONNX(const std::string& path, - const std::shared_ptr& telemetry) +InputModel::InputModel(const std::string& path, const std::shared_ptr& telemetry) : m_editor{std::make_shared(path, telemetry)} {} #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) -InputModelONNX::InputModelONNX(const std::wstring& path, - const std::shared_ptr& telemetry) +InputModel::InputModel(const std::wstring& path, const std::shared_ptr& telemetry) : m_editor{std::make_shared(path, telemetry)} {} #endif -InputModelONNX::InputModelONNX(std::istream& model_stream, - const std::shared_ptr& telemetry) +InputModel::InputModel(std::istream& model_stream, const std::shared_ptr& telemetry) : m_editor{std::make_shared(model_stream, "", telemetry)} {} -InputModelONNX::InputModelONNX(std::istream& model_stream, - const std::string& path, - const std::shared_ptr& telemetry) +InputModel::InputModel(std::istream& model_stream, + const std::string& path, + const std::shared_ptr& telemetry) : m_editor{std::make_shared(model_stream, path, telemetry)} {} #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -InputModelONNX::InputModelONNX(std::istream& model_stream, - const std::wstring& path, - const std::shared_ptr& telemetry) - : InputModelONNX(model_stream, ov::util::wstring_to_string(path), telemetry) {} +InputModel::InputModel(std::istream& model_stream, + const std::wstring& path, + const std::shared_ptr& telemetry) + : InputModel(model_stream, ov::util::wstring_to_string(path), telemetry) {} #endif -std::vector InputModelONNX::get_inputs() const { +std::vector InputModel::get_inputs() const { const auto& inputs = m_editor->model_inputs(); std::vector in_places; in_places.reserve(inputs.size()); for (const auto& input : inputs) { - in_places.push_back(std::make_shared(input, m_editor)); + in_places.push_back(std::make_shared(input, m_editor)); } return in_places; } -std::vector InputModelONNX::get_outputs() const { +std::vector InputModel::get_outputs() const { const auto& outputs = m_editor->model_outputs(); std::vector out_places; out_places.reserve(outputs.size()); for (const auto& output : outputs) { - out_places.push_back(std::make_shared(output, m_editor)); + out_places.push_back(std::make_shared(output, m_editor)); } return out_places; } -Place::Ptr InputModelONNX::get_place_by_tensor_name(const std::string& tensor_name) const { +ov::frontend::Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensor_name) const { if (m_editor->is_correct_tensor_name(tensor_name)) { - return std::make_shared(tensor_name, m_editor); + return std::make_shared(tensor_name, m_editor); } return nullptr; } -Place::Ptr InputModelONNX::get_place_by_operation_name(const std::string& operation_name) const { +ov::frontend::Place::Ptr InputModel::get_place_by_operation_name(const std::string& operation_name) const { if (m_editor->is_correct_and_unambiguous_node(operation_name)) { const auto node_index = m_editor->get_node_index(onnx_editor::EditorNode{operation_name}); - return std::make_shared(onnx_editor::EditorNode{node_index}, m_editor); + return std::make_shared(onnx_editor::EditorNode{node_index}, m_editor); } return nullptr; } -Place::Ptr InputModelONNX::get_place_by_operation_name_and_input_port(const std::string& operation_name, - int input_port_index) { +ov::frontend::Place::Ptr InputModel::get_place_by_operation_name_and_input_port(const std::string& operation_name, + int 
input_port_index) { const auto op = get_place_by_operation_name(operation_name); if (op != nullptr) { return op->get_input_port(input_port_index); @@ -84,8 +81,8 @@ Place::Ptr InputModelONNX::get_place_by_operation_name_and_input_port(const std: return nullptr; } -Place::Ptr InputModelONNX::get_place_by_operation_name_and_output_port(const std::string& operation_name, - int output_port_index) { +ov::frontend::Place::Ptr InputModel::get_place_by_operation_name_and_output_port(const std::string& operation_name, + int output_port_index) { const auto op = get_place_by_operation_name(operation_name); if (op != nullptr) { return op->get_output_port(output_port_index); @@ -93,62 +90,62 @@ Place::Ptr InputModelONNX::get_place_by_operation_name_and_output_port(const std return nullptr; } -void InputModelONNX::set_name_for_tensor(Place::Ptr tensor, const std::string& new_name) { - const auto onnx_tensor = std::dynamic_pointer_cast(tensor); +void InputModel::set_name_for_tensor(Place::Ptr tensor, const std::string& new_name) { + const auto onnx_tensor = std::dynamic_pointer_cast(tensor); FRONT_END_GENERAL_CHECK(onnx_tensor, __FUNCTION__, " expects a pointer to place of ONNX tensor type."); onnx_tensor->set_name(new_name); } -void InputModelONNX::set_name_for_operation(Place::Ptr operation, const std::string& new_name) { - const auto onnx_operation = std::dynamic_pointer_cast(operation); +void InputModel::set_name_for_operation(Place::Ptr operation, const std::string& new_name) { + const auto onnx_operation = std::dynamic_pointer_cast(operation); FRONT_END_GENERAL_CHECK(onnx_operation, __FUNCTION__, " expects a pointer to place of ONNX operation type."); onnx_operation->set_name(new_name); } -void InputModelONNX::free_name_for_operation(const std::string& name) { +void InputModel::free_name_for_operation(const std::string& name) { m_editor->clear_nodes_name(name); } -void InputModelONNX::set_name_for_dimension(Place::Ptr tensor, size_t shape_dim_index, const std::string& dim_name) { - const auto onnx_tensor = std::dynamic_pointer_cast(tensor); +void InputModel::set_name_for_dimension(Place::Ptr tensor, size_t shape_dim_index, const std::string& dim_name) { + const auto onnx_tensor = std::dynamic_pointer_cast(tensor); FRONT_END_GENERAL_CHECK(onnx_tensor, __FUNCTION__, " expects a pointer to place of ONNX tensor type."); onnx_tensor->set_name_for_dimension(shape_dim_index, dim_name); } -void InputModelONNX::add_name_for_tensor(Place::Ptr, const std::string&) { +void InputModel::add_name_for_tensor(Place::Ptr, const std::string&) { FRONT_END_THROW("Method add_name_for_tensor is not applicable for ONNX model. ONNX tensor has just one name."); } -void InputModelONNX::free_name_for_tensor(const std::string&) { +void InputModel::free_name_for_tensor(const std::string&) { FRONT_END_THROW("Method free_name_for_tensor is not applicable for ONNX model. 
ONNX tensor name is an identifier."); } -void InputModelONNX::set_partial_shape(Place::Ptr place, const ngraph::PartialShape& shape) { +void InputModel::set_partial_shape(Place::Ptr place, const ngraph::PartialShape& shape) { std::map m; m[place->get_names()[0]] = shape; m_editor->set_input_shapes(m); } -ngraph::PartialShape InputModelONNX::get_partial_shape(Place::Ptr place) const { +ngraph::PartialShape InputModel::get_partial_shape(Place::Ptr place) const { return m_editor->get_tensor_shape(place->get_names().at(0)); } -void InputModelONNX::set_element_type(Place::Ptr place, const ngraph::element::Type& type) { +void InputModel::set_element_type(Place::Ptr place, const ngraph::element::Type& type) { std::map m; m[place->get_names()[0]] = type; m_editor->set_input_types(m); } -std::shared_ptr InputModelONNX::decode() { +std::shared_ptr InputModel::decode() { return m_editor->decode(); } -std::shared_ptr InputModelONNX::convert() { +std::shared_ptr InputModel::convert() { return m_editor->get_function(); } // Editor features -void InputModelONNX::override_all_outputs(const std::vector& outputs) { +void InputModel::override_all_outputs(const std::vector& outputs) { extract_subgraph({}, outputs); NGRAPH_CHECK(m_editor->model_outputs().size() == outputs.size(), "Unexpected number of outputs after override_all_outputs"); @@ -160,7 +157,7 @@ void InputModelONNX::override_all_outputs(const std::vector& outputs "Not all provided arguments of override_all_outputs are new outputs of the model"); } -void InputModelONNX::override_all_inputs(const std::vector& inputs) { +void InputModel::override_all_inputs(const std::vector& inputs) { const auto outputs_before_extraction = m_editor->model_outputs(); extract_subgraph({inputs}, {}); NGRAPH_CHECK(std::equal(std::begin(outputs_before_extraction), @@ -172,13 +169,13 @@ void InputModelONNX::override_all_inputs(const std::vector& inputs) "Unexpected number of inputs after override_all_inputs"); } -void InputModelONNX::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { +void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { std::vector onnx_inputs; onnx_inputs.reserve(inputs.size()); for (const auto& input : inputs) { - if (const auto input_port = std::dynamic_pointer_cast(input)) { + if (const auto input_port = std::dynamic_pointer_cast(input)) { onnx_inputs.push_back(input_port->get_input_edge()); - } else if (const auto tensor = std::dynamic_pointer_cast(input)) { + } else if (const auto tensor = std::dynamic_pointer_cast(input)) { auto name = tensor->get_names()[0]; const auto consumers = m_editor->find_output_consumers(name); std::transform(std::begin(consumers), @@ -187,7 +184,7 @@ void InputModelONNX::extract_subgraph(const std::vector& inputs, con [](const onnx_editor::InputEdge& edge) { return edge; }); - } else if (const auto op = std::dynamic_pointer_cast(input)) { + } else if (const auto op = std::dynamic_pointer_cast(input)) { const auto editor_node = op->get_editor_node(); const auto op_inputs = m_editor->get_input_ports(editor_node); int node_idx = m_editor->get_node_index(editor_node); @@ -204,14 +201,14 @@ void InputModelONNX::extract_subgraph(const std::vector& inputs, con std::vector onnx_outputs; onnx_outputs.reserve(outputs.size()); for (const auto& output : outputs) { - if (const auto output_port = std::dynamic_pointer_cast(output)) { + if (const auto output_port = std::dynamic_pointer_cast(output)) { onnx_outputs.push_back(output_port->get_output_edge()); - } else if (const 
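// Illustrative sketch, not part of this patch: the setters above translate a
// place's tensor name into ONNX editor requests, so reshaping an input before
// conversion is a three-step sequence (the "input" tensor name is hypothetical):
//
// auto place = input_model->get_place_by_tensor_name("input");
// input_model->set_partial_shape(place, ngraph::PartialShape{1, 3, 224, 224});
// input_model->set_element_type(place, ngraph::element::f32);
// auto function = fe->convert(input_model);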
auto tensor = std::dynamic_pointer_cast(output)) { + } else if (const auto tensor = std::dynamic_pointer_cast(output)) { const auto output_port = tensor->get_producing_port(); - const auto onnx_output_edge = std::dynamic_pointer_cast(output_port); + const auto onnx_output_edge = std::dynamic_pointer_cast(output_port); NGRAPH_CHECK(onnx_output_edge, "Non-onnx output place was passed as extraction subgraph argument"); onnx_outputs.push_back(onnx_output_edge->get_output_edge()); - } else if (const auto op = std::dynamic_pointer_cast(output)) { + } else if (const auto op = std::dynamic_pointer_cast(output)) { const auto editor_node = op->get_editor_node(); const auto op_outputs = m_editor->get_output_ports(editor_node); int node_idx = m_editor->get_node_index(editor_node); diff --git a/src/frontends/onnx/frontend/src/input_model.hpp b/src/frontends/onnx/frontend/src/input_model.hpp index a3875b2adba..1e1c86427ed 100644 --- a/src/frontends/onnx/frontend/src/input_model.hpp +++ b/src/frontends/onnx/frontend/src/input_model.hpp @@ -4,28 +4,30 @@ #pragma once -#include #include #include +#include namespace ov { namespace frontend { -class InputModelONNX : public InputModel { +namespace onnx { + +class InputModel : public ov::frontend::InputModel { public: - InputModelONNX(const std::string& path, const std::shared_ptr& telemetry = {}); + InputModel(const std::string& path, const std::shared_ptr& telemetry = {}); #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - InputModelONNX(const std::wstring& path, const std::shared_ptr& telemetry = {}); + InputModel(const std::wstring& path, const std::shared_ptr& telemetry = {}); #endif - InputModelONNX(std::istream& model_stream, const std::shared_ptr& telemetry = {}); + InputModel(std::istream& model_stream, const std::shared_ptr& telemetry = {}); // The path can be required even if the model is passed as a stream because it is necessary // for ONNX external data feature - InputModelONNX(std::istream& model_stream, - const std::string& path, - const std::shared_ptr& telemetry = {}); + InputModel(std::istream& model_stream, + const std::string& path, + const std::shared_ptr& telemetry = {}); #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT - InputModelONNX(std::istream& model_stream, - const std::wstring& path, - const std::shared_ptr& telemetry = {}); + InputModel(std::istream& model_stream, + const std::wstring& path, + const std::shared_ptr& telemetry = {}); #endif std::vector get_inputs() const override; @@ -62,5 +64,7 @@ public: private: std::shared_ptr m_editor; }; + +} // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/place.cpp b/src/frontends/onnx/frontend/src/place.cpp index 5c2c25fbb1c..6e73fd450ac 100644 --- a/src/frontends/onnx/frontend/src/place.cpp +++ b/src/frontends/onnx/frontend/src/place.cpp @@ -4,160 +4,157 @@ #include "place.hpp" -#include +#include using namespace ov; -using namespace ov::frontend; +using namespace ov::frontend::onnx; -PlaceInputEdgeONNX::PlaceInputEdgeONNX(const onnx_editor::InputEdge& edge, - std::shared_ptr editor) +PlaceInputEdge::PlaceInputEdge(const onnx_editor::InputEdge& edge, std::shared_ptr editor) : m_edge{edge}, m_editor{std::move(editor)} {} -PlaceInputEdgeONNX::PlaceInputEdgeONNX(onnx_editor::InputEdge&& edge, - std::shared_ptr editor) +PlaceInputEdge::PlaceInputEdge(onnx_editor::InputEdge&& edge, std::shared_ptr editor) : m_edge{std::move(edge)}, m_editor{std::move(editor)} {} -onnx_editor::InputEdge PlaceInputEdgeONNX::get_input_edge() 
const { +onnx_editor::InputEdge PlaceInputEdge::get_input_edge() const { return m_edge; } -bool PlaceInputEdgeONNX::is_input() const { +bool PlaceInputEdge::is_input() const { return m_editor->is_input(m_edge); } -bool PlaceInputEdgeONNX::is_output() const { +bool PlaceInputEdge::is_output() const { return false; } -bool PlaceInputEdgeONNX::is_equal(Place::Ptr another) const { - if (const auto in_edge = std::dynamic_pointer_cast(another)) { +bool PlaceInputEdge::is_equal(Place::Ptr another) const { + if (const auto in_edge = std::dynamic_pointer_cast(another)) { const auto& editor_edge = in_edge->get_input_edge(); return (editor_edge.m_node_idx == m_edge.m_node_idx) && (editor_edge.m_port_idx == m_edge.m_port_idx); } return false; } -bool PlaceInputEdgeONNX::is_equal_data(Place::Ptr another) const { +bool PlaceInputEdge::is_equal_data(Place::Ptr another) const { return get_source_tensor()->is_equal_data(another); } -Place::Ptr PlaceInputEdgeONNX::get_source_tensor() const { - return std::make_shared(m_editor->get_source_tensor_name(m_edge), m_editor); +ov::frontend::Place::Ptr PlaceInputEdge::get_source_tensor() const { + return std::make_shared(m_editor->get_source_tensor_name(m_edge), m_editor); } -std::vector PlaceInputEdgeONNX::get_consuming_operations() const { - return {std::make_shared(onnx_editor::EditorNode{m_edge.m_node_idx}, m_editor)}; +std::vector PlaceInputEdge::get_consuming_operations() const { + return {std::make_shared(onnx_editor::EditorNode{m_edge.m_node_idx}, m_editor)}; } -Place::Ptr PlaceInputEdgeONNX::get_producing_operation() const { +ov::frontend::Place::Ptr PlaceInputEdge::get_producing_operation() const { return get_source_tensor()->get_producing_operation(); } -Place::Ptr PlaceInputEdgeONNX::get_producing_port() const { +ov::frontend::Place::Ptr PlaceInputEdge::get_producing_port() const { return get_source_tensor()->get_producing_port(); } -PlaceOutputEdgeONNX::PlaceOutputEdgeONNX(const onnx_editor::OutputEdge& edge, - std::shared_ptr editor) +PlaceOutputEdge::PlaceOutputEdge(const onnx_editor::OutputEdge& edge, + std::shared_ptr editor) : m_edge{edge}, m_editor{std::move(editor)} {} -PlaceOutputEdgeONNX::PlaceOutputEdgeONNX(onnx_editor::OutputEdge&& edge, - std::shared_ptr editor) +PlaceOutputEdge::PlaceOutputEdge(onnx_editor::OutputEdge&& edge, std::shared_ptr editor) : m_edge{std::move(edge)}, m_editor{std::move(editor)} {} -onnx_editor::OutputEdge PlaceOutputEdgeONNX::get_output_edge() const { +onnx_editor::OutputEdge PlaceOutputEdge::get_output_edge() const { return m_edge; } -bool PlaceOutputEdgeONNX::is_input() const { +bool PlaceOutputEdge::is_input() const { return false; } -bool PlaceOutputEdgeONNX::is_output() const { +bool PlaceOutputEdge::is_output() const { return m_editor->is_output(m_edge); } -bool PlaceOutputEdgeONNX::is_equal(Place::Ptr another) const { - if (const auto out_edge = std::dynamic_pointer_cast(another)) { +bool PlaceOutputEdge::is_equal(Place::Ptr another) const { + if (const auto out_edge = std::dynamic_pointer_cast(another)) { const auto& editor_edge = out_edge->get_output_edge(); return (editor_edge.m_node_idx == m_edge.m_node_idx) && (editor_edge.m_port_idx == m_edge.m_port_idx); } return false; } -bool PlaceOutputEdgeONNX::is_equal_data(Place::Ptr another) const { +bool PlaceOutputEdge::is_equal_data(Place::Ptr another) const { return get_target_tensor()->is_equal_data(another); } -Place::Ptr PlaceOutputEdgeONNX::get_target_tensor() const { - return std::make_shared(m_editor->get_target_tensor_name(m_edge), m_editor); 
+ov::frontend::Place::Ptr PlaceOutputEdge::get_target_tensor() const { + return std::make_shared(m_editor->get_target_tensor_name(m_edge), m_editor); } -std::vector PlaceOutputEdgeONNX::get_consuming_ports() const { +std::vector PlaceOutputEdge::get_consuming_ports() const { return get_target_tensor()->get_consuming_ports(); } -Place::Ptr PlaceOutputEdgeONNX::get_producing_operation() const { - return std::make_shared(onnx_editor::EditorNode{m_edge.m_node_idx}, m_editor); +ov::frontend::Place::Ptr PlaceOutputEdge::get_producing_operation() const { + return std::make_shared(onnx_editor::EditorNode{m_edge.m_node_idx}, m_editor); } -std::vector PlaceOutputEdgeONNX::get_consuming_operations() const { +std::vector PlaceOutputEdge::get_consuming_operations() const { return get_target_tensor()->get_consuming_operations(); } -PlaceTensorONNX::PlaceTensorONNX(const std::string& name, std::shared_ptr editor) +PlaceTensor::PlaceTensor(const std::string& name, std::shared_ptr editor) : m_name{name}, m_editor{std::move(editor)} {} -PlaceTensorONNX::PlaceTensorONNX(std::string&& name, std::shared_ptr editor) +PlaceTensor::PlaceTensor(std::string&& name, std::shared_ptr editor) : m_name{std::move(name)}, m_editor{std::move(editor)} {} -std::vector PlaceTensorONNX::get_names() const { +std::vector PlaceTensor::get_names() const { return {m_name}; } -Place::Ptr PlaceTensorONNX::get_producing_port() const { +ov::frontend::Place::Ptr PlaceTensor::get_producing_port() const { FRONT_END_GENERAL_CHECK(!is_input(), "Tensor: " + m_name + " is an input of the model and doesn't have producing port."); - return std::make_shared(m_editor->find_output_edge(m_name), m_editor); + return std::make_shared(m_editor->find_output_edge(m_name), m_editor); } -std::vector PlaceTensorONNX::get_consuming_ports() const { - std::vector ret; +std::vector PlaceTensor::get_consuming_ports() const { + std::vector ret; auto edges = m_editor->find_output_consumers(m_name); std::transform(edges.begin(), edges.end(), std::back_inserter(ret), [this](const onnx_editor::InputEdge& edge) { - return std::make_shared(edge, this->m_editor); + return std::make_shared(edge, this->m_editor); }); return ret; } -Place::Ptr PlaceTensorONNX::get_producing_operation() const { +ov::frontend::Place::Ptr PlaceTensor::get_producing_operation() const { return get_producing_port()->get_producing_operation(); } -bool PlaceTensorONNX::is_input() const { +bool PlaceTensor::is_input() const { const auto inputs = m_editor->model_inputs(); return std::find(std::begin(inputs), std::end(inputs), m_name) != std::end(inputs); } -bool PlaceTensorONNX::is_output() const { +bool PlaceTensor::is_output() const { const auto outputs = m_editor->model_outputs(); return std::find(std::begin(outputs), std::end(outputs), m_name) != std::end(outputs); } -bool PlaceTensorONNX::is_equal(Place::Ptr another) const { - if (const auto tensor = std::dynamic_pointer_cast(another)) { +bool PlaceTensor::is_equal(Place::Ptr another) const { + if (const auto tensor = std::dynamic_pointer_cast(another)) { return m_name == tensor->get_names().at(0); } return false; } -bool PlaceTensorONNX::is_equal_data(Place::Ptr another) const { +bool PlaceTensor::is_equal_data(Place::Ptr another) const { const auto consuming_ports = get_consuming_ports(); const auto eq_to_consuming_port = [&consuming_ports](const Ptr& another) { return std::any_of(consuming_ports.begin(), consuming_ports.end(), [&another](const Ptr& place) { @@ -168,9 +165,9 @@ bool PlaceTensorONNX::is_equal_data(Place::Ptr another) const { 
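// Illustrative sketch, not part of this patch: PlaceTensor::get_consuming_ports()
// above fans a tensor out to one PlaceInputEdge per consumer, so counting the
// readers of a tensor is a pure place-graph walk (tensor name hypothetical):
//
// auto tensor = input_model->get_place_by_tensor_name("conv1_out");
// size_t n_readers = tensor ? tensor->get_consuming_ports().size() : 0;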
eq_to_consuming_port(another); } -std::vector PlaceTensorONNX::get_consuming_operations() const { - std::vector consuming_ports = get_consuming_ports(); - std::vector consuming_ops; +std::vector PlaceTensor::get_consuming_operations() const { + std::vector consuming_ports = get_consuming_ports(); + std::vector consuming_ops; std::transform(std::begin(consuming_ports), std::end(consuming_ports), std::back_inserter(consuming_ops), @@ -181,87 +178,86 @@ std::vector PlaceTensorONNX::get_consuming_operations() const { return consuming_ops; } -void PlaceTensorONNX::set_name(const std::string& new_name) { +void PlaceTensor::set_name(const std::string& new_name) { if (m_name == new_name) return; m_editor->set_tensor_name(m_name, new_name); m_name = new_name; } -void PlaceTensorONNX::set_name_for_dimension(size_t shape_dim_index, const std::string& dim_name) { +void PlaceTensor::set_name_for_dimension(size_t shape_dim_index, const std::string& dim_name) { m_editor->set_name_for_dimension(m_name, shape_dim_index, dim_name); } -PlaceOpONNX::PlaceOpONNX(const onnx_editor::EditorNode& node, std::shared_ptr editor) +PlaceOp::PlaceOp(const onnx_editor::EditorNode& node, std::shared_ptr editor) : m_node{node}, m_editor{std::move(editor)} {} -PlaceOpONNX::PlaceOpONNX(onnx_editor::EditorNode&& node, std::shared_ptr editor) +PlaceOp::PlaceOp(onnx_editor::EditorNode&& node, std::shared_ptr editor) : m_node{std::move(node)}, m_editor{std::move(editor)} {} -std::vector PlaceOpONNX::get_names() const { +std::vector PlaceOp::get_names() const { return {m_node.m_node_name}; } -const onnx_editor::EditorNode& PlaceOpONNX::get_editor_node() const { +const onnx_editor::EditorNode& PlaceOp::get_editor_node() const { return m_node; } -Place::Ptr PlaceOpONNX::get_output_port() const { +ov::frontend::Place::Ptr PlaceOp::get_output_port() const { if (m_editor->get_output_ports(m_node).size() == 1) { return get_output_port(0); } return nullptr; } -Place::Ptr PlaceOpONNX::get_output_port(int output_port_index) const { +ov::frontend::Place::Ptr PlaceOp::get_output_port(int output_port_index) const { if (output_port_index < m_editor->get_output_ports(m_node).size()) { - return std::make_shared( + return std::make_shared( m_editor->find_output_edge(m_node, onnx_editor::EditorOutput{output_port_index}), m_editor); } return nullptr; } -Place::Ptr PlaceOpONNX::get_output_port(const std::string& output_port_name) const { +ov::frontend::Place::Ptr PlaceOp::get_output_port(const std::string& output_port_name) const { const auto output_ports = m_editor->get_output_ports(m_node); if (std::count(std::begin(output_ports), std::end(output_ports), output_port_name) == 1) { - return std::make_shared( + return std::make_shared( m_editor->find_output_edge(m_node, onnx_editor::EditorOutput{output_port_name}), m_editor); } return nullptr; } -Place::Ptr PlaceOpONNX::get_input_port() const { +ov::frontend::Place::Ptr PlaceOp::get_input_port() const { if (m_editor->get_input_ports(m_node).size() == 1) { return get_input_port(0); } return nullptr; } -Place::Ptr PlaceOpONNX::get_input_port(int input_port_index) const { +ov::frontend::Place::Ptr PlaceOp::get_input_port(int input_port_index) const { if (input_port_index < m_editor->get_input_ports(m_node).size()) { - return std::make_shared( + return std::make_shared( m_editor->find_input_edge(m_node, onnx_editor::EditorInput{input_port_index}), m_editor); } return nullptr; } -Place::Ptr PlaceOpONNX::get_input_port(const std::string& input_name) const { +ov::frontend::Place::Ptr 
PlaceOp::get_input_port(const std::string& input_name) const { const auto input_ports = m_editor->get_input_ports(m_node); if (std::count(std::begin(input_ports), std::end(input_ports), input_name) == 1) { - return std::make_shared( - m_editor->find_input_edge(m_node, onnx_editor::EditorInput{input_name}), - m_editor); + return std::make_shared(m_editor->find_input_edge(m_node, onnx_editor::EditorInput{input_name}), + m_editor); } return nullptr; } -std::vector PlaceOpONNX::get_consuming_ports() const { - std::vector consuming_ports; +std::vector PlaceOp::get_consuming_ports() const { + std::vector consuming_ports; const auto out_ports_size = m_editor->get_output_ports(m_node).size(); for (int out_idx = 0; out_idx < out_ports_size; ++out_idx) { auto consuming_ops_out = get_output_port(out_idx)->get_consuming_ports(); @@ -271,12 +267,12 @@ std::vector PlaceOpONNX::get_consuming_ports() const { } namespace { -std::vector get_consuming_ops(std::vector input_ports) { - std::vector consuming_ops; +std::vector get_consuming_ops(std::vector input_ports) { + std::vector consuming_ops; std::transform(std::begin(input_ports), std::end(input_ports), std::back_inserter(consuming_ops), - [](const Place::Ptr place) { + [](const ov::frontend::Place::Ptr place) { return place->get_consuming_operations().at(0); }); @@ -284,22 +280,22 @@ std::vector get_consuming_ops(std::vector input_ports) { } } // namespace -std::vector PlaceOpONNX::get_consuming_operations() const { - std::vector consuming_ports = get_consuming_ports(); +std::vector PlaceOp::get_consuming_operations() const { + std::vector consuming_ports = get_consuming_ports(); return get_consuming_ops(consuming_ports); } -std::vector PlaceOpONNX::get_consuming_operations(int output_port_index) const { - std::vector consuming_ports = get_output_port(output_port_index)->get_consuming_ports(); +std::vector PlaceOp::get_consuming_operations(int output_port_index) const { + std::vector consuming_ports = get_output_port(output_port_index)->get_consuming_ports(); return get_consuming_ops(consuming_ports); } -std::vector PlaceOpONNX::get_consuming_operations(const std::string& output_port_name) const { - std::vector consuming_ports = get_output_port(output_port_name)->get_consuming_ports(); +std::vector PlaceOp::get_consuming_operations(const std::string& output_port_name) const { + std::vector consuming_ports = get_output_port(output_port_name)->get_consuming_ports(); return get_consuming_ops(consuming_ports); } -Place::Ptr PlaceOpONNX::get_producing_operation() const { +ov::frontend::Place::Ptr PlaceOp::get_producing_operation() const { const auto input_port = get_input_port(); if (input_port != nullptr) { return input_port->get_producing_operation(); @@ -307,7 +303,7 @@ Place::Ptr PlaceOpONNX::get_producing_operation() const { return nullptr; } -Place::Ptr PlaceOpONNX::get_producing_operation(int input_port_index) const { +ov::frontend::Place::Ptr PlaceOp::get_producing_operation(int input_port_index) const { const auto input_port = get_input_port(input_port_index); if (input_port != nullptr) { return input_port->get_producing_operation(); @@ -315,7 +311,7 @@ Place::Ptr PlaceOpONNX::get_producing_operation(int input_port_index) const { return nullptr; } -Place::Ptr PlaceOpONNX::get_producing_operation(const std::string& input_port_name) const { +ov::frontend::Place::Ptr PlaceOp::get_producing_operation(const std::string& input_port_name) const { const auto input_port = get_input_port(input_port_name); if (input_port != nullptr) { return 
input_port->get_producing_operation(); @@ -323,8 +319,8 @@ Place::Ptr PlaceOpONNX::get_producing_operation(const std::string& input_port_na return nullptr; } -bool PlaceOpONNX::is_equal(Place::Ptr another) const { - if (const auto place_op = std::dynamic_pointer_cast(another)) { +bool PlaceOp::is_equal(Place::Ptr another) const { + if (const auto place_op = std::dynamic_pointer_cast(another)) { const auto& another_node = place_op->get_editor_node(); if (m_editor->is_correct_and_unambiguous_node(m_node) || m_editor->is_correct_and_unambiguous_node(another_node)) { @@ -334,7 +330,7 @@ bool PlaceOpONNX::is_equal(Place::Ptr another) const { return false; } -Place::Ptr PlaceOpONNX::get_target_tensor() const { +ov::frontend::Place::Ptr PlaceOp::get_target_tensor() const { const auto output_port = get_output_port(); if (output_port != nullptr) { return output_port->get_target_tensor(); @@ -342,7 +338,7 @@ Place::Ptr PlaceOpONNX::get_target_tensor() const { return nullptr; } -Place::Ptr PlaceOpONNX::get_target_tensor(int output_port_index) const { +ov::frontend::Place::Ptr PlaceOp::get_target_tensor(int output_port_index) const { const auto output_port = get_output_port(output_port_index); if (output_port != nullptr) { return output_port->get_target_tensor(); @@ -350,7 +346,7 @@ Place::Ptr PlaceOpONNX::get_target_tensor(int output_port_index) const { return nullptr; } -Place::Ptr PlaceOpONNX::get_target_tensor(const std::string& output_name) const { +ov::frontend::Place::Ptr PlaceOp::get_target_tensor(const std::string& output_name) const { const auto output_port = get_output_port(output_name); if (output_port != nullptr) { return output_port->get_target_tensor(); @@ -358,7 +354,7 @@ Place::Ptr PlaceOpONNX::get_target_tensor(const std::string& output_name) const return nullptr; } -Place::Ptr PlaceOpONNX::get_source_tensor() const { +ov::frontend::Place::Ptr PlaceOp::get_source_tensor() const { const auto input_port = get_input_port(); if (input_port != nullptr) { return input_port->get_source_tensor(); @@ -366,7 +362,7 @@ Place::Ptr PlaceOpONNX::get_source_tensor() const { return nullptr; } -Place::Ptr PlaceOpONNX::get_source_tensor(int input_port_index) const { +ov::frontend::Place::Ptr PlaceOp::get_source_tensor(int input_port_index) const { const auto input_port = get_input_port(input_port_index); if (input_port != nullptr) { return input_port->get_source_tensor(); @@ -374,7 +370,7 @@ Place::Ptr PlaceOpONNX::get_source_tensor(int input_port_index) const { return nullptr; } -Place::Ptr PlaceOpONNX::get_source_tensor(const std::string& input_name) const { +ov::frontend::Place::Ptr PlaceOp::get_source_tensor(const std::string& input_name) const { const auto input_port = get_input_port(input_name); if (input_port != nullptr) { return input_port->get_source_tensor(); @@ -382,15 +378,15 @@ Place::Ptr PlaceOpONNX::get_source_tensor(const std::string& input_name) const { return nullptr; } -bool PlaceOpONNX::is_input() const { +bool PlaceOp::is_input() const { return false; } -bool PlaceOpONNX::is_output() const { +bool PlaceOp::is_output() const { return false; } -void PlaceOpONNX::set_name(const std::string& new_name) { +void PlaceOp::set_name(const std::string& new_name) { m_editor->set_node_name(m_node, new_name); m_node.m_node_name = new_name; } diff --git a/src/frontends/onnx/frontend/src/place.hpp b/src/frontends/onnx/frontend/src/place.hpp index 063a18867d8..02bd737e16f 100644 --- a/src/frontends/onnx/frontend/src/place.hpp +++ b/src/frontends/onnx/frontend/src/place.hpp @@ -4,17 +4,19 @@ 
#pragma once -#include #include #include +#include #include namespace ov { namespace frontend { -class PlaceInputEdgeONNX : public Place { +namespace onnx { + +class PlaceInputEdge : public Place { public: - PlaceInputEdgeONNX(const onnx_editor::InputEdge& edge, std::shared_ptr editor); - PlaceInputEdgeONNX(onnx_editor::InputEdge&& edge, std::shared_ptr editor); + PlaceInputEdge(const onnx_editor::InputEdge& edge, std::shared_ptr editor); + PlaceInputEdge(onnx_editor::InputEdge&& edge, std::shared_ptr editor); // internal usage onnx_editor::InputEdge get_input_edge() const; @@ -34,10 +36,10 @@ private: const std::shared_ptr m_editor; }; -class PlaceOutputEdgeONNX : public Place { +class PlaceOutputEdge : public Place { public: - PlaceOutputEdgeONNX(const onnx_editor::OutputEdge& edge, std::shared_ptr editor); - PlaceOutputEdgeONNX(onnx_editor::OutputEdge&& edge, std::shared_ptr editor); + PlaceOutputEdge(const onnx_editor::OutputEdge& edge, std::shared_ptr editor); + PlaceOutputEdge(onnx_editor::OutputEdge&& edge, std::shared_ptr editor); // internal usage onnx_editor::OutputEdge get_output_edge() const; @@ -57,10 +59,10 @@ private: std::shared_ptr m_editor; }; -class PlaceTensorONNX : public Place { +class PlaceTensor : public Place { public: - PlaceTensorONNX(const std::string& name, std::shared_ptr editor); - PlaceTensorONNX(std::string&& name, std::shared_ptr editor); + PlaceTensor(const std::string& name, std::shared_ptr editor); + PlaceTensor(std::string&& name, std::shared_ptr editor); // external usage std::vector get_names() const override; @@ -81,10 +83,10 @@ private: std::shared_ptr m_editor; }; -class PlaceOpONNX : public Place { +class PlaceOp : public Place { public: - PlaceOpONNX(const onnx_editor::EditorNode& node, std::shared_ptr editor); - PlaceOpONNX(onnx_editor::EditorNode&& node, std::shared_ptr editor); + PlaceOp(const onnx_editor::EditorNode& node, std::shared_ptr editor); + PlaceOp(onnx_editor::EditorNode&& node, std::shared_ptr editor); std::vector get_names() const override; // internal usage @@ -125,5 +127,7 @@ private: onnx_editor::EditorNode m_node; std::shared_ptr m_editor; }; + +} // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp b/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp index 9813e82fa4b..dba12e3b2c6 100644 --- a/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp +++ b/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp @@ -7,8 +7,8 @@ #include #include -#include "common/telemetry_extension.hpp" #include "ngraph/function.hpp" +#include "openvino/frontend/telemetry_extension.hpp" namespace ONNX_NAMESPACE { class ModelProto; diff --git a/src/frontends/paddlepaddle/CMakeLists.txt b/src/frontends/paddlepaddle/CMakeLists.txt index b3d46d67d1a..d6c5e4999f5 100644 --- a/src/frontends/paddlepaddle/CMakeLists.txt +++ b/src/frontends/paddlepaddle/CMakeLists.txt @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # +# TODO: Add LINKABLE_FRONTEND option when tensorflow frontend directory is moved to openvino folder ov_add_frontend(NAME paddlepaddle - LINKABLE_FRONTEND PROTOBUF_LITE FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format" LINK_LIBRARIES inference_engine_transformations) diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp index 2b10a622ff7..0cabd02cd2b 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp 
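For orientation, the renamed PlaceOp above keeps one pattern worth noting: operation-level queries (get_consuming_operations) are answered by mapping over port-level places via std::transform. A minimal standalone sketch of that delegation with toy types, not the real ov::frontend API:

#include <algorithm>
#include <iterator>
#include <memory>
#include <vector>

// Toy model of the helper above: every consuming port knows which operations
// read from it; the op-level answer is the first consumer of each port.
struct PlaceLike;
using PlacePtr = std::shared_ptr<PlaceLike>;
struct PlaceLike {
    std::vector<PlacePtr> consuming_operations;
};

std::vector<PlacePtr> consuming_ops_of(const std::vector<PlacePtr>& input_ports) {
    std::vector<PlacePtr> ops;
    std::transform(input_ports.begin(), input_ports.end(), std::back_inserter(ops),
                   [](const PlacePtr& port) { return port->consuming_operations.at(0); });
    return ops;
}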
+++ b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp @@ -4,8 +4,8 @@ #pragma once -#include -#include +#include +#include #include "exceptions.hpp" #include "model.hpp" diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/model.hpp b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/model.hpp index 8cc63a5540a..ba8659e04b4 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/model.hpp +++ b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/model.hpp @@ -4,8 +4,8 @@ #pragma once -#include -#include +#include +#include #include "paddlepaddle_frontend/utility.hpp" diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/place.hpp b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/place.hpp index e41e39564db..d99a9db6f4d 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/place.hpp +++ b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/place.hpp @@ -4,9 +4,9 @@ #pragma once -#include +#include -#include "paddlepaddle_frontend/exceptions.hpp" +#include "exceptions.hpp" namespace paddle { namespace framework { diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/utility.hpp b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/utility.hpp index 8d61af08ff6..a9c273cb392 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/utility.hpp +++ b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/utility.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include #ifdef OPENVINO_STATIC_LIBRARY # define PDPD_API diff --git a/src/frontends/paddlepaddle/src/exceptions.cpp b/src/frontends/paddlepaddle/src/exception.cpp similarity index 90% rename from src/frontends/paddlepaddle/src/exceptions.cpp rename to src/frontends/paddlepaddle/src/exception.cpp index 0d172c6f4b5..ddd097ba5ef 100644 --- a/src/frontends/paddlepaddle/src/exceptions.cpp +++ b/src/frontends/paddlepaddle/src/exception.cpp @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "paddlepaddle_frontend/exceptions.hpp" - +#include "exceptions.hpp" #include "node_context.hpp" namespace ov { diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/exceptions.hpp b/src/frontends/paddlepaddle/src/exceptions.hpp similarity index 96% rename from src/frontends/paddlepaddle/include/paddlepaddle_frontend/exceptions.hpp rename to src/frontends/paddlepaddle/src/exceptions.hpp index 2501ffafa35..78ce6937d83 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/exceptions.hpp +++ b/src/frontends/paddlepaddle/src/exceptions.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include namespace ov { namespace frontend { diff --git a/src/frontends/paddlepaddle/src/frontend.cpp b/src/frontends/paddlepaddle/src/frontend.cpp index b9df7dce3a1..a552dcaa06a 100644 --- a/src/frontends/paddlepaddle/src/frontend.cpp +++ b/src/frontends/paddlepaddle/src/frontend.cpp @@ -10,11 +10,11 @@ #include #include "decoder.hpp" +#include "exceptions.hpp" #include "framework.pb.h" #include "node_context.hpp" #include "op_table.hpp" #include "openvino/opsets/opset7.hpp" -#include "paddlepaddle_frontend/exceptions.hpp" #include "paddlepaddle_frontend/model.hpp" #include "paddlepaddle_frontend/place.hpp" #include "pdpd_fw_node.hpp" diff --git a/src/frontends/paddlepaddle/src/model.cpp b/src/frontends/paddlepaddle/src/model.cpp index e089e836048..48ccef9643e 100644 --- a/src/frontends/paddlepaddle/src/model.cpp +++ b/src/frontends/paddlepaddle/src/model.cpp @@ -8,10 +8,10 @@ #include #include 
"decoder.hpp" +#include "exceptions.hpp" #include "framework.pb.h" #include "node_context.hpp" #include "openvino/opsets/opset7.hpp" -#include "paddlepaddle_frontend/exceptions.hpp" #include "paddlepaddle_frontend/place.hpp" #include "pdpd_utils.hpp" diff --git a/src/frontends/paddlepaddle/src/node_context.hpp b/src/frontends/paddlepaddle/src/node_context.hpp index 8b363d1f6ba..6dfe727f6ee 100644 --- a/src/frontends/paddlepaddle/src/node_context.hpp +++ b/src/frontends/paddlepaddle/src/node_context.hpp @@ -3,9 +3,9 @@ // #pragma once +#include "exceptions.hpp" #include "ngraph/compatibility.hpp" #include "openvino/core/any.hpp" -#include "paddlepaddle_frontend/exceptions.hpp" #include "paddlepaddle_frontend/utility.hpp" namespace ov { diff --git a/src/frontends/paddlepaddle/src/pdpd_utils.hpp b/src/frontends/paddlepaddle/src/pdpd_utils.hpp index 6f98dcd16db..5fcaaa6c188 100644 --- a/src/frontends/paddlepaddle/src/pdpd_utils.hpp +++ b/src/frontends/paddlepaddle/src/pdpd_utils.hpp @@ -4,7 +4,7 @@ #pragma once -#include "common/frontend_exceptions.hpp" +#include "openvino/frontend/exception.hpp" #include "openvino/opsets/opset6.hpp" namespace ov { diff --git a/src/frontends/tensorflow/CMakeLists.txt b/src/frontends/tensorflow/CMakeLists.txt index 1e049c6d66d..b285b4ed459 100644 --- a/src/frontends/tensorflow/CMakeLists.txt +++ b/src/frontends/tensorflow/CMakeLists.txt @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # +# TODO: Add LINKABLE_FRONTEND option when tensorflow frontend directory is moved to openvino folder ov_add_frontend(NAME tensorflow - LINKABLE_FRONTEND SKIP_INSTALL FILEDESCRIPTION "FrontEnd to load and convert TensorFlow file format" LINK_LIBRARIES openvino::util) diff --git a/src/frontends/tensorflow/include/tensorflow_frontend/frontend.hpp b/src/frontends/tensorflow/include/tensorflow_frontend/frontend.hpp index 14c8ea8922c..8c53ecb4a9d 100644 --- a/src/frontends/tensorflow/include/tensorflow_frontend/frontend.hpp +++ b/src/frontends/tensorflow/include/tensorflow_frontend/frontend.hpp @@ -7,11 +7,11 @@ #include #include -#include "common/frontend.hpp" -#include "common/input_model.hpp" -#include "common/telemetry_extension.hpp" #include "openvino/core/any.hpp" #include "openvino/core/node_vector.hpp" +#include "openvino/frontend/frontend.hpp" +#include "openvino/frontend/input_model.hpp" +#include "openvino/frontend/telemetry_extension.hpp" #include "tensorflow_frontend/utility.hpp" namespace ov { diff --git a/src/frontends/tensorflow/include/tensorflow_frontend/utility.hpp b/src/frontends/tensorflow/include/tensorflow_frontend/utility.hpp index 2e6c3abf6b2..1990785fc30 100644 --- a/src/frontends/tensorflow/include/tensorflow_frontend/utility.hpp +++ b/src/frontends/tensorflow/include/tensorflow_frontend/utility.hpp @@ -4,7 +4,7 @@ #pragma once -#include "common/frontend_exceptions.hpp" +#include "openvino/frontend/exception.hpp" #ifdef OPENVINO_STATIC_LIBRARY # define TF_API diff --git a/src/frontends/tensorflow/src/exceptions.hpp b/src/frontends/tensorflow/src/exceptions.hpp index 0a72e27ad7e..d43995a765d 100644 --- a/src/frontends/tensorflow/src/exceptions.hpp +++ b/src/frontends/tensorflow/src/exceptions.hpp @@ -3,8 +3,7 @@ // #pragma once -#include "common/frontend_exceptions.hpp" -#include "openvino/core/node.hpp" +#include "openvino/frontend/exception.hpp" namespace ov { namespace frontend { diff --git a/src/frontends/tensorflow/src/model.cpp b/src/frontends/tensorflow/src/model.cpp index e3e1aa76531..3fc273d38cd 100644 --- 
a/src/frontends/tensorflow/src/model.cpp +++ b/src/frontends/tensorflow/src/model.cpp @@ -7,8 +7,8 @@ #include #include -#include "common/frontend_exceptions.hpp" #include "node_context.hpp" +#include "openvino/frontend/exception.hpp" #include "openvino/opsets/opset7.hpp" #include "place.hpp" #include "tensorflow_frontend/graph_iterator.hpp" diff --git a/src/frontends/tensorflow/src/model.hpp b/src/frontends/tensorflow/src/model.hpp index d2edc1dc5d3..6c88056131e 100644 --- a/src/frontends/tensorflow/src/model.hpp +++ b/src/frontends/tensorflow/src/model.hpp @@ -4,9 +4,9 @@ #pragma once -#include "common/input_model.hpp" -#include "common/place.hpp" -#include "common/telemetry_extension.hpp" +#include "openvino/frontend/input_model.hpp" +#include "openvino/frontend/place.hpp" +#include "openvino/frontend/telemetry_extension.hpp" #include "tensorflow_frontend/graph_iterator.hpp" namespace ov { diff --git a/src/frontends/tensorflow/src/place.cpp b/src/frontends/tensorflow/src/place.cpp index 8b7a3f9ac55..bdddede95f7 100644 --- a/src/frontends/tensorflow/src/place.cpp +++ b/src/frontends/tensorflow/src/place.cpp @@ -4,9 +4,9 @@ #include "place.hpp" -#include "common/frontend_exceptions.hpp" #include "node_context.hpp" #include "op_def.pb.h" +#include "openvino/frontend/exception.hpp" #include "tensor.pb.h" #include "types.pb.h" diff --git a/src/frontends/tensorflow/src/place.hpp b/src/frontends/tensorflow/src/place.hpp index 733e0c25262..f6197ee3a90 100644 --- a/src/frontends/tensorflow/src/place.hpp +++ b/src/frontends/tensorflow/src/place.hpp @@ -4,7 +4,7 @@ #pragma once -#include "common/frontend.hpp" +#include "openvino/frontend/frontend.hpp" #include "tensorflow_frontend/decoder.hpp" namespace ov { diff --git a/src/frontends/tensorflow/src/tensorflow.cpp b/src/frontends/tensorflow/src/tensorflow.cpp index 24824017e42..e462ef7b92e 100644 --- a/src/frontends/tensorflow/src/tensorflow.cpp +++ b/src/frontends/tensorflow/src/tensorflow.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" #include "tensorflow_frontend/frontend.hpp" TF_C_API ov::frontend::FrontEndVersion GetAPIVersion() { diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp index 6fa28421f66..e7bc26c8e99 100644 --- a/src/inference/src/ie_network_reader.cpp +++ b/src/inference/src/ie_network_reader.cpp @@ -18,7 +18,7 @@ #include "ie_common.h" #include "ie_icnn_network.hpp" #include "ie_input_info.hpp" -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" #ifdef ENABLE_IR_V7_READER # include "legacy/ie_ir_version.hpp" #endif diff --git a/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp b/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp index c9eddbf6e21..4f1e879c966 100644 --- a/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp +++ b/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp @@ -7,7 +7,7 @@ #include #include #include -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" #include "graph_comparator.hpp" #include "ie_blob.h" diff --git a/src/tests/functional/inference_engine/ir_serialization/partial_shape_serialization.cpp b/src/tests/functional/inference_engine/ir_serialization/partial_shape_serialization.cpp index 445c672ce69..7303fce5346 100644 --- 
a/src/tests/functional/inference_engine/ir_serialization/partial_shape_serialization.cpp +++ b/src/tests/functional/inference_engine/ir_serialization/partial_shape_serialization.cpp @@ -9,7 +9,7 @@ #include #include #include -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" #include "common_test_utils/ngraph_test_utils.hpp" #include "ngraph/ngraph.hpp" diff --git a/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp b/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp index 4a2f8e5e775..ff203e3a1fa 100644 --- a/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp +++ b/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp @@ -15,7 +15,7 @@ #include #include #include -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" #include "graph_comparator.hpp" #include "ie_blob.h" #include "ie_precision.hpp" diff --git a/src/tests/functional/inference_engine/ir_serialization/rt_info_serialization.cpp b/src/tests/functional/inference_engine/ir_serialization/rt_info_serialization.cpp index d72ad3b4a1a..fcb884ca442 100644 --- a/src/tests/functional/inference_engine/ir_serialization/rt_info_serialization.cpp +++ b/src/tests/functional/inference_engine/ir_serialization/rt_info_serialization.cpp @@ -15,7 +15,7 @@ #include #include #include -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" using namespace ngraph; diff --git a/tests/fuzz/src/import_pdpd-fuzzer.cc b/tests/fuzz/src/import_pdpd-fuzzer.cc index ae85d3c3f42..5cb685847f9 100644 --- a/tests/fuzz/src/import_pdpd-fuzzer.cc +++ b/tests/fuzz/src/import_pdpd-fuzzer.cc @@ -1,7 +1,7 @@ // Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "manager.hpp" +#include "openvino/frontend/manager.hpp" #include "ngraph/ngraph.hpp" #include "tokenizer.h" #include diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.cpp b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.cpp index e23910ef623..2820d8e8ebd 100644 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.cpp +++ b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.cpp @@ -3,9 +3,9 @@ // #include "mock_mo_frontend.hpp" -#include "common/frontend_defs.hpp" -#include "manager.hpp" #include "ngraph/visibility.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/visibility.hpp" using namespace ngraph; using namespace ov::frontend; diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.hpp b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.hpp index 553870dd423..2a0c6dddab4 100644 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.hpp +++ b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_ov_frontend/mock_mo_frontend.hpp @@ -4,9 +4,9 @@ #pragma once -#include "common/frontend_defs.hpp" -#include "manager.hpp" #include "ngraph/visibility.hpp" +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/visibility.hpp" // Defined if we are building the plugin DLL (instead of using it) #ifdef mock_mo_ov_frontend_EXPORTS From b41cb57773737bb905852eb389d3b417c0e73ec1 Mon Sep 17 00:00:00 2001 From: Sergey Lyubimtsev Date: Fri, 17 Dec 2021 10:42:21 +0300 Subject: [PATCH 07/10] Fix build issue for openvino wheel package on Windows (#9265) * Fix build issue for openvino wheel package on Windows * 
fix env --- .../ie_bridges/python/wheel/.env.in | 15 ------- .../ie_bridges/python/wheel/CMakeLists.txt | 43 ++++++------------- .../wheel/meta/openvino-dev.requirements.txt | 28 ------------ .../python/wheel/meta/openvino-dev.setup.cfg | 22 ---------- .../wheel/meta/openvino.requirements.txt | 1 - .../python/wheel/meta/pypi_overview.md | 32 -------------- .../python/wheel/requirements-dev.txt | 2 +- .../ie_bridges/python/wheel/setup.cfg | 14 +++--- .../ie_bridges/python/wheel/setup.py | 34 +++++++-------- tools/openvino_dev/setup.py | 2 +- 10 files changed, 41 insertions(+), 152 deletions(-) delete mode 100644 inference-engine/ie_bridges/python/wheel/.env.in delete mode 100644 inference-engine/ie_bridges/python/wheel/meta/openvino-dev.requirements.txt delete mode 100644 inference-engine/ie_bridges/python/wheel/meta/openvino-dev.setup.cfg delete mode 100644 inference-engine/ie_bridges/python/wheel/meta/openvino.requirements.txt delete mode 100644 inference-engine/ie_bridges/python/wheel/meta/pypi_overview.md diff --git a/inference-engine/ie_bridges/python/wheel/.env.in b/inference-engine/ie_bridges/python/wheel/.env.in deleted file mode 100644 index 5dc313f6b1d..00000000000 --- a/inference-engine/ie_bridges/python/wheel/.env.in +++ /dev/null @@ -1,15 +0,0 @@ -WHEEL_PACKAGE_NAME=@WHEEL_PACKAGE_NAME@ -WHEEL_VERSION=@WHEEL_VERSION@ -WHEEL_BUILD=@WHEEL_BUILD@ -WHEEL_LICENCE_TYPE=@WHEEL_LICENCE_TYPE@ -WHEEL_AUTHOR=@WHEEL_AUTHOR@ -WHEEL_AUTHOR_EMAIL=@WHEEL_AUTHOR_EMAIL@ -WHEEL_DESC=@WHEEL_DESC@ -WHEEL_LICENSE=@WHEEL_LICENSE@ -WHEEL_REQUIREMENTS=@WHEEL_REQUIREMENTS@ -WHEEL_OVERVIEW=@WHEEL_OVERVIEW@ - -CMAKE_BUILD_DIR=@CMAKE_BINARY_DIR@ -OV_RUNTIME_LIBS_DIR=@IE_CPACK_RUNTIME_PATH@ -TBB_LIBS_DIR=@TBB_LIBS_DIR@ -PY_PACKAGES_DIR=@PY_PACKAGES_DIR@ diff --git a/inference-engine/ie_bridges/python/wheel/CMakeLists.txt b/inference-engine/ie_bridges/python/wheel/CMakeLists.txt index 53d8207c347..7bb7bc2ab79 100644 --- a/inference-engine/ie_bridges/python/wheel/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/wheel/CMakeLists.txt @@ -1,40 +1,14 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # - -set(WHEEL_PACKAGE_NAME "openvino" CACHE STRING "Name of the package") -set(WHEEL_LICENCE_TYPE "OSI Approved :: Apache Software License" CACHE STRING "License type for the package") -set(WHEEL_AUTHOR "Intel Corporation" CACHE STRING "Package author’s name") -set(WHEEL_AUTHOR_EMAIL "openvino_pushbot@intel.com" CACHE STRING "Email address of the package author") -set(WHEEL_DESC "Inference Engine Python* API" CACHE STRING "Short, summary description of the package") -set(WHEEL_URL "https://docs.openvinotoolkit.org/latest/index.html" CACHE STRING "Home page url") -set(WHEEL_DOWNLOAD_URL "https://github.com/openvinotoolkit/openvino/tags" CACHE STRING "Download page url") set(WHEEL_VERSION "${IE_VERSION}" CACHE STRING "Version of this release" FORCE) set(WHEEL_BUILD "${IE_VERSION_BUILD}" CACHE STRING "Build number of this release" FORCE) -set(WHEEL_LICENSE "${CMAKE_SOURCE_DIR}/LICENSE" CACHE STRING "Wheel license file") -set(WHEEL_REQUIREMENTS "${CMAKE_CURRENT_SOURCE_DIR}/meta/openvino.requirements.txt" CACHE STRING "Wheel requirements.txt file") -set(WHEEL_OVERVIEW "${CMAKE_CURRENT_SOURCE_DIR}/meta/pypi_overview.md" CACHE STRING "Detailed description") - -set(SETUP_PY "${CMAKE_CURRENT_SOURCE_DIR}/setup.py") -set(SETUP_ENV "${CMAKE_CURRENT_SOURCE_DIR}/.env.in") -set(SETUP_ENV_OUT "${CMAKE_CURRENT_SOURCE_DIR}/.env") - set(PY_PACKAGES_DIR 
${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}) set(TBB_LIBS_DIR runtime/3rdparty/tbb/lib) - -if(APPLE) - set(WHEEL_PLATFORM macosx_10_15_x86_64) -elseif(UNIX) - set(WHEEL_PLATFORM manylinux2014_x86_64) -elseif(WIN32) - set(WHEEL_PLATFORM win_amd64) +if(WIN32) set(TBB_LIBS_DIR runtime/3rdparty/tbb/bin) -else() - message(FATAL_ERROR "This platform is not supported") endif() -configure_file(${SETUP_ENV} ${SETUP_ENV_OUT} @ONLY) - if(LINUX) find_host_program(patchelf_program NAMES patchelf @@ -55,21 +29,30 @@ endforeach() execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import wheel.bdist_wheel ; print(f'{wheel.bdist_wheel.get_abi_tag()}')" OUTPUT_VARIABLE PYTHON_ABI) execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import wheel.vendored.packaging.tags as tags ; print(f'{tags.interpreter_name()}{tags.interpreter_version()}')" OUTPUT_VARIABLE INTERPRETER) +execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import wheel.vendored.packaging.tags as tags ; print(f'{next(tags._platform_tags())}')" OUTPUT_VARIABLE WHEEL_PLATFORM) string(STRIP ${PYTHON_ABI} PYTHON_ABI) string(STRIP ${INTERPRETER} INTERPRETER) +string(STRIP ${WHEEL_PLATFORM} WHEEL_PLATFORM) set(openvino_wheel_name "openvino-${WHEEL_VERSION}-${WHEEL_BUILD}-${INTERPRETER}-${PYTHON_ABI}-${WHEEL_PLATFORM}.whl") set(openvino_wheels_output_dir "${CMAKE_BINARY_DIR}/wheels") set(openvino_wheel_path "${openvino_wheels_output_dir}/${openvino_wheel_name}") add_custom_command(OUTPUT ${openvino_wheel_path} + COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" + COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_SOURCE_DIR}/licensing" "${CMAKE_BINARY_DIR}/licensing" COMMAND ${CMAKE_COMMAND} -E remove_directory "${CMAKE_CURRENT_BINARY_DIR}/site-packages" - COMMAND ${PYTHON_EXECUTABLE} ${SETUP_PY} clean bdist_wheel + COMMAND ${CMAKE_COMMAND} -E env WHEEL_VERSION=${WHEEL_VERSION} + WHEEL_BUILD=${WHEEL_BUILD} + CMAKE_BUILD_DIR=${CMAKE_BINARY_DIR} + OV_RUNTIME_LIBS_DIR=${IE_CPACK_RUNTIME_PATH} + TBB_LIBS_DIR=${TBB_LIBS_DIR} + PY_PACKAGES_DIR=${PY_PACKAGES_DIR} + ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/setup.py" clean bdist_wheel --dist-dir ${openvino_wheels_output_dir} --build=${WHEEL_BUILD} --plat-name=${WHEEL_PLATFORM} - # COMMAND ${CMAKE_COMMAND} -E remove ${SETUP_ENV_OUT} - DEPENDS ${openvino_wheel_deps} ${SETUP_ENV_OUT} + DEPENDS ${openvino_wheel_deps} WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" COMMENT "Building Python wheel ${openvino_wheel_name}" VERBATIM) diff --git a/inference-engine/ie_bridges/python/wheel/meta/openvino-dev.requirements.txt b/inference-engine/ie_bridges/python/wheel/meta/openvino-dev.requirements.txt deleted file mode 100644 index ccc569a0194..00000000000 --- a/inference-engine/ie_bridges/python/wheel/meta/openvino-dev.requirements.txt +++ /dev/null @@ -1,28 +0,0 @@ -defusedxml>=0.7.1 -scipy~=1.5.4 -jstyleson~=0.0.2 -numpy>=1.16.6,<1.20 -addict>=2.4.0 -pandas~=1.1.5 -hyperopt~=0.1.2 -networkx~=2.5 -tqdm>=4.54.1 -texttable~=1.6.3 -py-cpuinfo>=7.0.0 -PyYAML>=5.4.1 -pillow>=8.1.2 -scikit-image>=0.17.2 -scikit-learn>=0.24.1 -yamlloader>=0.5 -shapely>=1.7.1 -nibabel>=3.2.1 -pydicom>=2.1.2 -sentencepiece>=0.1.95 -tokenizers>=0.10.1 -editdistance>=0.5.3 -parasail>=1.2.4 -fast-ctc-decode>=0.2.5 -rawpy>=0.16.0 -nltk>=3.5 -opencv-python==4.5.* -progress>=1.5 diff --git a/inference-engine/ie_bridges/python/wheel/meta/openvino-dev.setup.cfg b/inference-engine/ie_bridges/python/wheel/meta/openvino-dev.setup.cfg deleted file mode 100644 index d6789c4a084..00000000000 --- 
a/inference-engine/ie_bridges/python/wheel/meta/openvino-dev.setup.cfg +++ /dev/null @@ -1,22 +0,0 @@ -[options] -py_modules = - mo - mo_tf - mo_caffe - mo_mxnet - mo_onnx - mo_kaldi - -[options.package_data] - * = * - -[options.entry_points] -console_scripts = - -[metadata] -license_files = - readme* - *LICENSE* - *license* - *third-party-programs* - *EULA* diff --git a/inference-engine/ie_bridges/python/wheel/meta/openvino.requirements.txt b/inference-engine/ie_bridges/python/wheel/meta/openvino.requirements.txt deleted file mode 100644 index 63012dd1739..00000000000 --- a/inference-engine/ie_bridges/python/wheel/meta/openvino.requirements.txt +++ /dev/null @@ -1 +0,0 @@ -numpy>=1.16.6,<1.20 diff --git a/inference-engine/ie_bridges/python/wheel/meta/pypi_overview.md b/inference-engine/ie_bridges/python/wheel/meta/pypi_overview.md deleted file mode 100644 index 418a04bc16a..00000000000 --- a/inference-engine/ie_bridges/python/wheel/meta/pypi_overview.md +++ /dev/null @@ -1,32 +0,0 @@ -## OpenVINO™ Toolkit - -OpenVINO™ toolkit quickly deploys applications and solutions that emulate human vision. Based on Convolutional Neural Networks (CNNs), the toolkit extends computer vision (CV) workloads across Intel® hardware, maximizing performance. The OpenVINO™ toolkit includes the Deep Learning Deployment Toolkit (DLDT). - -OpenVINO™ toolkit: - -- Enables CNN-based deep learning inference on the edge -- Supports heterogeneous execution across an Intel® CPU, Intel® Integrated Graphics, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs -- Speeds time-to-market via an easy-to-use library of computer vision functions and pre-optimized kernels -- Includes optimized calls for computer vision standards, including OpenCV\* and OpenCL™ - -Operating Systems: -- Ubuntu* 18.04 long-term support (LTS), 64-bit -- Windows* 10, 64-bit -- macOS* 10.15, 64-bit - -## Install the Runtime Package Using the PyPI Repository -1. Set up and update pip to the highest version: - ```sh - python3 -m pip install --upgrade pip - ``` -2. Install the Intel® distribution of OpenVINO™ toolkit: - ```sh - pip install openvino - ``` - -3. Verify that the package is installed: - ```sh - python3 -c "from openvino.inference_engine import IECore" - ``` - -Now you are ready to develop and run your application. 
\ No newline at end of file diff --git a/inference-engine/ie_bridges/python/wheel/requirements-dev.txt b/inference-engine/ie_bridges/python/wheel/requirements-dev.txt index 8c4ce47c35f..b7574b392d2 100644 --- a/inference-engine/ie_bridges/python/wheel/requirements-dev.txt +++ b/inference-engine/ie_bridges/python/wheel/requirements-dev.txt @@ -1,3 +1,3 @@ setuptools>=53.0.0 wheel>=0.36.2 -python-decouple>=3.4 + diff --git a/inference-engine/ie_bridges/python/wheel/setup.cfg b/inference-engine/ie_bridges/python/wheel/setup.cfg index abb1790b67f..c6893c93c42 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.cfg +++ b/inference-engine/ie_bridges/python/wheel/setup.cfg @@ -1,7 +1,11 @@ [metadata] license_files = - readme* - *LICENSE* - *license* - *third-party-programs* - *EULA* + readme* + *LICENSE* + *license* + *third-party-programs* + ../../../../licensing/runtime-third-party-programs.txt + ../../../../licensing/tbb_third-party-programs.txt + ../../../../licensing/onednn_third-party-programs.txt + ../../../../LICENSE + diff --git a/inference-engine/ie_bridges/python/wheel/setup.py b/inference-engine/ie_bridges/python/wheel/setup.py index 517dce7560e..eb8d573dfba 100644 --- a/inference-engine/ie_bridges/python/wheel/setup.py +++ b/inference-engine/ie_bridges/python/wheel/setup.py @@ -21,7 +21,6 @@ from setuptools import setup, find_namespace_packages, Extension from setuptools.command.build_ext import build_ext from setuptools.command.build_clib import build_clib from setuptools.command.install import install -from decouple import config WHEEL_LIBS_INSTALL_DIR = os.path.join('openvino', 'libs') WHEEL_LIBS_PACKAGE = 'openvino.libs' @@ -41,10 +40,11 @@ elif machine == 'aarch64': ARCH = 'arm64' # The following variables can be defined in environment or .env file -CMAKE_BUILD_DIR = config('CMAKE_BUILD_DIR', '.') -OV_RUNTIME_LIBS_DIR = config('OV_RUNTIME_LIBS_DIR', f'runtime/{LIBS_DIR}/{ARCH}/{CONFIG}') -TBB_LIBS_DIR = config('TBB_LIBS_DIR', f'runtime/3rdparty/tbb/{LIBS_DIR}') -PY_PACKAGES_DIR = config('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}') +SCRIPT_DIR = Path(__file__).resolve().parents[0] +CMAKE_BUILD_DIR = os.getenv('CMAKE_BUILD_DIR', '.') +OV_RUNTIME_LIBS_DIR = os.getenv('OV_RUNTIME_LIBS_DIR', f'runtime/{LIBS_DIR}/{ARCH}/{CONFIG}') +TBB_LIBS_DIR = os.getenv('TBB_LIBS_DIR', f'runtime/3rdparty/tbb/{LIBS_DIR}') +PY_PACKAGES_DIR = os.getenv('PY_PACKAGES_DIR', f'python/{PYTHON_VERSION}') LIBS_RPATH = '$ORIGIN' if sys.platform == 'linux' else '@loader_path' LIB_INSTALL_CFG = { @@ -428,28 +428,28 @@ if not any(pl in sys.platform for pl in platforms): sys.exit(f'Unsupported platform: {sys.platform}, expected: linux, win32, darwin') # copy license file into the build directory -package_license = config('WHEEL_LICENSE', '') +package_license = os.getenv('WHEEL_LICENSE', SCRIPT_DIR.parents[3] / 'LICENSE') if os.path.exists(package_license): copyfile(package_license, 'LICENSE') packages = find_namespace_packages(get_package_dir(PY_INSTALL_CFG)) package_data: typing.Dict[str, list] = {} -pkg_name = config('WHEEL_PACKAGE_NAME', 'openvino') +pkg_name = os.getenv('WHEEL_PACKAGE_NAME', 'openvino') ext_modules = find_prebuilt_extensions(get_dir_list(PY_INSTALL_CFG)) if pkg_name == 'openvino' else [] setup( - version=config('WHEEL_VERSION', '0.0.0'), - build=config('WHEEL_BUILD', '000'), - author_email=config('WHEEL_AUTHOR_EMAIL', 'openvino_pushbot@intel.com'), + version=os.getenv('WHEEL_VERSION', '0.0.0'), + build=os.getenv('WHEEL_BUILD', '000'), + author_email=os.getenv('WHEEL_AUTHOR_EMAIL', 
'openvino_pushbot@intel.com'),
     name=pkg_name,
-    license=config('WHEEL_LICENCE_TYPE', 'OSI Approved :: Apache Software License'),
-    author=config('WHEEL_AUTHOR', 'Intel Corporation'),
-    description=config('WHEEL_DESC', 'Inference Engine Python* API'),
-    install_requires=get_dependencies(config('WHEEL_REQUIREMENTS', 'meta/openvino.requirements.txt')),
-    long_description=get_description(config('WHEEL_OVERVIEW', 'meta/pypi_overview.md')),
+    license=os.getenv('WHEEL_LICENCE_TYPE', 'OSI Approved :: Apache Software License'),
+    author=os.getenv('WHEEL_AUTHOR', 'Intel(R) Corporation'),
+    description=os.getenv('WHEEL_DESC', 'OpenVINO(TM) Runtime'),
+    install_requires=get_dependencies(os.getenv('WHEEL_REQUIREMENTS', SCRIPT_DIR.parents[0] / 'requirements.txt')),
+    long_description=get_description(os.getenv('WHEEL_OVERVIEW', SCRIPT_DIR.parents[3] / 'docs/install_guides/pypi-openvino-rt.md')),
     long_description_content_type='text/markdown',
-    download_url=config('WHEEL_DOWNLOAD_URL', 'https://github.com/openvinotoolkit/openvino/tags'),
-    url=config('WHEEL_URL', 'https://docs.openvinotoolkit.org/latest/index.html'),
+    download_url=os.getenv('WHEEL_DOWNLOAD_URL', 'https://github.com/openvinotoolkit/openvino/tags'),
+    url=os.getenv('WHEEL_URL', 'https://docs.openvinotoolkit.org/latest/index.html'),
     cmdclass={
         'build': CustomBuild,
         'install': CustomInstall,
diff --git a/tools/openvino_dev/setup.py b/tools/openvino_dev/setup.py
index a18d6991a75..44861d6a3ed 100644
--- a/tools/openvino_dev/setup.py
+++ b/tools/openvino_dev/setup.py
@@ -195,7 +195,7 @@ setup(
     author_email='openvino_pushbot@intel.com',
     url='https://docs.openvinotoolkit.org/latest/index.html',
     download_url='https://github.com/openvinotoolkit/openvino/tags',
-    description='OpenVINO™ Developer Package',
+    description='OpenVINO(TM) Development Tools',
     long_description=get_description(SCRIPT_DIR.parents[1] / 'docs/install_guides/pypi-openvino-dev.md'),
     long_description_content_type='text/markdown',
     classifiers=[

From 94c3731b50771737a945a7d65907d8f73581fcbe Mon Sep 17 00:00:00 2001
From: "Min, Byungil"
Date: Fri, 17 Dec 2021 16:43:21 +0900
Subject: [PATCH 08/10] [GPU] support multiple sum post-op of onednn kernel (#9221)

+ Applied WA for multiple sum post-ops of onednn kernel

Signed-off-by: Min, Byungil
---
 .../thirdparty/clDNN/src/layout_optimizer.cpp | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
index 1c4518b1654..1646ca99a89 100644
--- a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
+++ b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
@@ -1283,6 +1283,23 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format
         impl_candidate = impl_types::ocl;
     }
 
+    // [WA] to avoid an onednn kernel issue of multiple sum post-ops
+    if (!node.get_fused_primitives().empty()) {
+        size_t sum_post_op_cnt = 0;
+        for (auto& fused_op : node.get_fused_primitives()) {
+            if (fused_op.node->is_type() && node.get_dependencies().size() > fused_op.dep_start_idx && fused_op.deps.size() == 1) {
+                auto& eltw_in = node.get_dependency(fused_op.dep_start_idx);
+                if (program_helpers::are_layouts_identical_for_onednn_sum_post_op(eltw_in.get_output_layout(), node.get_output_layout()) &&
+                    fused_op.node->as().get_primitive()->needs_onednn_sum_post_op(eltw_in.get_output_layout())) {
+                    if (sum_post_op_cnt > 0)
+                        return impl_types::ocl;
+
+                    sum_post_op_cnt += 1;
+                }
+            }
+        }
+    }
+
     if (node.is_type()) {
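For context on the WA above: a oneDNN "sum" post-op makes a primitive accumulate into its destination buffer instead of overwriting it, so every sum post-op aliases dst. A minimal sketch of how such a fusion is expressed through the public dnnl C++ API (illustrative only; the attribute wiring inside clDNN differs):

#include <oneapi/dnnl/dnnl.hpp>

// Sketch: dst = relu(conv_result + dst) expressed as oneDNN post-ops.
dnnl::primitive_attr make_sum_relu_attr() {
    dnnl::post_ops ops;
    ops.append_sum(1.0f);  // in-place accumulation into dst; one per primitive is safe
    ops.append_eltwise(1.0f, dnnl::algorithm::eltwise_relu, 0.0f, 0.0f);
    dnnl::primitive_attr attr;
    attr.set_post_ops(ops);
    return attr;  // a second append_sum() aliasing the same dst is the case the WA avoids
}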
// oneDNN doesn't have good support for groups with fsv16 fmt auto& conv = node.as(); From 04dc16f861216e3d4a528b916fdbf072d671234d Mon Sep 17 00:00:00 2001 From: Maxim Andronov Date: Fri, 17 Dec 2021 11:12:07 +0300 Subject: [PATCH 09/10] [CPU] General fixes for dynamic shapes. Part 2 (#8871) --- .../src/mkldnn_plugin/cpu_shape.h | 10 + .../memory_desc/cpu_blocked_memory_desc.cpp | 50 +++- .../memory_desc/cpu_blocked_memory_desc.h | 1 + .../memory_desc/cpu_memory_desc.h | 7 +- .../memory_desc/cpu_memory_desc_utils.cpp | 43 ++- .../memory_desc/dnnl_blocked_memory_desc.cpp | 61 ++++- .../memory_desc/dnnl_blocked_memory_desc.h | 2 +- .../memory_desc/dnnl_memory_desc.cpp | 4 + .../memory_desc/dnnl_memory_desc.h | 1 + .../src/mkldnn_plugin/mkldnn_graph.cpp | 29 +- .../src/mkldnn_plugin/mkldnn_graph.h | 3 - .../mkldnn_plugin/mkldnn_infer_request.cpp | 3 +- .../src/mkldnn_plugin/mkldnn_node.cpp | 62 ++++- .../src/mkldnn_plugin/mkldnn_node.h | 10 +- .../mkldnn_plugin/mkldnn_nodes_factory.cpp | 2 + .../nodes/common/dnnl_executor.cpp | 45 +++ .../nodes/common/dnnl_executor.h | 39 +++ .../nodes/mkldnn_adaptive_pooling.cpp | 6 +- .../nodes/mkldnn_adaptive_pooling.h | 3 +- .../nodes/mkldnn_batch_to_space_node.cpp | 4 + .../nodes/mkldnn_batch_to_space_node.h | 3 +- .../nodes/mkldnn_broadcast_node.cpp | 16 +- .../nodes/mkldnn_broadcast_node.h | 6 +- .../nodes/mkldnn_bucketize_node.cpp | 8 +- .../nodes/mkldnn_bucketize_node.h | 3 +- .../nodes/mkldnn_concat_node.cpp | 49 ++-- .../mkldnn_plugin/nodes/mkldnn_concat_node.h | 5 +- .../mkldnn_plugin/nodes/mkldnn_conv_node.cpp | 175 ++++++++---- .../mkldnn_plugin/nodes/mkldnn_conv_node.h | 20 +- .../nodes/mkldnn_convert_node.cpp | 11 +- .../mkldnn_plugin/nodes/mkldnn_convert_node.h | 3 +- .../nodes/mkldnn_ctc_greedy_decoder_node.cpp | 10 +- .../nodes/mkldnn_ctc_greedy_decoder_node.h | 1 - ...mkldnn_ctc_greedy_decoder_seq_len_node.cpp | 10 +- .../mkldnn_ctc_greedy_decoder_seq_len_node.h | 1 - .../nodes/mkldnn_ctc_loss_node.cpp | 8 +- .../nodes/mkldnn_ctc_loss_node.h | 3 +- .../nodes/mkldnn_cum_sum_node.cpp | 8 +- .../mkldnn_plugin/nodes/mkldnn_cum_sum_node.h | 1 - .../nodes/mkldnn_deconv_node.cpp | 131 ++++----- .../mkldnn_plugin/nodes/mkldnn_deconv_node.h | 51 +--- .../nodes/mkldnn_detection_output_node.cpp | 14 +- .../nodes/mkldnn_detection_output_node.h | 3 +- .../nodes/mkldnn_eltwise_node.cpp | 12 +- .../mkldnn_plugin/nodes/mkldnn_eltwise_node.h | 4 +- .../mkldnn_embedding_bag_offset_sum_node.cpp | 16 +- .../mkldnn_embedding_bag_offset_sum_node.h | 4 +- .../mkldnn_embedding_bag_packed_sum_node.cpp | 16 +- .../mkldnn_embedding_bag_packed_sum_node.h | 4 +- .../mkldnn_embedding_segments_sum_node.cpp | 16 +- .../mkldnn_embedding_segments_sum_node.h | 4 +- ...mental_detectron_detection_output_node.cpp | 6 - ...rimental_detectron_detection_output_node.h | 1 - ...n_generate_proposals_single_image_node.cpp | 6 - ...ron_generate_proposals_single_image_node.h | 1 - ...ntal_detectron_priorgridgenerator_node.cpp | 6 - ...mental_detectron_priorgridgenerator_node.h | 1 - ...ental_detectron_roifeatureextractor_node.h | 1 - ...dnn_experimental_detectron_topkrois_node.h | 1 - .../mkldnn_extract_image_patches_node.cpp | 10 +- .../nodes/mkldnn_extract_image_patches_node.h | 1 - .../nodes/mkldnn_fake_quantize_node.cpp | 11 +- .../nodes/mkldnn_fake_quantize_node.h | 3 +- .../nodes/mkldnn_gather_elements_node.cpp | 12 +- .../nodes/mkldnn_gather_elements_node.h | 3 +- .../nodes/mkldnn_gather_nd_node.cpp | 10 +- .../nodes/mkldnn_gather_nd_node.h | 1 - 
.../nodes/mkldnn_gather_node.cpp | 10 +- .../mkldnn_plugin/nodes/mkldnn_gather_node.h | 1 - .../nodes/mkldnn_interpolate_node.cpp | 2 +- .../nodes/mkldnn_log_softmax_node.cpp | 12 +- .../nodes/mkldnn_log_softmax_node.h | 3 +- .../mkldnn_plugin/nodes/mkldnn_lrn_node.cpp | 12 +- .../src/mkldnn_plugin/nodes/mkldnn_lrn_node.h | 1 - .../mkldnn_plugin/nodes/mkldnn_math_node.cpp | 4 + .../mkldnn_plugin/nodes/mkldnn_math_node.h | 3 +- .../nodes/mkldnn_matmul_node.cpp | 8 - .../mkldnn_plugin/nodes/mkldnn_matmul_node.h | 1 - .../nodes/mkldnn_matrix_nms_node.cpp | 24 +- .../nodes/mkldnn_matrix_nms_node.h | 4 +- .../nodes/mkldnn_multiclass_nms.cpp | 24 +- .../nodes/mkldnn_multiclass_nms.hpp | 4 +- .../mkldnn_plugin/nodes/mkldnn_mvn_node.cpp | 12 +- .../src/mkldnn_plugin/nodes/mkldnn_mvn_node.h | 3 +- .../nodes/mkldnn_non_max_suppression_node.cpp | 19 +- .../nodes/mkldnn_non_max_suppression_node.h | 2 +- .../mkldnn_plugin/nodes/mkldnn_non_zero.cpp | 6 + .../src/mkldnn_plugin/nodes/mkldnn_non_zero.h | 6 +- .../nodes/mkldnn_normalize_node.cpp | 8 + .../nodes/mkldnn_normalize_node.h | 4 +- .../nodes/mkldnn_one_hot_node.cpp | 4 + .../mkldnn_plugin/nodes/mkldnn_one_hot_node.h | 2 +- .../mkldnn_plugin/nodes/mkldnn_pad_node.cpp | 53 ++-- .../src/mkldnn_plugin/nodes/mkldnn_pad_node.h | 4 + .../nodes/mkldnn_pooling_node.cpp | 8 +- .../mkldnn_plugin/nodes/mkldnn_pooling_node.h | 3 +- .../nodes/mkldnn_proposal_node.cpp | 4 + .../nodes/mkldnn_proposal_node.h | 3 +- .../mkldnn_plugin/nodes/mkldnn_range_node.cpp | 4 + .../mkldnn_plugin/nodes/mkldnn_range_node.h | 6 +- .../nodes/mkldnn_reduce_node.cpp | 11 + .../mkldnn_plugin/nodes/mkldnn_reduce_node.h | 3 +- .../nodes/mkldnn_reorder_node.cpp | 12 + .../mkldnn_plugin/nodes/mkldnn_reorder_node.h | 6 +- .../nodes/mkldnn_reorg_yolo_node.cpp | 4 + .../nodes/mkldnn_reorg_yolo_node.h | 5 +- .../nodes/mkldnn_reshape_node.cpp | 14 +- .../mkldnn_plugin/nodes/mkldnn_reshape_node.h | 1 - .../nodes/mkldnn_roi_align_node.cpp | 8 +- .../nodes/mkldnn_roi_align_node.h | 1 - .../nodes/mkldnn_roi_pooling_node.cpp | 4 +- .../nodes/mkldnn_scatter_update_node.cpp | 34 +-- .../nodes/mkldnn_scatter_update_node.h | 2 +- .../nodes/mkldnn_select_node.cpp | 11 +- .../mkldnn_plugin/nodes/mkldnn_select_node.h | 3 +- .../mkldnn_plugin/nodes/mkldnn_shapeof.cpp | 4 + .../src/mkldnn_plugin/nodes/mkldnn_shapeof.h | 6 +- .../nodes/mkldnn_shuffle_channels_node.cpp | 6 +- .../nodes/mkldnn_shuffle_channels_node.h | 2 +- .../nodes/mkldnn_softmax_node.cpp | 12 +- .../mkldnn_plugin/nodes/mkldnn_softmax_node.h | 1 - .../nodes/mkldnn_space_to_batch_node.cpp | 4 + .../nodes/mkldnn_space_to_batch_node.h | 3 +- .../nodes/mkldnn_space_to_depth_node.cpp | 2 +- .../mkldnn_plugin/nodes/mkldnn_split_node.cpp | 48 ++-- .../mkldnn_plugin/nodes/mkldnn_split_node.h | 11 +- .../nodes/mkldnn_strided_slice_node.cpp | 12 +- .../nodes/mkldnn_strided_slice_node.h | 2 + .../mkldnn_plugin/nodes/mkldnn_tile_node.cpp | 12 +- .../mkldnn_plugin/nodes/mkldnn_tile_node.h | 5 +- .../mkldnn_plugin/nodes/mkldnn_topk_node.cpp | 8 +- .../mkldnn_plugin/nodes/mkldnn_topk_node.h | 1 - .../nodes/mkldnn_transpose_node.cpp | 8 +- .../nodes/mkldnn_transpose_node.h | 1 + src/bindings/python/tests/__init__.py | 6 - .../python/tests/test_onnx/test_backend.py | 1 - .../python/tests/test_onnx/test_zoo_models.py | 6 - .../test_onnx/test_backend.py | 1 - .../test_onnx/test_zoo_models.py | 2 - src/core/src/op/topk.cpp | 4 +- .../skip_tests_config.cpp | 2 + .../plugin/cpu/single_layer_tests/concat.cpp | 60 +++- .../cpu/single_layer_tests/convolution.cpp | 
37 +++ .../plugin/cpu/single_layer_tests/nonzero.cpp | 47 +++- .../cpu/single_layer_tests/variadic_split.cpp | 88 +++++- .../subgraph_tests/src/static_zero_dims.cpp | 62 +++++ src/tests/unit/cpu/mkldnn_zero_dims_test.cpp | 258 ++++++++++++++++++ 147 files changed, 1438 insertions(+), 742 deletions(-) create mode 100644 inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.cpp create mode 100644 inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.h create mode 100644 src/tests/functional/plugin/cpu/subgraph_tests/src/static_zero_dims.cpp create mode 100644 src/tests/unit/cpu/mkldnn_zero_dims_test.cpp diff --git a/inference-engine/src/mkldnn_plugin/cpu_shape.h b/inference-engine/src/mkldnn_plugin/cpu_shape.h index 0972ebd227f..9fbc0a55616 100644 --- a/inference-engine/src/mkldnn_plugin/cpu_shape.h +++ b/inference-engine/src/mkldnn_plugin/cpu_shape.h @@ -25,6 +25,8 @@ public: type = shape.is_static() ? ShapeType::Static : ShapeType::Dynamic; initDims(); + + hasZeroDimensions = std::any_of(dims.begin(), dims.end(), [](size_t dim) { return dim == 0; } ); } explicit Shape(const InferenceEngine::SizeVector& shape) { @@ -33,6 +35,8 @@ public: type = ShapeType::Static; initDims(); + + hasZeroDimensions = std::any_of(dims.begin(), dims.end(), [](size_t dim) { return dim == 0; } ); } /** @@ -106,6 +110,10 @@ public: return type == ShapeType::Dynamic; } + bool hasZeroDims() const { + return hasZeroDimensions; + } + size_t getRank() const { return minDims.size(); } @@ -169,6 +177,8 @@ private: Dynamic } type {ShapeType::Static}; + bool hasZeroDimensions = false; + VectorDims minDims; VectorDims maxDims; VectorDims dims; diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.cpp b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.cpp index 4718151ea84..dd8e3597598 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.cpp +++ b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.cpp @@ -16,7 +16,8 @@ CpuBlockedMemoryDesc::CpuBlockedMemoryDesc(InferenceEngine::Precision prc, const offsetPadding = 0; offsetPaddingToData.resize(dims.size(), 0); strides.resize(order.size()); - strides[strides.size() - 1] = 1; + // for empty tensor case we fill all strides with 0 values + strides[strides.size() - 1] = shape.hasZeroDims() ? 0 : 1; for (size_t i = 2; i <= order.size(); i++) { strides[strides.size() - i] = strides[strides.size() - (i - 1)] * blockedDims[blockedDims.size() - (i - 1)]; } @@ -33,6 +34,15 @@ CpuBlockedMemoryDesc::CpuBlockedMemoryDesc(InferenceEngine::Precision prc, const IE_THROW() << "CpuBlockedMemoryDesc doesn't support undefined blockedDims."; } + if (shape.hasZeroDims()) { + const auto& dims = shape.getDims(); + for (size_t i = 0; i < shape.getRank(); i++) { + if (dims[order[i]] == 0 && !dimsEqualWeak(blockedDims[i], 0)) { + IE_THROW() << "Can't create CpuBlockedMemoryDesc. 
Mismatch zero dims in dims and blocked dims";
+            }
+        }
+    }
+
     this->order = order;
     this->blockedDims = blockedDims;
     this->offsetPadding = offsetPadding;
@@ -44,7 +54,9 @@ CpuBlockedMemoryDesc::CpuBlockedMemoryDesc(InferenceEngine::Precision prc, const
     }
 
     if (strides.empty() && !order.empty()) {
-        if (std::any_of(this->blockedDims.begin(), this->blockedDims.end(), [](size_t val) { return val == Shape::UNDEFINED_DIM; })) {
+        if (shape.hasZeroDims()) {
+            this->strides.resize(order.size(), 0);
+        } else if (std::any_of(this->blockedDims.begin(), this->blockedDims.end(), [](size_t val) { return val == Shape::UNDEFINED_DIM; })) {
             this->strides.resize(order.size(), Shape::UNDEFINED_DIM);
         } else {
             this->strides.resize(order.size());
@@ -54,6 +66,9 @@ CpuBlockedMemoryDesc::CpuBlockedMemoryDesc(InferenceEngine::Precision prc, const
             }
         }
     } else {
+        if (shape.hasZeroDims() && std::any_of(strides.begin(), strides.end(), [](size_t stride) { return stride != 0; } )) {
+            IE_THROW() << "Can't create CpuBlockedMemoryDesc with zero dim, but with non zero strides";
+        }
         this->strides = strides;
     }
 
@@ -92,11 +107,16 @@ bool CpuBlockedMemoryDesc::isCompatible(const DnnlBlockedMemoryDesc &rhs) const
     return rhs.isCompatible(*this);
 }
 
+bool CpuBlockedMemoryDesc::canComputeMemSizeZeroDims() const {
+    return getShape().hasZeroDims() && getOffsetPadding() != Shape::UNDEFINED_DIM;
+}
+
 size_t CpuBlockedMemoryDesc::getCurrentMemSizeImp() const {
     int64_t e_size = getOffsetPadding() + 1;  // size in bytes (from begin of data to last element)
-    for (int j = 0; j < getBlockDims().size(); j++)
-        e_size += (getBlockDims()[j] - 1) * getStrides()[j];
-
+    if (!getShape().hasZeroDims()) {
+        for (int j = 0; j < getBlockDims().size(); j++)
+            e_size += (getBlockDims()[j] - 1) * getStrides()[j];
+    }
     e_size *= getPrecision() == InferenceEngine::Precision::BIN ?
1 : getPrecision().size(); @@ -104,14 +124,14 @@ size_t CpuBlockedMemoryDesc::getCurrentMemSizeImp() const { } size_t CpuBlockedMemoryDesc::getMaxMemSize() const { - if (shape.isStatic()) { + if (shape.isStatic() || shape.hasZeroDims()) { return getCurrentMemSize(); } - auto& maxDims = shape.getMaxDims(); + const auto& maxDims = shape.getMaxDims(); if (std::any_of(maxDims.begin(), maxDims.end(), [](size_t x){ return Shape::UNDEFINED_DIM == x || // WA: for some nodes ngraph compute upper bound depending on precision max value - std::numeric_limits::max() == x; })) { + x >= std::numeric_limits::max(); })) { return UNDEFINED_SIZE; } @@ -270,15 +290,23 @@ bool CpuBlockedMemoryDesc::blocksExtended() const { } size_t CpuBlockedMemoryDesc::getPaddedElementsCount() const { - if (std::any_of(blockedDims.begin(), blockedDims.end(), [](Dim dim) { return dim == Shape::UNDEFINED_DIM; })) + if (getShape().hasZeroDims()) { + return 0; + } + if (std::any_of(blockedDims.begin(), blockedDims.end(), [](Dim dim) { return dim == Shape::UNDEFINED_DIM; })) { IE_THROW() << "Can't compute padded elements count for non undefined blocked dims"; + } return std::accumulate(blockedDims.begin(), blockedDims.end(), size_t{1}, std::multiplies()); } MemoryDescPtr CpuBlockedMemoryDesc::cloneWithUndefStridesAndOffset() const { const auto orderSize = getOrder().size(); - return std::make_shared(getPrecision(), getShape(), getBlockDims(), getOrder(), Shape::UNDEFINED_DIM, - VectorDims(orderSize, 0), VectorDims(orderSize, Shape::UNDEFINED_DIM)); + CpuBlockedMemoryDescPtr newDesc = std::make_shared(*this); + newDesc->strides = VectorDims(orderSize, Shape::UNDEFINED_DIM); + newDesc->offsetPadding = Shape::UNDEFINED_DIM; + newDesc->offsetPaddingToData = VectorDims(orderSize, 0); + newDesc->status = descStatus::Undefined; + return newDesc; } MemoryDescPtr CpuBlockedMemoryDesc::cloneWithDefaultStridesAndOffset() const { diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.h b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.h index 6f8cb9b9e45..28515ff03b4 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.h +++ b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_blocked_memory_desc.h @@ -84,6 +84,7 @@ public: private: size_t getElementOffset(size_t elemNumber) const override; + bool canComputeMemSizeZeroDims() const override; size_t getCurrentMemSizeImp() const override; size_t getOffset(const InferenceEngine::SizeVector& v) const; bool isPlainFormat() const; diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc.h b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc.h index 660a65117fb..ff79c8e4426 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc.h +++ b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc.h @@ -93,7 +93,7 @@ public: */ size_t getCurrentMemSize() const { size_t retVal = UNDEFINED_SIZE; - if (isDefined()) { + if (canComputeMemSize()) { retVal = getCurrentMemSizeImp(); } return retVal; @@ -140,8 +140,13 @@ protected: // Get offset to the n'th element. Returns physical index of the element by the logical one considering padding, layout, blocking etc. 
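The zero-dims handling threaded through the descriptors above boils down to one convention: a shape with any zero dimension forces all strides to zero, counts zero padded elements, and reports a defined (offset-only) memory size instead of an undefined one. A minimal standalone model of the size rule, with a hand-rolled type rather than the plugin's real classes:

#include <algorithm>
#include <cstddef>
#include <vector>

// Standalone model of the zero-dims size rule above (not the plugin's real API).
struct BlockedDescModel {
    std::vector<size_t> dims, strides;
    size_t offsetPadding = 0;
    size_t precisionSize = 4;  // e.g. FP32

    bool hasZeroDims() const {
        return std::any_of(dims.begin(), dims.end(), [](size_t d) { return d == 0; });
    }
    // Mirrors getCurrentMemSizeImp(): for an empty tensor only the offset counts.
    size_t currentMemSize() const {
        size_t e_size = offsetPadding + 1;
        if (!hasZeroDims())
            for (size_t j = 0; j < dims.size(); j++)
                e_size += (dims[j] - 1) * strides[j];
        return e_size * precisionSize;
    }
};
// BlockedDescModel{{1, 0, 2, 3}, {0, 0, 0, 0}}.currentMemSize() == 4:
// a defined, near-zero allocation rather than an "undefined size" answer.

On the consumer side, getMaxMemSize() still returns its undefined sentinel for unbounded dynamic shapes (the upper-bound check above was also relaxed from equality to >= because nGraph can compute bounds at or beyond the size_t maximum), so callers preallocate only when a finite bound exists.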
virtual size_t getElementOffset(size_t elemNumber) const = 0; + virtual bool canComputeMemSizeZeroDims() const = 0; virtual bool isDefinedImp() const = 0; + bool canComputeMemSize() const { + return isDefined() || canComputeMemSizeZeroDims(); + } + virtual MemoryDescPtr cloneWithNewDimsImp(const VectorDims& dims) const = 0; MemoryDescType type; diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc_utils.cpp b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc_utils.cpp index 38dd1220e29..ee38482a9cc 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc_utils.cpp +++ b/inference-engine/src/mkldnn_plugin/memory_desc/cpu_memory_desc_utils.cpp @@ -48,17 +48,37 @@ DnnlBlockedMemoryDesc MemoryDescUtils::convertToDnnlBlockedMemoryDesc(const Memo CpuBlockedMemoryDesc MemoryDescUtils::convertToCpuBlockedMemoryDesc(const InferenceEngine::TensorDesc& desc) { if (desc.getLayout() == InferenceEngine::Layout::ANY) IE_THROW() << "Cannot convert InferenceEngine::TensorDesc with ANY layout to CpuBlockedMemoryDesc"; - const auto &blkDesc = desc.getBlockingDesc(); - return CpuBlockedMemoryDesc(desc.getPrecision(), Shape(desc.getDims()), blkDesc.getBlockDims(), blkDesc.getOrder(), blkDesc.getOffsetPadding(), - blkDesc.getOffsetPaddingToData(), blkDesc.getStrides()); + + const auto& blkDesc = desc.getBlockingDesc(); + const auto& dims = desc.getDims(); + + auto strides = blkDesc.getStrides(); + // for empty tensor case InferenceEngine::TensorDesc fill strides with non zero values before first 0 dims + // i.e. dims[1, 0, 2, 3] -> strides [0, 6, 3, 1] + if (std::any_of(dims.begin(), dims.end(), [](size_t dim){ return dim == 0; })) { + std::fill(strides.begin(), strides.end(), 0); + } + + return CpuBlockedMemoryDesc(desc.getPrecision(), Shape(dims), blkDesc.getBlockDims(), blkDesc.getOrder(), blkDesc.getOffsetPadding(), + blkDesc.getOffsetPaddingToData(), strides); } DnnlBlockedMemoryDesc MemoryDescUtils::convertToDnnlBlockedMemoryDesc(const InferenceEngine::TensorDesc& desc) { - const auto &blkDesc = desc.getBlockingDesc(); if (desc.getLayout() == InferenceEngine::Layout::ANY) IE_THROW() << "Cannot convert InferenceEngine::TensorDesc with ANY layout to DnnlBlockedMemoryDesc"; + + const auto& blkDesc = desc.getBlockingDesc(); + const auto& dims = desc.getDims(); + + auto strides = blkDesc.getStrides(); + // for empty tensor case InferenceEngine::TensorDesc fill strides with non zero values before first 0 dims + // i.e. 
dims[1, 0, 2, 3] -> strides [0, 6, 3, 1] + if (std::any_of(dims.begin(), dims.end(), [](size_t dim){ return dim == 0; })) { + std::fill(strides.begin(), strides.end(), 0); + } + return DnnlBlockedMemoryDesc(desc.getPrecision(), Shape(desc.getDims()), blkDesc.getBlockDims(), blkDesc.getOrder(), blkDesc.getOffsetPadding(), - blkDesc.getOffsetPaddingToData(), blkDesc.getStrides()); + blkDesc.getOffsetPaddingToData(), strides); } BlockedMemoryDescPtr MemoryDescUtils::convertToBlockedMemoryDesc(const MemoryDescPtr &desc) { @@ -80,9 +100,16 @@ InferenceEngine::Blob::Ptr MemoryDescUtils::interpretAsBlob(const MKLDNNMemory & InferenceEngine::TensorDesc MemoryDescUtils::convertToTensorDesc(const MemoryDesc& desc) { if (auto blockingDesc = dynamic_cast(&desc)) { - return InferenceEngine::TensorDesc(blockingDesc->getPrecision(), blockingDesc->getShape().getStaticDims(), - {blockingDesc->getBlockDims(), blockingDesc->getOrder(), blockingDesc->getOffsetPadding(), - blockingDesc->getOffsetPaddingToData(), blockingDesc->getStrides()}); + InferenceEngine::BlockingDesc blkDesc = desc.getShape().hasZeroDims() ? InferenceEngine::BlockingDesc(blockingDesc->getBlockDims(), + blockingDesc->getOrder(), + blockingDesc->getOffsetPadding(), + blockingDesc->getOffsetPaddingToData()) : + InferenceEngine::BlockingDesc(blockingDesc->getBlockDims(), + blockingDesc->getOrder(), + blockingDesc->getOffsetPadding(), + blockingDesc->getOffsetPaddingToData(), + blockingDesc->getStrides()); + return InferenceEngine::TensorDesc(blockingDesc->getPrecision(), blockingDesc->getShape().getStaticDims(), blkDesc); } else { IE_THROW() << "Cannot convert MemoryDesc to InferenceEngine::TensorDesc"; } diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.cpp b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.cpp index f4eed00661a..4b1efda623a 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.cpp +++ b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.cpp @@ -15,12 +15,17 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(InferenceEngine::Precision prc, con const auto &dims = shape.getDims(); if (!strides.empty()) { // custom strides + if (shape.hasZeroDims() && std::any_of(strides.begin(), strides.end(), [](size_t stride) { return stride != 0; } )) { + IE_THROW() << "Can't create DnnlBlockedMemoryDesc with zero dim, but with non zero strides"; + } desc = {MKLDNNExtensionUtils::convertToDnnlDims(dims), MKLDNNExtensionUtils::IEPrecisionToDataType(prc), MKLDNNExtensionUtils::convertToDnnlDims(strides)}; } else { mkldnn::memory::dims plain_strides; - if (std::any_of(dims.begin(), dims.end(), [](size_t val) { return val == Shape::UNDEFINED_DIM; })) { + if (shape.hasZeroDims()) { + plain_strides.resize(ndims, 0); + } else if (std::any_of(dims.begin(), dims.end(), [](size_t val) { return val == Shape::UNDEFINED_DIM; })) { plain_strides.resize(ndims, DNNL_RUNTIME_DIM_VAL); } else { plain_strides.resize(ndims, 1); @@ -58,8 +63,8 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(InferenceEngine::Precision prc, con * Limitation of conversion first N elements of order should be permutation of [0,1,2 ... 
N] */ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(InferenceEngine::Precision prc, const Shape& shape, const VectorDims& blockedDims, - const VectorDims& order, size_t offsetPadding, const VectorDims& offsetPaddingToData, - const VectorDims& strides) : MemoryDesc(shape, DnnlBlocked) { + const VectorDims& order, size_t offsetPadding, const VectorDims& offsetPaddingToData, + const VectorDims& strides) : MemoryDesc(shape, DnnlBlocked) { using namespace mkldnn; // scalar case if (shape.getRank() == 0) { @@ -90,8 +95,8 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(InferenceEngine::Precision prc, con IE_THROW() << "DnnlBlockedMemoryDesc doesn't support undefined order."; } - if (std::any_of(blockedDims.begin() + shape.getRank(), blockedDims.end(), [](size_t val) { return val == Shape::UNDEFINED_DIM; })) { - IE_THROW() << "DnnlBlockedMemoryDesc doesn't support undefined blockedDims."; + if (std::any_of(blockedDims.begin() + shape.getRank(), blockedDims.end(), [](size_t val) { return val == Shape::UNDEFINED_DIM || val == 0; })) { + IE_THROW() << "DnnlBlockedMemoryDesc doesn't support undefined or zero blockedDims."; } auto dims = MKLDNNExtensionUtils::convertToDnnlDims(shape.getDims()); @@ -106,7 +111,12 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(InferenceEngine::Precision prc, con size_t inner_ndims = order.size() - dims.size(); + const bool emptyDesc = shape.hasZeroDims(); if (!strides.empty()) { + if (emptyDesc && std::any_of(strides.begin(), strides.end(), [](size_t dim) { return dim != 0; } )) { + IE_THROW() << "Can't create DnnlBlockedMemoryDesc with zero dim, but with non zero strides"; + } + bool is_descending_strides = true; for (int i = 1; i < strides.size(); i++) { is_descending_strides &= (strides[i - 1] >= strides[i]); @@ -118,7 +128,7 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(InferenceEngine::Precision prc, con IE_THROW() << "Can not construct DnnlBlockedMemoryDesc from strides: " << vec2str(strides); } - if (!strides.empty() && std::none_of(strides.begin(), strides.end(), [](size_t x) { return Shape::UNDEFINED_DIM == x; })) { + if (!strides.empty() && !emptyDesc && std::none_of(strides.begin(), strides.end(), [](size_t x) { return Shape::UNDEFINED_DIM == x; })) { bool inner_block_are_dense = one_of(strides.back(), 0, 1); // stride 1 - is dense case, 0 - broad casted for (int i = outer_ndims; i < strides.size() - 1; i++) { inner_block_are_dense &= (strides[i] == strides[i + 1] * blockedDims[i + 1]); @@ -203,6 +213,11 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(const Shape& shape, mkldnn::memory: order.swap(perm); order.insert(order.end(), inner_idxs.begin(), inner_idxs.end()); + if (shape.hasZeroDims()) { + auto& blk = desc.data.format_desc.blocking; + std::fill(std::begin(blk.strides), std::begin(blk.strides) + desc.data.ndims, 0); + } + initBlockedParams(); } @@ -296,6 +311,12 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(const mkldnn::memory::desc& mdesc) IE_THROW(Unexpected) << "Can't create DnnlBlockedMemoryDesc from not blocking desc"; order = extractOrder(desc); + + if (getShape().hasZeroDims()) { + auto& blk = desc.data.format_desc.blocking; + std::fill(std::begin(blk.strides), std::begin(blk.strides) + desc.data.ndims, 0); + } + initBlockedParams(); } @@ -368,6 +389,7 @@ bool DnnlBlockedMemoryDesc::isTailCFormat() const { static mkldnn::memory::desc cloneDescWithNewDims(const mkldnn::memory::desc& desc, const VectorDims& dims, const VectorDims& order) { using namespace dnnl::impl::utils; auto mklDims = MKLDNNExtensionUtils::convertToDnnlDims(dims); + const 
auto offsetPadding = desc.data.offset0; mkldnn::memory::desc newMklDesc = desc; array_copy(newMklDesc.data.dims, mklDims.data(), mklDims.size()); std::vector perm(order.begin(), order.begin() + mklDims.size()); @@ -379,6 +401,9 @@ static mkldnn::memory::desc cloneDescWithNewDims(const mkldnn::memory::desc& des if (retCode != dnnl::impl::status::success) { IE_THROW() << "Can not clone DnnlBlockedMemoryDesc with dims: " << MemoryDescUtils::dims2str(dims); } + // dnnl::impl::fill_blocked always set offset0 to 0 + // so we need to restore actual value + newMklDesc.data.offset0 = offsetPadding; return newMklDesc; } @@ -476,14 +501,14 @@ bool DnnlBlockedMemoryDesc::isSame(mkldnn::memory::format_tag fmt) const { } size_t DnnlBlockedMemoryDesc::getMaxMemSize() const { - if (shape.isStatic()) { + if (shape.isStatic() || shape.hasZeroDims()) { return getCurrentMemSize(); } - auto& maxDims = shape.getMaxDims(); + const auto& maxDims = shape.getMaxDims(); if (std::any_of(maxDims.begin(), maxDims.end(), [](size_t x){ return Shape::UNDEFINED_DIM == x || // WA: for some nodes ngraph compute upper bound depending on precision max value - std::numeric_limits::max() == x; })) { + x >= std::numeric_limits::max(); })) { return UNDEFINED_SIZE; } @@ -492,6 +517,13 @@ size_t DnnlBlockedMemoryDesc::getMaxMemSize() const { } size_t DnnlBlockedMemoryDesc::getPaddedElementsCount() const { + if (getShape().hasZeroDims()) { + return 0; + } + if (std::any_of(std::begin(desc.data.padded_dims), std::begin(desc.data.padded_dims) + desc.data.ndims, + [](dnnl_dim_t dim) { return dim == DNNL_RUNTIME_DIM_VAL; })) { + IE_THROW() << "Can't compute padded elements count for non undefined blocked dims"; + } return std::accumulate(std::begin(desc.data.padded_dims), std::begin(desc.data.padded_dims) + desc.data.ndims, size_t{1}, std::multiplies()); } @@ -548,7 +580,7 @@ void DnnlBlockedMemoryDesc::initStrides() { const size_t total_ndims = outer_ndims + inner_ndims; // strides of inner dims. In case of 4i16o4i will be {64, 4, 1} - VectorDims inner_strides(inner_ndims, 1); + VectorDims inner_strides(inner_ndims, getShape().hasZeroDims() ? 
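The getMaxMemSize() hunk above guards against two sentinels before multiplying out the upper-bound dims. Note that the listing's std::numeric_limits call lost its template argument, so int32_t below is our assumption about the elided type. A self-contained sketch of the same guard:

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <limits>
    #include <numeric>
    #include <vector>

    constexpr std::size_t UNDEFINED_DIM  = std::numeric_limits<std::size_t>::max();
    constexpr std::size_t UNDEFINED_SIZE = std::numeric_limits<std::size_t>::max();

    std::size_t maxMemSizeFor(const std::vector<std::size_t>& maxDims, std::size_t elementSize) {
        for (std::size_t d : maxDims) {
            // undefined upper bound, or an ngraph "max value of the precision" placeholder
            if (d == UNDEFINED_DIM ||
                d >= static_cast<std::size_t>(std::numeric_limits<std::int32_t>::max()))
                return UNDEFINED_SIZE;
        }
        return elementSize * std::accumulate(maxDims.begin(), maxDims.end(),
                                             std::size_t{1}, std::multiplies<std::size_t>());
    }
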
0 : 1); for (size_t i = 1; i < blk_desc.inner_nblks; i++) { inner_strides[blk_desc.inner_nblks - 1 - i] = inner_strides[blk_desc.inner_nblks - i] * blk_desc.inner_blks[blk_desc.inner_nblks - i]; } @@ -600,7 +632,9 @@ void DnnlBlockedMemoryDesc::recomputeDefaultStrides() { IE_THROW() << "Can't recompute stride: order size != blocked dims size"; auto &oneDnnStrides = desc.data.format_desc.blocking.strides; - if (std::any_of(blockedDims.begin(), blockedDims.end(), [](Dim val) { return val == Shape::UNDEFINED_DIM; })) { + if (getShape().hasZeroDims()) { + std::fill(std::begin(oneDnnStrides), std::begin(oneDnnStrides) + getShape().getRank(), 0); + } else if (std::any_of(blockedDims.begin(), blockedDims.end(), [](Dim val) { return val == Shape::UNDEFINED_DIM; })) { std::fill(std::begin(oneDnnStrides), std::begin(oneDnnStrides) + rank, DNNL_RUNTIME_DIM_VAL); initStrides(); } else { @@ -633,6 +667,11 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(const mkldnn::memory::desc& mdesc, desc = cloneDescWithNewDims(mdesc, shape.getDims(), order); + if (shape.hasZeroDims()) { + auto& blk = desc.data.format_desc.blocking; + std::fill(std::begin(blk.strides), std::begin(blk.strides) + desc.data.ndims, 0); + } + initBlockedParams(); } diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.h b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.h index 770686c6317..f78707e03d6 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.h +++ b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_blocked_memory_desc.h @@ -71,7 +71,7 @@ private: explicit DnnlBlockedMemoryDesc(const mkldnn::memory::desc& mdesc); // Creates DnnlBlockedMemoryDesc using the shape parameter as a true shape but all other params (layout, blocks, etc.) are used from the mdesc, but - // the mdesc own shape is ignored. The main purpose of this constructor is making dynamic descriptor form some dummy mdesc, which stores info about + // the mdesc own shape is ignored. The main purpose of this constructor is making dynamic descriptor from some dummy mdesc, which stores info about // layout, blocking, strides, etc., and the provided dynamic shape. 
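Both initStrides() and recomputeDefaultStrides() above special-case empty tensors by zero-filling the stride array, so any size computed from the strides collapses to 0. A minimal sketch of default dense strides with that special case (plain vectors, names ours):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<std::size_t> denseStrides(const std::vector<std::size_t>& blockedDims) {
        std::vector<std::size_t> strides(blockedDims.size(), 0);
        const bool empty = blockedDims.empty() ||
            std::any_of(blockedDims.begin(), blockedDims.end(),
                        [](std::size_t d) { return d == 0; });
        if (empty)
            return strides;                     // all-zero strides for zero-dim tensors
        strides.back() = 1;
        for (std::size_t i = blockedDims.size() - 1; i > 0; --i)
            strides[i - 1] = strides[i] * blockedDims[i];
        return strides;
    }
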
DnnlBlockedMemoryDesc(const mkldnn::memory::desc& mdesc, const Shape& shape); diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.cpp b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.cpp index 0e060912df2..5747529e587 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.cpp +++ b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.cpp @@ -15,6 +15,10 @@ DnnlMemoryDesc::DnnlMemoryDesc(const mkldnn::memory::desc& desc) : IE_THROW(Unexpected) << "Memory format any is prohibited!"; } +bool DnnlMemoryDesc::canComputeMemSizeZeroDims() const { + return getShape().hasZeroDims() && desc.data.offset0 != DNNL_RUNTIME_DIM_VAL; +} + size_t DnnlMemoryDesc::getCurrentMemSizeImp() const { return MKLDNNExtensionUtils::getMemSizeForDnnlDesc(desc); } diff --git a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.h b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.h index c17b847dc16..d796c7ef234 100644 --- a/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.h +++ b/inference-engine/src/mkldnn_plugin/memory_desc/dnnl_memory_desc.h @@ -63,6 +63,7 @@ private: size_t getElementOffset(size_t elemNumber) const override; + bool canComputeMemSizeZeroDims() const override; size_t getCurrentMemSizeImp() const override; bool isDefinedImp() const override; MemoryDescPtr cloneWithNewDimsImp(const VectorDims& dims) const override; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp index 6765a1c0b67..37d8633bd5c 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp @@ -384,15 +384,16 @@ void MKLDNNGraph::InitOptimalPrimitiveDescriptors() { void MKLDNNGraph::ExtractConstantAndExecutableNodes() { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::MKLDNN_LT, "MKLDNNGraph::ExtractConstantAndExecutableNodes"); for (const auto& graphNode : graphNodes) { - if (graphNode->isConstant()) + if (graphNode->isConstant()) { constantGraphNodes.emplace_back(graphNode); - else if (CPU_DEBUG_CAPS_ALWAYS_TRUE(graphNode->isExecutable())) + } else if (CPU_DEBUG_CAPS_ALWAYS_TRUE(graphNode->isExecutable())) { /* @todo * Revise implementation. 
* With current way it is possible that with debug_caps enabled * we execute a node, which is not ready to be executed */ executableGraphNodes.emplace_back(graphNode); + } } } @@ -792,7 +793,7 @@ void MKLDNNGraph::PullOutputData(BlobMap &out) { // check for empty output blob if (std::any_of(outDims.begin(), outDims.end(), [](const Dim dim) {return dim == 0;})) { - return; + continue; } auto srcPrec = actualDesc.getPrecision(); @@ -835,10 +836,11 @@ inline void MKLDNNGraph::ExecuteNode(const MKLDNNNodePtr& node, const mkldnn::st DUMP(node, infer_count); OV_ITT_SCOPED_TASK(itt::domains::MKLDNNPlugin, node->profiling.execute); - if (node->isDynamicNode()) + if (node->isDynamicNode()) { node->executeDynamic(stream); - else + } else { node->execute(stream); + } } void MKLDNNGraph::Infer(MKLDNNInferRequest* request, int batch) { @@ -854,7 +856,6 @@ void MKLDNNGraph::Infer(MKLDNNInferRequest* request, int batch) { if (request) request->ThrowIfCanceled(); - ExecuteNode(node, stream); } @@ -993,22 +994,6 @@ Config MKLDNNGraph::getProperty() const { return config; } -Blob::Ptr MKLDNNGraph::getInputBlob(const std::string& name) { - auto itr = inputNodesMap.find(name); - if (itr != inputNodesMap.end()) { - return MemoryDescUtils::interpretAsBlob(itr->second->getChildEdgeAt(0)->getMemory()); - } - return nullptr; -} - -Blob::Ptr MKLDNNGraph::getOutputBlob(const std::string& name) { - auto itr = outputNodesMap.find(name); - if (itr != outputNodesMap.end()) { - return MemoryDescUtils::interpretAsBlob(itr->second->getParentEdgeAt(0)->getMemory()); - } - return nullptr; -} - void MKLDNNGraph::RemoveEdge(MKLDNNEdgePtr& edge) { for (auto it = graphEdges.begin(); it != graphEdges.end(); it++) { if ((*it) == edge) { diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.h b/inference-engine/src/mkldnn_plugin/mkldnn_graph.h index 38e1bb01dc4..9d6703df665 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.h @@ -44,9 +44,6 @@ public: void setProperty(const std::map &properties); Config getProperty() const; - InferenceEngine::Blob::Ptr getInputBlob(const std::string& name); - InferenceEngine::Blob::Ptr getOutputBlob(const std::string& name); - template void CreateGraph(NET &network, const MKLDNNExtensionManager::Ptr& extMgr, diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp index ccd9c96e675..6179f52394a 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp @@ -190,8 +190,9 @@ void MKLDNNPlugin::MKLDNNInferRequest::redefineMemoryForInputNodes() { const auto inputNode = cpuInputNodes.find(blob.first); if (inputNode == cpuInputNodes.end()) IE_THROW() << "CPU execution graph doesn't contain input node with name: " << blob.first; - if (inputNode->second->isDynamicNode()) + if (inputNode->second->isDynamicNode()) { inputNode->second->redefineOutputMemory({blob.second->getTensorDesc().getDims()}); + } } } diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp index a0a10a93ec4..32e64b625ca 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp @@ -84,7 +84,7 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr& op, const mkldnn::en for (size_t i = 0; i < op->get_input_size(); i++) { const auto &shape = op->get_input_partial_shape(i); if 
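In PullOutputData() above, the early return on an empty output blob became a continue, so one zero-sized output no longer suppresses copying of the remaining outputs. A small sketch of the fixed loop shape (FakeBlob and the map type are hypothetical stand-ins):

    #include <algorithm>
    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    struct FakeBlob { std::vector<std::size_t> dims; };

    void pullOutputs(const std::map<std::string, FakeBlob>& outputs) {
        for (const auto& kv : outputs) {
            const auto& dims = kv.second.dims;
            if (std::any_of(dims.begin(), dims.end(),
                            [](std::size_t d) { return d == 0; }))
                continue;   // skip this empty output, keep copying the rest
            // ... copy kv.second into the corresponding user blob ...
        }
    }
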
(shape.rank().is_dynamic()) { - IE_THROW(Unexpected) << "CPU plug-in doesn't support operation with dynamic rank"; + IE_THROW(Unexpected) << "CPU plug-in doesn't support " << getTypeStr() << " operation with dynamic rank. Operation name: " << getName(); } bool isScalar = shape.rank().get_length() == 0; @@ -99,7 +99,7 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr& op, const mkldnn::en for (size_t i = 0; i < op->get_output_size(); i++) { const auto &shape = op->get_output_partial_shape(i); if (shape.rank().is_dynamic()) { - IE_THROW(Unexpected) << "CPU plug-in doesn't support operation with dynamic rank"; + IE_THROW(Unexpected) << "CPU plug-in doesn't support " << getTypeStr() << " operation with dynamic rank. Operation name: " << getName(); } bool isScalar = shape.rank().get_length() == 0; @@ -230,6 +230,15 @@ bool MKLDNNNode::isEdgesEmpty(const std::vector& edges) const return true; } +void MKLDNNNode::createPrimitive() { + if (inputShapesDefined() && isExecutable()) { + if (needPrepareParams()) { + prepareParams(); + } + updateLastInputDims(); + } +} + void MKLDNNNode::selectOptimalPrimitiveDescriptor() { selectPreferPrimitiveDescriptor(getPrimitivesPriority(), false); } @@ -510,12 +519,14 @@ void MKLDNNNode::executeDynamic(mkldnn::stream strm) { if (needShapeInfer()) { redefineOutputMemory(shapeInfer()); } - if (needPrepareParams()) { - IE_ASSERT(inputShapesDefined()) << "Can't prepare params for " << getTypeStr() << " node with name: " << getName() << - " since the input shapes are not defined."; - prepareParams(); + if (isExecutable()) { + if (needPrepareParams()) { + IE_ASSERT(inputShapesDefined()) << "Can't prepare params for " << getTypeStr() << " node with name: " << getName() << + " since the input shapes are not defined."; + prepareParams(); + } + executeDynamicImpl(strm); } - executeDynamicImpl(strm); updateLastInputDims(); } @@ -1333,6 +1344,36 @@ std::pair, std::vector> MKLDNNNode::getScalesAndShifts return {scales, shifts}; } +bool MKLDNNNode::isInputTensorAtPortEmpty(size_t port) const { + if (inputShapes.size() <= port) { + IE_THROW() << "Incorrect input port number for node " << getName(); + } + return getParentEdgesAtPort(port)[0]->getMemory().GetShape().hasZeroDims(); +} + +bool MKLDNNNode::isOutputTensorAtPortEmpty(size_t port) const { + if (outputShapes.size() <= port) { + IE_THROW() << "Incorrect output port number for node " << getName(); + } + return getChildEdgesAtPort(port)[0]->getMemory().GetShape().hasZeroDims(); +} + +bool MKLDNNNode::hasEmptyInputTensors() const { + for (size_t i = 0; i < getParentEdges().size(); i++) { + if (isInputTensorAtPortEmpty(i)) + return true; + } + return false; +} + +bool MKLDNNNode::hasEmptyOutputTensors() const { + for (size_t i = 0; i < outputShapes.size(); i++) { + if (isOutputTensorAtPortEmpty(i)) + return true; + } + return false; +} + bool MKLDNNNode::inputShapesDefined() const { for (size_t i = 0; i < getParentEdges().size(); i++) { if (!getParentEdgesAtPort(i)[0]->getMemory().getDesc().isDefined()) @@ -1406,8 +1447,11 @@ std::vector MKLDNNNode::shapeInferGeneric(const std::vector& std::vector newOutputShapes(opToShapeInfer->get_output_size()); for (size_t i = 0; i < newOutputShapes.size(); i++) { const auto &partShape = opToShapeInfer->get_output_partial_shape(i); - if (partShape.is_dynamic()) - IE_THROW(NotImplemented) << "CPU plug-in doesn't support default shape infer for nodes with internal dynamism"; + if (partShape.is_dynamic()) { + IE_THROW(NotImplemented) << "CPU plug-in doesn't support default shape infer for 
node " << getTypeStr() + << " with internal dynamism. Operation name: " << getName(); + } + newOutputShapes[i] = partShape.get_shape(); } return newOutputShapes; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.h b/inference-engine/src/mkldnn_plugin/mkldnn_node.h index 97517d54fbc..85475733b13 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.h @@ -199,7 +199,7 @@ public: // must be called only after MKLDNNGraph::InitEdges() virtual bool isExecutable() const { - return true; + return !hasEmptyInputTensors(); } bool isConstant(); @@ -370,7 +370,7 @@ public: */ virtual void filterSupportedPrimitiveDescriptors(); - virtual void createPrimitive() = 0; + virtual void createPrimitive(); virtual void selectOptimalPrimitiveDescriptor(); virtual void initOptimalPrimitiveDescriptor(); @@ -728,6 +728,12 @@ protected: bool isDynamic = false; + bool isInputTensorAtPortEmpty(size_t port) const; + bool isOutputTensorAtPortEmpty(size_t port) const; + + bool hasEmptyInputTensors() const; + bool hasEmptyOutputTensors() const; + bool inputShapesDefined() const; bool outputShapesDefined() const; bool shapesDefined() const; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_nodes_factory.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_nodes_factory.cpp index f7f8aa2c7c7..f3ff2beba9a 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_nodes_factory.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_nodes_factory.cpp @@ -80,6 +80,7 @@ #include "nodes/mkldnn_reduce_node.h" #include "nodes/mkldnn_if_node.h" #include "nodes/mkldnn_ctc_greedy_decoder_node.h" +#include "nodes/mkldnn_non_zero.h" #define MKLDNN_NODE(__prim, __type) \ registerNodeIfRequired(MKLDNNPlugin, __prim, __type, MKLDNNNodeImpl<__prim>) @@ -168,4 +169,5 @@ MKLDNNPlugin::MKLDNNNode::NodesFactory::NodesFactory() MKLDNN_NODE(MKLDNNTopKNode, TopK); MKLDNN_NODE(MKLDNNStridedSliceNode, StridedSlice); MKLDNN_NODE(MKLDNNGRNNode, GRN); + MKLDNN_NODE(MKLDNNNonZeroNode, NonZero); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.cpp b/inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.cpp new file mode 100644 index 00000000000..6f001fe8db1 --- /dev/null +++ b/inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "dnnl_executor.h" + +using namespace mkldnn; +using namespace MKLDNNPlugin; + +DnnlExecutor::IntermReorder::IntermReorder(const mkldnn::memory::desc& descSrc, + const mkldnn::memory::desc& descDst, + const mkldnn::engine& engine) : m_descSrc(descSrc), m_descDst(descDst) { + auto reorderPd = mkldnn::reorder::primitive_desc(engine, descSrc, engine, descDst); + m_reorder = mkldnn::reorder(reorderPd); +} + +void DnnlExecutor::IntermReorder::exec(mkldnn::memory& memSrc, mkldnn::memory& memDst, mkldnn::stream strm) { + m_reorder.execute(strm, memSrc, memDst); +} + +void DnnlExecutor::exec(std::unordered_map primArgs, mkldnn::stream strm) { + for (auto &inReorder : inputReorders) { + if (primArgs.count(inReorder.first)) { + mkldnn::memory memDst(inReorder.second.getDstDesc(), strm.get_engine()); + inReorder.second.exec(primArgs[inReorder.first], memDst, strm); + primArgs[inReorder.first] = memDst; + } else { + IE_THROW() << "DnnlExecutor has reorder for input " << inReorder.first << ", but doesn't have source memory"; + } + } + std::unordered_map outputMem; + for (auto &outReorder : outputReorders) { + if 
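The reworked executeDynamic() above always runs shape inference but gates prepareParams() and the kernel on isExecutable(), whose default now reports false for empty input tensors. A compilable skeleton of that control flow (NodeSketch and its trivial bodies are ours):

    #include <stdexcept>

    struct NodeSketch {
        bool emptyInput = false;                 // stand-in for hasEmptyInputTensors()

        bool needShapeInfer() const { return true; }
        void redefineOutputMemory() {}
        bool isExecutable() const { return !emptyInput; }
        bool needPrepareParams() const { return true; }
        bool inputShapesDefined() const { return true; }
        void prepareParams() {}
        void executeDynamicImpl() {}
        void updateLastInputDims() {}

        void executeDynamic() {
            if (needShapeInfer())
                redefineOutputMemory();          // shape inference always runs
            if (isExecutable()) {                // empty tensors skip the kernel
                if (needPrepareParams()) {
                    if (!inputShapesDefined())
                        throw std::runtime_error("input shapes are not defined");
                    prepareParams();
                }
                executeDynamicImpl();
            }
            updateLastInputDims();               // remembered even when the kernel is skipped
        }
    };
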
(primArgs.count(outReorder.first)) { + mkldnn::memory memSrc(outReorder.second.getSrcDesc(), strm.get_engine()); + outputMem[outReorder.first] = primArgs[outReorder.first]; + primArgs[outReorder.first] = memSrc; + } else { + IE_THROW() << "DnnlExecutor has reorder for output " << outReorder.first << ", but doesn't have destination memory"; + } + } + (*execPrim).execute(strm, primArgs); + for (auto &outReorder : outputReorders) { + outReorder.second.exec(primArgs[outReorder.first], outputMem[outReorder.first], strm); + } +} diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.h b/inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.h new file mode 100644 index 00000000000..b6e6c83030d --- /dev/null +++ b/inference-engine/src/mkldnn_plugin/nodes/common/dnnl_executor.h @@ -0,0 +1,39 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "mkldnn_memory.h" +#include "mkldnn_primitive.h" + +namespace MKLDNNPlugin { + +class DnnlExecutor { + protected: + class IntermReorder { + public: + IntermReorder(const mkldnn::memory::desc& descSrc, const mkldnn::memory::desc& descDst, const mkldnn::engine& engine); + void exec(mkldnn::memory& memSrc, mkldnn::memory& memDst, mkldnn::stream strm); + const mkldnn::memory::desc& getSrcDesc() const { return m_descSrc; } + const mkldnn::memory::desc& getDstDesc() const { return m_descDst; } + + private: + mkldnn::reorder m_reorder; + mkldnn::memory::desc m_descSrc; + mkldnn::memory::desc m_descDst; + }; + + public: + void exec(std::unordered_map primArgs, mkldnn::stream strm); + virtual ~DnnlExecutor() = default; + + protected: + DnnlExecutor() = default; + MKLDNNPrimitive execPrim; + // key is the port number for the primitive that needs memory reordering + std::unordered_map inputReorders; + std::unordered_map outputReorders; +}; + +} // namespace MKLDNNPlugin diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.cpp index 64ed60cbdb9..0909f99deb2 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.cpp @@ -145,6 +145,10 @@ void MKLDNNAdaptivePoolingNode::initSupportedPrimitiveDescriptors() { } } +void MKLDNNAdaptivePoolingNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNAdaptivePoolingNode::execute(mkldnn::stream strm) { auto inputPrec = getParentEdgeAt(0)->getMemory().GetDataType(); auto outputPrec = getChildEdgeAt(0)->getMemory().GetDataType(); @@ -283,8 +287,6 @@ bool MKLDNNAdaptivePoolingNode::created() const { return getType() == AdaptivePooling; } -void MKLDNNAdaptivePoolingNode::createPrimitive() {} - inline void MKLDNNAdaptivePoolingNode::setBinBorders(size_t *startPtr, size_t *endPtr, size_t idx, size_t inputLength, size_t outputLength) { *(startPtr) = idx * inputLength / outputLength; *(endPtr) = ceil(static_cast((idx + 1) * inputLength) / outputLength); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.h index c47740e75ec..88a3e9a9324 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_adaptive_pooling.h @@ -18,7 +18,6 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void 
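DnnlExecutor above factors the "reorder mismatched arguments around a cached primitive" pattern out of individual nodes. As a usage illustration, a hypothetical executor for another forward primitive could look like the sketch below; PoolingExecutor is not in the patch and only assumes the oneDNN 2.x pooling_forward API used elsewhere in this series:

    #include "common/dnnl_executor.h"

    namespace MKLDNNPlugin {

    class PoolingExecutor : public DnnlExecutor {
    public:
        PoolingExecutor(const mkldnn::pooling_forward::primitive_desc& pd,
                        const mkldnn::memory::desc& inMemDesc,
                        const mkldnn::memory::desc& outMemDesc,
                        const mkldnn::engine& engine) {
            execPrim.reset(new mkldnn::pooling_forward(pd));
            // reorders are registered only for arguments whose runtime
            // descriptor differs from the one the primitive was built for
            if (inMemDesc != pd.src_desc())
                inputReorders.insert({DNNL_ARG_SRC, IntermReorder(inMemDesc, pd.src_desc(), engine)});
            if (outMemDesc != pd.dst_desc())
                outputReorders.insert({DNNL_ARG_DST, IntermReorder(pd.dst_desc(), outMemDesc, engine)});
        }
    };

    } // namespace MKLDNNPlugin
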
execute(mkldnn::stream strm) override; bool created() const override; @@ -36,7 +35,7 @@ protected: bool needShapeInfer() const override; std::vector shapeInfer() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; }; } // namespace MKLDNNPlugin diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.cpp index a4811c30aff..60a37375cdc 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.cpp @@ -225,6 +225,10 @@ void MKLDNNBatchToSpaceNode::batchToSpaceKernel() { }); } +void MKLDNNBatchToSpaceNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNBatchToSpaceNode::execute(mkldnn::stream strm) { switch (getParentEdgeAt(0)->getMemory().getDesc().getPrecision().size()) { case 1: batchToSpaceKernel::value_type>(); break; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.h index 0c0ef5ee06c..91fe87e4f71 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_batch_to_space_node.h @@ -18,12 +18,11 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override {}; void execute(mkldnn::stream strm) override; bool created() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp index 898c03f9b40..eba9e00666f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp @@ -107,14 +107,6 @@ void MKLDNNBroadcastNode::initSupportedPrimitiveDescriptors() { supportedPrimitiveDescriptors = getSupportedConfigs(this); } -void MKLDNNBroadcastNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - bool MKLDNNBroadcastNode::needPrepareParams() const { return needPrepareParamsVar; } @@ -215,6 +207,14 @@ std::vector MKLDNNBroadcastNode::shapeInfer() const { return newOutputShapes; } +bool MKLDNNBroadcastNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0); +} + +void MKLDNNBroadcastNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNBroadcastNode::execute(mkldnn::stream strm) { if (optimizedCase) { optimizedExecute(getParentEdgeAt(INPUT_DATA_IDX)->getMemoryPtr(), getChildEdgeAt(0)->getMemoryPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.h index 4851cf19c9a..93cbddb0b45 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.h @@ -19,13 +19,11 @@ public: void getSupportedDescriptors() 
override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; - void executeDynamicImpl(mkldnn::stream strm) override { - execute(strm); - } + void executeDynamicImpl(mkldnn::stream strm) override; bool created() const override; + bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.cpp index 86e29b292cb..d875d228191 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.cpp @@ -203,12 +203,8 @@ void MKLDNNBucketizeNode::prepareParams() { std::accumulate(input_tensor_dims.begin(), input_tensor_dims.end(), size_t(1), std::multiplies()); } -void MKLDNNBucketizeNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } +bool MKLDNNBucketizeNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0); } std::vector MKLDNNBucketizeNode::shapeInfer() const { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.h index ae4a4030bfd..04f0b0df3f0 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_bucketize_node.h @@ -15,15 +15,16 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void prepareParams() override; std::vector shapeInfer() const override; + bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp index 3b0e1e481c8..a6e044b4353 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp @@ -31,6 +31,10 @@ namespace { constexpr size_t channelAxis = 1lu; } +bool MKLDNNConcatNode::isExecutable() const { + return !hasEmptyOutputTensors() && !isOptimized(); +} + bool MKLDNNConcatNode::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { const auto concatOp = ngraph::as_type_ptr(op); @@ -173,7 +177,7 @@ void MKLDNNConcatNode::initSupportedPrimitiveDescriptors() { } // TODO [DS]: inplace - if (!canBeInPlace) + if (!canBeInPlace || std::any_of(inputShapes.begin(), inputShapes.end(), [](const Shape& shape) { return shape.hasZeroDims(); })) return; // Optimized inplace case @@ -353,7 +357,6 @@ void MKLDNNConcatNode::prepareParams() { IE_THROW() << "Preferable primitive descriptor is not set."; std::vector srcs_d; - for (size_t i = 0; i < getParentEdges().size(); i++) { const auto& srcMemPtr = getParentEdgesAtPort(i)[0]->getMemoryPtr(); if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) { @@ -362,6 +365,10 @@ void MKLDNNConcatNode::prepareParams() { << getName() << "."; } + if (srcMemPtr->GetShape().hasZeroDims()) { + continue; + } + auto desc = 
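Concat::prepareParams() above drops zero-dim sources before building the oneDNN primitive; the execute() hunk below re-indexes the surviving inputs densely so the argument ids still line up. A sketch of that compaction over plain dims vectors (Src and the function name are ours):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Src { std::vector<std::size_t> dims; };

    std::vector<std::size_t> nonEmptySourceIds(const std::vector<Src>& srcs) {
        std::vector<std::size_t> ids;
        for (std::size_t i = 0; i < srcs.size(); ++i) {
            const auto& d = srcs[i].dims;
            if (std::any_of(d.begin(), d.end(), [](std::size_t x) { return x == 0; }))
                continue;       // an empty tensor contributes nothing to the result
            ids.push_back(i);
        }
        return ids;             // dense ids for DNNL_ARG_MULTIPLE_SRC + k
    }
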
srcMemPtr->GetDescWithType()->getDnnlDesc(); const auto& dims = srcMemPtr->getStaticDims(); for (size_t j = 0; j < dims.size(); j++) { @@ -382,14 +389,6 @@ void MKLDNNConcatNode::prepareParams() { prim.reset(new concat(primitive_desc)); } -void MKLDNNConcatNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - size_t MKLDNNConcatNode::inverseOrder(const SizeVector& order, size_t axis) { for (size_t i = 0; i < order.size(); i++) { if (axis == order[i]) { @@ -489,16 +488,23 @@ void MKLDNNConcatNode::execute(mkldnn::stream strm) { return; } + const MKLDNNMemory& dst_memory = getChildEdgeAt(0)->getMemory(); if (canOptimizeNspc) { execNspcSpecCase(); return; } - const MKLDNNMemory& dst_memory = getChildEdgeAt(0)->getMemory(); const size_t num_src = getParentEdges().size(); std::unordered_map mem_ags {{DNNL_ARG_DST, dst_memory.GetPrimitive()}}; - for (int i = 0; i < num_src; i++) - mem_ags[DNNL_ARG_MULTIPLE_SRC + i] = getParentEdgeAt(i)->getMemory().GetPrimitive(); + size_t nonZeroInShapes = 0; + for (int i = 0; i < num_src; i++) { + const auto& srcMem = getParentEdgesAtPort(i)[0]->getMemory(); + if (srcMem.GetShape().hasZeroDims()) { + continue; + } + mem_ags[DNNL_ARG_MULTIPLE_SRC + nonZeroInShapes] = srcMem.GetPrimitive(); + nonZeroInShapes++; + } (*prim).execute(strm, mem_ags); } @@ -518,21 +524,32 @@ void MKLDNNConcatNode::execNspcSpecCase() { std::vector src_ptrs; std::vector dst_ptrs; + size_t nonZeroInShapes = 0; + int firstNonZeroEdge = -1; for (size_t i = 0; i < num_src; i++) { - const MKLDNNMemory& src_mem = getParentEdgeAt(i)->getMemory(); + const MKLDNNMemory& src_mem = getParentEdgesAtPort(i)[0]->getMemory(); + if (src_mem.GetShape().hasZeroDims()) { + continue; + } const size_t num_channels = src_mem.getStaticDims()[channelAxis]; channelsDataSize.push_back(num_channels * dataSize); src_ptrs.push_back(reinterpret_cast(src_mem.GetData())); dst_ptrs.push_back(dst_ptr + channels_size); channels_size += num_channels * dataSize; + + if (firstNonZeroEdge == -1) { + firstNonZeroEdge = i; + } + + nonZeroInShapes++; } - const size_t iter_count = getParentEdgeAt(0)->getMemory().GetSize() / channelsDataSize[0]; + const size_t iter_count = getParentEdgeAt(firstNonZeroEdge)->getMemory().GetSize() / channelsDataSize[0]; parallel_for(iter_count, [&](int i) { const size_t dst_off = i * channels_size; - for (int j = 0; j < num_src; j++) { + for (int j = 0; j < nonZeroInShapes; j++) { cpu_memcpy(dst_ptrs[j] + dst_off, src_ptrs[j] + i * channelsDataSize[j], channelsDataSize[j]); } }); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.h index 09686ca5f24..5e51f9c8a6e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.h @@ -19,7 +19,6 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void initOptimalPrimitiveDescriptor() override; - void createPrimitive() override; void selectOptimalPrimitiveDescriptor() override; bool created() const override; void execute(mkldnn::stream strm) override; @@ -28,10 +27,8 @@ public: bool isOptimized() const; InferenceEngine::Precision getRuntimePrecision() const override; - bool isExecutable() const override { - return !isOptimized(); - } + bool isExecutable() const override; bool needPrepareParams() const override; void prepareParams() override; diff --git 
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp index 03f7700e7af..6e46b42b4a8 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp @@ -487,15 +487,6 @@ void MKLDNNConvolutionNode::initSupportedPrimitiveDescriptors() { } } - -void MKLDNNConvolutionNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - bool MKLDNNConvolutionNode::created() const { return getType() == Convolution; } @@ -545,7 +536,14 @@ MKLDNNConvolutionNode::createDescriptorInternal(const mkldnn::memory::desc& inpu void MKLDNNConvolutionNode::createDescriptor(const std::vector& inputDesc, const std::vector& outputDesc) { - auto inpDesc = inputDesc[0]->isDefined() ? inputDesc[0] : MemoryDescUtils::makeDummyDesc(*inputDesc[0]); + MemoryDescPtr inpDesc; + if (inputDesc[0]->isDefined()) { + inpDesc = inputDesc[0]; + } else { + auto dummyInDims = MemoryDescUtils::makeDummyShape(inputDesc[0]->getShape()).getStaticDims(); + dummyInDims[1] = IC; + inpDesc = inputDesc[0]->cloneWithNewDims(dummyInDims); + } DnnlMemoryDescPtr definedInpMemDesc = MemoryDescUtils::convertToDnnlMemoryDesc(inpDesc); DnnlMemoryDescPtr definedOutMemDesc; @@ -906,14 +904,57 @@ InferenceEngine::Blob::Ptr MKLDNNConvolutionNode::createInternalBlob(InferenceEn return internalBlob; } +std::shared_ptr MKLDNNConvolutionNode::createMkldnnConvDesc(const mkldnn::memory::desc& srcDesc, + const mkldnn::memory::desc& wghDesc, + const mkldnn::memory::desc& dstDesc, + const mkldnn::memory::desc& biasDesc) { + std::shared_ptr dnnlConvDesc; + auto alg = isWinograd() ? mkldnn::algorithm::convolution_winograd : mkldnn::algorithm::convolution_direct; + + if (withBiases) { + // WA to align IR bias representation (3 to 5 rank tensors) to oneDNN representation (1 rank tensor) + mkldnn::memory::desc dnnlBiasDesc = biasDesc.reshape(MKLDNNExtensionUtils::convertToDnnlDims(biasesDims)); + return std::make_shared(createDescriptorInternal(srcDesc, + wghDesc, + dnnlBiasDesc, + dstDesc, + alg)); + } else { + return std::make_shared(createDescriptorInternal(srcDesc, + wghDesc, + dstDesc, + alg)); + } +} + void MKLDNNConvolutionNode::prepareParams() { + auto srcMemPtr = getParentEdgesAtPort(0)[0]->getMemoryPtr(); + auto wghMemPtr = getParentEdgesAtPort(1)[0]->getMemoryPtr(); + auto dstMemPtr = getChildEdgesAtPort(0)[0]->getMemoryPtr(); + if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) + IE_THROW() << "Destination memory didn't allocate."; + if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) + IE_THROW() << "Input memory didn't allocate."; + if (!wghMemPtr || !wghMemPtr->GetPrimitivePtr()) + IE_THROW() << "Weight memory didn't allocate."; + MKLDNNMemoryPtr biasMemPtr = nullptr; + if (withBiases) { + biasMemPtr = getParentEdgesAtPort(2)[0]->getMemoryPtr(); + if (!biasMemPtr || !biasMemPtr->GetPrimitivePtr()) + IE_THROW() << "Input memory didn't allocate."; + } + const NodeDesc *selected_pd = getSelectedPrimitiveDescriptor(); if (selected_pd == nullptr) IE_THROW() << "Preferable primitive descriptor is not set for node " << getName() << "."; - auto inMemoryDesc = getParentEdgesAtPort(0).front()->getMemory().GetDescWithType(); - auto weightMemoryDesc = getParentEdgesAtPort(1).front()->getMemory().GetDescWithType(); - auto outMemoryDesc = getChildEdgesAtPort(0).front()->getMemory().GetDescWithType(); + auto inMemoryDesc = 
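createDescriptor() above no longer uses a fully generic dummy descriptor for a dynamic input: the channel dimension is pinned to IC so the weights descriptor stays consistent. A sketch of that shape-probing trick; the placeholder value 64 and the kUndefinedDim sentinel are our assumptions, not taken from makeDummyShape:

    #include <cstddef>
    #include <vector>

    constexpr std::size_t kUndefinedDim = static_cast<std::size_t>(-1);

    std::vector<std::size_t> dummyConvInputDims(std::vector<std::size_t> dims,
                                                std::size_t IC,
                                                std::size_t placeholder = 64) {
        for (auto& d : dims)
            if (d == kUndefinedDim)
                d = placeholder;    // any static stand-in works for descriptor probing
        if (dims.size() > 1)
            dims[1] = IC;           // channels must match the real weights
        return dims;
    }
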
srcMemPtr->GetDescWithType(); + auto weightMemoryDesc = wghMemPtr->GetDescWithType(); + auto outMemoryDesc = dstMemPtr->GetDescWithType(); + mkldnn::memory::desc biasDesc; + if (biasMemPtr) { + biasDesc = biasMemPtr->GetDescWithType()->getDnnlDesc(); + } auto initPrimitiveAttr = [&]() { mkldnn::primitive_attr attr; @@ -934,55 +975,95 @@ void MKLDNNConvolutionNode::prepareParams() { pAttrLocal = initPrimitiveAttr(); } - std::shared_ptr dnnlConvDesc; - auto alg = isWinograd() ? mkldnn::algorithm::convolution_winograd : mkldnn::algorithm::convolution_direct; + std::shared_ptr desc = createMkldnnConvDesc(inMemoryDesc->getDnnlDesc(), + weightMemoryDesc->getDnnlDesc(), + outMemoryDesc->getDnnlDesc(), + biasDesc); - if (withBiases) { - auto biasMemoryDesc = getParentEdgesAtPort(2).front()->getMemory().GetDescWithType(); - // WA to align IR bias representation (3 to 5 rank tensors) to oneDNN representation (1 rank tensor) - mkldnn::memory::desc dnnlBiasDesc = biasMemoryDesc->getDnnlDesc().reshape(MKLDNNExtensionUtils::convertToDnnlDims(biasesDims)); - dnnlConvDesc = createDescriptorInternal(inMemoryDesc->getDnnlDesc(), - weightMemoryDesc->getDnnlDesc(), - dnnlBiasDesc, - outMemoryDesc->getDnnlDesc(), - alg); - } else { - dnnlConvDesc = createDescriptorInternal(inMemoryDesc->getDnnlDesc(), - weightMemoryDesc->getDnnlDesc(), - outMemoryDesc->getDnnlDesc(), - alg); - } - - MKLDNNDescriptor desc(dnnlConvDesc); - - auto itpd = desc.createPrimitiveDescriptorIterator(getEngine(), *pAttrLocal); + auto itpd = desc->createPrimitiveDescriptorIterator(getEngine(), *pAttrLocal); convolution_forward::primitive_desc prim_desc; - while (static_cast(itpd)) { + + execPtr = nullptr; + while (static_cast(itpd)) { impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str()); if (impl_type == selected_pd->getImplementationType()) { prim_desc = convolution_forward::primitive_desc(itpd.get()); + execPtr = std::make_shared(prim_desc, + srcMemPtr->GetPrimitive().get_desc(), + wghMemPtr->GetPrimitive().get_desc(), + dstMemPtr->GetPrimitive().get_desc(), + getEngine()); break; } - if (!itpd.next_impl()) - IE_THROW() << "Primitive descriptor was not found for node " << getName() << "."; + + if (!itpd.next_impl()) { + auto inDesc = mkldnn::memory::desc(MKLDNNExtensionUtils::convertToDnnlDims(srcMemPtr->getStaticDims()), + srcMemPtr->GetDataType(), + memory::format_tag::any); + auto wghDesc = mkldnn::memory::desc(MKLDNNExtensionUtils::convertToDnnlDims(wghMemPtr->getStaticDims()), + wghMemPtr->GetDataType(), + memory::format_tag::any); + auto outDesc = mkldnn::memory::desc(MKLDNNExtensionUtils::convertToDnnlDims(dstMemPtr->getStaticDims()), + dstMemPtr->GetDataType(), + memory::format_tag::any); + + std::shared_ptr reorderConvDesc = createMkldnnConvDesc(inDesc, wghDesc, outDesc, biasDesc); + auto reordItpd = reorderConvDesc->createPrimitiveDescriptorIterator(getEngine(), *pAttrLocal); + if (static_cast(reordItpd)) { + auto prim_desc = convolution_forward::primitive_desc(reordItpd.get()); + execPtr = std::make_shared(prim_desc, srcMemPtr->GetPrimitive().get_desc(), + wghMemPtr->GetPrimitive().get_desc(), + dstMemPtr->GetPrimitive().get_desc(), + getEngine()); + break; + } + } } + if (execPtr) { + primArgs[DNNL_ARG_SRC] = srcMemPtr->GetPrimitive(); + primArgs[DNNL_ARG_WEIGHTS] = wghMemPtr->GetPrimitive(); + primArgs[DNNL_ARG_DST] = dstMemPtr->GetPrimitive(); - prim.reset(new convolution_forward(prim_desc)); + if (withBiases) { + primArgs[DNNL_ARG_BIAS] = biasMemPtr->GetPrimitive(); + } - primArgs[DNNL_ARG_SRC] = 
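The prepareParams() rework above searches the primitive descriptor iterator for the implementation selected at graph-compile time and, only when the iterator is exhausted, rebuilds the descriptor with format_tag::any so DnnlExecutor can bridge the layouts with reorders. The selection policy, abstracted away from oneDNN types (Impl and pickImpl are ours):

    #include <string>
    #include <vector>

    struct Impl { std::string name; };

    const Impl* pickImpl(const std::vector<Impl>& candidates,
                         const std::string& selected,
                         const Impl* anyLayoutFallback) {
        for (const auto& c : candidates)
            if (c.name == selected)
                return &c;          // exact match with the pre-selected impl
        return anyLayoutFallback;   // otherwise accept any layout plus reorders
    }
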
getParentEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive(); - primArgs[DNNL_ARG_WEIGHTS] = getWeights(); - primArgs[DNNL_ARG_DST] = getChildEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive(); - - if (withBiases) { - primArgs[DNNL_ARG_BIAS] = getBias(); + MKLDNNNode::appendPostOpArgs(*pAttrLocal, primArgs, binaryPostOpsArgs); + } else { + IE_THROW() << "Primitive descriptor was not found for node " << getName() << "."; } - - appendPostOpArgs(*pAttrLocal, primArgs, binaryPostOpsArgs); } -void MKLDNNConvolutionNode::executeDynamicImpl(dnnl::stream strm) { +MKLDNNConvolutionNode::ConvolutionExecutor::ConvolutionExecutor(const mkldnn::convolution_forward::primitive_desc& pd, + const mkldnn::memory::desc& inMemDesc, + const mkldnn::memory::desc& weightMemDesc, + const mkldnn::memory::desc& outMemDesc, + const mkldnn::engine& engine) { + execPrim.reset(new mkldnn::convolution_forward(pd)); + + if (inMemDesc != pd.src_desc()) { + inputReorders.insert({DNNL_ARG_SRC, IntermReorder(inMemDesc, pd.src_desc(), engine)}); + } + + if (weightMemDesc != pd.weights_desc()) { + inputReorders.insert({DNNL_ARG_WEIGHTS, IntermReorder(weightMemDesc, pd.weights_desc(), engine)}); + } + + if (outMemDesc != pd.dst_desc()) { + outputReorders.insert({DNNL_ARG_DST, IntermReorder(pd.dst_desc(), outMemDesc, engine)}); + } +} + +void MKLDNNConvolutionNode::execute(mkldnn::stream strm) { + if (!execPtr) { + IE_THROW() << "Can't execute Convolution node with name: " << getName() << ", because executor is not compiled"; + } + execPtr->exec(primArgs, strm); +} + +void MKLDNNConvolutionNode::executeDynamicImpl(mkldnn::stream strm) { execute(strm); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h index dcdd18092d5..f70fd806617 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h @@ -9,6 +9,7 @@ #include #include #include +#include "common/dnnl_executor.h" namespace MKLDNNPlugin { @@ -23,7 +24,6 @@ public: void createDescriptor(const std::vector& inputDesc, const std::vector& outputDesc) override; void initDescriptor(const NodeConfig& config) override; - void createPrimitive() override; void selectOptimalPrimitiveDescriptor() override; void initSupportedPrimitiveDescriptors() override; void filterSupportedPrimitiveDescriptors() override; @@ -65,7 +65,25 @@ protected: InferenceEngine::Precision fusedEltwisePrecision(const MKLDNNNodePtr& fusingNode) const; private: + using executorPtr = std::shared_ptr; + executorPtr execPtr = nullptr; + + class ConvolutionExecutor : public DnnlExecutor { + public: + ConvolutionExecutor(const mkldnn::convolution_forward::primitive_desc& pd, + const mkldnn::memory::desc& inMemDesc, + const mkldnn::memory::desc& weightMemDesc, + const mkldnn::memory::desc& outMemDesc, + const mkldnn::engine& engine); + }; + + std::shared_ptr createMkldnnConvDesc(const mkldnn::memory::desc& srcDesc, + const mkldnn::memory::desc& wghDesc, + const mkldnn::memory::desc& dstDesc, + const mkldnn::memory::desc& biasDesc); + void prepareParams() override; + void execute(mkldnn::stream strm) override; void executeDynamicImpl(mkldnn::stream strm) override; void addZeroPoints(mkldnn::primitive_attr& attr) const; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.cpp index 2a20f45ff29..00150f7eba4 100644 --- 
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.cpp @@ -129,15 +129,8 @@ void MKLDNNConvertNode::initSupportedPrimitiveDescriptors() { } } -void MKLDNNConvertNode::createPrimitive() { - auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); - auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); - if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) - IE_THROW() << errorPrefix << " has not allocated destination memory"; - if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) - IE_THROW() << errorPrefix << " has not allocated input memory"; - if (getSelectedPrimitiveDescriptor() == nullptr) - IE_THROW() << errorPrefix << " has nullable preferable primitive descriptor"; +void MKLDNNConvertNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); } void MKLDNNConvertNode::execute(mkldnn::stream strm) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.h index 08042187788..b345259fa5f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_convert_node.h @@ -19,9 +19,8 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; bool created() const override; bool canBeInPlace() const override { return false; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.cpp index b6881da496f..33b3859cc06 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.cpp @@ -165,14 +165,8 @@ bool MKLDNNCTCGreedyDecoderNode::created() const { return getType() == CTCGreedyDecoder; } -void MKLDNNCTCGreedyDecoderNode::executeDynamicImpl(dnnl::stream strm) { - MKLDNNCTCGreedyDecoderNode::execute(strm); -} - -void MKLDNNCTCGreedyDecoderNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } +void MKLDNNCTCGreedyDecoderNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); } bool MKLDNNCTCGreedyDecoderNode::needPrepareParams() const { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.h index a936d47d75d..84da43a9e5a 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_node.h @@ -15,7 +15,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; void executeDynamicImpl(dnnl::stream strm) override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.cpp index 81e94cee450..e9de4e7d5fd 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.cpp @@ -168,14 
+168,8 @@ bool MKLDNNCTCGreedyDecoderSeqLenNode::created() const { return getType() == CTCGreedyDecoderSeqLen; } -void MKLDNNCTCGreedyDecoderSeqLenNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } -} - -void MKLDNNCTCGreedyDecoderSeqLenNode::executeDynamicImpl(dnnl::stream strm) { - MKLDNNCTCGreedyDecoderSeqLenNode::execute(strm); +void MKLDNNCTCGreedyDecoderSeqLenNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); } bool MKLDNNCTCGreedyDecoderSeqLenNode::needPrepareParams() const { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.h index 023a9e8f409..6d997ae600d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.h @@ -15,7 +15,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; void executeDynamicImpl(dnnl::stream strm) override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.cpp index 3019cf20e86..0647f235155 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.cpp @@ -57,12 +57,8 @@ void MKLDNNCTCLossNode::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void MKLDNNCTCLossNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } +void MKLDNNCTCLossNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); } void MKLDNNCTCLossNode::execute(mkldnn::stream strm) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.h index b37f4500f3e..65df24e6457 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_ctc_loss_node.h @@ -15,13 +15,12 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; bool needPrepareParams() const override { return false; }; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.cpp index a99e30a1db0..0613f10cb70 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.cpp @@ -258,13 +258,7 @@ bool MKLDNNCumSumNode::needPrepareParams() const { } void MKLDNNCumSumNode::executeDynamicImpl(mkldnn::stream strm) { - return execute(strm); -} - -void MKLDNNCumSumNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } + execute(strm); } REG_MKLDNN_PRIM_FOR(MKLDNNCumSumNode, CumSum) diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.h 
b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.h index f917a53ef34..dc1d1b219d7 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_cum_sum_node.h @@ -15,7 +15,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp index f81e4601eab..19684324fbf 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp @@ -475,7 +475,7 @@ void MKLDNNDeconvolutionNode::execute(mkldnn::stream strm) { if (!execPtr) { IE_THROW() << "Can't execute Deconvolution node with name: " << getName() << ", because executor is not compiled"; } - execPtr->exec(strm); + execPtr->exec(primArgs, strm); if (externOutShape) { lastOutputSpatialDims = readOutputSpatialDims(); @@ -519,12 +519,18 @@ void MKLDNNDeconvolutionNode::createDeconvPrim(std::shared_ptr prepareMemory(itpd); } auto prim_desc = deconvolution_forward::primitive_desc(itpd.get()); - execPtr = std::make_shared(prim_desc, srcMemPtr, internalBlobMemory.front(), dstMemPtr, *attr, - binaryPostOpsArgs, getEngine()); + execPtr = std::make_shared(prim_desc, + srcMemPtr->GetPrimitive().get_desc(), + internalBlobMemory.front()->GetPrimitive().get_desc(), + dstMemPtr->GetPrimitive().get_desc(), + getEngine()); } else { auto prim_desc = convolution_backward_data::primitive_desc(itpd.get()); - execPtr = std::make_shared(prim_desc, srcMemPtr, wghMemPtr, dstMemPtr, *attr, - binaryPostOpsArgs, getEngine()); + execPtr = std::make_shared(prim_desc, + srcMemPtr->GetPrimitive().get_desc(), + wghMemPtr->GetPrimitive().get_desc(), + dstMemPtr->GetPrimitive().get_desc(), + getEngine()); } return; } @@ -544,8 +550,11 @@ void MKLDNNDeconvolutionNode::createDeconvPrim(std::shared_ptr auto anyDeconvItpd = anyDeconvDesc->createPrimitiveDescriptorIterator(getEngine(), *attr); if (static_cast(anyDeconvItpd)) { auto prim_desc = convolution_backward_data::primitive_desc(anyDeconvItpd.get()); - execPtr = std::make_shared(prim_desc, srcMemPtr, wghMemPtr, dstMemPtr, *attr, - binaryPostOpsArgs, getEngine()); + execPtr = std::make_shared(prim_desc, + srcMemPtr->GetPrimitive().get_desc(), + wghMemPtr->GetPrimitive().get_desc(), + dstMemPtr->GetPrimitive().get_desc(), + getEngine()); return; } } @@ -555,12 +564,15 @@ void MKLDNNDeconvolutionNode::createDeconvPrim(std::shared_ptr void MKLDNNDeconvolutionNode::prepareParams() { auto srcMemPtr = getParentEdgesAtPort(0)[0]->getMemoryPtr(); + auto wghMemPtr = getParentEdgesAtPort(1)[0]->getMemoryPtr(); auto dstMemPtr = getChildEdgesAtPort(0)[0]->getMemoryPtr(); if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) IE_THROW() << "Destination memory didn't allocate."; if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) IE_THROW() << "Input memory didn't allocate."; const NodeDesc *selected_pd = getSelectedPrimitiveDescriptor(); + if (!wghMemPtr || !wghMemPtr->GetPrimitivePtr()) + IE_THROW() << "Weight memory didn't allocate."; if (selected_pd == nullptr) IE_THROW() << "Preferable primitive descriptor is not set for node " << getName() << "."; @@ -610,7 +622,18 @@ void MKLDNNDeconvolutionNode::prepareParams() { selected_pd->getImplementationType() == 
MKLDNNPlugin::impl_desc_type::jit_avx512_winograd); } - createDeconvPrim(desc, srcMemPtr, getParentEdgesAtPort(1)[0]->getMemoryPtr(), dstMemPtr, pAttrLocal, selected_pd->getImplementationType()); + createDeconvPrim(desc, srcMemPtr, wghMemPtr, dstMemPtr, pAttrLocal, selected_pd->getImplementationType()); + + if (std::dynamic_pointer_cast(execPtr)) { + primArgs = {{DNNL_ARG_SRC, srcMemPtr->GetPrimitive()}, + {DNNL_ARG_WEIGHTS, internalBlobMemory.front()->GetPrimitive()}, + {DNNL_ARG_DST, dstMemPtr->GetPrimitive()}}; + } else { + primArgs = {{DNNL_ARG_DIFF_DST, srcMemPtr->GetPrimitive()}, + {DNNL_ARG_WEIGHTS, wghMemPtr->GetPrimitive()}, + {DNNL_ARG_DIFF_SRC, dstMemPtr->GetPrimitive()}}; + } + MKLDNNNode::appendPostOpArgs(attr, primArgs, binaryPostOpsArgs); } void MKLDNNDeconvolutionNode::createPrimitive() { @@ -738,100 +761,44 @@ InferenceEngine::Precision MKLDNNDeconvolutionNode::getRuntimePrecision() const return getMaxPrecision(inputPrecisions); } -MKLDNNDeconvolutionNode::DeconvExecutor::IntermReorder::IntermReorder(MKLDNNMemoryPtr memFrom, - const mkldnn::memory::desc& descTo, - const mkldnn::engine& engine) : m_memFrom(memFrom) { - m_memTo = std::make_shared(engine); - m_memTo->Create(MKLDNNExtensionUtils::makeDescriptor(descTo)); - m_reorder = mkldnn::reorder(m_memFrom->GetPrimitive(), m_memTo->GetPrimitive()); -} - -MKLDNNDeconvolutionNode::DeconvExecutor::IntermReorder::IntermReorder(const mkldnn::memory::desc& descFrom, - MKLDNNMemoryPtr memTo, - const mkldnn::engine& engine) : m_memTo(memTo) { - m_memFrom = std::make_shared(engine); - m_memFrom->Create(MKLDNNExtensionUtils::makeDescriptor(descFrom)); - m_reorder = mkldnn::reorder(m_memFrom->GetPrimitive(), m_memTo->GetPrimitive()); -} - -void MKLDNNDeconvolutionNode::DeconvExecutor::IntermReorder::exec(mkldnn::stream strm) { - auto src = m_memFrom->GetPrimitive(); - auto dst = m_memTo->GetPrimitive(); - m_reorder.execute(strm, src, dst); -} - -void MKLDNNDeconvolutionNode::DeconvExecutor::exec(mkldnn::stream strm) { - for (auto &inReorder : inputReorders) { - inReorder.exec(strm); - } - (*execPrim).execute(strm, primArgs); - for (auto &outReorder : outputReorders) { - outReorder.exec(strm); - } -} - MKLDNNDeconvolutionNode::DeconvExecutorDefault::DeconvExecutorDefault(const mkldnn::convolution_backward_data::primitive_desc& pd, - MKLDNNMemoryPtr inMem, - MKLDNNMemoryPtr weightMem, - MKLDNNMemoryPtr outMem, - const mkldnn::primitive_attr &attr, - const std::vector& binPostOpsArgs, + const mkldnn::memory::desc& inMemDesc, + const mkldnn::memory::desc& weightMemDesc, + const mkldnn::memory::desc& outMemDesc, const mkldnn::engine& engine) { execPrim.reset(new mkldnn::convolution_backward_data(pd)); - if (inMem->GetPrimitive().get_desc() != pd.diff_dst_desc()) { - inputReorders.push_back(IntermReorder(inMem, pd.diff_dst_desc(), engine)); - primArgs[DNNL_ARG_DIFF_DST] = inputReorders.back().getToMem()->GetPrimitive(); - } else { - primArgs[DNNL_ARG_DIFF_DST] = inMem->GetPrimitive(); + if (inMemDesc != pd.diff_dst_desc()) { + inputReorders.insert({DNNL_ARG_DIFF_DST, IntermReorder(inMemDesc, pd.diff_dst_desc(), engine)}); } - if (weightMem->GetPrimitive().get_desc() != pd.weights_desc()) { - inputReorders.push_back(IntermReorder(weightMem, pd.weights_desc(), engine)); - primArgs[DNNL_ARG_WEIGHTS] = inputReorders.back().getToMem()->GetPrimitive(); - } else { - primArgs[DNNL_ARG_WEIGHTS] = weightMem->GetPrimitive(); + if (weightMemDesc != pd.weights_desc()) { + inputReorders.insert({DNNL_ARG_WEIGHTS, IntermReorder(weightMemDesc, 
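The primArgs selection above encodes the asymmetry between the two deconvolution paths: the int8 executor runs a true deconvolution_forward (SRC/WEIGHTS/DST), while the default executor runs convolution_backward_data, whose data arguments keep their backward-pass names. A sketch of that mapping; the 0/1/2 port indices are hypothetical, and the DNNL_ARG_* ids come from oneDNN's dnnl_types.h:

    #include <unordered_map>
    #include <dnnl_types.h>   // assumed header for the DNNL_ARG_* ids

    enum class DeconvKind { Int8Forward, BackwardData };

    std::unordered_map<int, int> deconvArgIds(DeconvKind kind) {
        if (kind == DeconvKind::Int8Forward)   // mkldnn::deconvolution_forward
            return {{0, DNNL_ARG_SRC}, {1, DNNL_ARG_WEIGHTS}, {2, DNNL_ARG_DST}};
        // mkldnn::convolution_backward_data: input is DIFF_DST, output is DIFF_SRC
        return {{0, DNNL_ARG_DIFF_DST}, {1, DNNL_ARG_WEIGHTS}, {2, DNNL_ARG_DIFF_SRC}};
    }
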
pd.weights_desc(), engine)}); } - if (outMem->GetPrimitive().get_desc() != pd.diff_src_desc()) { - outputReorders.push_back(IntermReorder(pd.diff_src_desc(), outMem, engine)); - primArgs[DNNL_ARG_DIFF_SRC] = outputReorders.back().getFromMem()->GetPrimitive(); - } else { - primArgs[DNNL_ARG_DIFF_SRC] = outMem->GetPrimitive(); + if (outMemDesc != pd.diff_src_desc()) { + outputReorders.insert({DNNL_ARG_DIFF_SRC, IntermReorder(pd.diff_src_desc(), outMemDesc, engine)}); } - MKLDNNNode::appendPostOpArgs(attr, primArgs, binPostOpsArgs); } MKLDNNDeconvolutionNode::DeconvExecutorInt8::DeconvExecutorInt8(const mkldnn::deconvolution_forward::primitive_desc& pd, - MKLDNNMemoryPtr inMem, - MKLDNNMemoryPtr weightMem, - MKLDNNMemoryPtr outMem, - const mkldnn::primitive_attr &attr, - const std::vector& binPostOpsArgs, + const mkldnn::memory::desc& inMemDesc, + const mkldnn::memory::desc& weightMemDesc, + const mkldnn::memory::desc& outMemDesc, const mkldnn::engine& engine) { execPrim.reset(new mkldnn::deconvolution_forward(pd)); - if (inMem->GetPrimitive().get_desc() != pd.src_desc()) { - inputReorders.push_back(IntermReorder(inMem, pd.src_desc(), engine)); - primArgs[DNNL_ARG_SRC] = inputReorders.back().getToMem()->GetPrimitive(); - } else { - primArgs[DNNL_ARG_SRC] = inMem->GetPrimitive(); + if (inMemDesc != pd.src_desc()) { + inputReorders.insert({DNNL_ARG_SRC, IntermReorder(inMemDesc, pd.src_desc(), engine)}); } - if (weightMem->GetPrimitive().get_desc() != pd.weights_desc()) { - inputReorders.push_back(IntermReorder(weightMem, pd.weights_desc(), engine)); - primArgs[DNNL_ARG_WEIGHTS] = inputReorders.back().getToMem()->GetPrimitive(); - } else { - primArgs[DNNL_ARG_WEIGHTS] = weightMem->GetPrimitive(); + if (weightMemDesc != pd.weights_desc()) { + inputReorders.insert({DNNL_ARG_WEIGHTS, IntermReorder(weightMemDesc, pd.weights_desc(), engine)}); } - if (outMem->GetPrimitive().get_desc() != pd.dst_desc()) { - outputReorders.push_back(IntermReorder(pd.dst_desc(), outMem, engine)); - primArgs[DNNL_ARG_DST] = outputReorders.back().getFromMem()->GetPrimitive(); - } else { - primArgs[DNNL_ARG_DST] = outMem->GetPrimitive(); + if (outMemDesc != pd.dst_desc()) { + outputReorders.insert({DNNL_ARG_DST, IntermReorder(pd.dst_desc(), outMemDesc, engine)}); } - MKLDNNNode::appendPostOpArgs(attr, primArgs, binPostOpsArgs); } std::vector MKLDNNDeconvolutionNode::readOutputSpatialDims() const { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.h index 6af4a3d35ed..9b3bab44676 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.h @@ -9,6 +9,7 @@ #include #include #include +#include "common/dnnl_executor.h" namespace MKLDNNPlugin { @@ -53,56 +54,24 @@ public: std::vector shapeInfer() const override; private: - class DeconvExecutor { - protected: - class IntermReorder { - public: - IntermReorder(MKLDNNMemoryPtr memFrom, const mkldnn::memory::desc& descTo, const mkldnn::engine& engine); - IntermReorder(const mkldnn::memory::desc& descFrom, MKLDNNMemoryPtr memTo, const mkldnn::engine& engine); - MKLDNNMemoryPtr getFromMem() const { return m_memFrom; } - MKLDNNMemoryPtr getToMem() const { return m_memTo; } - void exec(mkldnn::stream strm); - - private: - MKLDNNMemoryPtr m_memFrom; - MKLDNNMemoryPtr m_memTo; - mkldnn::reorder m_reorder; - }; - - public: - void exec(mkldnn::stream strm); - virtual ~DeconvExecutor() = default; - - protected: - 
DeconvExecutor() = default; - std::vector inputReorders; - MKLDNNPrimitive execPrim; - std::vector outputReorders; - std::unordered_map primArgs; - }; - - using executorPtr = std::shared_ptr; + using executorPtr = std::shared_ptr; executorPtr execPtr = nullptr; - class DeconvExecutorDefault : public DeconvExecutor { + class DeconvExecutorDefault : public DnnlExecutor { public: DeconvExecutorDefault(const mkldnn::convolution_backward_data::primitive_desc& pd, - MKLDNNMemoryPtr inMem, - MKLDNNMemoryPtr weightMem, - MKLDNNMemoryPtr outMem, - const mkldnn::primitive_attr &attr, - const std::vector& binPostOpsArgs, + const mkldnn::memory::desc& inMemDesc, + const mkldnn::memory::desc& weightMemDesc, + const mkldnn::memory::desc& outMemDesc, const mkldnn::engine& engine); }; - class DeconvExecutorInt8 : public DeconvExecutor { + class DeconvExecutorInt8 : public DnnlExecutor { public: DeconvExecutorInt8(const mkldnn::deconvolution_forward::primitive_desc& pd, - MKLDNNMemoryPtr inMem, - MKLDNNMemoryPtr weightMem, - MKLDNNMemoryPtr outMem, - const mkldnn::primitive_attr &attr, - const std::vector& binPostOpsArgs, + const mkldnn::memory::desc& inMemDesc, + const mkldnn::memory::desc& weightMemDesc, + const mkldnn::memory::desc& outMemDesc, const mkldnn::engine& engine); }; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.cpp index 3d5e8a24744..2d20f4ee1ea 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.cpp @@ -49,15 +49,7 @@ bool MKLDNNDetectionOutputNode::isSupportedOperation(const std::shared_ptr& op, const mkldnn::engine& eng, +MKLDNNDetectionOutputNode::MKLDNNDetectionOutputNode(const std::shared_ptr& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(op, eng, cache) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -170,6 +162,10 @@ struct ConfidenceComparatorDO { const float* confData; }; +void MKLDNNDetectionOutputNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNDetectionOutputNode::execute(mkldnn::stream strm) { float *dstData = reinterpret_cast(getChildEdgesAtPort(0)[0]->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.h index 41fc5478328..c602e636259 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_detection_output_node.h @@ -16,7 +16,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; @@ -24,7 +23,7 @@ public: protected: void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; private: static const int ID_LOC = 0; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp index f87e32c1476..c046fb968f9 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp @@ -1525,14 +1525,6 @@ void 
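
The Eltwise hunk below is the first of many in this series that delete an identical four-line createPrimitive() override. Since the removed body is the same everywhere, it was presumably hoisted into the MKLDNNNode base class; a toy model of that consolidation, under that assumption (the base class itself is not shown in the patch):

    #include <iostream>

    // Every override deleted below had exactly this body, so one shared
    // default (assumed to live in the node base class) can replace them all.
    struct Node {
        bool inputShapesDefined() const { return true; }
        bool needPrepareParams() const { return true; }
        void prepareParams() { std::cout << "prepareParams\n"; }
        void updateLastInputDims() { std::cout << "remember dims\n"; }

        void createPrimitive() {
            if (inputShapesDefined()) {   // static case: shapes already known
                if (needPrepareParams())
                    prepareParams();      // build the executor once, up front
                updateLastInputDims();    // used to detect later shape changes
            }
        }
    };

    int main() { Node n; n.createPrimitive(); }
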
MKLDNNEltwiseNode::selectOptimalPrimitiveDescriptor() { selectPreferPrimitiveDescriptor(getPrimitivesPriority(), true); } -void MKLDNNEltwiseNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNEltwiseNode::initOptimalPrimitiveDescriptor() { auto selected_pd = getSelectedPrimitiveDescriptor(); if (selected_pd == nullptr) @@ -1683,6 +1675,10 @@ void MKLDNNEltwiseNode::executeReference(const jit_eltwise_params &jep, const ji }); } +void MKLDNNEltwiseNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNEltwiseNode::execute(mkldnn::stream strm) { if (execPtr) { jit_eltwise_call_args_ptrs args_ptrs = {}; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h index b5e7768b52a..ce34a2096fc 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h @@ -90,13 +90,11 @@ public: bool isWithBroadcast(); bool isSpecialConvolutionAddFusing() const { return specialConvolutionAddFusing; } - void createPrimitive() override; - std::vector shapeInfer() const override; bool needPrepareParams() const override; void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; enum BroadcastingPolicy { PerChannel, diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.cpp index 86cb02d45d7..499d9b5b0de 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.cpp @@ -70,14 +70,6 @@ void MKLDNNEmbeddingBagOffsetSumNode::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inDataConfigurators, {{LayoutType::ncsp, inDataPrecision}}, impl_desc_type::ref_any); } -void MKLDNNEmbeddingBagOffsetSumNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNEmbeddingBagOffsetSumNode::prepareParams() { _indicesLen = getParentEdgesAtPort(INDICES_IDX)[0]->getMemory().getStaticDims()[0]; _offsetsLen = getParentEdgesAtPort(OFFSETS_IDX)[0]->getMemory().getStaticDims()[0]; @@ -126,6 +118,14 @@ void MKLDNNEmbeddingBagOffsetSumNode::getIndices(int embIndex, const int*& indic weightsIdx = offsetsData_[embIndex]; } +void MKLDNNEmbeddingBagOffsetSumNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + +bool MKLDNNEmbeddingBagOffsetSumNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0); +} + void MKLDNNEmbeddingBagOffsetSumNode::execute(mkldnn::stream strm) { const auto *srcData = reinterpret_cast(getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); auto *dstData = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.h index 146003c0b41..9fd0e2ff4fe 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_offset_sum_node.h @@ -19,15 +19,15 @@ public: void getSupportedDescriptors() override {}; void 
initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; + bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; private: void initFromInputs() override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.cpp index 9dbaad08d80..bda4ebc7c28 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.cpp @@ -64,14 +64,6 @@ void MKLDNNEmbeddingBagPackedSumNode::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inDataConfigurators, {{LayoutType::ncsp, inDataPrecision}}, impl_desc_type::ref_any); } -void MKLDNNEmbeddingBagPackedSumNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNEmbeddingBagPackedSumNode::prepareParams() { _batch = getParentEdgesAtPort(INDICES_IDX)[0]->getMemory().getStaticDims()[0]; _indicesPerBag = getParentEdgesAtPort(INDICES_IDX)[0]->getMemory().getStaticDims()[1]; @@ -94,6 +86,14 @@ void MKLDNNEmbeddingBagPackedSumNode::getIndices(int embIndex, const int*& indic weightsIdx = embIndex * _indicesPerBag; } +void MKLDNNEmbeddingBagPackedSumNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + +bool MKLDNNEmbeddingBagPackedSumNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0); +} + void MKLDNNEmbeddingBagPackedSumNode::execute(mkldnn::stream strm) { const auto *srcData = reinterpret_cast(getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); auto *dstData = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.h index fb01897debb..fddec417c65 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_bag_packed_sum_node.h @@ -19,15 +19,15 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; + bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; private: void initFromInputs() override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.cpp index 2dca1793df1..58e5de78882 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.cpp @@ -11,14 +11,6 @@ using namespace MKLDNNPlugin; using namespace InferenceEngine; -void 
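
The EmbeddingBag nodes above gain an isExecutable() override that reports false when the first input is an empty tensor, so the graph executor skips the node instead of calling execute() on zero elements. The underlying check is just a zero-dimension test; a self-contained equivalent of what isInputTensorAtPortEmpty() presumably computes:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // A tensor is empty when any dimension is zero; an empty input makes
    // the node non-executable rather than an error.
    bool hasZeroDim(const std::vector<std::size_t>& dims) {
        for (std::size_t d : dims)
            if (d == 0)
                return true;
        return false;
    }

    int main() {
        std::cout << hasZeroDim({8, 0, 16}) << "\n";  // 1: skip the node
        std::cout << hasZeroDim({8, 4, 16}) << "\n";  // 0: execute normally
    }
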
MKLDNNEmbeddingSegmentsSumNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - bool MKLDNNEmbeddingSegmentsSumNode::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { const auto embBagSegSumOp = ngraph::as_type_ptr(op); @@ -129,6 +121,14 @@ void MKLDNNEmbeddingSegmentsSumNode::getIndices(int embIndex, const int*& indice } } +void MKLDNNEmbeddingSegmentsSumNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + +bool MKLDNNEmbeddingSegmentsSumNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0); +} + void MKLDNNEmbeddingSegmentsSumNode::execute(mkldnn::stream strm) { const auto *srcData = reinterpret_cast(getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); auto *dstData = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.h index 942d337d099..31b3f4163e1 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_embedding_segments_sum_node.h @@ -19,15 +19,15 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; + bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; private: void initFromInputs() override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.cpp index 96d8c48be47..96f1c85e738 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.cpp @@ -272,12 +272,6 @@ void MKLDNNExperimentalDetectronDetectionOutputNode::initSupportedPrimitiveDescr impl_desc_type::ref_any); } -void MKLDNNExperimentalDetectronDetectionOutputNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } -} - void MKLDNNExperimentalDetectronDetectionOutputNode::execute(mkldnn::stream strm) { const int rois_num = getParentEdgeAt(INPUT_ROIS)->getMemory().getStaticDims()[0]; assert(classes_num_ == static_cast(getParentEdgeAt(INPUT_SCORES)->getMemory().getStaticDims()[1])); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.h index 3c73bd036bb..eeb9ccb9399 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_detection_output_node.h @@ -15,7 +15,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git 
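
Most executeDynamicImpl() overrides in this series simply forward to execute(). That is safe because of the order the framework presumably applies per inference for a dynamic node: output shapes are inferred and output memory redefined, then prepareParams() re-specializes the node, and only then does the dynamic entry point run. A sketch of that assumed contract, not plugin code:

    #include <iostream>

    // Assumed per-inference sequence for a dynamic-shape node.
    struct DynamicNode {
        void infer() {
            std::cout << "shapeInfer\n";        // output dims from input dims
            std::cout << "redefine outputs\n";  // reallocate output memory
            std::cout << "prepareParams\n";     // rebuild executor if dims changed
            executeDynamicImpl();
        }
        void executeDynamicImpl() { execute(); }  // usually a plain forward
        void execute() { std::cout << "execute\n"; }
    };

    int main() { DynamicNode n; n.infer(); }

The patch also moves these one-line overrides out of the headers into the .cpp files, which keeps the headers stable when the implementations grow, as they do for the NMS nodes below.
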
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.cpp index fc36c163484..88fd2f44abd 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.cpp @@ -320,12 +320,6 @@ void MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::initSupportedP impl_desc_type::ref_any); } -void MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } -} - void MKLDNNExperimentalDetectronGenerateProposalsSingleImageNode::execute(mkldnn::stream strm) { try { if (inputShapes.size() != 4 || outputShapes.size() != 2) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.h index a18f41e5a94..d00ec14601c 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.h @@ -16,7 +16,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.cpp index a30031d9b84..c4d544e0b39 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.cpp @@ -56,12 +56,6 @@ void MKLDNNExperimentalDetectronPriorGridGeneratorNode::initSupportedPrimitiveDe impl_desc_type::ref_any); } -void MKLDNNExperimentalDetectronPriorGridGeneratorNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } -} - void MKLDNNExperimentalDetectronPriorGridGeneratorNode::execute(mkldnn::stream strm) { const int num_priors_ = getParentEdgeAt(INPUT_PRIORS)->getMemory().getStaticDims()[0]; assert(getParentEdgeAt(INPUT_PRIORS)->getMemory().getStaticDims()[1] == 4); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.h index c908add3223..b1a39dade02 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.h @@ -15,7 +15,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_roifeatureextractor_node.h 
b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_roifeatureextractor_node.h index aceba74e694..4ebf1467366 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_roifeatureextractor_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_roifeatureextractor_node.h @@ -15,7 +15,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override {}; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_topkrois_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_topkrois_node.h index 2f66469502e..7cefc63ee8d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_topkrois_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_experimental_detectron_topkrois_node.h @@ -15,7 +15,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override {}; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.cpp index 29faa1dac52..ccd98c5c2ae 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.cpp @@ -327,14 +327,6 @@ MKLDNNExtractImagePatchesNode::MKLDNNExtractImagePatchesNode(const std::shared_p IE_THROW() << errorPrefix << "must have the following attributes with shape {2}: sizes, strides, rates."; } -void MKLDNNExtractImagePatchesNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNExtractImagePatchesNode::prepareParams() { const auto& srcMemPtr0 = getParentEdgeAt(0)->getMemoryPtr(); const auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); @@ -381,7 +373,7 @@ void MKLDNNExtractImagePatchesNode::execute(mkldnn::stream strm) { } void MKLDNNExtractImagePatchesNode::executeDynamicImpl(mkldnn::stream strm) { - return execute(strm); + execute(strm); } void MKLDNNExtractImagePatchesNode::ExtractImagePatchesRefExecutor::executeReference( diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.h index 62df89c9822..d35d9f48d3b 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_extract_image_patches_node.h @@ -45,7 +45,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp index 3597719521b..b658f1b82ab 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp @@ -1387,13 +1387,6 @@ void MKLDNNFakeQuantizeNode::prepareParams() { } } -void MKLDNNFakeQuantizeNode::createPrimitive() { - if 
(inputShapesDefined()) { - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNFakeQuantizeNode::executeReference() { auto &srcMemory = getParentEdgeAt(0)->getMemoryPtr(); auto &dstMemory = getChildEdgeAt(0)->getMemoryPtr(); @@ -1652,6 +1645,10 @@ void MKLDNNFakeQuantizeNode::executeQuantization(const std::unique_ptrgetMemory().getStaticDims(); const auto& dstDims = getChildEdgesAtPort(0)[0]->getMemory().getStaticDims(); @@ -102,6 +94,10 @@ void MKLDNNGatherElementsNode::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } +void MKLDNNGatherElementsNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + template void MKLDNNGatherElementsNode::directExecution() { const auto *srcData = reinterpret_cast(getParentEdgeAt(dataIndex_)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_elements_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_elements_node.h index 632076b08d7..f5d38944795 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_elements_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_elements_node.h @@ -18,14 +18,13 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; void prepareParams() override; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.cpp index ae98c0e74ab..fc72e152713 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.cpp @@ -80,14 +80,6 @@ void MKLDNNGatherNDNode::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void MKLDNNGatherNDNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNGatherNDNode::prepareParams() { auto& srcMemPtr = getParentEdgeAt(GATHERND_DATA)->getMemoryPtr(); auto& idxMemPtr = getParentEdgeAt(GATHERND_INDEXES)->getMemoryPtr(); @@ -227,7 +219,7 @@ void MKLDNNGatherNDNode::GatherNDExecutor::gatherElementwise(const MKLDNNMemoryP }); } -void MKLDNNGatherNDNode::executeDynamicImpl(dnnl::stream strm) { +void MKLDNNGatherNDNode::executeDynamicImpl(mkldnn::stream strm) { execute(strm); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.h index 53661c4d342..29ff88b1466 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_nd_node.h @@ -18,7 +18,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.cpp index 0e44c0e4c3b..706ad37f8dc 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.cpp +++ 
b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.cpp @@ -110,20 +110,12 @@ void MKLDNNGatherNode::prepareParams() { } bool MKLDNNGatherNode::needPrepareParams() const { - bool result = MKLDNNNode::needPrepareParams(); + bool result = inputShapesModified(); if (!isAxisInputConst) result = result || axis != (reinterpret_cast(getParentEdgeAt(GATHER_AXIS)->getMemoryPtr()->GetPtr()))[0]; return result; } -void MKLDNNGatherNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNGatherNode::execute(mkldnn::stream strm) { const int32_t* srcIndexes = reinterpret_cast(getParentEdgeAt(GATHER_INDEXES)->getMemoryPtr()->GetPtr()); const uint8_t* srcData = reinterpret_cast(getParentEdgeAt(GATHER_DATA)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.h index 1477f9325f2..df66ac52578 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_gather_node.h @@ -18,7 +18,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp index d828878bbb3..0f102e6dd28 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp @@ -2075,7 +2075,7 @@ void MKLDNNInterpolateNode::createPrimitive() { interpAttrs.inPrc = srcMemPtr->getDesc().getPrecision(); interpAttrs.outPrc = dstMemPtr->getDesc().getPrecision(); - if (shapesDefined()) { + if (shapesDefined() && isExecutable()) { if (needPrepareParams()) prepareParams(); updateLastInputDims(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.cpp index 9f537b490fc..9ab047473a6 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.cpp @@ -60,14 +60,6 @@ void MKLDNNLogSoftmaxNode::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void MKLDNNLogSoftmaxNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNLogSoftmaxNode::prepareParams() { const auto &dims = getParentEdgesAtPort(0)[0]->getMemory().getStaticDims(); reducedAxisStride = 1; @@ -87,6 +79,10 @@ void MKLDNNLogSoftmaxNode::prepareParams() { reducedAxisStride *= dims[i]; } +void MKLDNNLogSoftmaxNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNLogSoftmaxNode::execute(mkldnn::stream strm) { const float *srcData = reinterpret_cast(getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); float* dstData = reinterpret_cast(getChildEdgesAtPort(0)[0]->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.h index c7fd5aac3c2..b51e72ed0ff 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_log_softmax_node.h @@ -16,12 
+16,11 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp index e1430776725..72bc3e2d61a 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp @@ -110,14 +110,6 @@ std::shared_ptr MKLDNNLrnNode::getSrcMemDesc(mkldnn::primitive_desc_ } } -void MKLDNNLrnNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNLrnNode::prepareParams() { auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); @@ -174,8 +166,8 @@ std::vector MKLDNNLrnNode::shapeInfer() const { return { getParentEdgesAtPort(0).front()->getMemory().getStaticDims() }; } -void MKLDNNLrnNode::executeDynamicImpl(dnnl::stream strm) { - MKLDNNNode::execute(strm); +void MKLDNNLrnNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); } REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, Lrn); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.h index 97ef5163f43..348ecc3ae3a 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.h @@ -23,7 +23,6 @@ public: return static_cast(getOriginalInputsNumber()); } std::shared_ptr getSrcMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) override; - void createPrimitive() override; bool created() const override; bool canBeInPlace() const override { return false; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.cpp index 163e76ca483..b90b14d1423 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.cpp @@ -63,6 +63,10 @@ std::vector MKLDNNMathNode::shapeInfer() const { return std::vector{getParentEdgesAtPort(0)[0]->getMemory().getStaticDims()}; } +void MKLDNNMathNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNMathNode::execute(mkldnn::stream strm) { size_t dataSize = getChildEdgesAtPort(0)[0]->getMemory().GetShape().getElementsCount(); const float *src_data = reinterpret_cast(getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.h index ee2ce5d75b1..c580e7a1a64 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_math_node.h @@ -15,13 +15,12 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override {}; void execute(mkldnn::stream strm) override; bool created() const override; std::vector shapeInfer() const override; bool needPrepareParams() const override { return false; }; - 
void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.cpp index c443eedf2c2..5513a27f0d2 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.cpp @@ -313,14 +313,6 @@ void MKLDNNMatMulNode::initSupportedPrimitiveDescriptors() { } } -void MKLDNNMatMulNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - MemoryDescPtr MKLDNNMatMulNode::getSrcMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) { auto desc = idx > 0 ? primitive_desc_it.weights_desc(idx - 1): primitive_desc_it.src_desc(idx); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.h index ab3abd3a35c..2d7e8c00e60 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matmul_node.h @@ -22,7 +22,6 @@ public: const std::vector& outputDesc) override; void initSupportedPrimitiveDescriptors() override; MemoryDescPtr getSrcMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) override; - void createPrimitive() override; bool canFuse(const MKLDNNNodePtr& node) const override; bool created() const override; size_t getMaxBatch() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.cpp index b01aa731e68..c9668785548 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.cpp @@ -240,13 +240,6 @@ size_t MKLDNNMatrixNmsNode::nmsMatrix(const float* boxesData, const float* score return numDet; } -void MKLDNNMatrixNmsNode::createPrimitive() { - if (inputShapesDefined()) { - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNMatrixNmsNode::prepareParams() { const auto& boxes_dims = getParentEdgeAt(NMS_BOXES)->getMemory().getStaticDims(); const auto& scores_dims = getParentEdgeAt(NMS_SCORES)->getMemory().getStaticDims(); @@ -288,6 +281,23 @@ void MKLDNNMatrixNmsNode::prepareParams() { } } +bool MKLDNNMatrixNmsNode::isExecutable() const { + return isDynamicNode() || MKLDNNNode::isExecutable(); +} + +void MKLDNNMatrixNmsNode::executeDynamicImpl(mkldnn::stream strm) { + if (hasEmptyInputTensors()) { + getChildEdgesAtPort(NMS_SELECTED_OUTPUTS)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_SELECTED_OUTPUTS)->cloneWithNewDims({0, 6})); + getChildEdgesAtPort(NMS_SELECTED_INDICES)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_SELECTED_INDICES)->cloneWithNewDims({0, 1})); + getChildEdgesAtPort(NMS_VALID_OUTPUTS)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_VALID_OUTPUTS)->cloneWithNewDims({0})); + return; + } + execute(strm); +} + void MKLDNNMatrixNmsNode::execute(mkldnn::stream strm) { const float* boxes = reinterpret_cast(getParentEdgeAt(NMS_BOXES)->getMemoryPtr()->GetPtr()); const float* scores = reinterpret_cast(getParentEdgeAt(NMS_SCORES)->getMemoryPtr()->GetPtr()); diff --git 
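
When a dynamic NMS-style node sees an empty input, MKLDNNMatrixNmsNode above (and MKLDNNMultiClassNmsNode below) cannot simply skip execution: downstream nodes still expect well-formed outputs. So executeDynamicImpl() redefines the output descriptors to zero-length shapes, {0, 6} selected outputs, {0, 1} indices, {0} counts, and returns. A stand-in for that redefineDesc/cloneWithNewDims step:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Output { std::vector<std::size_t> dims; };

    // Publish valid zero-row outputs when there is nothing to select.
    void publishEmpty(Output& selected, Output& indices, Output& counts) {
        selected.dims = {0, 6};  // zero rows, six values per detection
        indices.dims  = {0, 1};
        counts.dims   = {0};
    }

    int main() {
        Output sel, idx, num;
        publishEmpty(sel, idx, num);
        std::printf("selected: %zu x %zu\n", sel.dims[0], sel.dims[1]);  // 0 x 6
    }
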
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.h index 338247cf103..add276f3dc6 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_matrix_nms_node.h @@ -27,13 +27,13 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + bool isExecutable() const override; + void executeDynamicImpl(mkldnn::stream strm) override; bool needShapeInfer() const override { return false; } void prepareParams() override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp index 85d13de3829..15d810fbb4d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp @@ -105,13 +105,6 @@ void MKLDNNMultiClassNmsNode::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void MKLDNNMultiClassNmsNode::createPrimitive() { - if (inputShapesDefined()) { - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNMultiClassNmsNode::prepareParams() { const auto& boxes_dims = getParentEdgeAt(NMS_BOXES)->getMemory().getStaticDims(); const auto& scores_dims = getParentEdgeAt(NMS_SCORES)->getMemory().getStaticDims(); @@ -145,6 +138,23 @@ void MKLDNNMultiClassNmsNode::prepareParams() { m_numBoxOffset.resize(m_numBatches); } +bool MKLDNNMultiClassNmsNode::isExecutable() const { + return isDynamicNode() || MKLDNNNode::isExecutable(); +} + +void MKLDNNMultiClassNmsNode::executeDynamicImpl(mkldnn::stream strm) { + if (hasEmptyInputTensors()) { + getChildEdgesAtPort(NMS_SELECTEDOUTPUTS)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_SELECTEDOUTPUTS)->cloneWithNewDims({0, 6})); + getChildEdgesAtPort(NMS_SELECTEDINDICES)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_SELECTEDINDICES)->cloneWithNewDims({0, 1})); + getChildEdgesAtPort(NMS_SELECTEDNUM)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_SELECTEDNUM)->cloneWithNewDims({0})); + return; + } + execute(strm); +} + void MKLDNNMultiClassNmsNode::execute(mkldnn::stream strm) { const float* boxes = reinterpret_cast(getParentEdgeAt(NMS_BOXES)->getMemoryPtr()->GetPtr()); const float* scores = reinterpret_cast(getParentEdgeAt(NMS_SCORES)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.hpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.hpp index 6acf3755001..3c1e928bbf0 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.hpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.hpp @@ -23,13 +23,13 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + bool isExecutable() const override; + void 
executeDynamicImpl(mkldnn::stream strm) override; bool needShapeInfer() const override { return false; } void prepareParams() override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp index e8e913ecb9a..cb6496100eb 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp @@ -849,14 +849,6 @@ void MKLDNNMVNNode::prepareParams() { } } -void MKLDNNMVNNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNMVNNode::transformTo5DCase(const SizeVector& shape) { switch (shape.size()) { // for 1 and 2 rank, if initAcrossChannels_ is true, adjust shape to fully vectorize under unified 5d procedure. @@ -908,6 +900,10 @@ void MKLDNNMVNNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) { attr.set_post_ops(ops); } +void MKLDNNMVNNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNMVNNode::execute(mkldnn::stream strm) { auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); auto &srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h index 0c740ba9fb1..9b4fe9fff14 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h @@ -77,10 +77,9 @@ public: static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; bool created() const override; void execute(mkldnn::stream strm) override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; bool canBeInPlace() const override { return false; } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.cpp index 6bd0e44ae8c..5cb090a6951 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.cpp @@ -147,14 +147,20 @@ void MKLDNNNonMaxSuppressionNode::prepareParams() { i.resize(num_classes); } -void MKLDNNNonMaxSuppressionNode::createPrimitive() { - if (inputShapesDefined()) { - prepareParams(); - updateLastInputDims(); - } +bool MKLDNNNonMaxSuppressionNode::isExecutable() const { + return isDynamicNode() || MKLDNNNode::isExecutable(); } void MKLDNNNonMaxSuppressionNode::executeDynamicImpl(mkldnn::stream strm) { + if (hasEmptyInputTensors() || (inputShapes.size() > NMS_MAXOUTPUTBOXESPERCLASS && + reinterpret_cast(getParentEdgeAt(NMS_MAXOUTPUTBOXESPERCLASS)->getMemoryPtr()->GetPtr())[0] == 0)) { + getChildEdgesAtPort(NMS_SELECTEDINDICES)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_SELECTEDINDICES)->cloneWithNewDims({0, 3})); + getChildEdgesAtPort(NMS_SELECTEDSCORES)[0]->getMemoryPtr()->redefineDesc( + getBaseMemDescAtOutputPort(NMS_SELECTEDSCORES)->cloneWithNewDims({0, 3})); + *reinterpret_cast(getChildEdgesAtPort(NMS_VALIDOUTPUTS)[0]->getMemoryPtr()->GetPtr()) = 0; + return; + } execute(strm); } @@ -168,8 +174,9 @@ void MKLDNNNonMaxSuppressionNode::execute(mkldnn::stream strm) { 
max_output_boxes_per_class = std::min(max_output_boxes_per_class, num_boxes); - if (max_output_boxes_per_class == 0) + if (max_output_boxes_per_class == 0) { return; + } if (inputShapes.size() > NMS_IOUTHRESHOLD) iou_threshold = reinterpret_cast(getParentEdgeAt(NMS_IOUTHRESHOLD)->getMemoryPtr()->GetPtr())[0]; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.h index fd8ecc94d6b..e30ffcdc65d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_max_suppression_node.h @@ -20,7 +20,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; @@ -52,6 +51,7 @@ public: void executeDynamicImpl(mkldnn::stream strm) override; + bool isExecutable() const override; bool needShapeInfer() const override { return false; } void prepareParams() override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.cpp index f6e0624a4ba..5e270ba56cc 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.cpp @@ -85,6 +85,11 @@ struct MKLDNNNonZeroNode::NonZeroExecute { ctx.node.executeSpecified(); } }; + +void MKLDNNNonZeroNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNNonZeroNode::execute(mkldnn::stream strm) { auto inputPrec = getParentEdgesAtPort(0)[0]->getMemory().getDesc().getPrecision(); NonZeroContext ctx = {*this }; @@ -104,6 +109,7 @@ void MKLDNNNonZeroNode::executeSpecified() { Shape inShape = getParentEdgeAt(0)->getMemory().GetShape(); size_t inRank = inShape.getRank(); size_t nonZeroCount = getNonZeroElementsCount(src, inShape); + if (isDynamicNode()) { VectorDims newDims{inRank, nonZeroCount}; dstMemPtr->redefineDesc(getBaseMemDescAtOutputPort(0)->cloneWithNewDims(newDims)); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.h index 2f46042a331..a454a2da268 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_non_zero.h @@ -18,15 +18,11 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override { - if (inputShapesDefined()) - updateLastInputDims(); - }; void execute(mkldnn::stream strm) override; bool created() const override; bool needShapeInfer() const override {return false;}; bool needPrepareParams() const override {return false;}; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp index 90ff314da8a..3e350b39613 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp @@ -854,12 +854,20 @@ void MKLDNNNormalizeL2Node::createPrimitive() { } } +bool MKLDNNNormalizeL2Node::isExecutable() const { + return 
!isInputTensorAtPortEmpty(0); +} + void MKLDNNNormalizeL2Node::prepareParams() { const auto& dims = getParentEdgeAt(DATA)->getMemoryPtr()->getStaticDims(); setPostOps(kernel_attrs, dims, true); execPtr = NormalizeL2Executor::getNormalizeL2Executor(attrs, kernel_attrs, dims); } +void MKLDNNNormalizeL2Node::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNNormalizeL2Node::execute(mkldnn::stream strm) { if (!execPtr) THROW_ERROR << "doesn't have a compiled executor."; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.h index 1086acdcf8c..dd6a44f9c16 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.h @@ -91,7 +91,9 @@ public: std::vector shapeInfer() const override; void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; + + bool isExecutable() const override; private: enum class NormEpsMode { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.cpp index dbd6f0fafc6..f016d80c349 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.cpp @@ -137,6 +137,10 @@ void MKLDNNOneHotNode::one_hot(size_t prefix_size, size_t suffix_size) { }); } +void MKLDNNOneHotNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNOneHotNode::execute(mkldnn::stream strm) { std::size_t prefix_size = 1; auto input_dims = getParentEdgeAt(0)->getMemory().getStaticDims(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.h index 52e44acf026..9d43911d3f2 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_one_hot_node.h @@ -26,7 +26,7 @@ public: bool needShapeInfer() const override; std::vector shapeInfer() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp index db4eb862921..40d80ca409b 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp @@ -227,6 +227,10 @@ void MKLDNNPadNode::createPrimitive() { } } +bool MKLDNNPadNode::isExecutable() const { + return !isOutputTensorAtPortEmpty(0); +} + void MKLDNNPadNode::prepareParams() { execPtr = std::make_shared(attrs, getParentEdgeAt(0)->getMemoryPtr()->GetDescWithType()->getBlockDims(), @@ -237,8 +241,15 @@ MKLDNNPadNode::PadExecutor::PadExecutor(const PadAttrs& attrs, const VectorDims& srcDims, const VectorDims& dstDims) { params.attrs = attrs; - params.srcDims = srcDims; params.dstDims = dstDims; + + zeroInputDimsCase = std::any_of(srcDims.begin(), srcDims.end(), [](size_t dim) { return dim == 0; } ) && + std::none_of(dstDims.begin(), dstDims.end(), [](size_t dim) { return dim == 0; } ); + if (zeroInputDimsCase) { + return; + 
+    }
+
+    params.srcDims = srcDims;
     params.dataSize = attrs.prc.size();

     size_t nDims = params.srcDims.size();
@@ -289,19 +300,23 @@ MKLDNNPadNode::PadExecutor::PadExecutor(const PadAttrs& attrs,
 }

 void MKLDNNPadNode::PadExecutor::exec(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
-    switch (params.attrs.padMode) {
-        case CONSTANT:
-            padConstant(srcMemPtr, dstMemPtr);
-            break;
-        case EDGE:
-            padEdge(srcMemPtr, dstMemPtr);
-            break;
-        case REFLECT:
-            padReflectOrSymmetric(srcMemPtr, dstMemPtr);
-            break;
-        case SYMMETRIC:
-            padReflectOrSymmetric(srcMemPtr, dstMemPtr, true);
-            break;
+    if (zeroInputDimsCase) {
+        padConstant(srcMemPtr, dstMemPtr);
+    } else {
+        switch (params.attrs.padMode) {
+            case CONSTANT:
+                padConstant(srcMemPtr, dstMemPtr);
+                break;
+            case EDGE:
+                padEdge(srcMemPtr, dstMemPtr);
+                break;
+            case REFLECT:
+                padReflectOrSymmetric(srcMemPtr, dstMemPtr);
+                break;
+            case SYMMETRIC:
+                padReflectOrSymmetric(srcMemPtr, dstMemPtr, true);
+                break;
+        }
     }
 }

@@ -335,7 +350,7 @@ static inline void parallel_step(size_t nDims, const VectorDims& dims, VectorDim
 }

 void MKLDNNPadNode::PadExecutor::padConstant(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
-    if (params.attrs.padValue == 0) {
+    if (params.attrs.padValue == 0 && !zeroInputDimsCase) {
         padConstantZero(srcMemPtr, dstMemPtr);
         return;
     }
@@ -351,10 +366,17 @@ template <typename T>
 void MKLDNNPadNode::PadExecutor::padConstantCommon(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
-    const T* srcData = reinterpret_cast<const T*>(srcMemPtr->GetPtr());
     T* dstData = reinterpret_cast<T*>(dstMemPtr->GetPtr());
     const T value = static_cast<T>(params.attrs.padValue);
+    if (zeroInputDimsCase) {
+        const auto workAmount = dstMemPtr->GetDescWithType<BlockedMemoryDesc>()->getPaddedElementsCount();
+        parallel_for(workAmount, [&](size_t i) {
+            dstData[i] = value;
+        });
+        return;
+    }
+    const T* srcData = reinterpret_cast<const T*>(srcMemPtr->GetPtr());
     const size_t beginShift = params.attrs.padsBegin[params.nDimsForWork] * params.shift;
     const size_t copySize = params.srcDims[params.nDimsForWork] * params.shift;
     const size_t endShift = params.attrs.padsEnd[params.nDimsForWork] * params.shift;
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h
index 74189292fd8..d8beaac96cd 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h
@@ -23,6 +23,8 @@ public:

     void prepareParams() override;

+    bool isExecutable() const override;
+
 protected:
     void executeDynamicImpl(mkldnn::stream strm) override;

@@ -71,6 +73,8 @@ private:
         }
     };

+    bool zeroInputDimsCase = false;
+
     struct {
         PadAttrs attrs;
         VectorDims srcDims;
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp
index 0a95c61d86e..2de486d6f8d 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp
@@ -273,12 +273,8 @@ void MKLDNNPoolingNode::prepareParams() {
     primArgs = {{DNNL_ARG_SRC, src}, {DNNL_ARG_DST, dst}};
 }

-void MKLDNNPoolingNode::createPrimitive() {
-    if (inputShapesDefined()) {
-        if (needPrepareParams())
-            prepareParams();
-        updateLastInputDims();
-    }
+void MKLDNNPoolingNode::executeDynamicImpl(mkldnn::stream strm) {
+    execute(strm);
 }

 bool MKLDNNPoolingNode::created() const {
diff --git
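
The Pad changes above handle the one case where an empty input still produces a non-empty output: if some input dimension is zero while no output dimension is, every destination element is padding. Only CONSTANT mode is meaningful then (there are no source elements for EDGE, REFLECT, or SYMMETRIC to mirror), which is why exec() routes the zero-input case straight to padConstant(). Reduced to its core, the fill is:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    // zeroInputDimsCase, simplified: no source elements exist, so the whole
    // destination is the pad value (serial here; the node uses parallel_for).
    template <typename T>
    void fillAllWithPadValue(std::vector<T>& dst, T value) {
        std::fill(dst.begin(), dst.end(), value);
    }

    int main() {
        std::vector<float> dst(2 * 1 * 3);  // e.g. src {2, 0, 3} -> dst {2, 1, 3}
        fillAllWithPadValue(dst, 1.5f);
        std::cout << dst[0] << "\n";  // 1.5
    }

Note the early return after the fill in padConstantCommon(): without it the function would go on to index params.srcDims, which is never populated when zeroInputDimsCase is set, since the executor's constructor returns before filling it.
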
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.h index 75f726d567b..a60423cd036 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.h @@ -22,14 +22,13 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void initDescriptor(const NodeConfig& config) override; - void createPrimitive() override; bool created() const override; bool canBeInPlace() const override { return false; } void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.cpp index 1d3f2ff60d1..7309d578c33 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.cpp @@ -155,6 +155,10 @@ void MKLDNNProposalNode::initSupportedPrimitiveDescriptors() { } } +void MKLDNNProposalNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNProposalNode::execute(mkldnn::stream strm) { try { const float* probabilitiesData = reinterpret_cast(getParentEdgeAt(PROBABILITIES_IN_IDX)->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.h index 0c0c6d443a8..ac6272f8a51 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_proposal_node.h @@ -18,12 +18,11 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override {}; void execute(mkldnn::stream strm) override; bool created() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.cpp index bfc0d63f446..10e9d14c6ba 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.cpp @@ -83,6 +83,10 @@ void MKLDNNRangeNode::initSupportedPrimitiveDescriptors() { } } +void MKLDNNRangeNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNRangeNode::execute(mkldnn::stream strm) { StatusCode retcode = OK; switch (getParentEdgeAt(0)->getMemory().getDesc().getPrecision()) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.h index d7b600e3f27..720c3111729 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_range_node.h @@ -15,15 +15,11 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override { - if (inputShapesDefined()) - updateLastInputDims(); - }; 
void execute(mkldnn::stream strm) override; bool created() const override; bool needPrepareParams() const override {return false;}; bool needShapeInfer() const override {return false;}; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp index 4ccaf709471..aa78c2e9304 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp @@ -1827,6 +1827,10 @@ void MKLDNNReduceNode::initSupportedPrimitiveDescriptors() { } } +bool MKLDNNReduceNode::isExecutable() const { + return !isInputTensorAtPortEmpty(REDUCE_DATA); +} + void MKLDNNReduceNode::prepareParams() { src_dims = getParentEdgesAtPort(REDUCE_DATA)[0]->getMemory().getDesc().getShape().getDims(); std::vector reduce_axes; @@ -1864,6 +1868,9 @@ void MKLDNNReduceNode::prepareParams() { } void MKLDNNReduceNode::createPrimitive() { + if (!isExecutable()) { + return; + } auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); auto &srcMemPtr = getParentEdgeAt(REDUCE_DATA)->getMemoryPtr(); if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) @@ -1919,6 +1926,10 @@ void MKLDNNReduceNode::createPrimitive() { jit_mode = jit_mode && reduce_kernel; } +void MKLDNNReduceNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNReduceNode::execute(mkldnn::stream strm) { auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); auto &srcMemPtr = getParentEdgeAt(REDUCE_DATA)->getMemoryPtr(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.h index 07effa0891f..9e3541a5a1d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.h @@ -90,12 +90,13 @@ public: void createPrimitive() override; bool created() const override; void execute(mkldnn::stream strm) override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; bool canFuse(const MKLDNNNodePtr& node) const override; bool canBeInPlace() const override { return false; } + bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.cpp index 02ccf1b546b..4ebeba5c587 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.cpp @@ -19,6 +19,10 @@ using namespace mkldnn; using namespace MKLDNNPlugin; using namespace InferenceEngine; +bool MKLDNNReorderNode::isExecutable() const { + return MKLDNNNode::isExecutable() && !isOptimized; +} + MKLDNNReorderNode::MKLDNNReorderNode(const std::shared_ptr& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache) : MKLDNNNode(op, eng, w_cache) { IE_THROW() << "Can't create reorder node from ngraph node"; @@ -100,6 +104,10 @@ void MKLDNNReorderNode::createPrimitive() { } } +void MKLDNNReorderNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void 
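
Reorder gets a subtler executability rule above: an optimized reorder is elided completely (its consumer reads the producer's memory in place), so it must report itself non-executable even when its shapes are non-empty, while a regular reorder now also inherits the base-class empty-shape check. As a small model:

    #include <iostream>

    // Stand-in for the MKLDNNReorderNode::isExecutable() logic above.
    struct Reorder {
        bool shapesNonEmpty = true;  // base-class MKLDNNNode::isExecutable()
        bool isOptimized = false;    // elided, zero-copy reorder

        bool isExecutable() const { return shapesNonEmpty && !isOptimized; }
    };

    int main() {
        Reorder r;
        r.isOptimized = true;
        std::cout << r.isExecutable() << "\n";  // 0: the graph executor skips it
    }
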
MKLDNNReorderNode::prepareParams() { if (!isOptimized) { auto &srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); @@ -348,6 +356,10 @@ void MKLDNNReorderNode::reorderData(const MKLDNNMemory &input, const MKLDNNMemor if (!input.getDesc().isDefined() || !output.getDesc().isDefined()) IE_THROW() << "Can't reorder data with dynamic shapes"; + if (input.GetShape().hasZeroDims() || output.GetShape().hasZeroDims()) { + return; + } + if (size != 0) IE_ASSERT(size <= output.GetSize()); if (input.getDesc().isCompatible(output.getDesc())) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.h index 1ccb23768d3..cad52fe3998 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorder_node.h @@ -24,9 +24,7 @@ public: bool created() const override; const std::vector& getPrimitivesPriority() override; - bool isExecutable() const override { - return !isOptimized; - } + bool isExecutable() const override; void createPrimitive() override; @@ -34,7 +32,7 @@ public: void prepareParams() override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; void setDescs(const MemoryDesc& input, const MemoryDesc& output) { this->input = input.clone(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.cpp index 3ebc977bc41..65ce1beae8c 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.cpp @@ -51,6 +51,10 @@ void MKLDNNReorgYoloNode::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } +void MKLDNNReorgYoloNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNReorgYoloNode::execute(mkldnn::stream strm) { const auto *src_data = reinterpret_cast(getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); auto *dst_data = reinterpret_cast(getChildEdgesAtPort(0)[0]->getMemoryPtr()->GetPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.h index c3be7362484..e83dbde7288 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reorg_yolo_node.h @@ -15,13 +15,10 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override {}; void execute(mkldnn::stream strm) override; bool created() const override; bool needPrepareParams() const override { return false; } - void executeDynamicImpl(mkldnn::stream strm) override { - execute(strm); - } + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp index 10a9a07b480..35dd1e74946 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp @@ -135,21 +135,13 @@ void MKLDNNReshapeNode::initSupportedPrimitiveDescriptors() { supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::unknown); } -void MKLDNNReshapeNode::createPrimitive() { - auto& 
dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); - auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); - if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) - IE_THROW() << "Destination memory didn't allocate."; - if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) - IE_THROW() << "Input memory didn't allocate."; - if (getSelectedPrimitiveDescriptor() == nullptr) - IE_THROW() << "Preferable primitive descriptor is not set."; -} - void MKLDNNReshapeNode::executeDynamicImpl(mkldnn::stream strm) { auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); const auto count = srcMemPtr->GetShape().getElementsCount(); + if (count == 0) { + return; + } if (count != dstMemPtr->GetShape().getElementsCount()) IE_THROW() << errorPrefix << " has different elements number in input and output buffers"; cpu_memcpy(dstMemPtr->GetPtr(), srcMemPtr->GetPtr(), count * MKLDNNExtensionUtils::sizeOfDataType(srcMemPtr->GetDataType())); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.h index 86770c13cea..aed9c18cc6b 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.h @@ -19,7 +19,6 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; bool created() const override; bool isExecutable() const override { return isDynamicNode(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp index 91ee9cf58fd..836c2f7656e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp @@ -372,13 +372,7 @@ bool MKLDNNROIAlignNode::needPrepareParams() const { } void MKLDNNROIAlignNode::executeDynamicImpl(mkldnn::stream strm) { - return execute(strm); -} - -void MKLDNNROIAlignNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } + execute(strm); } REG_MKLDNN_PRIM_FOR(MKLDNNROIAlignNode, ROIAlign) diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.h index 31d470d4bca..e8bd963bc80 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.h @@ -19,7 +19,6 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp index 6f314411c9c..ee126838ca6 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp @@ -419,7 +419,7 @@ void MKLDNNROIPoolingNode::createPrimitive() { refParams.dst_data_size = refParams.dst_prc.size(); if (inputShapesDefined()) { - if (needPrepareParams()) + if (needPrepareParams() && isExecutable()) prepareParams(); updateLastInputDims(); } @@ -437,7 +437,7 @@ void MKLDNNROIPoolingNode::execute(mkldnn::stream strm) { } void MKLDNNROIPoolingNode::executeDynamicImpl(mkldnn::stream strm) { - return execute(strm); + execute(strm); 
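The early returns added above for Reshape and Reorder follow one rule: a tensor with a zero dimension holds zero elements, so a copy-like node has nothing to move and must exit before dereferencing data pointers. A condensed sketch of the guard, with an illustrative signature assuming the Memory/Shape helpers used in these hunks:

    // Hedged sketch: names are illustrative only.
    void copyLikeExec(const MKLDNNMemory& src, const MKLDNNMemory& dst) {
        // e.g. shape {2, 0, 5} -> zero elements: nothing to copy
        if (src.GetShape().hasZeroDims() || dst.GetShape().hasZeroDims())
            return;
        // ... cpu_memcpy / oneDNN reorder over non-empty data ...
    }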
} void MKLDNNROIPoolingNode::prepareParams() { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp index 412d3853f1a..6ceed6869ca 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp @@ -35,6 +35,10 @@ bool MKLDNNScatterUpdateNode::isSupportedOperation(const std::shared_ptr& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(op, eng, cache), dataSize(0lu), indicesSize(0lu), axisSize(0lu), dataPrec(Precision::UNSPECIFIED), indicesPrec(Precision::UNSPECIFIED), axisPrec(Precision::UNSPECIFIED) { @@ -211,34 +215,12 @@ void MKLDNNScatterUpdateNode::initSupportedPrimitiveDescriptors() { impl_desc_type::unknown); } -void MKLDNNScatterUpdateNode::createPrimitive() { - auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); - auto &srcMemPtr = getParentEdgeAt(DATA_ID)->getMemoryPtr(); - auto &indicesMemPtr = getParentEdgeAt(INDICES_ID)->getMemoryPtr(); - auto &updateMemPtr = getParentEdgeAt(UPDATE_ID)->getMemoryPtr(); - - if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) - IE_THROW() << errorPrefix << " did not allocate destination memory"; - if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) - IE_THROW() << errorPrefix << " did not allocate input memory"; - if (!indicesMemPtr || !indicesMemPtr->GetPrimitivePtr()) - IE_THROW() << errorPrefix << " did not allocate indices memory"; - if (!updateMemPtr || !updateMemPtr->GetPrimitivePtr()) - IE_THROW() << errorPrefix << " did not allocate update memory"; - if (getSelectedPrimitiveDescriptor() == nullptr) - IE_THROW() << errorPrefix << " did not set preferable primitive descriptor"; - - if (inputShapesDefined()) { - updateLastInputDims(); - } -} - bool MKLDNNScatterUpdateNode::needPrepareParams() const { return false; } void MKLDNNScatterUpdateNode::executeDynamicImpl(mkldnn::stream strm) { - return execute(strm); + execute(strm); } int64_t MKLDNNScatterUpdateNode::getIndicesValue(uint8_t *indices, size_t offset) { @@ -268,8 +250,8 @@ static std::vector getBlockND(const VectorDims& shape) { } void MKLDNNScatterUpdateNode::execute(mkldnn::stream strm) { - auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); auto &srcMemPtr = getParentEdgeAt(DATA_ID)->getMemoryPtr(); + auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); auto &indicesMemPtr = getParentEdgeAt(INDICES_ID)->getMemoryPtr(); auto &updateMemPtr = getParentEdgeAt(UPDATE_ID)->getMemoryPtr(); @@ -354,6 +336,10 @@ void MKLDNNScatterUpdateNode::execute(mkldnn::stream strm) { }); } + if (isInputTensorAtPortEmpty(INDICES_ID)) { + return; + } + switch (scatterUpdateMode) { case ScatterUpdateMode::ScatterUpdate: { scatterUpdate(indicesPtr, updatePtr, axis, dstPtr); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.h index 30b0ca5735d..59ba5054b74 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.h @@ -24,7 +24,6 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; bool created() const override; void execute(mkldnn::stream strm) override; bool canBeInPlace() const override { @@ -34,6 +33,7 @@ public: bool needPrepareParams() const override; void executeDynamicImpl(mkldnn::stream 
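ScatterUpdate is the subtler case among these hunks: the data input can be non-empty while indices and updates are empty, so the output must still receive a copy of the data before the scatter stage is skipped. That appears to be why the new early return sits after the copy loop rather than at the top of execute(). Sketched flow, with assumed helper names standing in for the inline code above:

    // Hedged sketch of the execute() ordering shown above.
    void scatterExecSketch() {
        copyDataInputToOutput();                    // output mirrors the data input first
        if (isInputTensorAtPortEmpty(INDICES_ID))   // empty indices: no elements to scatter
            return;
        dispatchScatterVariant();                   // ScatterUpdate / Elements / ND
    }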
strm) override; + bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.cpp index d907d76b369..3752dae38ca 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.cpp @@ -152,13 +152,6 @@ void MKLDNNSelectNode::prepareParams() { } } -void MKLDNNSelectNode::createPrimitive() { - if (inputShapesDefined()) { - prepareParams(); - updateLastInputDims(); - } -} - void MKLDNNSelectNode::calcOutOffset(VectorDims& offset, const VectorDims& dims) { int k = 1; for (int i = dims.size() - 1; i >= 0; i--) { @@ -200,6 +193,10 @@ void MKLDNNSelectNode::execute_impl() { } } +void MKLDNNSelectNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNSelectNode::execute(mkldnn::stream strm) { const size_t condPrecSize = getParentEdgeAt(CONDITION)->getMemory().getDesc().getPrecision().size(); const size_t inputsPrecSize = getParentEdgeAt(THEN)->getMemory().getDesc().getPrecision().size(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.h index 6602195f122..4312a95bf04 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_select_node.h @@ -18,11 +18,10 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); } + void executeDynamicImpl(mkldnn::stream strm) override; void prepareParams() override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.cpp index 28097fcaf58..20df75a421d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.cpp @@ -57,6 +57,10 @@ void MKLDNNShapeOfNode::initSupportedPrimitiveDescriptors() { } } +bool MKLDNNShapeOfNode::isExecutable() const { + return true; +} + void MKLDNNShapeOfNode::execute(mkldnn::stream strm) { auto inPtr = getParentEdgeAt(0)->getMemoryPtr(); auto outPtr = getChildEdgeAt(0)->getMemoryPtr(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.h index 2a7eb9560e6..0152e3f1bb9 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shapeof.h @@ -18,10 +18,6 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override { - if (inputShapesDefined()) - updateLastInputDims(); - }; void execute(mkldnn::stream strm) override; bool created() const override; bool needPrepareParams() const override {return false;}; @@ -30,6 +26,8 @@ public: return {VectorDims{getParentEdgesAtPort(0)[0]->getMemory().getStaticDims().size()}}; } + bool isExecutable() const override; + static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: diff --git 
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp index b6b68d59221..4909eb8ccc6 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp @@ -114,7 +114,7 @@ void MKLDNNShuffleChannelsNode::createPrimitive() { memoryDesc.hasLayoutType(LayoutType::nCsp8c) ? LayoutType::nCsp8c : memoryDesc.hasLayoutType(LayoutType::nspc) ? LayoutType::nspc : LayoutType::ncsp; - if (inputShapesDefined()) { + if (inputShapesDefined() && isExecutable()) { if (needPrepareParams()) prepareParams(); updateLastInputDims(); @@ -245,6 +245,10 @@ void MKLDNNShuffleChannelsNode::ShuffleChannelsExecutor::exec(const uint8_t* src permuteKernel->execute(srcData, dstData); } +void MKLDNNShuffleChannelsNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNShuffleChannelsNode::execute(mkldnn::stream strm) { if (!execPtr) THROW_SHCH_ERROR << "doesn't have a compiled executor."; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h index dbac77aaa05..91362b9a941 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h @@ -28,7 +28,7 @@ public: void prepareParams() override; protected: - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; private: struct ShuffleChannelsAttributes { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp index 8463344a52d..3732640e09e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp @@ -66,14 +66,6 @@ void MKLDNNSoftMaxNode::getSupportedDescriptors() { } } -void MKLDNNSoftMaxNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - bool MKLDNNSoftMaxNode::created() const { return getType() == Softmax; } @@ -148,8 +140,8 @@ void MKLDNNSoftMaxNode::prepareParams() { primArgs = {{DNNL_ARG_SRC, src}, {DNNL_ARG_DST, dst}}; } -void MKLDNNSoftMaxNode::executeDynamicImpl(dnnl::stream strm) { - MKLDNNNode::execute(strm); +void MKLDNNSoftMaxNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); } std::vector MKLDNNSoftMaxNode::shapeInfer() const { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.h index 0b95aab8123..72372ca3f5e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.h @@ -20,7 +20,6 @@ public: void createDescriptor(const std::vector& inputDesc, const std::vector& outputDesc) override; void getSupportedDescriptors() override; - void createPrimitive() override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.cpp index 85fc3e114d7..83ab528e1b2 100644 --- 
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.cpp @@ -229,6 +229,10 @@ void MKLDNNSpaceToBatchNode::SpaceToBatchKernel() { }); } +void MKLDNNSpaceToBatchNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNSpaceToBatchNode::execute(mkldnn::stream strm) { switch (getParentEdgeAt(0)->getMemory().getDesc().getPrecision().size()) { case 1: SpaceToBatchKernel::value_type>(); break; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.h index 8bbf4f1fb73..33c893b0c94 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_batch_node.h @@ -18,12 +18,11 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override {}; void execute(mkldnn::stream strm) override; bool created() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }; + void executeDynamicImpl(mkldnn::stream strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_depth_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_depth_node.cpp index c0ac5db9468..0d64170c406 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_depth_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_space_to_depth_node.cpp @@ -154,7 +154,7 @@ void MKLDNNSpaceToDepthNode::createPrimitive() { ? LayoutType::nCsp8c : memoryDesc.hasLayoutType(LayoutType::nspc) ? LayoutType::nspc : LayoutType::ncsp; - if (inputShapesDefined()) { + if (inputShapesDefined() && isExecutable()) { if (needPrepareParams()) prepareParams(); updateLastInputDims(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp index 1412d0d5afe..348c1238fe9 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp @@ -194,10 +194,12 @@ void MKLDNNSplitNode::initSupportedPrimitiveDescriptors() { for (size_t i = 0; i < outputShapes.size(); i++) { auto outBlockingDesc = refConfig.outConfs[i].desc->as(); const auto& outBlkDims = outBlockingDesc->getBlockDims(); - const auto& dims = outBlockingDesc->getShape().getStaticDims(); + const auto& shape = outBlockingDesc->getShape(); + const auto& dims = shape.getStaticDims(); config.outConfs[i].inPlace = 0; - config.outConfs[i].desc = std::make_shared(outPrecision, Shape(dims), outBlkDims, order, offset, offsets, strides); + config.outConfs[i].desc = std::make_shared(outPrecision, Shape(dims), outBlkDims, order, offset, offsets, + shape.hasZeroDims() ? 
SizeVector(numOfDim, 0) : strides); } supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::unknown); } @@ -245,16 +247,20 @@ void MKLDNNSplitNode::prepareParams() { dstMemPtrs.clear(); std::vector outDescs; - for (size_t i = 0; i < outputShapes.size(); ++i) { - const auto &outMemPtr = this->getChildEdgesAtPort(i)[0]->getMemoryPtr(); + for (size_t port = 0; port < outputShapes.size(); ++port) { + const auto &outMemPtr = this->getChildEdgesAtPort(port)[0]->getMemoryPtr(); if (!outMemPtr || !outMemPtr->GetPrimitivePtr()) { THROW_ERROR << "has not allocated destination memory"; } + if (outMemPtr->GetShape().hasZeroDims()) { + continue; + } + if (uint8_t* dstData = reinterpret_cast(outMemPtr->GetPtr())) { - dstMemPtrs.push_back(dstData); + dstMemPtrs.emplace_back(port, dstData); } else { - THROW_ERROR << "can't get child edge indx " << i << "data."; + THROW_ERROR << "can't get child edge indx " << port << "data."; } if (!canUseOptimizedNspc2Ncsp) { @@ -268,15 +274,8 @@ void MKLDNNSplitNode::prepareParams() { } } -void MKLDNNSplitNode::createPrimitive() { - if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR << "Preferable primitive descriptor is not set."; - - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } +bool MKLDNNSplitNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0) && !isOptimized(); } void MKLDNNSplitNode::execute(mkldnn::stream strm) { @@ -346,13 +345,16 @@ void MKLDNNSplitNode::initOptimalPrimitiveDescriptor() { for (size_t i = 0; i < outputShapes.size(); i++) { auto oldDesc = config.outConfs[i].desc; auto outBlockingDesc = oldDesc->as(); + const auto& shape = outBlockingDesc->getShape(); + const auto& blkDims = outBlockingDesc->getBlockDims(); config.outConfs[i].desc = std::make_shared(outBlockingDesc->getPrecision(), - outBlockingDesc->getShape(), - outBlockingDesc->getBlockDims(), + shape, + blkDims, outBlockingDesc->getOrder(), firstInBlockingDesc->getOffsetPadding() + offset, firstInBlockingDesc->getOffsetPaddingToData(), - firstInBlockingDesc->getStrides()); + (shape.hasZeroDims() ? 
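Two conventions are introduced here for Split. First, descriptors over zero-dim shapes get all-zero strides (the ternaries above), since no meaningful stride exists for an empty buffer. Second, prepareParams() now skips zero-dim outputs, so a destination's position in dstMemPtrs no longer equals its output port; storing the port next to the pointer, presumably as a std::vector of std::pair<size_t, uint8_t*>, keeps the mapping explicit. A sketch of the consuming loop:

    // Hedged sketch; element type assumed to be std::pair<size_t, uint8_t*>.
    for (size_t i = 0; i < dstMemPtrs.size(); ++i) {
        const size_t port = dstMemPtrs[i].first;   // original output port (zero-dim ports absent)
        uint8_t* dstData = dstMemPtrs[i].second;   // destination data pointer
        const auto dims = getChildEdgesAtPort(port)[0]->getMemory().getStaticDims();
        // ... copy the slice belonging to `port` into dstData ...
    }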
VectorDims(blkDims.size(), 0) : + firstInBlockingDesc->getStrides())); size_t axisSize = 1; for (size_t j = axis; j < outBlockingDesc->getBlockDims().size(); j++) { @@ -497,11 +499,11 @@ void MKLDNNSplitNode::optimizedNspc2Ncsp(size_t MB) { const size_t strideIW = IC*dataSize; const size_t strideOC = DHW * dataSize; - for (size_t i = 0, sIdx = 0; i < outputShapes.size(); i++) { - auto dstData = dstMemPtrs[i]; + for (size_t i = 0, sIdx = 0; i < dstMemPtrs.size(); i++) { + auto dstData = dstMemPtrs[i].second; size_t innerSize = 1; - auto dims = getChildEdgesAtPort(i)[0]->getMemory().getStaticDims(); + auto dims = getChildEdgesAtPort(dstMemPtrs[i].first)[0]->getMemory().getStaticDims(); for (size_t j = axis; j < dims.size(); j++) { innerSize *= dims[j]; @@ -568,14 +570,14 @@ MKLDNNSplitNode::SplitOptimizedExecutor::SplitOptimizedExecutor(BlockedMemoryDes } } -void MKLDNNSplitNode::SplitOptimizedExecutor::exec(const uint8_t* srcData, const std::vector &dstMemPtrs, +void MKLDNNSplitNode::SplitOptimizedExecutor::exec(const uint8_t* srcData, const std::vector> &dstMemPtrs, const Dim origBatch, const Dim perInferBatch) { size_t execCountStrides = countStrides; if (origBatch != perInferBatch) execCountStrides = execCountStrides / origBatch * perInferBatch; parallel_for2d(dstMemPtrs.size(), execCountStrides, [&](size_t i, size_t j) { - uint8_t* dstData = dstMemPtrs[i]; + uint8_t* dstData = dstMemPtrs[i].second; cpu_memcpy(&dstData[j * dataSize[i]], &srcData[srcDataOffsets[i] + j * srcDataStride], diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.h index aef06929cd2..00c0f719bf2 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.h @@ -18,7 +18,6 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void selectOptimalPrimitiveDescriptor() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; @@ -26,9 +25,7 @@ public: void initOptimalPrimitiveDescriptor() override; void setDynamicBatchLim(int lim) override; - bool isExecutable() const override { - return !isOptimized(); - } + bool isExecutable() const override; bool needPrepareParams() const override; void prepareParams() override; @@ -36,7 +33,7 @@ public: private: struct SplitExecutor { - virtual void exec(const uint8_t* srcData, const std::vector &dstMemPtrs, + virtual void exec(const uint8_t* srcData, const std::vector> &dstMemPtrs, const Dim origBatch, const Dim perInferBatch) = 0; virtual ~SplitExecutor() = default; }; @@ -45,7 +42,7 @@ private: struct SplitOptimizedExecutor : public SplitExecutor { public: SplitOptimizedExecutor(BlockedMemoryDescCPtr inDesc, const std::vector &outDescs, const size_t axis); - void exec(const uint8_t* srcData, const std::vector &dstMemPtrs, + void exec(const uint8_t* srcData, const std::vector> &dstMemPtrs, const Dim origBatch, const Dim perInferBatch) override; private: @@ -60,7 +57,7 @@ private: bool canUseOptimizedNspc2Ncsp = false; size_t axis = 1; - std::vector dstMemPtrs; + std::vector> dstMemPtrs; size_t INPUTS_NUM = 2; }; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.cpp index ebfbab56329..2fd04d85146 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.cpp +++ 
b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.cpp @@ -300,7 +300,14 @@ void MKLDNNStridedSliceNode::initSupportedPrimitiveDescriptors() { } } +bool MKLDNNStridedSliceNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0); +} + void MKLDNNStridedSliceNode::createPrimitive() { + if (!isExecutable()) { + return; + } auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); auto& srcMemPtr = getParentEdgeAt(DATA_ID)->getMemoryPtr(); if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) @@ -670,9 +677,8 @@ void MKLDNNStridedSliceNode::StridedSliceExecutor::exec(const uint8_t* srcData, void MKLDNNStridedSliceNode::execute(mkldnn::stream strm) { if (!execPtr) THROW_ERROR << "doesn't have compiled executor!"; - - const uint8_t* srcData = reinterpret_cast(getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); - uint8_t* dstData = reinterpret_cast(getChildEdgeAt(0)->getMemoryPtr()->GetPtr()); + const uint8_t* srcData = reinterpret_cast(getParentEdgeAt(0)->getMemory().GetPtr()); + uint8_t* dstData = reinterpret_cast(getChildEdgeAt(0)->getMemory().GetPtr()); execPtr->exec(srcData, dstData); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.h index 9feadce871d..18e9c757431 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_strided_slice_node.h @@ -24,6 +24,8 @@ public: return false; } + bool isExecutable() const override; + protected: void prepareParams() override; void executeDynamicImpl(mkldnn::stream strm) override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.cpp index 74a1d2911c2..4a035681c25 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.cpp @@ -80,14 +80,6 @@ void MKLDNNTileNode::initSupportedPrimitiveDescriptors() { supportedPrimitiveDescriptors = getSupportedConfigs(this); } -void MKLDNNTileNode::createPrimitive() { - if (inputShapesDefined()) { - if (needPrepareParams()) - prepareParams(); - updateLastInputDims(); - } -} - bool MKLDNNTileNode::needPrepareParams() const { return needPrepareParamsVar; } @@ -150,6 +142,10 @@ std::vector MKLDNNTileNode::shapeInfer() const { return newOutputShapes; } +void MKLDNNTileNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + void MKLDNNTileNode::execute(mkldnn::stream strm) { if (optimizedCase) { optimizedExecute(getParentEdgeAt(TILE_INPUT)->getMemoryPtr(), getChildEdgeAt(0)->getMemoryPtr()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.h index b973a3a27c9..3af3a0b9c0f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_tile_node.h @@ -16,11 +16,8 @@ public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; - void executeDynamicImpl(mkldnn::stream strm) override { - execute(strm); - } + void executeDynamicImpl(mkldnn::stream strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.cpp 
b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.cpp index c1d200f79c3..14746d819a4 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.cpp @@ -158,13 +158,7 @@ bool MKLDNNTopKNode::needPrepareParams() const { } void MKLDNNTopKNode::executeDynamicImpl(mkldnn::stream strm) { - return execute(strm); -} - -void MKLDNNTopKNode::createPrimitive() { - if (inputShapesDefined()) { - updateLastInputDims(); - } + execute(strm); } bool MKLDNNTopKNode::needShapeInfer() const { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.h index bd2a72824cc..7d288c4fb8b 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_topk_node.h @@ -17,7 +17,6 @@ public: void getSupportedDescriptors() override {}; void initSupportedPrimitiveDescriptors() override; - void createPrimitive() override; void execute(mkldnn::stream strm) override; bool created() const override; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp index 93a2ab8f299..d11261fd55d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.cpp @@ -106,10 +106,14 @@ void MKLDNNTransposeNode::initSupportedPrimitiveDescriptors() { } } +bool MKLDNNTransposeNode::isExecutable() const { + return !isInputTensorAtPortEmpty(0); +} + bool MKLDNNTransposeNode::needPrepareParams() const { if (isOptimized) return false; - return MKLDNNNode::needPrepareParams(); + return inputShapesModified(); } void MKLDNNTransposeNode::prepareParams() { @@ -151,7 +155,7 @@ void MKLDNNTransposeNode::createPrimitive() { auto dstDesc = getChildEdgeAt(0)->getMemory().GetDescWithType(); params.dst_block_order = dstDesc->getOrder(); - if (inputShapesDefined()) { + if (inputShapesDefined() && isExecutable()) { prepareParams(); updateLastInputDims(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h index 0b45d826159..a5af65e8131 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_transpose_node.h @@ -31,6 +31,7 @@ public: return order; } + bool isExecutable() const override; bool needPrepareParams() const override; void prepareParams() override; diff --git a/src/bindings/python/tests/__init__.py b/src/bindings/python/tests/__init__.py index f8522ff5f7d..08eb6fdd7b5 100644 --- a/src/bindings/python/tests/__init__.py +++ b/src/bindings/python/tests/__init__.py @@ -51,12 +51,6 @@ xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is no xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable") xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted " "to HardSigmoid_IE") -xfail_issue_38084 = xfail_test(reason="RuntimeError: AssertionFailed: layer->get_output_partial_shape(i)." - "is_static() nGraph operation with name: cannot be " - "converted to layer with name: because output " - "with index 0 contains dynamic shapes: {}. 
Try to use " - "CNNNetwork::reshape() method in order to specialize shapes " - "before the conversion.") xfail_issue_38091 = xfail_test(reason="AssertionError: Mismatched elements") xfail_issue_38699 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " "ai.onnx.preview.training.Gradient") diff --git a/src/bindings/python/tests/test_onnx/test_backend.py b/src/bindings/python/tests/test_onnx/test_backend.py index 7385c5057ef..1f7ab1d76c4 100644 --- a/src/bindings/python/tests/test_onnx/test_backend.py +++ b/src/bindings/python/tests/test_onnx/test_backend.py @@ -202,7 +202,6 @@ tests_expected_to_fail = [ ( xfail_issue_38706, "OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu", - "OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu", ), ( xfail_issue_33538, diff --git a/src/bindings/python/tests/test_onnx/test_zoo_models.py b/src/bindings/python/tests/test_onnx/test_zoo_models.py index ac455d76168..9530e73be2e 100644 --- a/src/bindings/python/tests/test_onnx/test_zoo_models.py +++ b/src/bindings/python/tests/test_onnx/test_zoo_models.py @@ -15,7 +15,6 @@ from tests import ( xfail_issue_38701, xfail_issue_45457, xfail_issue_37957, - xfail_issue_38084, xfail_issue_39669, xfail_issue_37973, xfail_issue_47430, @@ -174,8 +173,6 @@ if len(zoo_models) > 0: # ONNX Model Zoo (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_encoder_12_t5_encoder_cpu"), (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_decoder_with_lm_head_12_t5_decoder_with_lm_head_cpu"), - (xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_mask_rcnn_model_MaskRCNN_10_mask_rcnn_R_50_FPN_1x_cpu"), - (xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"), (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet50_11_fcn_resnet50_11_model_cpu"), (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet101_11_fcn_resnet101_11_model_cpu"), (xfail_issue_48145, "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_8_download_sample_8_bertsquad8_cpu"), @@ -193,9 +190,6 @@ if len(zoo_models) > 0: (xfail_issue_58676, "test_MSFT_opset7_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"), (xfail_issue_58676, "test_MSFT_opset8_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"), - (xfail_issue_38084, "test_MSFT_opset10_mask_rcnn_mask_rcnn_R_50_FPN_1x_cpu"), - (xfail_issue_38084, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"), - (xfail_issue_39669, "test_MSFT_opset9_cgan_cgan_cpu"), (xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"), (xfail_issue_45457, "test_MSFT_opset10_mlperf_ssd_resnet34_1200_ssd_resnet34_mAP_20.2_cpu"), diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py index 3da0078c75b..58d36643606 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -194,7 +194,6 @@ tests_expected_to_fail = [ ( xfail_issue_38706, "OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu", - "OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu", ), ( xfail_issue_33538, diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py 
b/src/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py index 6f7f90b2970..f24c16e5a28 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py @@ -162,7 +162,6 @@ if len(zoo_models) > 0: # ONNX Model Zoo (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_encoder_12_t5_encoder_cpu"), (xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_mask_rcnn_model_MaskRCNN_10_mask_rcnn_R_50_FPN_1x_cpu"), - (xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"), (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet50_11_fcn_resnet50_11_model_cpu"), (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet101_11_fcn_resnet101_11_model_cpu"), (xfail_issue_48145, "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_8_download_sample_8_bertsquad8_cpu"), @@ -181,7 +180,6 @@ if len(zoo_models) > 0: (xfail_issue_58676, "test_MSFT_opset8_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"), (xfail_issue_38084, "test_MSFT_opset10_mask_rcnn_mask_rcnn_R_50_FPN_1x_cpu"), - (xfail_issue_38084, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"), (xfail_issue_39669, "test_MSFT_opset9_cgan_cgan_cpu"), (xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"), diff --git a/src/core/src/op/topk.cpp b/src/core/src/op/topk.cpp index ef7b46f7290..ad95d9703df 100644 --- a/src/core/src/op/topk.cpp +++ b/src/core/src/op/topk.cpp @@ -319,8 +319,8 @@ size_t op::v1::TopK::validate_and_get_k(const shared_ptr& k_co " elements)."); NODE_VALIDATION_CHECK(this, - k_const_contents[0] > 0, - "The value of 'K' must be a positive number.", + k_const_contents[0] >= 0, + "The value of 'K' must be more or equal zero.", " (got ", k_const_contents[0], ")."); diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp index b6857ff3671..d661cd45182 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp @@ -169,6 +169,8 @@ std::vector disabledTestPatterns() { R"(smoke_ConversionLayerTest/ConversionLayerTest.CompareWithRefs.*BIN.*)", R"(smoke_ConversionLayerTest/ConversionLayerTest.CompareWithRefs.*CUSTOM.*)", R"(smoke_ConversionLayerTest/ConversionLayerTest.CompareWithRefs.*UNSPECIFIED.*)", + // Issue: + R"(.*smoke_VariadicSplit4D_CPU_zero_dims.*)", }; #define FIX_62820 0 diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/concat.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/concat.cpp index d7281975f06..81094e09f1b 100644 --- a/src/tests/functional/plugin/cpu/single_layer_tests/concat.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/concat.cpp @@ -49,7 +49,21 @@ public: return result.str(); } + void compare(const std::vector &expected, const std::vector &actual) override { + if (actual.front().get_size() == 0) { + ASSERT_EQ(0, expected.front().get_size()); + for (const auto& shape : targetStaticShapes[inferNum]) { + ASSERT_EQ(shape_size(shape), 0); + } + } else { + SubgraphBaseTest::compare(expected, actual); + } + inferNum++; + } + protected: + size_t inferNum = 0; + void SetUp() override { targetDevice = 
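The core-side change above relaxes TopK validation from K > 0 to K >= 0, which is what lets the plugin paths in this patch see legitimately empty outputs: TopK's output shape replaces the axis dimension with K. A worked example with hypothetical shapes:

    // With the relaxed check, K = 0 simply collapses the axis dimension:
    // input {3, 10}, axis = 1, k = 0  ->  values {3, 0}, indices {3, 0}
    // Previously this configuration failed validation instead of producing empty tensors.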
CommonTestUtils::DEVICE_CPU; @@ -77,7 +91,7 @@ TEST_P(ConcatLayerCPUTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED() run(); -// CheckPluginRelatedresult(executableNetwork, "Concatenation"); + CheckPluginRelatedResults(executableNetwork, "Concatenation"); } namespace { @@ -150,9 +164,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_1, ConcatLayerCPU const std::vector> inputShapes4D_axis1 = { { - {{-1, -1, -1, -1}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}}, - {{-1, -1, -1, -1}, {{2, 16, 5, 7}, {1, 5, 10, 2}, {3, 3, 1, 8}}}, - {{-1, -1, -1, -1}, {{2, 64, 5, 7}, {1, 45, 10, 2}, {3, 1, 1, 8}}} + {{-1, -1, -1, -1}, {{2, 32, 0, 7}, {2, 32, 5, 7}, {2, 32, 5, 7}, {1, 18, 10, 2}, {2, 32, 5, 7}, {3, 8, 1, 8}, {2, 0, 5, 7}}}, + {{-1, -1, -1, -1}, {{2, 16, 0, 7}, {2, 16, 5, 7}, {2, 16, 5, 7}, {1, 5, 10, 2}, {2, 0, 5, 7}, {3, 3, 1, 8}, {2, 16, 5, 7}}}, + {{-1, -1, -1, -1}, {{2, 64, 0, 7}, {2, 64, 5, 7}, {2, 0, 5, 7}, {1, 45, 10, 2}, {2, 64, 5, 7}, {3, 1, 1, 8}, {2, 64, 5, 7}}} }, { {{{1, 3}, {8, 32}, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}}, @@ -447,10 +461,13 @@ INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_4, ConcatLayerCPUTest, ::testing::Values(planar_5D_ref, planarChannels_5D)), ConcatLayerCPUTest::getTestCaseName); -const std::vector> inputShapes_byBatch = { +const std::vector> inputShapes_byBatch_static = { static_shapes_to_test_representation({{5, 2, 2, 2}, {2, 2, 2, 2}}), static_shapes_to_test_representation({{1, 3, 5}, {3, 3, 5}}), - static_shapes_to_test_representation({{4, 3, 2}, {1, 3, 2}}), + static_shapes_to_test_representation({{4, 3, 2}, {1, 3, 2}}) +}; + +const std::vector> inputShapes_byBatch_dynamic = { // 5D { {{-1, -1, -1, -1, -1}, {{10, 32, 4, 5, 5}, {4, 7, 1, 1, 3}, {3, 20, 7, 9, 1}, }}, @@ -475,14 +492,22 @@ const std::vector> inputShapes_byBatch = { } }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch, ConcatLayerCPUTest, +INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch_static, ConcatLayerCPUTest, ::testing::Combine( ::testing::Values(0), - ::testing::ValuesIn(inputShapes_byBatch), + ::testing::ValuesIn(inputShapes_byBatch_static), ::testing::ValuesIn(netPrecisions), ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch_dynamic, ConcatLayerCPUTest, + ::testing::Combine( + ::testing::Values(0), + ::testing::ValuesIn(inputShapes_byBatch_dynamic), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + ConcatLayerCPUTest::getTestCaseName); + const std::vector> inputShapes3D_axis1 = { static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}), { @@ -549,11 +574,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_Concat_2D_axis1, ConcatLayerCPUTest, ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), ConcatLayerCPUTest::getTestCaseName); -const std::vector> inputShapes1D = { +const std::vector> inputShapes1D_static = { static_shapes_to_test_representation({ov::Shape{5}, ov::Shape{5}}), static_shapes_to_test_representation({ov::Shape{2}, ov::Shape{2}}), static_shapes_to_test_representation({ov::Shape{1}, ov::Shape{1}}), - static_shapes_to_test_representation({ov::Shape{3}, ov::Shape{3}}), + static_shapes_to_test_representation({ov::Shape{3}, ov::Shape{3}}) +}; + +const std::vector> inputShapes1D_dynamic = { { {{-1}, {{19}, {8}, {5}}}, {{-1}, {{19}, {8}, {5}}}, @@ -566,14 +594,22 @@ const std::vector> inputShapes1D = { }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D, ConcatLayerCPUTest, 
+INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D_static, ConcatLayerCPUTest, ::testing::Combine( ::testing::Values(0), - ::testing::ValuesIn(inputShapes1D), + ::testing::ValuesIn(inputShapes1D_static), ::testing::ValuesIn(netPrecisions), ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D_dynamic, ConcatLayerCPUTest, + ::testing::Combine( + ::testing::Values(0), + ::testing::ValuesIn(inputShapes1D_dynamic), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + ConcatLayerCPUTest::getTestCaseName); + // ============================================== inPlace cases ============================================ INSTANTIATE_TEST_SUITE_P(concat_Concat4D_CPU_Block8inPlace, ConcatLayerCPUTest, ::testing::Combine( diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp index e71139b48db..add0d6d08ae 100755 --- a/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp @@ -974,6 +974,43 @@ INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_BF16_dilated, ConvolutionLayerCP ::testing::Values(cpuEmptyPluginConfig)), ConvolutionLayerCPUTest::getTestCaseName); +/* ============= Reorder + Convolution ============= */ +const auto convParams_Reorder_2D = ::testing::Combine( + ::testing::Values(SizeVector{1, 1}), + ::testing::Values(SizeVector{2, 2}), + ::testing::Values(std::vector{0, 0}), + ::testing::Values(std::vector{0, 0}), + ::testing::Values(SizeVector{1, 1}), + ::testing::Values(64), + ::testing::Values(ngraph::op::PadType::EXPLICIT) +); + +std::vector inputShapes_Reorder_2D = { + { + // dynamic shape + { -1, 32, -1, -1 }, + // target static shapes + { + { 1, 32, 39, 40 }, + { 2, 32, 20, 20} + } + } +}; + +INSTANTIATE_TEST_SUITE_P(smoke_reorder_Conv_2D, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_Reorder_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_Reorder_2D), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_1x1})), + ::testing::Values(emptyFusingSpec), + ::testing::Values(cpuEmptyPluginConfig)), + ConvolutionLayerCPUTest::getTestCaseName); + /* ============= Convolution (3D) ============= */ const auto convParams_ExplicitPadding_3D = ::testing::Combine( ::testing::ValuesIn(kernels3d), diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/nonzero.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/nonzero.cpp index a0f9f609ce1..eb4853a54d8 100644 --- a/src/tests/functional/plugin/cpu/single_layer_tests/nonzero.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/nonzero.cpp @@ -7,6 +7,7 @@ #include "ngraph_functions/builders.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "functional_test_utils/ov_tensor_utils.hpp" using namespace InferenceEngine; using namespace CPUTestUtils; @@ -21,6 +22,7 @@ typedef std::tuple< typedef std::tuple< NonZeroLayerTestParams, + std::pair, // start from, range CPUSpecificParams> NonZeroLayerCPUTestParamsSet; class NonZeroLayerCPUTest : public testing::WithParamInterface, @@ -28,8 +30,9 @@ class NonZeroLayerCPUTest : public testing::WithParamInterface obj) { NonZeroLayerTestParams basicParamsSet; + std::pair 
genData; CPUSpecificParams cpuParams; - std::tie(basicParamsSet, cpuParams) = obj.param; + std::tie(basicParamsSet, genData, cpuParams) = obj.param; std::string td; ElementType netType = ElementType::undefined; InputShape inputShape; @@ -44,28 +47,55 @@ public: result << CommonTestUtils::vec2str(shape) << "_"; } result << ")_"; + result << "StartFrom=" << genData.first << "_"; + result << "Range=" << genData.second << "_"; result << "netPRC=" << netType; result << CPUTestsBase::getTestCaseName(cpuParams); return result.str(); } + + void generate_inputs(const std::vector& targetInputStaticShapes) override { + inputs.clear(); + const auto& funcInputs = function->inputs(); + for (int i = 0; i < funcInputs.size(); ++i) { + const auto& funcInput = funcInputs[i]; + ov::runtime::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], range, startFrom); + inputs.insert({funcInput.get_node_shared_ptr(), tensor}); + } + } + + void compare(const std::vector &expected, const std::vector &actual) override { + const auto dims = targetStaticShapes[inferNum].front(); + if (!((startFrom == 0 && range == 1) || std::any_of(dims.begin(), dims.end(), [](size_t dim) { return dim == 0; } ))) { + SubgraphBaseTest::compare(expected, actual); + } + inferNum++; + } + protected: + size_t startFrom = 0, range = 10; + size_t inferNum = 0; + void SetUp() override { targetDevice = CommonTestUtils::DEVICE_CPU; NonZeroLayerTestParams basicParamsSet; + std::pair genData; CPUSpecificParams cpuParams; - std::tie(basicParamsSet, cpuParams) = this->GetParam(); + std::tie(basicParamsSet, genData, cpuParams) = this->GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; ElementType netType = ElementType::undefined; InputShape inputShape; std::tie(inputShape, netType) = basicParamsSet; + std::tie(startFrom, range) = genData; + init_input_shapes({inputShape}); auto inputParams = ngraph::builder::makeDynamicParams(netType, inputDynamicShapes); auto nonZero = std::make_shared(inputParams[0]); // I8 was used as a special placeholder during calculating of primitive type if input was U8, // real runtime precision is still U8 - selectedType = makeSelectedTypeStr("ref_", netType == ElementType::u8 ? ElementType::i8 : netType); + selectedType = makeSelectedTypeStr("ref", netType == ElementType::u8 ? 
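In the NonZero test, the new genData pair parameterizes input generation: presumably create_and_fill_tensor draws values from a window of width `range` starting at `startFrom`, so {0, 10} yields a mix of zero and non-zero entries while {0, 1} makes the tensor constant zero. The compare override then skips element-wise checking exactly for the degenerate cases, an all-zero input or a zero target dimension, because the property under test there is the empty {rank, 0} output shape rather than the values. Illustration under that assumption:

    // {startFrom = 0, range = 10}: mixed zeros/non-zeros -> compared against the reference
    // {startFrom = 0, range = 1}:  all elements == 0     -> NonZero yields a {rank, 0} result
    // target shape {5, 0, 2}:      zero dimension        -> likewise skipped by compare()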
ElementType::i8 : netType); inputParams[0]->set_friendly_name("input"); function = makeNgraphFunction(netType, inputParams, nonZero, "NonZero"); } @@ -92,6 +122,11 @@ const std::vector netPrecisions = { ElementType::u8 }; +const std::vector> genData = { + {0, 10}, + {0, 1} +}; + std::vector inShapesDynamic = { { //dynamic shape @@ -116,6 +151,7 @@ std::vector inShapesDynamic = { {-1, -1, -1}, { //target static shapes {4, 4, 100}, + {5, 0, 2}, {4, 4, 200}, {4, 4, 300} } @@ -126,6 +162,7 @@ std::vector inShapesDynamic = { { //target static shapes {4, 4, 4, 100}, {4, 4, 4, 200}, + {5, 0, 0, 2}, {4, 4, 4, 300} } }, @@ -160,16 +197,20 @@ const auto paramsStatic = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic)), ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(genData), ::testing::ValuesIn(filterCPUInfoForDevice())); const auto paramsDynamic = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapesDynamic), ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(genData), ::testing::ValuesIn(filterCPUInfoForDevice())); INSTANTIATE_TEST_SUITE_P(smoke_NonZeroStaticCPUTest, NonZeroLayerCPUTest, paramsStatic, NonZeroLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_NonZeroDynamicCPUTest, NonZeroLayerCPUTest, paramsDynamic, NonZeroLayerCPUTest::getTestCaseName); + } // namespace + } // namespace CPULayerTestsDefinitions diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/variadic_split.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/variadic_split.cpp index d7395f31597..99d3a9a141b 100644 --- a/src/tests/functional/plugin/cpu/single_layer_tests/variadic_split.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/variadic_split.cpp @@ -78,7 +78,7 @@ TEST_P(VariadicSplitLayerCPUTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED() run(); -// CheckPluginRelatedResults(executableNetwork, "Split"); + CheckPluginRelatedResults(executableNetwork, "Split"); } namespace { @@ -180,8 +180,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Nspc2NcspSpecial, VariadicSpl ::testing::Values(perChannelsToPlanar_5D)), VariadicSplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_planar_static, VariadicSplitLayerCPUTest, + ::testing::Combine( + ::testing::Values(InputShape{ {}, {{3, 6, 5, 6}} }), + ::testing::Values(2, 3), + ::testing::Values(std::vector{1, 3, -1}), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_4D, planar_4D_ref, perChannels_4D)), + VariadicSplitLayerCPUTest::getTestCaseName); + const std::vector inputShapes4D_planar = { - { {}, {{3, 6, 5, 6}} }, { // dynamic {-1, -1, -1, -1}, @@ -210,7 +218,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_planar, VariadicSplitLayerCPU ::testing::Values(2, 3), ::testing::Values(std::vector{1, 3, -1}), ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_4D, planar_4D_ref, perChannels_4D)), + ::testing::Values(planar_4D_ref, perChannels_4D)), VariadicSplitLayerCPUTest::getTestCaseName); const std::vector inputShapes4D_block = { @@ -255,8 +263,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_Block16, VariadicSplitLayerCP ::testing::Values(blocked16_4D_ref)), VariadicSplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_planar_static, VariadicSplitLayerCPUTest, + ::testing::Combine( + ::testing::Values(InputShape{ {}, {{3, 24, 4, 5, 6}} }), + ::testing::Values(2, 3, 4), + ::testing::Values(std::vector{2, 1, -1}), + 
::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_5D, planar_5D_ref, perChannels_5D)), + VariadicSplitLayerCPUTest::getTestCaseName); + const std::vector inputShapes5D_planar = { - { {}, {{3, 24, 4, 5, 6}} }, { // dynamic {-1, -1, -1, -1, -1}, @@ -285,7 +301,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_planar, VariadicSplitLayerCPU ::testing::Values(2, 3, 4), ::testing::Values(std::vector{2, 1, -1}), ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_5D, planar_5D_ref, perChannels_5D)), + ::testing::Values(planar_5D_ref, perChannels_5D)), VariadicSplitLayerCPUTest::getTestCaseName); const std::vector inputShapes5D_block = { @@ -330,8 +346,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Block16, VariadicSplitLayerCP ::testing::Values(blocked16_5D_ref)), VariadicSplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit3D_static, VariadicSplitLayerCPUTest, + ::testing::Combine( + ::testing::Values(InputShape{ {}, {{14, 7, 21}} }), + ::testing::Values(0, 1, 2), + ::testing::Values(std::vector{2, 4, -1}), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"}, CPUSpecificParams{{}, {}, {"ref"}, "ref"})), + VariadicSplitLayerCPUTest::getTestCaseName); + const std::vector inputShapes3D = { - { {}, {{14, 7, 21}} }, { // dynamic {-1, -1, -1}, @@ -360,11 +384,19 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit3D, VariadicSplitLayerCPUTest, ::testing::Values(0, 1, 2), ::testing::Values(std::vector{2, 4, -1}), ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {"ref"}, "ref"})), + VariadicSplitLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit2D_static, VariadicSplitLayerCPUTest, + ::testing::Combine( + ::testing::Values(InputShape{ {}, {{6, 12}} }), + ::testing::Values(0, 1), + ::testing::Values(std::vector{2, -1}), + ::testing::ValuesIn(netPrecisions), ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"}, CPUSpecificParams{{}, {}, {"ref"}, "ref"})), VariadicSplitLayerCPUTest::getTestCaseName); const std::vector inputShapes2D = { - { {}, {{6, 12}} }, { // dynamic {-1, -1}, @@ -393,11 +425,19 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit2D, VariadicSplitLayerCPUTest, ::testing::Values(0, 1), ::testing::Values(std::vector{2, -1}), ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {"ref"}, "ref"})), + VariadicSplitLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit1D_static, VariadicSplitLayerCPUTest, + ::testing::Combine( + ::testing::Values(InputShape{ {}, {{10}} }), + ::testing::Values(0), + ::testing::Values(std::vector{2, 1, 1, -1}), + ::testing::ValuesIn(netPrecisions), ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"}, CPUSpecificParams{{}, {}, {"ref"}, "ref"})), VariadicSplitLayerCPUTest::getTestCaseName); const std::vector inputShapes1D = { - { {}, {{10}} }, { // dynamic {-1}, @@ -426,7 +466,37 @@ INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit1D, VariadicSplitLayerCPUTest, ::testing::Values(0), ::testing::Values(std::vector{2, 1, 1, -1}), ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"}, CPUSpecificParams{{}, {}, {"ref"}, "ref"})), + ::testing::Values(CPUSpecificParams{{}, {}, {"ref"}, "ref"})), + VariadicSplitLayerCPUTest::getTestCaseName); + +const std::vector inputShapes4D_zero_dims = { + { + // dynamic + {-1, -1, -1, -1}, + // target + { + {1, 7, 7, 7}, + {3, 7, 7, 7}, + } + } +}; + 
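These shapes are what make the new suites "zero_dims" cases: with an axis extent of 7 and split lengths {3, 4, -1} in any order, the -1 slot resolves to 7 - 3 - 4 = 0, so one output port always receives an empty tensor. For example:

    // axis = 1, input {1, 7, 7, 7}, splitLengths = {3, -1, 4}
    // -1 resolves to 7 - 3 - 4 = 0
    // outputs: {1, 3, 7, 7}, {1, 0, 7, 7}, {1, 4, 7, 7}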
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_zero_dims, VariadicSplitLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inputShapes4D_zero_dims),
+                                ::testing::Values(1, 2, 3),
+                                ::testing::Values(std::vector<int>{3, 4, -1}, std::vector<int>{3, -1, 4}, std::vector<int>{-1, 3, 4}),
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(planar_4D_ref)),
+                        VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_zero_dims_nspc_ncsp, VariadicSplitLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inputShapes4D_zero_dims),
+                                ::testing::Values(1),
+                                ::testing::Values(std::vector<int>{3, 4, -1}, std::vector<int>{3, -1, 4}, std::vector<int>{-1, 3, 4}),
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(perChannelsToPlanar_4D)),
                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 } // namespace
diff --git a/src/tests/functional/plugin/cpu/subgraph_tests/src/static_zero_dims.cpp b/src/tests/functional/plugin/cpu/subgraph_tests/src/static_zero_dims.cpp
new file mode 100644
index 00000000000..1082dfa8a2f
--- /dev/null
+++ b/src/tests/functional/plugin/cpu/subgraph_tests/src/static_zero_dims.cpp
@@ -0,0 +1,62 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "functional_test_utils/ov_tensor_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+
+using namespace ov::test;
+
+namespace SubgraphTestsDefinitions {
+
+class StaticZeroDims : public SubgraphBaseTest {
+protected:
+    void SetUp() override {
+        targetDevice = CommonTestUtils::DEVICE_CPU;
+
+        InputShape inputShapes{{}, {{7, 4}}};
+
+        init_input_shapes({inputShapes});
+
+        auto ngPrc = ngraph::element::f32;
+        auto inputParams = ngraph::builder::makeDynamicParams(ngPrc, inputDynamicShapes);
+
+        auto splitAxisOp = std::make_shared<ngraph::opset6::Constant>(ngraph::element::i64, ngraph::Shape{}, std::vector<int64_t>{0});
+        std::vector<int> splitLength = {1, 0, 6};
+        auto splitLengthsOp = std::make_shared<ngraph::opset6::Constant>(ngraph::element::i32, ngraph::Shape{splitLength.size()}, splitLength);
+        auto varSplit = std::make_shared<ngraph::opset6::VariadicSplit>(inputParams[0], splitAxisOp, splitLengthsOp);
+
+        auto relu1 = std::make_shared<ngraph::opset6::Relu>(varSplit->output(0));
+
+        auto numInRoi = ngraph::builder::makeConstant(ngPrc, {0}, std::vector<float>{}, false);
+        auto expDet = std::make_shared<ngraph::opset6::ExperimentalDetectronTopKROIs>(varSplit->output(1), numInRoi, 10);
+        auto relu2 = std::make_shared<ngraph::opset6::Relu>(expDet);
+
+        auto relu3 = std::make_shared<ngraph::opset6::Relu>(varSplit->output(2));
+
+        ngraph::NodeVector results{relu1, relu2, relu3};
+        function = std::make_shared<ngraph::Function>(results, inputParams, "StaticZeroDims");
+    }
+
+    void compare(const std::vector<ov::Tensor> &expected, const std::vector<ov::Tensor> &actual) override {
+        ASSERT_EQ(expected.size(), actual.size());
+        for (size_t i = 0; i < expected.size(); i++) {
+            // skip the second output tensor: it is the output of ExperimentalDetectronTopKROIs, whose input has dims [0, 4],
+            // so according to the spec its values are undefined
+            if (i == 1) {
+                continue;
+            }
+            ov::test::utils::compare(expected[i], actual[i], abs_threshold, rel_threshold);
+        }
+    }
+};
+
+TEST_F(StaticZeroDims, smoke_CompareWithRefs) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    run();
+}
+
+} // namespace SubgraphTestsDefinitions
\ No newline at end of file
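To make the zero-dim data flow of static_zero_dims.cpp concrete: splitting the {7, 4} input along axis 0 with lengths {1, 0, 6} produces a middle output of shape {0, 4}. The standalone sketch below rebuilds just the VariadicSplit with the ngraph API the test uses (the opset6 names are an assumption; any opset providing these ops behaves the same) and prints the inferred output shapes.

#include <iostream>
#include <memory>
#include <vector>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset6.hpp>

int main() {
    using namespace ngraph;
    auto param = std::make_shared<opset6::Parameter>(element::f32, Shape{7, 4});
    auto axis = std::make_shared<opset6::Constant>(element::i64, Shape{}, std::vector<int64_t>{0});
    std::vector<int> lengths = {1, 0, 6};
    auto lengthsConst = std::make_shared<opset6::Constant>(element::i32, Shape{lengths.size()}, lengths);
    auto split = std::make_shared<opset6::VariadicSplit>(param, axis, lengthsConst);

    // Prints {1,4}, {0,4}, {6,4}: the zero-length piece is a perfectly valid static shape.
    for (size_t i = 0; i < split->get_output_size(); ++i)
        std::cout << "output " << i << ": " << split->get_output_partial_shape(i) << '\n';
}

ExperimentalDetectronTopKROIs then consumes that {0, 4} tensor, which is why the test's compare() skips output 1: with zero input ROIs the specification leaves the produced values undefined.
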
diff --git a/src/tests/unit/cpu/mkldnn_zero_dims_test.cpp b/src/tests/unit/cpu/mkldnn_zero_dims_test.cpp
new file mode 100644
index 00000000000..cb2944dc6b6
--- /dev/null
+++ b/src/tests/unit/cpu/mkldnn_zero_dims_test.cpp
@@ -0,0 +1,258 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "mkldnn_memory.h"
+#include "memory_desc/cpu_memory_desc_utils.h"
+#include "nodes/common/blocked_desc_creator.h"
+#include "mkldnn_extension_utils.h"
+#include "memory_desc/dnnl_blocked_memory_desc.h"
+
+using namespace MKLDNNPlugin;
+using namespace InferenceEngine;
+using namespace testing;
+
+/* ======================================= BASE ZERO DIM TEST ======================================= */
+class MemDescWithZeroDimsBaseTest: public ::testing::Test {
+protected:
+    Shape shape;
+    dnnl::memory::format_tag fmt;
+    const InferenceEngine::Precision precision = InferenceEngine::Precision::FP32;
+
+    void validate(const BlockedMemoryDesc& desc, const VectorDims& expectedStrides, size_t offsetSize, size_t offsetPaddingSize,
+                  size_t maxMemSize, bool orderCheckSkip = false) {
+        VectorDims expectedBlkDims;
+        VectorDims expectedOrder;
+        {
+            auto origShape = shape.toPartialShape();
+            auto replaceShape = origShape;
+            std::replace(replaceShape.begin(), replaceShape.end(), ngraph::Dimension(0), ngraph::Dimension(3));
+            Shape dummyShape(replaceShape);
+            DnnlBlockedMemoryDesc dummyDesc(dummyShape, MKLDNNExtensionUtils::IEPrecisionToDataType(precision), fmt);
+            expectedBlkDims = dummyDesc.getBlockDims();
+            expectedOrder = dummyDesc.getOrder();
+            for (size_t i = 0; i < dummyShape.getRank(); i++) {
+                if (origShape[expectedOrder[i]] == ngraph::Dimension(0)) {
+                    expectedBlkDims[i] = 0;
+                }
+            }
+        }
+
+        ASSERT_EQ(shape.getDims(), desc.getShape().getDims());
+        ASSERT_EQ(shape.getMinDims(), desc.getShape().getMinDims());
+        ASSERT_EQ(shape.getMaxDims(), desc.getShape().getMaxDims());
+
+        ASSERT_EQ(expectedStrides, desc.getStrides());
+        ASSERT_EQ(expectedBlkDims, desc.getBlockDims());
+        if (!orderCheckSkip) {
+            ASSERT_EQ(expectedOrder, desc.getOrder());
+        }
+
+        ASSERT_EQ(0, desc.getPaddedElementsCount());
+        ASSERT_EQ(maxMemSize, desc.getMaxMemSize());
+        ASSERT_EQ(maxMemSize, desc.getCurrentMemSize());
+
+        ASSERT_EQ(offsetSize, desc.getOffsetPadding());
+        ASSERT_EQ(VectorDims(expectedBlkDims.size(), offsetPaddingSize), desc.getOffsetPaddingToData());
+    }
+
+    virtual std::pair<DnnlBlockedMemoryDesc, CpuBlockedMemoryDesc> createDescs() const {
+        DnnlBlockedMemoryDesc descDnnl(precision, shape);
+        CpuBlockedMemoryDesc descCpu(precision, shape);
+        return {descDnnl, descCpu};
+    }
+
+    void Run() {
+        const size_t offset = 0, offsetPadding = 0;
+
+        auto descs = createDescs();
+        DnnlBlockedMemoryDesc descDnnl(descs.first);
+        CpuBlockedMemoryDesc descCpu(descs.second);
+
+        VectorDims zeroStrides(descDnnl.getBlockDims().size(), 0);
+        validate(descDnnl, zeroStrides, offset, offsetPadding, 0);
+        validate(descCpu, zeroStrides, offset, offsetPadding, precision.size());
+
+        ASSERT_TRUE(descDnnl.isCompatible(descCpu));
+        ASSERT_TRUE(descCpu.isCompatible(descDnnl));
+
+        // undefined
+        VectorDims undefDnnlStrides(descDnnl.getBlockDims().size(), Shape::UNDEFINED_DIM);
+        std::fill(undefDnnlStrides.begin() + descDnnl.getShape().getRank(), undefDnnlStrides.end(), 0);
+        const auto undefDnnl = descDnnl.cloneWithUndefStridesAndOffset();
+        validate(*undefDnnl->as<BlockedMemoryDesc>(), undefDnnlStrides, Shape::UNDEFINED_DIM, offsetPadding, Shape::UNDEFINED_DIM);
+
+        VectorDims undefCpuStrides(descCpu.getBlockDims().size(), Shape::UNDEFINED_DIM);
+        const auto undefCpu = descCpu.cloneWithUndefStridesAndOffset();
+        validate(*undefCpu->as<BlockedMemoryDesc>(), undefCpuStrides, Shape::UNDEFINED_DIM, offsetPadding,
+                 Shape::UNDEFINED_DIM);
+
+        // defined
+        const auto definedDnnl = descDnnl.cloneWithDefaultStridesAndOffset();
+        validate(*definedDnnl->as<BlockedMemoryDesc>(), zeroStrides, offset, offsetPadding, 0);
+
+        const auto definedCpu = descCpu.cloneWithDefaultStridesAndOffset();
+        validate(*definedCpu->as<BlockedMemoryDesc>(), zeroStrides, offset, offsetPadding, precision.size());
+    }
+};
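The expectations encoded in Run() above follow a single convention: once any dimension is 0 the tensor holds no elements, every stride is reported as 0, and the required memory collapses (the dnnl descriptor reports 0 bytes; the cpu descriptor reports one element's precision size, 4 bytes for FP32). A minimal sketch of that arithmetic, independent of the plugin types and using illustrative names:

#include <cstddef>
#include <iostream>
#include <vector>

// Dense row-major strides under the convention above: if any dim is 0,
// the tensor is empty and all strides are defined to be 0.
std::vector<size_t> denseStrides(const std::vector<size_t>& dims) {
    bool hasZero = false;
    for (size_t d : dims)
        hasZero |= (d == 0);
    std::vector<size_t> strides(dims.size(), 0);
    if (hasZero)
        return strides;                // zero-dim tensor: all-zero strides
    size_t acc = 1;
    for (size_t i = dims.size(); i-- > 0;) {
        strides[i] = acc;
        acc *= dims[i];
    }
    return strides;
}

size_t elementCount(const std::vector<size_t>& dims) {
    size_t n = 1;
    for (size_t d : dims)
        n *= d;                        // any zero dim makes the product 0
    return n;
}

int main() {
    std::vector<size_t> dims = {16, 0, 48, 64};
    for (size_t s : denseStrides(dims))
        std::cout << s << ' ';                                  // 0 0 0 0
    std::cout << "\nelements: " << elementCount(dims) << '\n';  // elements: 0
}
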
+
+/* ======================================= TEST DATA ======================================= */
+const std::vector<Shape> staticShapes = {
+    Shape(VectorDims{0, 32, 48, 64}),
+    Shape(VectorDims{16, 0, 48, 64}),
+    Shape(VectorDims{16, 32, 0, 64}),
+    Shape(VectorDims{16, 32, 48, 0}),
+    Shape(VectorDims{16, 32, 0, 0}),
+    Shape(VectorDims{0, 0, 48, 64}),
+    Shape(VectorDims{16, 0, 0, 64}),
+    Shape(VectorDims{0, 0, 0, 64}),
+    Shape(VectorDims{16, 0, 0, 0}),
+    Shape(VectorDims{0, 0, 0, 0})
+};
+
+const std::vector<Shape> dynamicShapes = {
+    Shape(ngraph::PartialShape{0, -1, {0, 48}, -1}),
+    Shape(ngraph::PartialShape{16, 0, -1, {0, 64}}),
+    Shape(ngraph::PartialShape{-1, -1, 0, -1}),
+    Shape(ngraph::PartialShape{{0, 16}, -1, {0, 48}, 0}),
+    Shape(ngraph::PartialShape{-1, 32, 0, 0}),
+    Shape(ngraph::PartialShape{0, 0, 48, -1}),
+    Shape(ngraph::PartialShape{{0, 16}, 0, 0, 64}),
+    Shape(ngraph::PartialShape{0, 0, 0, -1}),
+    Shape(ngraph::PartialShape{{0, 16}, 0, 0, 0}),
+    Shape(ngraph::PartialShape{0, 0, 0, 0})
+};
+
+const std::vector<dnnl::memory::format_tag> fmts = {
+    dnnl::memory::format_tag::nchw,
+    dnnl::memory::format_tag::nhwc,
+    dnnl::memory::format_tag::nChw8c,
+    dnnl::memory::format_tag::nChw16c,
+    dnnl::memory::format_tag::NChw16n16c,
+    dnnl::memory::format_tag::Acdb16a
+};
+
+/* ======================================= SPECIFIC TEST CASES ======================================= */
+using MemDescWithZeroDimsParams = std::tuple<dnnl::memory::format_tag, Shape>;
+
+class MemDescWithZeroDimsFmtTest: public testing::WithParamInterface<MemDescWithZeroDimsParams>,
+                                  public MemDescWithZeroDimsBaseTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<MemDescWithZeroDimsParams> &obj) {
+        Shape shape;
+        dnnl::memory::format_tag fmt;
+        std::tie(fmt, shape) = obj.param;
+        std::ostringstream result;
+        result << "Shape=" << shape.toString();
+        result << "_Fmt=" << mkldnn::utils::fmt2str(fmt);
+        return result.str();
+    }
+
+    std::pair<DnnlBlockedMemoryDesc, CpuBlockedMemoryDesc> createDescs() const override {
+        DnnlBlockedMemoryDesc descDnnl(shape, MKLDNNExtensionUtils::IEPrecisionToDataType(precision), fmt);
+        CpuBlockedMemoryDesc descCpu(precision, shape, descDnnl.getBlockDims(), descDnnl.getOrder());
+        return {descDnnl, descCpu};
+    }
+
+protected:
+    void SetUp() override {
+        std::tie(fmt, shape) = this->GetParam();
+        ASSERT_TRUE(shape.hasZeroDims()) << "Can't run MemDescWithZeroDimsTest because the shape doesn't contain zero dims";
+    }
+};
+
+TEST_P(MemDescWithZeroDimsFmtTest, CreateDescWithFmt) {
+    Run();
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_MemDescWithZeroDimsFmtTest_static, MemDescWithZeroDimsFmtTest,
+                         ::testing::Combine(::testing::ValuesIn(fmts),
+                                            ::testing::ValuesIn(staticShapes)),
+                         MemDescWithZeroDimsFmtTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MemDescWithZeroDimsFmtTest_dynamic, MemDescWithZeroDimsFmtTest,
+                         ::testing::Combine(::testing::ValuesIn(fmts),
+                                            ::testing::ValuesIn(dynamicShapes)),
+                         MemDescWithZeroDimsFmtTest::getTestCaseName);
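One detail of validate() above worth spelling out is the dummy-shape substitution: the expected block dims and order are obtained by replacing every zero dim with a harmless non-zero value (3), building a descriptor over that dummy shape, and then zeroing back each blocked dim whose source dim was 0. A rough standalone sketch of the same trick for an nChw16c-like layout; the order vector and block size here are assumptions for illustration, not the plugin's internals.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    std::vector<size_t> dims = {16, 0, 48, 64};      // NCHW with C == 0
    std::vector<size_t> dummy = dims;
    std::replace(dummy.begin(), dummy.end(), size_t{0}, size_t{3});

    // Assumed nChw16c blocking: order {0, 1, 2, 3, 1}, channel block of 16.
    std::vector<size_t> order = {0, 1, 2, 3, 1};
    const size_t block = 16;
    std::vector<size_t> blockDims = {dummy[0], (dummy[1] + block - 1) / block,
                                     dummy[2], dummy[3], block};

    // Zero back every blocked dim whose source dim was 0.
    for (size_t i = 0; i < blockDims.size(); ++i)
        if (dims[order[i]] == 0)
            blockDims[i] = 0;

    for (size_t d : blockDims)
        std::cout << d << ' ';                       // 16 0 48 64 0
    std::cout << '\n';
}
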
+
+class MemDescWithZeroDimsPlanarTest: public testing::WithParamInterface<Shape>,
+                                     public MemDescWithZeroDimsBaseTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<Shape> &obj) {
+        Shape shape;
+        shape = obj.param;
+        std::ostringstream result;
+        result << "Shape=" << shape.toString();
+        return result.str();
+    }
+protected:
+    void SetUp() override {
+        shape = this->GetParam();
+        fmt = dnnl::memory::format_tag::nchw;
+        ASSERT_TRUE(shape.hasZeroDims()) << "Can't run MemDescWithZeroDimsTest because the shape doesn't contain zero dims";
+    }
+};
+
+TEST_P(MemDescWithZeroDimsPlanarTest, CreateDescPlanar) {
+    Run();
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_MemDescWithZeroDimsPlanarTest, MemDescWithZeroDimsPlanarTest,
+                         ::testing::ValuesIn(staticShapes),
+                         MemDescWithZeroDimsPlanarTest::getTestCaseName);
+
+using MemDescWithZeroDimsCloneNewDimsParams = std::tuple<dnnl::memory::format_tag,
+                                                         Shape,  // dynamic shapes
+                                                         Shape>; // static shapes
+
+class MemDescWithZeroDimsCloneNewDimsTest: public testing::WithParamInterface<MemDescWithZeroDimsCloneNewDimsParams>,
+                                           public MemDescWithZeroDimsBaseTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<MemDescWithZeroDimsCloneNewDimsParams> &obj) {
+        Shape shapeDynamic, shapeClone;
+        dnnl::memory::format_tag fmt;
+        std::tie(fmt, shapeDynamic, shapeClone) = obj.param;
+        std::ostringstream result;
+        result << "ShapeDynamic=" << shapeDynamic.toString();
+        result << "_ShapeClone=" << shapeClone.toString();
+        result << "_Fmt=" << mkldnn::utils::fmt2str(fmt);
+        return result.str();
+    }
+protected:
+    Shape shapeDynamic;
+
+    void SetUp() override {
+        std::tie(fmt, shapeDynamic, shape) = this->GetParam();
+        ASSERT_TRUE(shape.hasZeroDims()) << "Can't run MemDescWithZeroDimsTest because the shape doesn't contain zero dims";
+    }
+};
+
+TEST_P(MemDescWithZeroDimsCloneNewDimsTest, CloneWithNewDims) {
+    DnnlBlockedMemoryDesc dynamicDescDnnl(shapeDynamic, MKLDNNExtensionUtils::IEPrecisionToDataType(precision), fmt);
+    CpuBlockedMemoryDesc dynamicDescCpu(precision, shape, dynamicDescDnnl.getBlockDims(), dynamicDescDnnl.getOrder());
+    const size_t offset = 0, offsetPadding = 0;
+    VectorDims zeroStrides(dynamicDescDnnl.getBlockDims().size(), 0);
+
+    const auto clonedDescDnnl = dynamicDescDnnl.cloneWithNewDims(shape.getStaticDims());
+    const auto clonedDescCpu = dynamicDescCpu.cloneWithNewDims(shape.getStaticDims());
+
+    // the order can't be computed correctly here since all the strides are equal
+    const auto& dims = shape.getDims();
+    bool skipOrderCheck = std::all_of(dims.begin() + 1, dims.end(), [](const size_t& dim) { return dim == 0; });
+    validate(*clonedDescDnnl->as<BlockedMemoryDesc>(), zeroStrides, offset, offsetPadding, 0, skipOrderCheck);
+    validate(*clonedDescCpu->as<BlockedMemoryDesc>(), zeroStrides, offset, offsetPadding, precision.size());
+}
+
+const std::vector<Shape> srcDynShapes = {
+    Shape(ngraph::PartialShape({-1, -1, -1, -1})),
+    Shape(ngraph::PartialShape({{0, 16}, {0, 32}, {0, 48}, {0, 64}}))
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_MemDescWithZeroDimsCloneNewDimsTest, MemDescWithZeroDimsCloneNewDimsTest,
+                         ::testing::Combine(::testing::ValuesIn(fmts),
+                                            ::testing::ValuesIn(srcDynShapes),
+                                            ::testing::ValuesIn(staticShapes)),
+                         MemDescWithZeroDimsCloneNewDimsTest::getTestCaseName);
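The CloneWithNewDims test above relies on shape specialization: a dynamic shape such as {{0, 16}, {0, 32}, {0, 48}, {0, 64}} may be specialized to any static shape whose dims fall within the per-dimension bounds, and a dim of 0 qualifies whenever the lower bound is 0. A compact sketch of that containment check (illustrative, not the plugin's implementation):

#include <cstddef>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

// A dynamic dim as an inclusive [min, max] interval; {0, SIZE_MAX} plays the
// role of the fully dynamic "-1" used in the shapes above.
using Bound = std::pair<size_t, size_t>;

bool canSpecialize(const std::vector<Bound>& dynamicShape, const std::vector<size_t>& staticDims) {
    if (dynamicShape.size() != staticDims.size())
        return false;
    for (size_t i = 0; i < staticDims.size(); ++i)
        if (staticDims[i] < dynamicShape[i].first || staticDims[i] > dynamicShape[i].second)
            return false;
    return true;
}

int main() {
    const size_t kDyn = std::numeric_limits<size_t>::max();
    std::vector<Bound> dyn = {{0, 16}, {0, 32}, {0, 48}, {0, 64}};
    std::cout << canSpecialize(dyn, {16, 0, 48, 0}) << '\n';     // 1: zero dims are in range
    std::cout << canSpecialize(dyn, {17, 0, 48, 0}) << '\n';     // 0: 17 exceeds the bound

    std::vector<Bound> fullyDyn(4, {0, kDyn});                   // {-1, -1, -1, -1}
    std::cout << canSpecialize(fullyDyn, {0, 0, 0, 0}) << '\n';  // 1
}
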
From 0644aaa947442143640f3bb8563f265b92e915a8 Mon Sep 17 00:00:00 2001
From: Indira Salyahova
Date: Fri, 17 Dec 2021 12:05:27 +0300
Subject: [PATCH 10/10] [POT] Add new special metrics in aa (#9251)

* Update utils.py

* Update utils.py
---
 .../pot/algorithms/quantization/accuracy_aware_common/utils.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/utils.py b/tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/utils.py
index 554425326e8..48aaf9ae825 100644
--- a/tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/utils.py
+++ b/tools/pot/openvino/tools/pot/algorithms/quantization/accuracy_aware_common/utils.py
@@ -17,7 +17,8 @@ from ....samplers.creator import create_sampler
 
 SPECIAL_METRICS = ['cmc', 'reid_map', 'pairwise_accuracy_subsets', 'pairwise_accuracy',
                    'normalized_embedding_accuracy', 'face_recognition_tafa_pair_metric', 'localization_recall',
-                   'coco_orig_keypoints_precision', 'coco_orig_segm_precision', 'coco_orig_keypoints_precision']
+                   'coco_orig_keypoints_precision', 'coco_orig_segm_precision', 'coco_orig_keypoints_precision',
+                   'spearman_correlation_coef', 'pearson_correlation_coef']
 
 METRICS_CONFIGS = {'sigmoid_recom_loss': {'metrics': 'log_loss', 'postprocessing': 'sigmoid_normalize_recommendation'},