From dd54cb9c17e57e392babd6b5996746d359424e0d Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Thu, 31 Mar 2022 21:57:05 +0300 Subject: [PATCH 01/16] [Python API] Remove old api class from the new api (#10470) * [Python API] Remove old api class from the new api * start working on refactoring of OVAny * fix tests * fix code-style * remove tuple test * fix test * fix omz hash * one more overload * fix pyfloat * move from_ov_any to utils * code-style * move function from common to utils --- .../python/src/openvino/runtime/__init__.py | 4 +- .../python/src/openvino/runtime/ie_api.py | 46 ------ .../python/src/pyopenvino/core/common.cpp | 132 --------------- .../python/src/pyopenvino/core/common.hpp | 4 - .../src/pyopenvino/core/compiled_model.cpp | 9 +- .../python/src/pyopenvino/core/core.cpp | 12 +- .../src/pyopenvino/core/ie_parameter.cpp | 16 -- .../src/pyopenvino/core/ie_parameter.hpp | 11 -- .../python/src/pyopenvino/graph/any.cpp | 59 +++++-- .../python/src/pyopenvino/graph/any.hpp | 15 -- .../python/src/pyopenvino/graph/rt_map.cpp | 4 +- .../python/src/pyopenvino/pyopenvino.cpp | 2 - .../python/src/pyopenvino/utils/utils.cpp | 155 ++++++++++++++++++ .../python/src/pyopenvino/utils/utils.hpp | 44 ++--- .../test_inference_engine/test_input_node.py | 6 +- .../test_output_const_node.py | 4 +- .../python/tests/test_ngraph/test_any.py | 5 - .../python/tests/test_ngraph/test_basic.py | 4 +- 18 files changed, 243 insertions(+), 289 deletions(-) delete mode 100644 src/bindings/python/src/pyopenvino/core/ie_parameter.cpp delete mode 100644 src/bindings/python/src/pyopenvino/core/ie_parameter.hpp create mode 100644 src/bindings/python/src/pyopenvino/utils/utils.cpp diff --git a/src/bindings/python/src/openvino/runtime/__init__.py b/src/bindings/python/src/openvino/runtime/__init__.py index f40efa258d2..2726fe8a56f 100644 --- a/src/bindings/python/src/openvino/runtime/__init__.py +++ b/src/bindings/python/src/openvino/runtime/__init__.py @@ -32,15 +32,13 @@ from openvino.pyopenvino import Coordinate from openvino.pyopenvino import Layout from openvino.pyopenvino import ConstOutput from openvino.pyopenvino import layout_helpers +from openvino.pyopenvino import OVAny from openvino.pyopenvino import RTMap - from openvino.runtime.ie_api import Core from openvino.runtime.ie_api import CompiledModel from openvino.runtime.ie_api import InferRequest from openvino.runtime.ie_api import AsyncInferQueue -from openvino.runtime.ie_api import OVAny from openvino.pyopenvino import Version -from openvino.pyopenvino import Parameter from openvino.pyopenvino import Tensor from openvino.pyopenvino import Extension from openvino.pyopenvino import ProfilingInfo diff --git a/src/bindings/python/src/openvino/runtime/ie_api.py b/src/bindings/python/src/openvino/runtime/ie_api.py index a49f6f62ee5..3d8262bc615 100644 --- a/src/bindings/python/src/openvino/runtime/ie_api.py +++ b/src/bindings/python/src/openvino/runtime/ie_api.py @@ -11,7 +11,6 @@ from openvino.pyopenvino import InferRequest as InferRequestBase from openvino.pyopenvino import AsyncInferQueue as AsyncInferQueueBase from openvino.pyopenvino import ConstOutput from openvino.pyopenvino import Tensor -from openvino.pyopenvino import OVAny as OVAnyBase from openvino.runtime.utils.types import get_dtype @@ -345,48 +344,3 @@ def compile_model(model_path: str) -> CompiledModel: """ core = Core() return core.compile_model(model_path, "AUTO") - - -class OVAny(OVAnyBase): - """OVAny wrapper. 
- - Wrapper provides some useful overloads for simple built-in Python types. - - Access to the `OVAny` value is direct if it is a built-in Python data type. - - :Example: - .. code-block:: ipython - - any = OVAny([1, 2]) - print(any[0]) - - Output: 2 - - Otherwise if `OVAny` value is a custom data type (for example user class), - access to the value is possible by `get()` method or property 'value'. - - :Example: - .. code-block:: python - - class Test: - def __init__(self): - self.data = "test" - - any = OVAny(Test()) - print(any.value.data) - """ - - def __getitem__(self, key: Union[str, int]) -> Any: - return self.value[key] - - def __get__(self) -> Any: - return self.value - - def __setitem__(self, key: Union[str, int], val: Any) -> None: - self.value[key] = val - - def __set__(self, val: Any) -> None: - self.value = val - - def __len__(self) -> int: - return len(self.value) diff --git a/src/bindings/python/src/pyopenvino/core/common.cpp b/src/bindings/python/src/pyopenvino/core/common.cpp index 997dcb22927..3540d9fef44 100644 --- a/src/bindings/python/src/pyopenvino/core/common.cpp +++ b/src/bindings/python/src/pyopenvino/core/common.cpp @@ -293,138 +293,6 @@ void set_request_tensors(ov::InferRequest& request, const py::dict& inputs) { } } -PyAny from_ov_any(const ov::Any& any) { - // Check for py::object - if (any.is()) { - return any.as(); - } - // Check for std::string - else if (any.is()) { - return PyUnicode_FromString(any.as().c_str()); - } - // Check for int - else if (any.is()) { - auto val = any.as(); - return PyLong_FromLong((long)val); - } else if (any.is()) { - auto val = any.as(); - return PyLong_FromLong((long)val); - } - // Check for unsinged int - else if (any.is()) { - auto val = any.as(); - return PyLong_FromLong((unsigned long)val); - } - // Check for float - else if (any.is()) { - auto val = any.as(); - return PyFloat_FromDouble((double)val); - } else if (any.is()) { - auto val = any.as(); - return PyFloat_FromDouble(val); - } - // Check for bool - else if (any.is()) { - auto val = any.as(); - return val ? 
Py_True : Py_False; - } - // Check for std::vector - else if (any.is>()) { - auto val = any.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyObject* str_val = PyUnicode_FromString(it.c_str()); - PyList_Append(list, str_val); - } - return list; - } - // Check for std::vector - else if (any.is>()) { - auto val = any.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyLong_FromLong(it)); - } - return list; - } - // Check for std::vector - else if (any.is>()) { - auto val = any.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyLong_FromLong(it)); - } - return list; - } - // Check for std::vector - else if (any.is>()) { - auto val = any.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyLong_FromLong(it)); - } - return list; - } - // Check for std::vector - else if (any.is>()) { - auto val = any.as>(); - PyObject* list = PyList_New(0); - for (const auto& it : val) { - PyList_Append(list, PyFloat_FromDouble((double)it)); - } - return list; - } - // Check for std::tuple - else if (any.is>()) { - auto val = any.as>(); - PyObject* tuple = PyTuple_New(2); - PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); - PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); - return tuple; - } - // Check for std::tuple - else if (any.is>()) { - auto val = any.as>(); - PyObject* tuple = PyTuple_New(3); - PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); - PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); - PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long)std::get<2>(val))); - return tuple; - } - // Check for std::map - else if (any.is>()) { - auto val = any.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str())); - } - return dict; - } - // Check for std::map - else if (any.is>()) { - auto val = any.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second)); - } - return dict; - } - // Check for std::vector - else if (any.is>()) { - auto val = any.as>(); - PyObject* dict = PyDict_New(); - for (const auto& it : val) { - std::string property_name = it; - std::string mutability = it.is_mutable() ? 
"RW" : "RO"; - PyDict_SetItemString(dict, property_name.c_str(), PyUnicode_FromString(mutability.c_str())); - } - return dict; - } else { - PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!"); - return (PyObject*)NULL; - } -} - uint32_t get_optimal_number_of_requests(const ov::CompiledModel& actual) { try { auto supported_properties = actual.get_property(ov::supported_properties); diff --git a/src/bindings/python/src/pyopenvino/core/common.hpp b/src/bindings/python/src/pyopenvino/core/common.hpp index f3f9ad0be03..36c0326fd12 100644 --- a/src/bindings/python/src/pyopenvino/core/common.hpp +++ b/src/bindings/python/src/pyopenvino/core/common.hpp @@ -14,11 +14,9 @@ #include #include "Python.h" -#include "ie_common.h" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/infer_request.hpp" #include "openvino/runtime/tensor.hpp" -#include "openvino/runtime/properties.hpp" #include "openvino/pass/serialize.hpp" #include "pyopenvino/core/containers.hpp" #include "pyopenvino/graph/any.hpp" @@ -50,8 +48,6 @@ const Containers::TensorIndexMap cast_to_tensor_index_map(const py::dict& inputs void set_request_tensors(ov::InferRequest& request, const py::dict& inputs); -PyAny from_ov_any(const ov::Any& any); - uint32_t get_optimal_number_of_requests(const ov::CompiledModel& actual); py::dict outputs_to_dict(const std::vector>& outputs, ov::InferRequest& request); diff --git a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp index c56a545f2d3..158d7e2447a 100644 --- a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp +++ b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp @@ -7,11 +7,10 @@ #include #include -#include - #include "common.hpp" #include "pyopenvino/core/containers.hpp" #include "pyopenvino/core/infer_request.hpp" +#include "pyopenvino/utils/utils.hpp" PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); PYBIND11_MAKE_OPAQUE(Containers::TensorNameMap); @@ -148,9 +147,9 @@ void regclass_CompiledModel(py::module m) { cls.def( "set_property", [](ov::CompiledModel& self, const std::map& properties) { - std::map properties_to_cpp; + std::map properties_to_cpp; for (const auto& property : properties) { - properties_to_cpp[property.first] = PyAny(property.second); + properties_to_cpp[property.first] = ov::Any(py_object_to_any(property.second)); } self.set_property({properties_to_cpp.begin(), properties_to_cpp.end()}); }, @@ -166,7 +165,7 @@ void regclass_CompiledModel(py::module m) { cls.def( "get_property", [](ov::CompiledModel& self, const std::string& name) -> py::object { - return Common::from_ov_any(self.get_property(name)).as(); + return Common::utils::from_ov_any(self.get_property(name)); }, py::arg("name"), R"( diff --git a/src/bindings/python/src/pyopenvino/core/core.cpp b/src/bindings/python/src/pyopenvino/core/core.cpp index 866315052a3..36fc0951acf 100644 --- a/src/bindings/python/src/pyopenvino/core/core.cpp +++ b/src/bindings/python/src/pyopenvino/core/core.cpp @@ -10,9 +10,9 @@ #include #include #include -#include #include "common.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -35,9 +35,9 @@ void regclass_Core(py::module m) { cls.def( "set_property", [](ov::Core& self, const std::map& properties) { - std::map properties_to_cpp; + std::map properties_to_cpp; for (const auto& property : properties) { - properties_to_cpp[property.first] = PyAny(property.second); + properties_to_cpp[property.first] = 
ov::Any(py_object_to_any(property.second)); } self.set_property({properties_to_cpp.begin(), properties_to_cpp.end()}); }, @@ -52,9 +52,9 @@ void regclass_Core(py::module m) { cls.def( "set_property", [](ov::Core& self, const std::string& device_name, const std::map& properties) { - std::map properties_to_cpp; + std::map properties_to_cpp; for (const auto& property : properties) { - properties_to_cpp[property.first] = PyAny(property.second); + properties_to_cpp[property.first] = ov::Any(py_object_to_any(property.second)); } self.set_property(device_name, {properties_to_cpp.begin(), properties_to_cpp.end()}); }, @@ -388,7 +388,7 @@ void regclass_Core(py::module m) { cls.def( "get_property", [](ov::Core& self, const std::string& device_name, const std::string& name) -> py::object { - return Common::from_ov_any(self.get_property(device_name, name)).as(); + return Common::utils::from_ov_any(self.get_property(device_name, name)); }, py::arg("device_name"), py::arg("name"), diff --git a/src/bindings/python/src/pyopenvino/core/ie_parameter.cpp b/src/bindings/python/src/pyopenvino/core/ie_parameter.cpp deleted file mode 100644 index 93c2cc412d6..00000000000 --- a/src/bindings/python/src/pyopenvino/core/ie_parameter.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyopenvino/core/ie_parameter.hpp" - -#include - -namespace py = pybind11; - -void regclass_Parameter(py::module m) { - py::class_> cls(m, "Parameter"); - - cls.def(py::init()); - cls.def(py::init()); -} diff --git a/src/bindings/python/src/pyopenvino/core/ie_parameter.hpp b/src/bindings/python/src/pyopenvino/core/ie_parameter.hpp deleted file mode 100644 index f70478a21ce..00000000000 --- a/src/bindings/python/src/pyopenvino/core/ie_parameter.hpp +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace py = pybind11; - -void regclass_Parameter(py::module m); diff --git a/src/bindings/python/src/pyopenvino/graph/any.cpp b/src/bindings/python/src/pyopenvino/graph/any.cpp index 715f49b2d42..a6a25cda93b 100644 --- a/src/bindings/python/src/pyopenvino/graph/any.cpp +++ b/src/bindings/python/src/pyopenvino/graph/any.cpp @@ -6,37 +6,64 @@ #include -#include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/any.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; void regclass_graph_Any(py::module m) { - py::class_> ov_any(m, "OVAny", py::module_local()); + py::class_> ov_any(m, "OVAny"); + ov_any.doc() = "openvino.runtime.OVAny provides object wrapper for OpenVINO" "ov::Any class. 
It allows to pass different types of objects" "into C++ based core of the project."; - ov_any.def(py::init()); + ov_any.def(py::init([](py::object& input_value) { + return ov::Any(py_object_to_any(input_value)); + })); - ov_any.def("__repr__", [](const PyAny& self) { + ov_any.def("__repr__", [](const ov::Any& self) { std::stringstream ret; self.print(ret); return ret.str(); }); - ov_any.def("__eq__", [](const PyAny& a, const PyAny& b) -> bool { + + ov_any.def("__getitem__", [](const ov::Any& self, py::object& k) { + return Common::utils::from_ov_any(self)[k]; + }); + + ov_any.def("__setitem__", [](const ov::Any& self, py::object& k, const std::string& v) { + Common::utils::from_ov_any(self)[k] = v; + }); + + ov_any.def("__setitem__", [](const ov::Any& self, py::object& k, const int64_t& v) { + Common::utils::from_ov_any(self)[k] = v; + }); + + ov_any.def("__get__", [](const ov::Any& self) { + return Common::utils::from_ov_any(self); + }); + + ov_any.def("__set__", [](const ov::Any& self, const ov::Any& val) { + Common::utils::from_ov_any(self) = Common::utils::from_ov_any(val); + }); + + ov_any.def("__len__", [](const ov::Any& self) { + py::handle some_object = Common::utils::from_ov_any(self); + PyObject* source = some_object.ptr(); + return PyObject_Length(source); + }); + + ov_any.def("__eq__", [](const ov::Any& a, const ov::Any& b) -> bool { return a == b; }); - ov_any.def("__eq__", [](const PyAny& a, const ov::Any& b) -> bool { - return a == b; - }); - ov_any.def("__eq__", [](const PyAny& a, py::object b) -> bool { - return a == PyAny(b); + ov_any.def("__eq__", [](const ov::Any& a, py::object& b) -> bool { + return a == ov::Any(py_object_to_any(b)); }); ov_any.def( "get", - [](const PyAny& self) -> py::object { - return self.as(); + [](const ov::Any& self) -> py::object { + return Common::utils::from_ov_any(self); }, R"( :return: Value of this OVAny. @@ -44,8 +71,8 @@ void regclass_graph_Any(py::module m) { )"); ov_any.def( "set", - [](PyAny& self, py::object value) { - self = PyAny(value); + [](ov::Any& self, py::object& value) { + self = ov::Any(py_object_to_any(value)); }, R"( :param: Value to be set in OVAny. @@ -53,8 +80,8 @@ void regclass_graph_Any(py::module m) { )"); ov_any.def_property_readonly( "value", - [](const PyAny& self) { - return self.as(); + [](const ov::Any& self) { + return Common::utils::from_ov_any(self); }, R"( :return: Value of this OVAny. 
diff --git a/src/bindings/python/src/pyopenvino/graph/any.hpp b/src/bindings/python/src/pyopenvino/graph/any.hpp index 70cf6214b1e..2b74880d869 100644 --- a/src/bindings/python/src/pyopenvino/graph/any.hpp +++ b/src/bindings/python/src/pyopenvino/graph/any.hpp @@ -6,21 +6,6 @@ #include -#include -#include - -#include "Python.h" -#include "openvino/core/any.hpp" // ov::RuntimeAttribute - namespace py = pybind11; void regclass_graph_Any(py::module m); - -class PyAny : public ov::Any { -public: - using ov::Any::Any; - PyAny() = default; - PyAny(py::object object) : ov::Any(object) {} - PyAny(PyObject* object) : ov::Any(py::reinterpret_borrow(object)) {} - PyAny(const ov::Any& any) : ov::Any(any) {} -}; diff --git a/src/bindings/python/src/pyopenvino/graph/rt_map.cpp b/src/bindings/python/src/pyopenvino/graph/rt_map.cpp index 95fae8f0ed2..f950f223e19 100644 --- a/src/bindings/python/src/pyopenvino/graph/rt_map.cpp +++ b/src/bindings/python/src/pyopenvino/graph/rt_map.cpp @@ -4,6 +4,7 @@ #include "pyopenvino/graph/rt_map.hpp" +#include #include #include #include @@ -18,6 +19,7 @@ #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/any.hpp" #include "pyopenvino/graph/node.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -37,7 +39,7 @@ void regclass_graph_PyRTMap(py::module m) { m[k] = v; }); py_map.def("__getitem__", [](PyRTMap& m, const std::string& k) -> py::object { - return Common::from_ov_any(m[k]).as(); + return Common::utils::from_ov_any(m[k]); }); py_map.def( "__bool__", diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index c38cab64ffe..8894da14f09 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -26,7 +26,6 @@ #include "pyopenvino/core/containers.hpp" #include "pyopenvino/core/core.hpp" #include "pyopenvino/core/extension.hpp" -#include "pyopenvino/core/ie_parameter.hpp" #include "pyopenvino/core/infer_request.hpp" #include "pyopenvino/core/offline_transformations.hpp" #include "pyopenvino/core/profiling_info.hpp" @@ -199,7 +198,6 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_InferRequest(m); regclass_VariableState(m); regclass_Version(m); - regclass_Parameter(m); regclass_AsyncInferQueue(m); regclass_ProfilingInfo(m); regclass_Extension(m); diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp new file mode 100644 index 00000000000..f39fdf5ae14 --- /dev/null +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -0,0 +1,155 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/utils/utils.hpp" + +#include + +#include +#include +#include +#include + +#include "Python.h" +#include "openvino/runtime/properties.hpp" + +namespace Common { +namespace utils { + +py::object from_ov_any(const ov::Any& any) { + // Check for py::object + if (any.is()) { + return any.as(); + } + // Check for std::string + else if (any.is()) { + return py::cast(PyUnicode_FromString(any.as().c_str())); + } + // Check for int + else if (any.is()) { + return py::cast(PyLong_FromLong(any.as())); + } else if (any.is()) { + return py::cast(PyLong_FromLong(any.as())); + } + // Check for unsigned int + else if (any.is()) { + return py::cast(PyLong_FromLong(any.as())); + } + // Check for float + else if (any.is()) { + return py::cast(PyFloat_FromDouble(any.as())); + } else if (any.is()) { + return 
py::cast(PyFloat_FromDouble(any.as())); + } + // Check for bool + else if (any.is()) { + return py::cast(any.as() ? Py_True : Py_False); + } + // Check for std::vector + else if (any.is>()) { + auto val = any.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyObject* str_val = PyUnicode_FromString(it.c_str()); + PyList_Append(list, str_val); + } + return py::cast(list); + } + // Check for std::vector + else if (any.is>()) { + auto val = any.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyLong_FromLong(it)); + } + return py::cast(list); + } + // Check for std::vector + else if (any.is>()) { + auto val = any.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyLong_FromLong(it)); + } + return py::cast(list); + } + // Check for std::vector + else if (any.is>()) { + auto val = any.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyLong_FromLong(it)); + } + return py::cast(list); + } + // Check for std::vector + else if (any.is>()) { + auto val = any.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyFloat_FromDouble((double)it)); + } + return py::cast(list); + } + // Check for std::vector + else if (any.is>()) { + auto val = any.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyFloat_FromDouble(it)); + } + return py::cast(list); + } + // Check for std::tuple + else if (any.is>()) { + auto val = any.as>(); + PyObject* tuple = PyTuple_New(2); + PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); + PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); + return py::cast(tuple); + } + // Check for std::tuple + else if (any.is>()) { + auto val = any.as>(); + PyObject* tuple = PyTuple_New(3); + PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); + PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); + PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long)std::get<2>(val))); + return py::cast(tuple); + } + // Check for std::map + else if (any.is>()) { + auto val = any.as>(); + PyObject* dict = PyDict_New(); + for (const auto& it : val) { + PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str())); + } + return py::cast(dict); + } + // Check for std::map + else if (any.is>()) { + auto val = any.as>(); + PyObject* dict = PyDict_New(); + for (const auto& it : val) { + PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second)); + } + return py::cast(dict); + } + // Check for std::vector + else if (any.is>()) { + auto val = any.as>(); + PyObject* dict = PyDict_New(); + for (const auto& it : val) { + std::string property_name = it; + std::string mutability = it.is_mutable() ? 
"RW" : "RO"; + PyDict_SetItemString(dict, property_name.c_str(), PyUnicode_FromString(mutability.c_str())); + } + return py::cast(dict); + } else { + PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!"); + return py::cast((PyObject*)NULL); + } +} +}; // namespace utils +}; // namespace Common \ No newline at end of file diff --git a/src/bindings/python/src/pyopenvino/utils/utils.hpp b/src/bindings/python/src/pyopenvino/utils/utils.hpp index 4d862594f00..5fffcd058a3 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.hpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.hpp @@ -7,26 +7,29 @@ #include #include -ov::Any py_object_to_any(const pybind11::object& py_obj) { - if (pybind11::isinstance(py_obj)) { +namespace py = pybind11; + +namespace Common { +namespace utils { + + py::object from_ov_any(const ov::Any &any); +}; // namespace utils +}; // namespace Common + +inline ov::Any py_object_to_any(const py::object& py_obj) { + if (py::isinstance(py_obj)) { return py_obj.cast(); - } else if (pybind11::isinstance(py_obj)) { + } else if (py::isinstance(py_obj)) { return py_obj.cast(); - } else if (pybind11::isinstance(py_obj)) { + } else if (py::isinstance(py_obj)) { return py_obj.cast(); - } else if (pybind11::isinstance(py_obj)) { + } else if (py::isinstance(py_obj)) { return py_obj.cast(); - } else if (pybind11::isinstance(py_obj)) { - auto _list = py_obj.cast(); - enum class PY_TYPE : int { - UNKNOWN = 0, - STR, - INT, - FLOAT, - BOOL - }; + } else if (py::isinstance(py_obj)) { + auto _list = py_obj.cast(); + enum class PY_TYPE : int { UNKNOWN = 0, STR, INT, FLOAT, BOOL }; PY_TYPE detected_type = PY_TYPE::UNKNOWN; - for (const auto &it: _list) { + for (const auto& it : _list) { auto check_type = [&](PY_TYPE type) { if (detected_type == PY_TYPE::UNKNOWN || detected_type == type) { detected_type = type; @@ -34,13 +37,13 @@ ov::Any py_object_to_any(const pybind11::object& py_obj) { } OPENVINO_ASSERT("Incorrect attribute. 
Mixed types in the list are not allowed."); }; - if (pybind11::isinstance(it)) { + if (py::isinstance(it)) { check_type(PY_TYPE::STR); - } else if (pybind11::isinstance(it)) { + } else if (py::isinstance(it)) { check_type(PY_TYPE::INT); - } else if (pybind11::isinstance(it)) { + } else if (py::isinstance(it)) { check_type(PY_TYPE::FLOAT); - } else if (pybind11::isinstance(it)) { + } else if (py::isinstance(it)) { check_type(PY_TYPE::BOOL); } } @@ -57,6 +60,9 @@ ov::Any py_object_to_any(const pybind11::object& py_obj) { default: OPENVINO_ASSERT(false, "Unsupported attribute type."); } + + } else if (py::isinstance(py_obj)) { + return py_obj; } OPENVINO_ASSERT(false, "Unsupported attribute type."); } diff --git a/src/bindings/python/tests/test_inference_engine/test_input_node.py b/src/bindings/python/tests/test_inference_engine/test_input_node.py index 87b87f36983..c3e66feefee 100644 --- a/src/bindings/python/tests/test_inference_engine/test_input_node.py +++ b/src/bindings/python/tests/test_inference_engine/test_input_node.py @@ -4,12 +4,12 @@ import os from ..conftest import model_path -from openvino.runtime import Input, Shape, PartialShape, Type, Parameter, \ +from openvino.runtime import Input, Shape, PartialShape, Type, \ RTMap from openvino.pyopenvino import DescriptorTensor import openvino.runtime.opset8 as ops -from openvino.runtime import Core +from openvino.runtime import Core, OVAny is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" test_net_xml, test_net_bin = model_path(is_myriad) @@ -146,4 +146,4 @@ def test_input_update_rt_info(device): rt["test12345"] = "test" for k, v in input_node.get_rt_info().items(): assert k == "test12345" - assert isinstance(v, Parameter) + assert isinstance(v, OVAny) diff --git a/src/bindings/python/tests/test_inference_engine/test_output_const_node.py b/src/bindings/python/tests/test_inference_engine/test_output_const_node.py index 00d4c2522f8..cbe4655ba79 100644 --- a/src/bindings/python/tests/test_inference_engine/test_output_const_node.py +++ b/src/bindings/python/tests/test_inference_engine/test_output_const_node.py @@ -6,7 +6,7 @@ import os from ..conftest import model_path import openvino.runtime.opset8 as ops from openvino.runtime import ConstOutput, Shape, PartialShape, Type, \ - Output, Parameter, RTMap + Output, RTMap, OVAny from openvino.runtime import Core @@ -130,7 +130,7 @@ def test_update_rt_info(device): rt["test12345"] = "test" for k, v in output_node.get_rt_info().items(): assert k == "test12345" - assert isinstance(v, Parameter) + assert isinstance(v, OVAny) def test_operations(): diff --git a/src/bindings/python/tests/test_ngraph/test_any.py b/src/bindings/python/tests/test_ngraph/test_any.py index 28860107e9c..3b9c2c24e03 100644 --- a/src/bindings/python/tests/test_ngraph/test_any.py +++ b/src/bindings/python/tests/test_ngraph/test_any.py @@ -42,11 +42,6 @@ def test_any_float_list(): assert isinstance(v[0], float) -def test_any_tuple(): - v = OVAny((2, 1)) - assert isinstance(v.value, tuple) - - def test_any_bool(): v = OVAny(False) assert isinstance(v.value, bool) diff --git a/src/bindings/python/tests/test_ngraph/test_basic.py b/src/bindings/python/tests/test_ngraph/test_basic.py index da0f39531ba..1b63dbe9a71 100644 --- a/src/bindings/python/tests/test_ngraph/test_basic.py +++ b/src/bindings/python/tests/test_ngraph/test_basic.py @@ -9,12 +9,10 @@ import pytest import openvino.runtime.opset8 as ops import openvino.runtime as ov -from openvino.pyopenvino import OVAny - from openvino.runtime.exceptions import UserInputError 
from openvino.runtime import Model, PartialShape, Shape, Type, layout_helpers from openvino.runtime import Strides, AxisVector, Coordinate, CoordinateDiff -from openvino.runtime import Tensor +from openvino.runtime import Tensor, OVAny from openvino.pyopenvino import DescriptorTensor from openvino.runtime.op import Parameter from tests.runtime import get_runtime From e52bd441e2ff0f09566230ed924c481da8d651d8 Mon Sep 17 00:00:00 2001 From: Mikhail Nosov Date: Thu, 31 Mar 2022 22:23:40 +0300 Subject: [PATCH 02/16] Frontend exception safety (#11368) * Frontend exception safety Every call to frontend's API (except Places) can throw exception. If during exception handling, FrontEndManager is destroyed and calls 'dlclose' for plugin - call stack will be corrupted and crash will occur. Solution is to wrap 'plugins' calls with try/catch and throw new exception in 'openvino' context TODO: currently "Place" objects don't have 'actual' wrappers, so exception in 'place' objects will potentially cause such crash (if exception handler destroys FrontEndManager). Workaround for user would be to try/catch any calls of Place API on their side. We're not expecting users to use Place API directly, so this workaround looks acceptable * Add check for exception message * Keep type of frontend exception during rethrow * IR FE tests: don't expect InferenceEngine::exception as it be not propagated as is by FrontEndManager --- src/core/tests/frontend/frontend_manager.cpp | 137 +++++++++++++++ src/core/tests/frontend/mock_frontend.cpp | 160 +++++++++++++++++- src/frontends/common/src/frontend.cpp | 21 ++- src/frontends/common/src/input_model.cpp | 51 +++--- src/frontends/common/src/utils.hpp | 35 ++++ .../partial_shape_deserialization.cpp | 12 +- 6 files changed, 377 insertions(+), 39 deletions(-) diff --git a/src/core/tests/frontend/frontend_manager.cpp b/src/core/tests/frontend/frontend_manager.cpp index 893d7974328..521b590e7bf 100644 --- a/src/core/tests/frontend/frontend_manager.cpp +++ b/src/core/tests/frontend/frontend_manager.cpp @@ -344,3 +344,140 @@ TEST(FrontEndExceptionTest, frontend_initialization_error_throw_info) { } FAIL() << "Test is expected to throw an exception."; } + +// FrontEndManager exception safety +#define CHECK_EXCEPTION_FRONTEND(statement) \ + try { \ + FrontEndManager fem; \ + auto fe = fem.load_by_framework("mock1"); \ + auto input_model = fe->load("throw_next"); \ + statement; \ + FAIL() << "Throw was expected"; \ + } catch (ov::frontend::GeneralFailure & error) { \ + EXPECT_NE(std::string(error.what()).find("Test exception"), std::string::npos) << error.what(); \ + } catch (...) 
{ \ + FAIL() << "Unexpected error is thrown"; \ + } + +TEST(FrontEndManagerTest, Exception_Safety_FrontEnd_Load_By_Framework) { + EXPECT_ANY_THROW({ + FrontEndManager fem; + auto fe = fem.load_by_framework("mock1"); + fe->load("throw_now"); + }); +} + +TEST(FrontEndManagerTest, Exception_Safety_FrontEnd_Convert){CHECK_EXCEPTION_FRONTEND(fe->convert(input_model))} + +TEST(FrontEndManagerTest, + Exception_Safety_FrontEnd_Convert_OV_Model){CHECK_EXCEPTION_FRONTEND(fe->convert(std::shared_ptr()))} + +TEST(FrontEndManagerTest, Exception_Safety_FrontEnd_Get_Name){CHECK_EXCEPTION_FRONTEND(fe->get_name())} + +TEST(FrontEndManagerTest, Exception_Safety_FrontEnd_Supported) { + EXPECT_ANY_THROW({ + FrontEndManager fem; + auto fe = fem.load_by_framework("mock1"); + fe->supported("throw_now"); + }); +} + +TEST(FrontEndManagerTest, Exception_Safety_FrontEnd_Add_Extension){ + CHECK_EXCEPTION_FRONTEND(fe->add_extension(std::make_shared()))} + +TEST(FrontEndManagerTest, + Exception_Safety_FrontEnd_Convert_Partially){CHECK_EXCEPTION_FRONTEND(fe->convert_partially(input_model))} + +TEST(FrontEndManagerTest, Exception_Safety_FrontEnd_Normalize){CHECK_EXCEPTION_FRONTEND(fe->normalize(nullptr))} + +TEST(FrontEndManagerTest, Exception_Safety_FrontEnd_Decode) { + CHECK_EXCEPTION_FRONTEND(fe->decode(input_model)) +} + +// InputModel exception safety + +#define CHECK_EXCEPTION_INPUT_MODEL(statement) \ + try { \ + FrontEndManager fem; \ + auto fe = fem.load_by_framework("mock1"); \ + auto input_model = fe->load("throw_model"); \ + statement; \ + FAIL() << "Throw was expected"; \ + } catch (ov::frontend::GeneralFailure & error) { \ + EXPECT_NE(std::string(error.what()).find("Test exception"), std::string::npos) << error.what(); \ + } catch (...) { \ + FAIL() << "Unexpected error is thrown"; \ + } + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_get_inputs){CHECK_EXCEPTION_INPUT_MODEL(input_model->get_inputs())} + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_get_outputs){CHECK_EXCEPTION_INPUT_MODEL(input_model->get_outputs())} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_get_place_by_tensor_name){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->get_place_by_tensor_name({}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_get_place_by_operation_name){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->get_place_by_operation_name({}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_get_place_by_operation_name_and_input_port){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->get_place_by_operation_name_and_input_port({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_get_place_by_operation_name_and_output_port){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->get_place_by_operation_name_and_output_port({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_set_name_for_tensor){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->set_name_for_tensor({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_add_name_for_tensor){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->add_name_for_tensor({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_set_name_for_operation){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->set_name_for_operation({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_free_name_for_tensor){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->free_name_for_tensor({}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_free_name_for_operation){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->free_name_for_operation({}))} 
+ +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_set_name_for_dimension){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->set_name_for_dimension({}, {}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_cut_and_add_new_input){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->cut_and_add_new_input({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_cut_and_add_new_output){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->cut_and_add_new_output({}, {}))} + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_add_output){CHECK_EXCEPTION_INPUT_MODEL(input_model->add_output({}))} + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_remove_output){CHECK_EXCEPTION_INPUT_MODEL(input_model->remove_output({}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_override_all_outputs){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->override_all_outputs({}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_override_all_inputs){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->override_all_inputs({}))} + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_extract_subgraph){CHECK_EXCEPTION_INPUT_MODEL(input_model->extract_subgraph({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_set_partial_shape){ + CHECK_EXCEPTION_INPUT_MODEL(input_model->set_partial_shape({}, {}))} + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_get_partial_shape){CHECK_EXCEPTION_INPUT_MODEL(input_model->get_partial_shape({}))} + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_set_element_type){CHECK_EXCEPTION_INPUT_MODEL(input_model->set_element_type({}, {}))} + +TEST(FrontEndManagerTest, + Exception_Safety_Input_Model_set_tensor_value){CHECK_EXCEPTION_INPUT_MODEL(input_model->set_tensor_value({}, {}))} + +TEST(FrontEndManagerTest, Exception_Safety_Input_Model_set_tensor_partial_value) { + CHECK_EXCEPTION_INPUT_MODEL(input_model->set_tensor_partial_value({}, {}, {})) +} diff --git a/src/core/tests/frontend/mock_frontend.cpp b/src/core/tests/frontend/mock_frontend.cpp index 4248a30da0d..75476c14532 100644 --- a/src/core/tests/frontend/mock_frontend.cpp +++ b/src/core/tests/frontend/mock_frontend.cpp @@ -3,6 +3,7 @@ // #include "ngraph/visibility.hpp" +#include "openvino/frontend/exception.hpp" #include "openvino/frontend/manager.hpp" #include "openvino/frontend/visibility.hpp" #include "openvino/opsets/opset8.hpp" @@ -17,19 +18,174 @@ using namespace ngraph; using namespace ov::frontend; -class InputModelMock : public InputModel {}; +class InputModelMock : public InputModel { +public: + bool m_throw = false; + + std::vector get_inputs() const override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return {}; + } + + std::vector get_outputs() const override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return {}; + } + + Place::Ptr get_place_by_tensor_name(const std::string& tensor_name) const override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return {}; + } + + Place::Ptr get_place_by_operation_name(const std::string& operation_name) const override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return {}; + } + + Place::Ptr get_place_by_operation_name_and_input_port(const std::string& operation_name, + int input_port_index) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return {}; + } + + Place::Ptr get_place_by_operation_name_and_output_port(const std::string& operation_name, + int output_port_index) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return 
{}; + } + + void set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void set_name_for_operation(const Place::Ptr& operation, const std::string& new_name) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void free_name_for_tensor(const std::string& name) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void free_name_for_operation(const std::string& name) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void set_name_for_dimension(const Place::Ptr& place, size_t shape_dim_index, const std::string& dim_name) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void cut_and_add_new_input(const Place::Ptr& place, const std::string& new_name_optional) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void cut_and_add_new_output(const Place::Ptr& place, const std::string& new_name_optional) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + Place::Ptr add_output(const Place::Ptr& place) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return {}; + } + + void remove_output(const Place::Ptr& place) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void override_all_outputs(const std::vector& outputs) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void override_all_inputs(const std::vector& inputs) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void extract_subgraph(const std::vector& inputs, const std::vector& outputs) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void set_partial_shape(const Place::Ptr& place, const PartialShape& shape) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + PartialShape get_partial_shape(const Place::Ptr& place) const override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + return {}; + } + + void set_element_type(const Place::Ptr& place, const element::Type& type) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void set_tensor_value(const Place::Ptr& place, const void* value) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } + + void set_tensor_partial_value(const Place::Ptr& place, const void* min_value, const void* max_value) override { + FRONT_END_GENERAL_CHECK(!m_throw, "Test exception"); + } +}; class FrontEndMock : public FrontEnd { + mutable bool m_throw_next{false}; + public: std::string get_name() const override { + FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception"); return "mock1"; } + bool supported_impl(const std::vector& variants) const override { + if (variants.size() == 1 && variants[0].is()) { + std::string command = variants[0].as(); + FRONT_END_GENERAL_CHECK(command != "throw_now", "Test exception"); + } + return false; + } + + void add_extension(const std::shared_ptr& extension) override { + FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception"); + } + InputModel::Ptr load_impl(const std::vector& variants) const override { - return std::make_shared(); + auto input_model = std::make_shared(); + if (variants.size() == 1 && variants[0].is()) { + std::string command = variants[0].as(); + if (command == "throw_now") { + OPENVINO_UNREACHABLE("Test throw 
load input model"); + } else if (command == "throw_next") { + m_throw_next = true; + } else if (command == "throw_model") { + input_model->m_throw = true; + } + } + return input_model; + } + + std::shared_ptr convert_partially(const InputModel::Ptr& model) const override { + FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception"); + return nullptr; + } + + std::shared_ptr decode(const InputModel::Ptr& model) const override { + FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception"); + + return nullptr; + } + + void convert(const std::shared_ptr& model) const override { + FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception"); + } + + void normalize(const std::shared_ptr& model) const override { + FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception"); } std::shared_ptr convert(const InputModel::Ptr& model) const override { + FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception"); auto shape = Shape{1, 2, 300, 300}; auto param = std::make_shared(ov::element::f32, shape); std::vector data(ov::shape_size(shape), 1.f); diff --git a/src/frontends/common/src/frontend.cpp b/src/frontends/common/src/frontend.cpp index cbc650ed1f4..14f0d12bc49 100644 --- a/src/frontends/common/src/frontend.cpp +++ b/src/frontends/common/src/frontend.cpp @@ -35,7 +35,7 @@ FrontEnd::~FrontEnd() = default; bool FrontEnd::supported_impl(const std::vector& variants) const { if (m_actual) { - return m_actual->supported_impl(variants); + FRONTEND_RETURN_STATEMENT("Check supported", m_actual->supported_impl(variants)) } return false; } @@ -44,39 +44,42 @@ InputModel::Ptr FrontEnd::load_impl(const std::vector& variants) const FRONT_END_CHECK_IMPLEMENTED(m_actual, load_impl); auto model = std::make_shared(); model->m_shared_object = m_shared_object; - model->m_actual = m_actual->load_impl(variants); + FRONTEND_CALL_STATEMENT("Loading input model", model->m_actual = m_actual->load_impl(variants)) return model; } std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const { FRONT_END_CHECK_IMPLEMENTED(m_actual, convert); - return FrontEnd::create_copy(m_actual->convert(model->m_actual), m_shared_object); + FRONTEND_RETURN_STATEMENT("Converting input model", + FrontEnd::create_copy(m_actual->convert(model->m_actual), m_shared_object)) } void FrontEnd::convert(const std::shared_ptr& model) const { FRONT_END_CHECK_IMPLEMENTED(m_actual, convert); - m_actual->convert(model); + FRONTEND_CALL_STATEMENT("Converting partially converted model", m_actual->convert(model)) } std::shared_ptr FrontEnd::convert_partially(const InputModel::Ptr& model) const { FRONT_END_CHECK_IMPLEMENTED(m_actual, convert_partially); - return FrontEnd::create_copy(m_actual->convert_partially(model->m_actual), m_shared_object); + FRONTEND_RETURN_STATEMENT("Partially convert model", + FrontEnd::create_copy(m_actual->convert_partially(model->m_actual), m_shared_object)) } std::shared_ptr FrontEnd::decode(const InputModel::Ptr& model) const { FRONT_END_CHECK_IMPLEMENTED(m_actual, decode); - return FrontEnd::create_copy(m_actual->decode(model->m_actual), m_shared_object); + FRONTEND_RETURN_STATEMENT("Decoding model", + FrontEnd::create_copy(m_actual->decode(model->m_actual), m_shared_object)) } void FrontEnd::normalize(const std::shared_ptr& model) const { FRONT_END_CHECK_IMPLEMENTED(m_actual, normalize); - m_actual->normalize(model); + FRONTEND_CALL_STATEMENT("Normalizing model", m_actual->normalize(model);) } void FrontEnd::add_extension(const std::shared_ptr& extension) { if (m_actual) { add_extension_to_shared_data(m_shared_object, extension); 
- m_actual->add_extension(extension); + FRONTEND_CALL_STATEMENT("Adding extension", m_actual->add_extension(extension)) return; } // Left unimplemented intentionally. @@ -103,5 +106,5 @@ std::string FrontEnd::get_name() const { if (!m_actual) { return {}; } - return m_actual->get_name(); + FRONTEND_RETURN_STATEMENT("Getting frontend name", m_actual->get_name();) } diff --git a/src/frontends/common/src/input_model.cpp b/src/frontends/common/src/input_model.cpp index ca37a774720..7a71ceaf6c4 100644 --- a/src/frontends/common/src/input_model.cpp +++ b/src/frontends/common/src/input_model.cpp @@ -16,28 +16,28 @@ std::vector InputModel::get_inputs() const { if (!m_actual) { return {}; } - return m_actual->get_inputs(); + FRONTEND_RETURN_STATEMENT("get_inputs", m_actual->get_inputs()) } std::vector InputModel::get_outputs() const { if (!m_actual) { return {}; } - return m_actual->get_outputs(); + FRONTEND_RETURN_STATEMENT("get_outputs", m_actual->get_outputs()) } Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensor_name) const { if (!m_actual) { return {}; } - return m_actual->get_place_by_tensor_name(tensor_name); + FRONTEND_RETURN_STATEMENT("get_place_by_tensor_name", m_actual->get_place_by_tensor_name(tensor_name)) } Place::Ptr InputModel::get_place_by_operation_name(const std::string& operation_name) const { if (!m_actual) { return {}; } - return m_actual->get_place_by_operation_name(operation_name); + FRONTEND_RETURN_STATEMENT("get_place_by_operation_name", m_actual->get_place_by_operation_name(operation_name)) } Place::Ptr InputModel::get_place_by_operation_name_and_input_port(const std::string& operation_name, @@ -45,7 +45,8 @@ Place::Ptr InputModel::get_place_by_operation_name_and_input_port(const std::str if (!m_actual) { return {}; } - return m_actual->get_place_by_operation_name_and_input_port(operation_name, input_port_index); + FRONTEND_RETURN_STATEMENT("get_place_by_operation_name_and_input_port", + m_actual->get_place_by_operation_name_and_input_port(operation_name, input_port_index)) } Place::Ptr InputModel::get_place_by_operation_name_and_output_port(const std::string& operation_name, @@ -53,96 +54,98 @@ Place::Ptr InputModel::get_place_by_operation_name_and_output_port(const std::st if (!m_actual) { return {}; } - return m_actual->get_place_by_operation_name_and_output_port(operation_name, output_port_index); + FRONTEND_RETURN_STATEMENT("get_place_by_operation_name_and_output_port", + m_actual->get_place_by_operation_name_and_output_port(operation_name, output_port_index)) } void InputModel::set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) { FRONT_END_CHECK_IMPLEMENTED(m_actual, set_name_for_tensor); - m_actual->set_name_for_tensor(tensor, new_name); + FRONTEND_CALL_STATEMENT("set_name_for_tensor", m_actual->set_name_for_tensor(tensor, new_name)) } void InputModel::add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) { FRONT_END_CHECK_IMPLEMENTED(m_actual, add_name_for_tensor); - m_actual->add_name_for_tensor(tensor, new_name); + FRONTEND_CALL_STATEMENT("add_name_for_tensor", m_actual->add_name_for_tensor(tensor, new_name)) } void InputModel::set_name_for_operation(const Place::Ptr& operation, const std::string& new_name) { FRONT_END_CHECK_IMPLEMENTED(m_actual, set_name_for_operation); - m_actual->set_name_for_operation(operation, new_name); + FRONTEND_CALL_STATEMENT("set_name_for_operation", m_actual->set_name_for_operation(operation, new_name)) } void InputModel::free_name_for_tensor(const std::string& name) 
{ FRONT_END_CHECK_IMPLEMENTED(m_actual, free_name_for_tensor); - m_actual->free_name_for_tensor(name); + FRONTEND_CALL_STATEMENT("free_name_for_tensor", m_actual->free_name_for_tensor(name)) } void InputModel::free_name_for_operation(const std::string& name) { FRONT_END_CHECK_IMPLEMENTED(m_actual, free_name_for_operation); - m_actual->free_name_for_operation(name); + FRONTEND_CALL_STATEMENT("free_name_for_operation", m_actual->free_name_for_operation(name)) } void InputModel::set_name_for_dimension(const Place::Ptr& place, size_t shape_dim_index, const std::string& dim_name) { FRONT_END_CHECK_IMPLEMENTED(m_actual, set_name_for_dimension); - m_actual->set_name_for_dimension(place, shape_dim_index, dim_name); + FRONTEND_CALL_STATEMENT("set_name_for_dimension", + m_actual->set_name_for_dimension(place, shape_dim_index, dim_name)) } void InputModel::cut_and_add_new_input(const Place::Ptr& place, const std::string& new_name_optional) { FRONT_END_CHECK_IMPLEMENTED(m_actual, cut_and_add_new_input); - m_actual->cut_and_add_new_input(place, new_name_optional); + FRONTEND_CALL_STATEMENT("cut_and_add_new_input", m_actual->cut_and_add_new_input(place, new_name_optional)) } void InputModel::cut_and_add_new_output(const Place::Ptr& place, const std::string& new_name_optional) { FRONT_END_CHECK_IMPLEMENTED(m_actual, cut_and_add_new_output); - m_actual->cut_and_add_new_output(place, new_name_optional); + FRONTEND_CALL_STATEMENT("cut_and_add_new_output", m_actual->cut_and_add_new_output(place, new_name_optional)) } Place::Ptr InputModel::add_output(const Place::Ptr& place) { FRONT_END_CHECK_IMPLEMENTED(m_actual, add_output); - return m_actual->add_output(place); + FRONTEND_RETURN_STATEMENT("add_output", m_actual->add_output(place)) } void InputModel::remove_output(const Place::Ptr& place) { FRONT_END_CHECK_IMPLEMENTED(m_actual, remove_output); - m_actual->remove_output(place); + FRONTEND_CALL_STATEMENT("remove_output", m_actual->remove_output(place)) } void InputModel::override_all_outputs(const std::vector& outputs) { FRONT_END_CHECK_IMPLEMENTED(m_actual, override_all_outputs); - m_actual->override_all_outputs(outputs); + FRONTEND_CALL_STATEMENT("override_all_outputs", m_actual->override_all_outputs(outputs)) } void InputModel::override_all_inputs(const std::vector& inputs) { FRONT_END_CHECK_IMPLEMENTED(m_actual, override_all_inputs); - m_actual->override_all_inputs(inputs); + FRONTEND_CALL_STATEMENT("override_all_inputs", m_actual->override_all_inputs(inputs)) } void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { FRONT_END_CHECK_IMPLEMENTED(m_actual, extract_subgraph); - m_actual->extract_subgraph(inputs, outputs); + FRONTEND_CALL_STATEMENT("extract_subgraph", m_actual->extract_subgraph(inputs, outputs)) } // Setting tensor properties void InputModel::set_partial_shape(const Place::Ptr& place, const PartialShape& shape) { FRONT_END_CHECK_IMPLEMENTED(m_actual, set_partial_shape); - m_actual->set_partial_shape(place, shape); + FRONTEND_CALL_STATEMENT("set_partial_shape", m_actual->set_partial_shape(place, shape)) } PartialShape InputModel::get_partial_shape(const Place::Ptr& place) const { FRONT_END_CHECK_IMPLEMENTED(m_actual, get_partial_shape); - return m_actual->get_partial_shape(place); + FRONTEND_RETURN_STATEMENT("get_partial_shape", m_actual->get_partial_shape(place)) } void InputModel::set_element_type(const Place::Ptr& place, const element::Type& type) { FRONT_END_CHECK_IMPLEMENTED(m_actual, set_element_type); - m_actual->set_element_type(place, type); + 
FRONTEND_CALL_STATEMENT("set_element_type", m_actual->set_element_type(place, type)) } void InputModel::set_tensor_value(const Place::Ptr& place, const void* value) { FRONT_END_CHECK_IMPLEMENTED(m_actual, set_tensor_value); - m_actual->set_tensor_value(place, value); + FRONTEND_CALL_STATEMENT("set_tensor_value", m_actual->set_tensor_value(place, value)) } void InputModel::set_tensor_partial_value(const Place::Ptr& place, const void* min_value, const void* max_value) { FRONT_END_CHECK_IMPLEMENTED(m_actual, set_tensor_partial_value); - m_actual->set_tensor_partial_value(place, min_value, max_value); + FRONTEND_CALL_STATEMENT("set_tensor_partial_value", m_actual->set_tensor_partial_value(place, min_value, max_value)) } diff --git a/src/frontends/common/src/utils.hpp b/src/frontends/common/src/utils.hpp index 9256c55de08..4464af632a4 100644 --- a/src/frontends/common/src/utils.hpp +++ b/src/frontends/common/src/utils.hpp @@ -6,6 +6,41 @@ #include "openvino/frontend/visibility.hpp" +#define RETHROW_FRONTEND_EXCEPTION(Type) \ + catch (const Type& ex) { \ + throw Type(ex); \ + } + +#define FRONTEND_CALL_STATEMENT(MESSAGE, ...) \ + try { \ + __VA_ARGS__; \ + } \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::GeneralFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::OpValidationFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::InitializationFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::OpConversionFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::NotImplementedFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::AssertFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::Exception) \ + catch (...) { \ + OPENVINO_ASSERT(false, (MESSAGE)); \ + } + +#define FRONTEND_RETURN_STATEMENT(MESSAGE, FUNCTION) \ + try { \ + return FUNCTION; \ + } \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::GeneralFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::OpValidationFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::InitializationFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::OpConversionFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::frontend::NotImplementedFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::AssertFailure) \ + RETHROW_FRONTEND_EXCEPTION(ov::Exception) \ + catch (...) 
{ \ + OPENVINO_ASSERT(false, (MESSAGE)); \ + } + namespace ov { namespace frontend { std::string get_frontend_library_path(); diff --git a/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp b/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp index a51e55ad3aa..6d0944e2297 100644 --- a/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp +++ b/src/tests/functional/inference_engine/ir_serialization/partial_shape_deserialization.cpp @@ -296,7 +296,8 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicRankNegative) )V0G0N"; - ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception); + // TODO: change to ov::Exception (69781) + ASSERT_ANY_THROW(getWithIRFrontend(model)); } TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicDimNegative) { @@ -322,7 +323,8 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestDynamicDimNegative) { )V0G0N"; - ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception); + // TODO: change to ov::Exception (69781) + ASSERT_ANY_THROW(getWithIRFrontend(model)); } TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongDim) { @@ -348,7 +350,8 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongDim) { )V0G0N"; - ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception); + // TODO: change to ov::Exception (69781) + ASSERT_ANY_THROW(getWithIRFrontend(model)); } TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongBoundary) { @@ -374,5 +377,6 @@ TEST_F(PartialShapeDeserialization, ShapeWithBoundariesTestWrongBoundary) { )V0G0N"; - ASSERT_THROW(getWithIRFrontend(model), InferenceEngine::Exception); + // TODO: change to ov::Exception (69781) + ASSERT_ANY_THROW(getWithIRFrontend(model)); } From 4057e408d8d958fba748c6522957ff8dc867f4d7 Mon Sep 17 00:00:00 2001 From: yanlan song Date: Fri, 1 Apr 2022 11:09:22 +0800 Subject: [PATCH 03/16] Bell/shape auto (#11284) * Fix batchability check of MAX_BATCH_SIZE * Applied review comment * clonenetwork in auto Signed-off-by: fishbell * clone in correct way Signed-off-by: fishbell Co-authored-by: Taylor Yeonbok Lee --- src/plugins/auto/plugin.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/plugins/auto/plugin.cpp b/src/plugins/auto/plugin.cpp index c5472151fcc..28402109c8e 100644 --- a/src/plugins/auto/plugin.cpp +++ b/src/plugins/auto/plugin.cpp @@ -20,7 +20,7 @@ #include "plugin.hpp" #include #include - +#include #include "itt.hpp" // ------------------------------MultiDeviceInferencePlugin---------------------------- namespace MultiDevicePlugin { @@ -345,7 +345,10 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons strDevices += ((iter + 1) == supportDevices.end()) ? 
"" : ","; LOG_INFO("[AUTOPLUGIN]:device:%s, priority:%ld", iter->deviceName.c_str(), iter->devicePriority); } - return std::make_shared(modelPath, network, supportDevices, strDevices, this, context, context.needPerfCounters); + // clone the network, in case of reshape conflict + CNNNetwork clonedNetwork = InferenceEngine::details::cloneNetwork(network); + + return std::make_shared(modelPath, clonedNetwork, supportDevices, strDevices, this, context, context.needPerfCounters); } OV_ITT_SCOPED_TASK(itt::domains::MULTIPlugin, "MultiDeviceInferencePlugin::LoadNetworkImpl:MultiMode"); if (priorities == fullConfig.end()) { From 070f27a0892661367ca92f081aa6994d0c3be004 Mon Sep 17 00:00:00 2001 From: Bo Liu Date: Fri, 1 Apr 2022 14:37:28 +0800 Subject: [PATCH 04/16] Paddle FasterRCNN Ops Conversion: roi_align, strided_slice, where (#10893) * Paddle FasterRCNN Ops Conversion: roi_align, strided_slice, where * add check for 'aligned' feature of 'roi_align' op; use common function for idx_node in 'striede_slice' op * Apply suggestions from code review * use common funciton for stride_slice and slice, OP_CHECK for 'where' op conversion * Apply suggestions from code review --- src/core/tests/frontend/paddle/op_fuzzy.cpp | 14 ++ .../gen_scripts/generate_roi_align.py | 114 +++++++++++++++ .../gen_scripts/generate_strided_slice.py | 134 ++++++++++++++++++ .../test_models/gen_scripts/generate_where.py | 69 +++++++++ src/frontends/paddle/src/op/roi_align.cpp | 52 +++++++ src/frontends/paddle/src/op/slice.cpp | 93 +----------- src/frontends/paddle/src/op/slice_ops.hpp | 123 ++++++++++++++++ src/frontends/paddle/src/op/strided_slice.cpp | 16 +++ src/frontends/paddle/src/op/where.cpp | 28 ++++ src/frontends/paddle/src/op_table.cpp | 6 + 10 files changed, 558 insertions(+), 91 deletions(-) create mode 100644 src/core/tests/frontend/paddle/test_models/gen_scripts/generate_roi_align.py create mode 100644 src/core/tests/frontend/paddle/test_models/gen_scripts/generate_strided_slice.py create mode 100644 src/core/tests/frontend/paddle/test_models/gen_scripts/generate_where.py create mode 100644 src/frontends/paddle/src/op/roi_align.cpp create mode 100644 src/frontends/paddle/src/op/slice_ops.hpp create mode 100644 src/frontends/paddle/src/op/strided_slice.cpp create mode 100644 src/frontends/paddle/src/op/where.cpp diff --git a/src/core/tests/frontend/paddle/op_fuzzy.cpp b/src/core/tests/frontend/paddle/op_fuzzy.cpp index b7637fa67b8..b5009de3f6a 100644 --- a/src/core/tests/frontend/paddle/op_fuzzy.cpp +++ b/src/core/tests/frontend/paddle/op_fuzzy.cpp @@ -256,6 +256,8 @@ static const std::vector models{std::string("argmax"), std::string("rnn_lstm_layer_2_forward"), std::string("rnn_lstm_layer_1_forward_seq_len_4"), std::string("rnn_lstm_layer_2_bidirectional_seq_len_4"), + std::string("roi_align_test"), + std::string("roi_align_test2"), std::string("scale_bias_after_float32"), std::string("scale_bias_after_int32"), std::string("scale_bias_after_int64"), @@ -290,6 +292,15 @@ static const std::vector models{std::string("argmax"), std::string("stack_test_int32"), std::string("stack_test_neg_axis"), std::string("stack_test_none_axis"), + std::string("strided_slice_input1_1"), + std::string("strided_slice_input1_2"), + std::string("strided_slice_input1_3"), + std::string("strided_slice_input1_4"), + std::string("strided_slice_input2_1"), + std::string("strided_slice_input2_2"), + std::string("strided_slice_input2_3"), + std::string("strided_slice_input3_1"), + std::string("strided_slice_input3_2"), std::string("tanh"), 
std::string("trilinear_downsample_false_0"), std::string("trilinear_downsample_false_1"), @@ -300,6 +311,9 @@ static const std::vector models{std::string("argmax"), std::string("trilinear_upsample_scales2"), std::string("trilinear_upsample_true_0"), std::string("unsqueeze"), + std::string("where_1"), + std::string("where_2"), + std::string("where_3"), // Temporily disable them until root caused to secure CI stable. // CVS-66703 to track this. // std::string("yolo_box_clip_box"), diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_roi_align.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_roi_align.py new file mode 100644 index 00000000000..2475a5ed666 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_roi_align.py @@ -0,0 +1,114 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# roi_align paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def make_rois(batch_size, width, height, pooled_width, pooled_height, spatial_scale, roi_per_batch): + rois = [] + rois_num = [] + for bno in range(batch_size): + for i in range(roi_per_batch): + x1 = np.random.randint( + 0, width // spatial_scale - pooled_width) + y1 = np.random.randint( + 0, height // spatial_scale - pooled_height) + + x2 = np.random.randint(x1 + pooled_width, + width // spatial_scale) + y2 = np.random.randint( + y1 + pooled_height, height // spatial_scale) + + roi = [x1, y1, x2, y2] + rois.append(roi) + rois_num.append(len(rois)) + rois = np.array(rois).astype("float32") + rois_num = np.array(rois_num).astype("int32") + + return rois, rois_num + + +def roi_align(name: str, x_data, rois_data, rois_num_data, pooled_height, pooled_width, spatial_scale, sampling_ratio): + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + x = paddle.static.data( + name='x', shape=x_data.shape, dtype=x_data.dtype) + rois = paddle.static.data( + name='rois', shape=rois_data.shape, dtype=rois_data.dtype) + rois_num = paddle.static.data( + name='rois_num', shape=rois_num_data.shape, dtype=rois_num_data.dtype) + # TODO: 'aligned' attribute is not supported by Paddle 2.1 + out = paddle.fluid.layers.roi_align(input=x, + rois=rois, + pooled_height=pooled_height, + pooled_width=pooled_width, + spatial_scale=spatial_scale, + sampling_ratio=sampling_ratio, + rois_num=rois_num) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x_data, 'rois': rois_data, 'rois_num': rois_num_data}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'rois', 'rois_num'], fetchlist=[out], inputs=[ + x_data, rois_data, rois_num_data], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + batch_size = 1 + channels = 3 + height = 8 + width = 6 + + x_dim = (batch_size, channels, height, width) + x = np.random.random(x_dim).astype('float32') + + spatial_scale = 1.0 / 2.0 + pooled_height = 2 + pooled_width = 2 + sampling_ratio = -1 + + roi_per_batch = 1 + rois, rois_num = make_rois(batch_size, width, height, pooled_width, + pooled_height, spatial_scale, roi_per_batch) + + roi_align("roi_align_test", x, rois, rois_num, pooled_height, + pooled_width, spatial_scale, sampling_ratio) + + batch_size = 1 + channels = 3 + height = 8 + width = 6 + + x_dim = (batch_size, channels, height, width) + x = np.random.random(x_dim).astype('float32') + + spatial_scale = 1.0 / 2.0 + pooled_height = 2 + pooled_width = 2 + sampling_ratio = 2 + + roi_per_batch = 2 + rois, rois_num = make_rois(batch_size, width, height, pooled_width, + pooled_height, spatial_scale, roi_per_batch) + + roi_align("roi_align_test2", x, rois, rois_num, pooled_height, + pooled_width, spatial_scale, sampling_ratio) + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_strided_slice.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_strided_slice.py new file mode 100644 index 00000000000..2c1b29ee2d8 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_strided_slice.py @@ -0,0 +1,134 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# strided_slice paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def strided_slice(name: str, input_data, attrs: dict): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + Input = paddle.static.data( + name='x', shape=input_data.shape, dtype=input_data.dtype) + + out = paddle.fluid.layers.strided_slice(Input, axes=attrs['axes'], + starts=attrs['starts'], + ends=attrs['ends'], + strides=attrs['strides']) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': input_data}, + fetch_list=[out]) + + # Save inputs in order of ngraph function, to facilitate the Fuzzy test, + # which accepts inputs and outputs in this order as well.
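+ # saveModel stores the program together with these sample inputs/outputs so the C++ fuzzy test can replay them.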
+ saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[input_data], outputs=[outs[0]], target_dir=sys.argv[1]) + return outs + + +if __name__ == "__main__": + + strided_slice_input1_1 = { + 'name': "strided_slice_input1_1", + 'axes': np.array([0]).astype('int32').tolist(), + 'starts': np.array([-4]).astype('int32').tolist(), + 'ends': np.array([-3]).astype('int32').tolist(), + 'strides': np.array([1]).astype('int32').tolist() + } + + strided_slice_input1_2 = { + 'name': "strided_slice_input1_2", + 'axes': np.array([0]).astype('int32').tolist(), + 'starts': np.array([3]).astype('int32').tolist(), + 'ends': np.array([8]).astype('int32').tolist(), + 'strides': np.array([1]).astype('int32').tolist() + } + + strided_slice_input1_3 = { + 'name': "strided_slice_input1_3", + 'axes': np.array([0]).astype('int32').tolist(), + 'starts': np.array([5]).astype('int32').tolist(), + 'ends': np.array([0]).astype('int32').tolist(), + 'strides': np.array([-1]).astype('int32').tolist() + } + + strided_slice_input1_4 = { + 'name': "strided_slice_input1_4", + 'axes': np.array([0]).astype('int32').tolist(), + 'starts': np.array([-1]).astype('int32').tolist(), + 'ends': np.array([-3]).astype('int32').tolist(), + 'strides': np.array([-1]).astype('int32').tolist() + } + + strided_slice_input2_1 = { + 'name': "strided_slice_input2_1", + 'axes': np.array([0, 1, 2]).astype('int32').tolist(), + 'starts': np.array([1, 0, 0]).astype('int32').tolist(), + 'ends': np.array([2, 1, 3]).astype('int32').tolist(), + 'strides': np.array([1, 1, 1]).astype('int32').tolist() + } + + strided_slice_input2_2 = { + 'name': "strided_slice_input2_2", + 'axes': np.array([0, 1, 2]).astype('int32').tolist(), + 'starts': np.array([1, -1, 0]).astype('int32').tolist(), + 'ends': np.array([2, -3, 3]).astype('int32').tolist(), + 'strides': np.array([1, -1, 1]).astype('int32').tolist() + } + + strided_slice_input2_3 = { + 'name': "strided_slice_input2_3", + 'axes': np.array([0, 1, 2]).astype('int32').tolist(), + 'starts': np.array([1, 0, 0]).astype('int32').tolist(), + 'ends': np.array([2, 2, 3]).astype('int32').tolist(), + 'strides': np.array([1, 1, 1]).astype('int32').tolist() + } + + strided_slice_input3_1 = { + 'name': "strided_slice_input3_1", + 'axes': np.array([1]).astype('int32').tolist(), + 'starts': np.array([1]).astype('int32').tolist(), + 'ends': np.array([2]).astype('int32').tolist(), + 'strides': np.array([1]).astype('int32').tolist() + } + + strided_slice_input3_2 = { + 'name': "strided_slice_input3_2", + 'axes': np.array([1]).astype('int32').tolist(), + 'starts': np.array([-1]).astype('int32').tolist(), + 'ends': np.array([-2]).astype('int32').tolist(), + 'strides': np.array([-1]).astype('int32').tolist() + } + + strided_slice_input1_list = [strided_slice_input1_1, + strided_slice_input1_2, strided_slice_input1_3, strided_slice_input1_4] + + strided_slice_input2_list = [strided_slice_input2_1, + strided_slice_input2_2, strided_slice_input2_3] + + strided_slice_input3_list = [ + strided_slice_input3_1, strided_slice_input3_2] + + input1 = np.random.rand(100).astype('float32') + for item in strided_slice_input1_list: + pred_paddle = strided_slice(item['name'], input1, item) + + input2 = np.random.rand(5, 5, 5).astype('int32') + for item in strided_slice_input2_list: + pred_paddle = strided_slice(item['name'], input2, item) + + input3 = np.random.rand(1, 100, 1).astype('float32') + for item in strided_slice_input3_list: + pred_paddle = strided_slice(item['name'], input3, item) diff --git 
a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_where.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_where.py new file mode 100644 index 00000000000..400d7732879 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_where.py @@ -0,0 +1,69 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# where paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def where(name, test_x, test_y, test_cond): + import paddle + paddle.enable_static() + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + X_Node = paddle.static.data( + name='x', shape=test_x.shape, dtype=test_x.dtype) + Y_Node = paddle.static.data( + name='y', shape=test_y.shape, dtype=test_y.dtype) + Cond_Node = paddle.static.data( + name='cond', shape=test_cond.shape, dtype=test_cond.dtype) + + Cond_Node_bl = paddle.fluid.layers.cast(Cond_Node, "bool") + + out = paddle.where(Cond_Node_bl, X_Node, Y_Node) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': test_x, 'y': test_y, 'cond': test_cond}, + fetch_list=[out] + ) + + saveModel(name, exe, feedkeys=['x', 'y', 'cond'], fetchlist=[out], inputs=[ + test_x, test_y, test_cond], outputs=[outs[0]], target_dir=sys.argv[1]) + + +def main(): + + test_cases = [ + { + "name": "where_1", + "x": np.random.uniform(-3, 5, (100)).astype("float32"), + "y": np.random.uniform(-3, 5, (100)).astype("float32"), + "cond": np.zeros((100)).astype("int32") + }, + { + "name": "where_2", + "x": np.random.uniform(-5, 5, (60, 2)).astype("int32"), + "y": np.random.uniform(-5, 5, (60, 2)).astype("int32"), + "cond": np.ones((60, 2)).astype("int32") + }, + { + "name": "where_3", + "x": np.random.uniform(-3, 5, (20, 2, 4)).astype("float32"), + "y": np.random.uniform(-3, 5, (20, 2, 4)).astype("float32"), + "cond": np.array(np.random.randint(2, size=(20, 2, 4)), dtype="int32") + } + ] + for test in test_cases: + where(test['name'], test['x'], test['y'], test['cond']) + + +if __name__ == "__main__": + main() diff --git a/src/frontends/paddle/src/op/roi_align.cpp b/src/frontends/paddle/src/op/roi_align.cpp new file mode 100644 index 00000000000..51347182050 --- /dev/null +++ b/src/frontends/paddle/src/op/roi_align.cpp @@ -0,0 +1,52 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs roi_align(const NodeContext& node) { + const auto data_node = node.get_input("X"); + const auto roi_node = node.get_input("ROIs"); + // TODO: support 'aligned' feature #82319 + const auto aligned = node.get_attribute<bool>("aligned", false); + PADDLE_OP_CHECK(node, !aligned, "OpenVINO does not support the 'aligned' feature!"); + + // TODO: support multiple batches #83232 + if (data_node.get_partial_shape().rank().is_static() && data_node.get_partial_shape()[0].is_static()) + PADDLE_OP_CHECK(node, data_node.get_partial_shape()[0] == 1, "roi_align currently only supports batch_size = 1!"); + + const auto roi_node_shape = std::make_shared<default_opset::ShapeOf>(roi_node, element::i32); + const auto start = default_opset::Constant::create(element::i64,
{1}, {0}); + const auto stop = default_opset::Constant::create(element::i64, {1}, {1}); + const auto step = default_opset::Constant::create(element::i64, {1}, {1}); + const auto roisNum = std::make_shared(roi_node_shape, start, stop, step); + + const auto zero_const = std::make_shared(element::i32, Shape{1}, 0); + const auto fake_roisNum_node = std::make_shared(zero_const, roisNum); + + const auto pooled_h = node.get_attribute("pooled_height", 1); + const auto pooled_w = node.get_attribute("pooled_width", 1); + const auto spatial_scale = node.get_attribute("spatial_scale", 1.0); + auto sampling_ratio = node.get_attribute("sampling_ratio", -1); + sampling_ratio = (sampling_ratio <= 0) ? 0 : sampling_ratio; + + // Paddle only use 'avg' interpolation mode + return node.default_single_output_mapping({std::make_shared(data_node, + roi_node, + fake_roisNum_node, + pooled_h, + pooled_w, + sampling_ratio, + spatial_scale, + "avg")}, + {"Out"}); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op/slice.cpp b/src/frontends/paddle/src/op/slice.cpp index 382cea40317..fdbb317ad01 100644 --- a/src/frontends/paddle/src/op/slice.cpp +++ b/src/frontends/paddle/src/op/slice.cpp @@ -1,103 +1,14 @@ // Copyright (C) 2018-2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // - -#include - -#include "default_opset.hpp" -#include "openvino/frontend/paddle/node_context.hpp" +#include "slice_ops.hpp" namespace ov { namespace frontend { namespace paddle { namespace op { -using namespace default_opset; NamedOutputs slice(const NodeContext& node) { - auto data = node.get_input("Input"); - auto axes = node.get_attribute>("axes"); - Output start_idx_node, end_idx_node; - if (node.has_input("StartsTensor")) { - start_idx_node = node.get_input("StartsTensor"); - } else if (node.has_input("StartsTensorList")) { - auto inputs = node.get_ng_inputs("StartsTensorList"); - start_idx_node = std::make_shared(inputs, 0); - } else { - auto starts = node.get_attribute>("starts"); - start_idx_node = Constant::create(element::i32, {starts.size()}, starts); - } - - if (node.has_input("EndsTensor")) { - end_idx_node = node.get_input("EndsTensor"); - } else if (node.has_input("EndsTensorList")) { - auto inputs = node.get_ng_inputs("EndsTensorList"); - end_idx_node = std::make_shared(inputs, 0); - } else { - auto ends = node.get_attribute>("ends"); - end_idx_node = Constant::create(element::i32, {ends.size()}, ends); - } - - // The following process is: - // Given: - // data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] // shape is: [2, 4] - // axes = [0] - // starts = [1] - // ends = [2] - // Our process is: - // 1. Get 'axes': [0, 1], 'starts', 'ends' - // 2. Get data shape: [2,4] and dims: 2 - // 3. Create two tensor t1 and t2, shape is the dims from step2: 2. t1: [0, 0], t2: [INT_MAX, INT_MAX] - // 4. Use 'ScatterNDUpdate' to update some elements in t1, the updated indexes are coming from 'axes', the contents - // are coming from 'starts', t1: [1, 0]; apply the similar process to t2 - // 5. Call 'StrideSlice' with t1 and t2 - // Why using ScatterNDUpdate is that 'axes' may be discontinuous. 
- - // the shape of input, such as [2, 4] - auto shape_node = std::make_shared(data, element::Type_t::i32); - // the input dim, such as [2] - auto shape_shape_node = std::make_shared(shape_node, element::i32); - auto const_0_node = Constant::create(element::i32, {}, {0}); - auto const_max_node = Constant::create(element::i32, {}, {INT_MAX}); - // t1: [0, 0] - auto start_node = std::make_shared(const_0_node, shape_shape_node); - // t2: [INT_MAX, INT_MAX] - auto end_node = std::make_shared(const_max_node, shape_shape_node); - auto axes_node = Constant::create(element::i32, {axes.size(), 1}, axes); - // update t1 - auto fixed_start_node = std::make_shared(start_node, axes_node, start_idx_node); - // update t2 - auto fixed_end_node = std::make_shared(end_node, axes_node, end_idx_node); - - auto stride_slice_node = std::make_shared(data, - fixed_start_node, - fixed_end_node, - std::vector{0}, - std::vector{0}); - - auto decrease_axis = node.get_attribute>("decrease_axis"); - - if (decrease_axis.size() > 0) { - // according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar. - // Ref: paddle/fluid/operators/slice_op.h - PartialShape input_shape = data.get_partial_shape(); - PADDLE_OP_CHECK(node, - input_shape.rank().is_static(), - "input rank of slice must be static when decrease_axis is set."); - - auto squeeze_index_node = Constant::create(element::i32, {decrease_axis.size()}, decrease_axis); - auto decreased_node = std::make_shared(stride_slice_node, squeeze_index_node); - - auto input_rank = input_shape.rank().get_length(); - if (input_rank == decrease_axis.size()) { - auto restore_node = std::make_shared(decreased_node, - std::make_shared(element::i64, Shape{1}, 1), - false); // restore to shape (1,) - return node.default_single_output_mapping({restore_node}, {"Out"}); - } - - return node.default_single_output_mapping({decreased_node}, {"Out"}); - } - - return node.default_single_output_mapping({stride_slice_node}, {"Out"}); + return slice_op(node, false); } } // namespace op } // namespace paddle diff --git a/src/frontends/paddle/src/op/slice_ops.hpp b/src/frontends/paddle/src/op/slice_ops.hpp new file mode 100644 index 00000000000..8896097ac98 --- /dev/null +++ b/src/frontends/paddle/src/op/slice_ops.hpp @@ -0,0 +1,123 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +namespace { +Output idx_node(const std::string& tensor_alias, + const std::string& list_alias, + const std::string& attr_alias, + const NodeContext& node) { + if (node.has_input(tensor_alias)) { + return std::make_shared(node.get_input(tensor_alias), element::i32); + } else if (node.has_input(list_alias)) { + auto inputs = node.get_ng_inputs(list_alias); + return std::make_shared(std::make_shared(inputs, 0), + element::i32); + } else { + auto values = node.get_attribute>(attr_alias); + return default_opset::Constant::create(element::i32, {values.size()}, values); + } +} +NamedOutputs slice_op(const NodeContext& node, const bool& stride_input) { + const auto data = node.get_input("Input"); + const auto axes = node.get_attribute>("axes"); + + Output start_idx_node = idx_node("StartsTensor", "StartsTensorList", "starts", node); + Output end_idx_node = idx_node("EndsTensor", "EndsTensorList", "ends", node); + Output strides_idx_node; + if (stride_input) + strides_idx_node = 
idx_node("StridesTensor", "StridesTensorList", "strides", node); + + // The following process is: + // Given: + // data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] // shape is: [2, 4] + // axes = [0] + // starts = [1] + // ends = [2] + // Our process is: + // 1. Get 'axes': [0, 1], 'starts', 'ends' + // 2. Get data shape: [2,4] and dims: 2 + // 3. Create two tensor t1 and t2, shape is the dims from step2: 2. t1: [0, 0], t2: [INT_MAX, INT_MAX] + // 4. Use 'ScatterNDUpdate' to update some elements in t1, the updated indexes are coming from 'axes', the contents + // are coming from 'starts', t1: [1, 0]; apply the similar process to t2 + // 5. Call 'StrideSlice' with t1 and t2 + // Why using ScatterNDUpdate is that 'axes' may be discontinuous. + + // the shape of input, such as [2, 4] + const auto shape_node = std::make_shared(data, element::Type_t::i32); + // the input dim, such as [2] + const auto rank_node = std::make_shared(shape_node, element::i32); + const auto const_0_node = default_opset::Constant::create(element::i32, {}, {0}); + const auto const_max_node = default_opset::Constant::create(element::i32, {}, {INT_MAX}); + const auto const_1_node = default_opset::Constant::create(element::i32, {}, {1}); + // t1: [0, 0] + const auto start_node = std::make_shared(const_0_node, rank_node); + // t2: [INT_MAX, INT_MAX] + const auto end_node = std::make_shared(const_max_node, rank_node); + const auto strides_node = std::make_shared(const_1_node, rank_node); + const auto axes_node = default_opset::Constant::create(element::i32, {axes.size(), 1}, axes); + // update t1 + const auto fixed_start_node = + std::make_shared(start_node, axes_node, start_idx_node); + // update t2 + const auto fixed_end_node = std::make_shared(end_node, axes_node, end_idx_node); + std::shared_ptr stride_slice_node; + if (stride_input) { + const auto fixed_strides_node = + std::make_shared(strides_node, axes_node, strides_idx_node); + + stride_slice_node = std::make_shared(data, + fixed_start_node, + fixed_end_node, + fixed_strides_node, + std::vector{0}, + std::vector{0}); + } else { + stride_slice_node = std::make_shared(data, + fixed_start_node, + fixed_end_node, + std::vector{0}, + std::vector{0}); + } + + const auto decrease_axis = node.get_attribute>("decrease_axis"); + + if (decrease_axis.size() > 0) { + // according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar. 
+ // Ref: paddle/fluid/operators/slice_op.h + PartialShape input_shape = data.get_partial_shape(); + PADDLE_OP_CHECK(node, + input_shape.rank().is_static(), + "input rank of slice must be static when decrease_axis is set."); + + const auto squeeze_index_node = + default_opset::Constant::create(element::i32, {decrease_axis.size()}, decrease_axis); + const auto decreased_node = std::make_shared(stride_slice_node, squeeze_index_node); + + const auto input_rank = input_shape.rank().get_length(); + if (input_rank == decrease_axis.size()) { + auto restore_node = std::make_shared( + decreased_node, + std::make_shared(element::i64, Shape{1}, 1), + false); // restore to shape (1,) + return node.default_single_output_mapping({restore_node}, {"Out"}); + } + + return node.default_single_output_mapping({decreased_node}, {"Out"}); + } + + return node.default_single_output_mapping({stride_slice_node}, {"Out"}); +} +} // namespace +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op/strided_slice.cpp b/src/frontends/paddle/src/op/strided_slice.cpp new file mode 100644 index 00000000000..01e31db9ef2 --- /dev/null +++ b/src/frontends/paddle/src/op/strided_slice.cpp @@ -0,0 +1,16 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "slice_ops.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs strided_slice(const NodeContext& node) { + return slice_op(node, true); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op/where.cpp b/src/frontends/paddle/src/op/where.cpp new file mode 100644 index 00000000000..56327493fac --- /dev/null +++ b/src/frontends/paddle/src/op/where.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs where(const NodeContext& node) { + const auto condition_node = node.get_input("Condition"); + const auto x_node = node.get_input("X"); + const auto y_node = node.get_input("Y"); + // TODO: support 'shape x != shape y' #83233 + const auto x_shape = x_node.get_partial_shape(); + const auto y_shape = y_node.get_partial_shape(); + PADDLE_OP_CHECK(node, x_shape.compatible(y_shape), "shape x should be compatible to shape y!"); + + return node.default_single_output_mapping( + {std::make_shared(condition_node, x_node, y_node, ov::op::AutoBroadcastType::PDPD)}, + {"Out"}); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp index 040c476ebb9..797e45b6824 100644 --- a/src/frontends/paddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -71,6 +71,7 @@ OP_CONVERTER(relu); OP_CONVERTER(relu6); OP_CONVERTER(reshape2); OP_CONVERTER(rnn); +OP_CONVERTER(roi_align); OP_CONVERTER(scale); OP_CONVERTER(shape); OP_CONVERTER(slice); @@ -80,10 +81,12 @@ OP_CONVERTER(sigmoid); OP_CONVERTER(split); OP_CONVERTER(squeeze); OP_CONVERTER(stack); +OP_CONVERTER(strided_slice); OP_CONVERTER(tanh); OP_CONVERTER(transpose2); OP_CONVERTER(trilinear_interp_v2); OP_CONVERTER(unsqueeze); +OP_CONVERTER(where); OP_CONVERTER(yolo_box); } // 
namespace op std::map get_supported_ops() { @@ -157,6 +160,7 @@ std::map get_supported_ops() { {"relu6", op::relu6}, {"reshape2", op::reshape2}, {"rnn", op::rnn}, + {"roi_align", op::roi_align}, {"scale", op::scale}, {"shape", op::shape}, {"slice", op::slice}, @@ -166,11 +170,13 @@ std::map get_supported_ops() { {"split", op::split}, {"squeeze2", op::squeeze}, {"stack", op::stack}, + {"strided_slice", op::strided_slice}, {"sync_batch_norm", op::batch_norm}, {"tanh", op::tanh}, {"transpose2", op::transpose2}, {"trilinear_interp_v2", op::trilinear_interp_v2}, {"unsqueeze2", op::unsqueeze}, + {"where", op::where}, {"yolo_box", op::yolo_box}}; }; From 8ab5dbade0cb9fd6df37526ef6ade237da7064ed Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 1 Apr 2022 10:16:14 +0300 Subject: [PATCH 05/16] Revert "Add constant folding to hetero to avoid dynamism on GPU (#10572)" (#11370) This reverts commit 5b18677f1bb2a7e5561cd4786e77b93491c9ff3d. --- src/plugins/hetero/executable_network.cpp | 31 +++++++++-------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/src/plugins/hetero/executable_network.cpp b/src/plugins/hetero/executable_network.cpp index 58e1c87d726..6c587add0a7 100644 --- a/src/plugins/hetero/executable_network.cpp +++ b/src/plugins/hetero/executable_network.cpp @@ -39,7 +39,6 @@ #include "plugin.hpp" #include -#include #include #include #include @@ -48,7 +47,6 @@ #include #include #include -#include // clang-format on using namespace InferenceEngine; @@ -60,22 +58,17 @@ using namespace InferenceEngine::HeteroConfigParams; template using NodeMap = std::unordered_map; -HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& originalNetwork, +HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network, const Engine::Configs& config, Engine* plugin) : InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, std::make_shared()), _heteroPlugin{plugin}, - _name{originalNetwork.getName()}, + _name{network.getName()}, _config{config} { - auto clonned_network = InferenceEngine::details::cloneNetwork(originalNetwork); - auto clonned_function = clonned_network.getFunction(); - IE_ASSERT(clonned_function != nullptr); - - ngraph::pass::Manager manager; - manager.register_pass(); - manager.run_passes(clonned_function); - + auto function = network.getFunction(); + IE_ASSERT(function != nullptr); + auto clonedFunction = ngraph::clone_function(*function); bool dumpDotFile = false; if (std::getenv("OPENVINO_HETERO_VISUALIZE")) { dumpDotFile = true; @@ -85,7 +78,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo } QueryNetworkResult queryNetworkResult; - auto orderedOps = clonned_function->get_ordered_ops(); + auto orderedOps = clonedFunction->get_ordered_ops(); bool allEmpty = true; // Get user defined affinity for (auto&& node : orderedOps) { @@ -104,7 +97,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo it = _config.find(ov::device::priorities.name()); } if (it != _config.end()) { - queryNetworkResult = _heteroPlugin->QueryNetwork(clonned_network, _config); + queryNetworkResult = _heteroPlugin->QueryNetwork(network, _config); } else { IE_THROW() << "The '" << ov::device::priorities.name() << "' option was not defined for heterogeneous plugin"; @@ -120,7 +113,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo }; // Set results, constants and parameters affinity - for (auto&& node : clonned_function->get_ops()) { + for 
(auto&& node : clonedFunction->get_ops()) { if (ngraph::op::is_constant(node) || ngraph::op::is_output(node) || ngraph::op::is_parameter(node)) { if (!contains(queryNetworkResult.supportedLayersMap, node->get_friendly_name())) { auto& nodeWithAffinityName = @@ -201,7 +194,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo colorIndex++; } }} - .run_on_model(ngraph::clone_function(*clonned_function)); + .run_on_model(ngraph::clone_function(*function)); } NodeMap nodeInputDependencies; @@ -334,7 +327,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo itLabel->pop_back(); (*itLabel) += label; }} - .run_on_model(std::const_pointer_cast(clonned_function)); + .run_on_model(std::const_pointer_cast(function)); } // Break graph using insertion of result parameter split @@ -451,8 +444,8 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo std::move(std::begin(newOrderedSubgraphs), std::end(newOrderedSubgraphs), std::back_inserter(orderedSubgraphs)); } while (!allSubgraphs.empty()); - InputsDataMap externalInputsData = clonned_network.getInputsInfo(); - OutputsDataMap externalOutputsData = clonned_network.getOutputsInfo(); + InputsDataMap externalInputsData = network.getInputsInfo(); + OutputsDataMap externalOutputsData = network.getOutputsInfo(); _networks.resize(orderedSubgraphs.size()); std::vector> subFunctions(orderedSubgraphs.size()); int id = 0; From 80739700ff4804f039d36a8c1e6394746f01925b Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 1 Apr 2022 10:52:31 +0300 Subject: [PATCH 06/16] Added clone method for ov::Model (#11390) * Added clone method for ov::Model * Changed python API --- .../python/src/pyopenvino/graph/model.cpp | 9 +-- src/core/include/openvino/core/model.hpp | 3 + src/core/src/model.cpp | 4 ++ src/core/tests/model.cpp | 69 +++++++++++++++++++ 4 files changed, 79 insertions(+), 6 deletions(-) diff --git a/src/bindings/python/src/pyopenvino/graph/model.cpp b/src/bindings/python/src/pyopenvino/graph/model.cpp index 25e90d7dbf1..b2531304655 100644 --- a/src/bindings/python/src/pyopenvino/graph/model.cpp +++ b/src/bindings/python/src/pyopenvino/graph/model.cpp @@ -684,12 +684,9 @@ void regclass_graph_Model(py::module m) { :rtype: bool )"); - model.def( - "clone", - [](ov::Model& self) { - return ov::clone_model(self); - }, - R"( + model.def("clone", + &ov::Model::clone, + R"( Return a copy of self. :return: A copy of self. 
:rtype: openvino.runtime.Model diff --git a/src/core/include/openvino/core/model.hpp b/src/core/include/openvino/core/model.hpp index dbc61df62e7..566d79dcd89 100644 --- a/src/core/include/openvino/core/model.hpp +++ b/src/core/include/openvino/core/model.hpp @@ -111,6 +111,9 @@ public: /// Return the op that generates output i std::shared_ptr get_output_op(size_t i) const; + /// \brief Clones the original model + std::shared_ptr clone() const; + /// Model outputs std::vector> outputs(); ov::Output output(); diff --git a/src/core/src/model.cpp b/src/core/src/model.cpp index e1cb9a9954c..4ba4ed379a3 100644 --- a/src/core/src/model.cpp +++ b/src/core/src/model.cpp @@ -971,6 +971,10 @@ ov::Output ov::Model::add_output(const ov::Output& port) { return result->output(0); } +std::shared_ptr ov::Model::clone() const { + return ov::clone_model(*this); +} + namespace bs_util { static int64_t get_batch(const ov::Layout& layout, const ov::PartialShape& shape) { auto batch_idx = ov::layout::batch_idx(layout); diff --git a/src/core/tests/model.cpp b/src/core/tests/model.cpp index 2709c744fac..de7fcd47cdc 100644 --- a/src/core/tests/model.cpp +++ b/src/core/tests/model.cpp @@ -9,6 +9,7 @@ #include #include +#include "common_test_utils/graph_comparator.hpp" #include "openvino/core/partial_shape.hpp" #include "openvino/opsets/opset8.hpp" @@ -1863,3 +1864,71 @@ TEST(model, incompatible_layout) { verify_ex_set_layout_result_validate({1, 2, 3, 4}, "NDHWC"); verify_ex_set_layout_result_validate({1, 2, 3, 4}, "ND...HWC"); } + +TEST(model, clone_model_function) { + auto arg0 = std::make_shared(ov::element::f32, ov::PartialShape{1, 3, 3, 3}); + arg0->set_friendly_name("data"); + arg0->get_output_tensor(0).set_names({"input1"}); + + auto arg1 = std::make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 3}); + arg1->set_friendly_name("data1"); + arg1->get_output_tensor(0).set_names({"input2", "data1"}); + + auto concat = std::make_shared(ov::NodeVector{arg0, arg1}, 1); + concat->set_friendly_name("concat"); + concat->get_output_tensor(0).set_names({"concat_t"}); + auto result1 = std::make_shared(concat); + + auto shape_of = std::make_shared(concat); + shape_of->set_friendly_name("shape_of"); + shape_of->get_output_tensor(0).set_names({"shape_of_t", "identity"}); + auto result2 = std::make_shared(shape_of); + auto model = std::make_shared(ov::ResultVector{result1, result2}, ov::ParameterVector{arg0, arg1}); + + model->validate_nodes_and_infer_types(); + + auto input1 = model->input(0); + auto input2 = model->input("data1"); + + auto cloned_model = ov::clone_model(*model); + + const auto fc = FunctionsComparator::with_default() + .enable(FunctionsComparator::ATTRIBUTES) + .enable(FunctionsComparator::CONST_VALUES); + const auto res = fc.compare(model, cloned_model); + EXPECT_TRUE(res.valid) << res.message; +} + +TEST(model, clone_model) { + auto arg0 = std::make_shared(ov::element::f32, ov::PartialShape{1, 3, 3, 3}); + arg0->set_friendly_name("data"); + arg0->get_output_tensor(0).set_names({"input1"}); + + auto arg1 = std::make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 3}); + arg1->set_friendly_name("data1"); + arg1->get_output_tensor(0).set_names({"input2", "data1"}); + + auto concat = std::make_shared(ov::NodeVector{arg0, arg1}, 1); + concat->set_friendly_name("concat"); + concat->get_output_tensor(0).set_names({"concat_t"}); + auto result1 = std::make_shared(concat); + + auto shape_of = std::make_shared(concat); + shape_of->set_friendly_name("shape_of"); + 
shape_of->get_output_tensor(0).set_names({"shape_of_t", "identity"}); + auto result2 = std::make_shared(shape_of); + auto model = std::make_shared(ov::ResultVector{result1, result2}, ov::ParameterVector{arg0, arg1}); + + model->validate_nodes_and_infer_types(); + + auto input1 = model->input(0); + auto input2 = model->input("data1"); + + auto cloned_model = model->clone(); + + const auto fc = FunctionsComparator::with_default() + .enable(FunctionsComparator::ATTRIBUTES) + .enable(FunctionsComparator::CONST_VALUES); + const auto res = fc.compare(model, cloned_model); + EXPECT_TRUE(res.valid) << res.message; +} From 701d75eafa238e23de46775f2e6bfcdcb108d3ad Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Fri, 1 Apr 2022 11:03:40 +0200 Subject: [PATCH 07/16] [DOCS]continue_language_review-transitionguide (#11177) PR for 22.1 made, now porting to release... some discrepancy between this version and the 22.1 branch seems to exist, so I adjusted the conflicting link to avoid build check errors... the overview has been merged, the remaining articles are reviewed here --- .../common_inference_pipeline.md | 26 +++++----- .../migration_ov_2_0/configure_devices.md | 6 +-- .../migration_ov_2_0/deployment_migration.md | 52 +++++++++---------- .../migration_ov_2_0/graph_construction.md | 4 +- .../migration_ov_2_0/preprocessing.md | 24 ++++----- 5 files changed, 56 insertions(+), 56 deletions(-) diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/common_inference_pipeline.md b/docs/OV_Runtime_UG/migration_ov_2_0/common_inference_pipeline.md index 75ce3495944..b316b396e21 100644 --- a/docs/OV_Runtime_UG/migration_ov_2_0/common_inference_pipeline.md +++ b/docs/OV_Runtime_UG/migration_ov_2_0/common_inference_pipeline.md @@ -1,10 +1,10 @@ # Inference Pipeline {#openvino_2_0_inference_pipeline} -Usually to infer models with OpenVINO™ Runtime, you need to do the following steps in the application pipeline: +Usually, to infer models with OpenVINO™ Runtime, you need to make the following steps in the application pipeline: - 1. Create Core object - 1.1. (Optional) Load extensions -- 2. Read model from the disk - - 2.1. (Optional) Model preprocessing +- 2. Read a model from a drive + - 2.1. (Optional) Perform model preprocessing - 3. Load the model to the device - 4. Create an inference request - 5. Fill input tensors with data @@ -45,7 +45,7 @@ OpenVINO™ Runtime API 2.0: ### 1.1 (Optional) Load extensions -To load model with custom operation, you need to add extensions for these operations. We highly recommend to use [OpenVINO Extensibility API](../../Extensibility_UG/Intro.md) to write extensions, but if you already have old extensions you can load it to new OpenVINO™ Runtime: +To load a model with custom operations, you need to add extensions for these operations. We highly recommend using [OpenVINO Extensibility API](../../Extensibility_UG/Intro.md) to write extensions, but if you already have old extensions you can also load them to the new OpenVINO™ Runtime: Inference Engine API: @@ -75,7 +75,7 @@ OpenVINO™ Runtime API 2.0: @endsphinxtabset -## 2. Read model from the disk +## 2. Read a model from a drive Inference Engine API: @@ -109,10 +109,10 @@ Read model has the same structure as in the example from [Model Creation](./grap Note, you can combine read and compile model stages into a single call `ov::Core::compile_model(filename, devicename)`. 
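For illustration, a minimal sketch of this combined call (the `model.xml` path and the "AUTO" device are placeholder choices, not taken from the guide's snippets):

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Read and compile in a single step; "AUTO" delegates device selection to OpenVINO.
    ov::CompiledModel compiled = core.compile_model("model.xml", "AUTO");
    ov::InferRequest request = compiled.create_infer_request();
    return 0;
}
```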
-### 2.1 (Optional) Model preprocessing +### 2.1 (Optional) Perform model preprocessing -When application's input data doesn't perfectly match with model's input format, preprocessing steps may need to be added. -See detailed guide [how to migrate preprocessing in OpenVINO Runtime API 2.0](./preprocessing.md) +When application's input data doesn't perfectly match the model's input format, preprocessing steps may be necessary. +See a detailed guide on [how to migrate preprocessing in OpenVINO Runtime API 2.0](./preprocessing.md) ## 3. Load the Model to the Device @@ -144,7 +144,7 @@ OpenVINO™ Runtime API 2.0: @endsphinxtabset -If you need to configure OpenVINO Runtime devices with additional configuration parameters, please, refer to the migration [Configure devices](./configure_devices.md) guide. +If you need to configure OpenVINO Runtime devices with additional configuration parameters, refer to the [Configure devices](./configure_devices.md) guide. ## 4. Create an Inference Request @@ -178,7 +178,7 @@ OpenVINO™ Runtime API 2.0: ## 5. Fill input tensors -Inference Engine API fills inputs as `I32` precision (**not** aligned with the original model): +The Inference Engine API fills inputs as `I32` precision (**not** aligned with the original model): @sphinxtabset @@ -398,7 +398,7 @@ OpenVINO™ Runtime API 2.0: ## 7. Process the Inference Results -Inference Engine API processes outputs as `I32` precision (**not** aligned with the original model): +The Inference Engine API processes outputs as `I32` precision (**not** aligned with the original model): @sphinxtabset @@ -469,8 +469,8 @@ Inference Engine API processes outputs as `I32` precision (**not** aligned with @endsphinxtabset OpenVINO™ Runtime API 2.0 processes outputs: -- For IR v10 as `I32` precision (**not** aligned with the original model) to match **old** behavior -- For IR v11, ONNX, ov::Model, Paddle as `I64` precision (aligned with the original model) to match **new** behavior +- For IR v10 as `I32` precision (**not** aligned with the original model) to match the **old** behavior. +- For IR v11, ONNX, ov::Model, Paddle as `I64` precision (aligned with the original model) to match the **new** behavior. @sphinxtabset diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/configure_devices.md b/docs/OV_Runtime_UG/migration_ov_2_0/configure_devices.md index 324763ea30d..076656fb9e9 100644 --- a/docs/OV_Runtime_UG/migration_ov_2_0/configure_devices.md +++ b/docs/OV_Runtime_UG/migration_ov_2_0/configure_devices.md @@ -2,9 +2,9 @@ ### Introduction -Inference Engine API provides an [ability to configure devices](https://docs.openvino.ai/2021.4/openvino_docs_IE_DG_InferenceEngine_QueryAPI.html) via configuration keys and [get device specific metrics](https://docs.openvino.ai/2021.4/openvino_docs_IE_DG_InferenceEngine_QueryAPI.html#getmetric). The values taken from `InferenceEngine::Core::GetConfig` are requested by its string name, while return type is `InferenceEngine::Parameter` and users don't know what is the actual type is stored in this parameter. +The Inference Engine API provides the [ability to configure devices](https://docs.openvino.ai/2021.4/openvino_docs_IE_DG_InferenceEngine_QueryAPI.html) via configuration keys and [get device specific metrics](https://docs.openvino.ai/2021.4/openvino_docs_IE_DG_InferenceEngine_QueryAPI.html#getmetric). 
The values taken from `InferenceEngine::Core::GetConfig` are requested by the string name, while the return type is `InferenceEngine::Parameter`, leaving users unsure of the actual type stored in this parameter. -OpenVINO Runtime API 2.0 solves these issues by introducing [properties](../supported_plugins/config_properties.md), which unify metrics and configuration key concepts, but the main advantage of properties - they have C++ type: +The OpenVINO Runtime API 2.0 solves these issues by introducing [properties](../supported_plugins/config_properties.md), which unify metrics and configuration key concepts. Their main advantage is that they have a defined C++ type: ``` static constexpr Property<std::string, PropertyMutability::RO> full_name{"FULL_DEVICE_NAME"}; ``` @@ -14,7 +14,7 @@ And the property can be requested from an inference device as: @snippet ov_properties_migration.cpp core_get_ro_property -The snippets below show how to migrate from Inference Engine device configuration to OpenVINO Runtime API 2.0 steps. +The snippets below show how to migrate from an Inference Engine device configuration to OpenVINO Runtime API 2.0 steps. ### Set configuration values diff --git a/docs/OV_Runtime_UG/migration_ov_2_0/deployment_migration.md b/docs/OV_Runtime_UG/migration_ov_2_0/deployment_migration.md index 9bc193382a8..8fda4956982 100644 --- a/docs/OV_Runtime_UG/migration_ov_2_0/deployment_migration.md +++ b/docs/OV_Runtime_UG/migration_ov_2_0/deployment_migration.md @@ -1,29 +1,29 @@ # Installation & Deployment {#openvino_2_0_deployment} -"Easy to use" is one of the main concepts for OpenVINO™ API 2.0. It includes not only simplifying the migration from frameworks to OpenVINO, but also how OpenVINO is organized, how the development tools are used, and how to develop and deploy OpenVINO-based applications. +"Easy to use" is one of the main concepts for OpenVINO™ API 2.0. It is not only about simplifying migration from different frameworks to OpenVINO, but also about how OpenVINO is organized, how its development tools are used, and how OpenVINO-based applications are developed and deployed. -To accomplish that, we have made some changes on the installation and deployment of OpenVINO in the 2022.1 release. This guide will walk you through them. +To accomplish that, we made some changes to the installation and deployment processes of OpenVINO in the 2022.1 release. This guide will walk you through them. -## Installer Package Contains OpenVINO™ Runtime Only +## The Installer Package Contains OpenVINO™ Runtime Only -Starting from OpenVINO 2022.1, Model Optimizer, Post-Training Optimization tool and Python-based Development tools such as Open Model Zoo tools are distributed via [PyPI](https://pypi.org/project/openvino-dev/) only, and are no longer included in the OpenVINO installer package. This change has several benefits as it: +Starting from OpenVINO 2022.1, development tools are distributed via [PyPI](https://pypi.org/project/openvino-dev/) only and are no longer included in the OpenVINO installer package. For a list of these components, refer to the [installation overview](../../../install_guides/installing-openvino-overview.md). This approach has several benefits: -* Simplifies the user experience. In previous versions, the installation and usage of OpenVINO Development Tools differ according to the distribution type (via an OpenVINO installer or PyPI). -* Ensures that dependencies are handled properly via the PIP package manager and support virtual environments of development tools.
+* simplifies the user experience - in previous versions, installation and usage of OpenVINO Development Tools differed from one distribution type to another (the OpenVINO installer vs. PyPI), +* ensures that dependencies are handled properly via the PIP package manager and support virtual environments of development tools. -The structure of OpenVINO 2022.1 installer package has been organized as below: +The structure of the OpenVINO 2022.1 installer package has been organized as follows: - The `runtime` folder includes headers, libraries and CMake interfaces. -- The `tools` folder contains [the compile tool](../../../tools/compile_tool/README.md), [deployment manager](../../OV_Runtime_UG/deployment/deployment-manager-tool.md) and a set of `requirements.txt` files with links to the corresponding versions of the `openvino-dev` package. +- The `tools` folder contains [the compile tool](../../../tools/compile_tool/README.md), [deployment manager](../../OV_Runtime_UG/deployment/deployment-manager-tool.md), and a set of `requirements.txt` files with links to the corresponding versions of the `openvino-dev` package. - The `python` folder contains the Python version for OpenVINO Runtime. ## Installing OpenVINO Development Tools via PyPI -Since OpenVINO Development Tools is no longer in the installer package, the installation process has changed too. This section describes it through a comparison with previous versions. +Since OpenVINO Development Tools is no longer in the installer package, the installation process has also changed. This section describes it through a comparison with previous versions. ### For Versions Prior to 2022.1 -In previous versions, OpenVINO Development Tools is a part of main package. After the package is installed, to convert models (for example, TensorFlow), you need to install additional dependencies by using the requirements files such as `requirements_tf.txt`, install Post-Training Optimization tool and Accuracy Checker tool via the `setup.py` scripts, and then use the `setupvars` scripts to make the tools available to the following command: +In previous versions, OpenVINO Development Tools was a part of the main package. After the package was installed, to convert models (for example, TensorFlow), you needed to install additional dependencies by using the requirement files, such as `requirements_tf.txt`, install Post-Training Optimization tool and Accuracy Checker tool via the `setup.py` scripts, and then use the `setupvars` scripts to make the tools available to the following command: ```sh $ mo.py -h @@ -31,13 +31,13 @@ $ mo.py -h ### For 2022.1 and After -Starting from OpenVINO 2022.1, you can install the development tools from [PyPI](https://pypi.org/project/openvino-dev/) repository only, using the following command (taking TensorFlow as an example): +In OpenVINO 2022.1 and later, you can install the development tools from a [PyPI](https://pypi.org/project/openvino-dev/) repository only, using the following command (taking TensorFlow as an example): ```sh $ python3 -m pip install -r /tools/requirements_tf.txt ``` -This will install all the development tools and additional necessary components to work with TensorFlow via the `openvino-dev` package (see **Step 4. Install the Package** on the [PyPI page](https://pypi.org/project/openvino-dev/) for parameters of other frameworks). +This will install all the development tools and additional components necessary to work with TensorFlow via the `openvino-dev` package (see **Step 4. 
Install the Package** on the [PyPI page](https://pypi.org/project/openvino-dev/) for parameters of other frameworks). Then, the tools can be used by commands like: @@ -50,11 +50,11 @@ You don't have to install any other dependencies. For more details on the instal ## Interface Changes for Building C/C++ Applications -The new OpenVINO Runtime with API 2.0 has also brought some changes for builiding your C/C++ applications. +The new OpenVINO Runtime with its API 2.0 has also brought some changes for building C/C++ applications. ### CMake Interface -The CMake interface has been changed as below: +The CMake interface has been changed as follows: **With Inference Engine of previous versions**: @@ -78,7 +78,7 @@ target_link_libraries(ov_c_app PRIVATE openvino::runtime::c) ### Native Interfaces -To build applications without CMake interface, you can also use MSVC IDE, UNIX makefiles and any other interfaces, which have been changed as below: +To build applications without the CMake interface, you can also use MSVC IDE, UNIX makefiles, and any other interface, which has been changed as shown here: **With Inference Engine of previous versions**: @@ -153,19 +153,19 @@ To build applications without CMake interface, you can also use MSVC IDE, UNIX m ## Clearer Library Structure for Deployment -OpenVINO 2022.1 has reorganized the libraries to make it easier for deployment. In previous versions, to perform deployment steps, you have to use several libraries. Now you can just use `openvino` or `openvino_c` based on your developing language plus necessary plugins to complete your task. For example, `openvino_intel_cpu_plugin` and `openvino_ir_frontend` plugins will enable you to load OpenVINO IRs and perform inference on CPU device (see [Local distribution with OpenVINO](../deployment/local-distribution.md) for more details). +OpenVINO 2022.1 introduced a reorganization of the libraries, to make deployment easier. In the previous versions, to perform deployment steps, you had to use several libraries. Now you can just use `openvino` or `openvino_c` based on your developing language, together with the necessary plugins to complete your task. For example, `openvino_intel_cpu_plugin` and `openvino_ir_frontend` plugins will enable you to load OpenVINO IRs and perform inference on the CPU device (see [Local distribution with OpenVINO](../deployment/local-distribution.md) for more details). -Here you can find some detailed comparisons on library structure between OpenVINO 2022.1 and previous versions: +Here you can find detailed comparisons on the library structure between OpenVINO 2022.1 and the previous versions: -* A single core library with all the functionalities (`openvino` for C++ Runtime, `openvino_c` for Inference Engine API C interface) is used in 2022.1, instead of the previous core libraries which contain `inference_engine`, `ngraph`, `inference_engine_transformations` and `inference_engine_lp_transformations`. -* The optional `inference_engine_preproc` preprocessing library (if `InferenceEngine::PreProcessInfo::setColorFormat` or `InferenceEngine::PreProcessInfo::setResizeAlgorithm` is used) is renamed as `openvino_gapi_preproc` and deprecated in 2022.1. See more details on [Preprocessing capabilities of OpenVINO API 2.0](preprocessing.md). -* The libraries of plugins are renamed as below: - * `openvino_intel_cpu_plugin` is used for [CPU](../supported_plugins/CPU.md) device instead of `MKLDNNPlugin` in previous versions. 
- * `openvino_intel_gpu_plugin` is used for [GPU](../supported_plugins/GPU.md) device instead of `clDNNPlugin` in previous versions. - * `openvino_auto_plugin` is used for [Auto-Device Plugin](../auto_device_selection.md) in 2022.1. -* The plugins for reading and converting models have been changed as below: - * `openvino_ir_frontend` is used to read IRs instead of `inference_engine_ir_reader` in previous versions. - * `openvino_onnx_frontend` is used to read ONNX models instead of `inference_engine_onnx_reader` (with its dependencies) in previous versions. +* A single core library with all the functionalities (`openvino` for C++ Runtime, `openvino_c` for Inference Engine API C interface) is used in 2022.1, instead of the previous core libraries which contained `inference_engine`, `ngraph`, `inference_engine_transformations` and `inference_engine_lp_transformations`. +* The optional `inference_engine_preproc` preprocessing library (if `InferenceEngine::PreProcessInfo::setColorFormat` or `InferenceEngine::PreProcessInfo::setResizeAlgorithm` is used) has been renamed to `openvino_gapi_preproc` and deprecated in 2022.1. See more details on [Preprocessing capabilities of OpenVINO API 2.0](preprocessing.md). +* The libraries of plugins have been renamed as follows: + * `openvino_intel_cpu_plugin` is used for [CPU](../supported_plugins/CPU.md) device instead of `MKLDNNPlugin`. + * `openvino_intel_gpu_plugin` is used for [GPU](../supported_plugins/GPU.md) device instead of `clDNNPlugin`. + * `openvino_auto_plugin` is used for [Auto-Device Plugin](../auto_device_selection.md). +* The plugins for reading and converting models have been changed as follows: + * `openvino_ir_frontend` is used to read IRs instead of `inference_engine_ir_reader`. + * `openvino_onnx_frontend` is used to read ONNX models instead of `inference_engine_onnx_reader` (with its dependencies). * `openvino_paddle_frontend` is added in 2022.1 to read PaddlePaddle models.
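As a rough illustration of this layout (the model file name here is a placeholder), an application like the sketch below links only against `openvino`, while `openvino_ir_frontend` and `openvino_intel_cpu_plugin` are loaded dynamically at run time:

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Reading an IR model dispatches to openvino_ir_frontend.
    std::shared_ptr<ov::Model> model = core.read_model("model.xml");
    // Compiling for "CPU" loads openvino_intel_cpu_plugin.
    ov::CompiledModel compiled = core.compile_model(model, "CPU");
    return 0;
}
```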