[PYTHON API] Expose ngraph bindings as part of openvino API (#8027)

This commit is contained in:
Jan Iwaszkiewicz 2021-10-18 21:36:48 +02:00 committed by GitHub
parent cf52ba5c08
commit 78358bacb0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
250 changed files with 24080 additions and 1086 deletions

View File

@ -17,10 +17,10 @@ from setuptools.command.develop import develop as _develop
from distutils.command.build import build as _build
__version__ = os.environ.get("NGRAPH_VERSION", "0.0.0.dev0")
PYNGRAPH_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYNGRAPH_ROOT_DIR, "../../.."))
PYTHON_API_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYTHON_API_ROOT_DIR, "../../.."))
# Change current working directory to runtime/bindings/python
os.chdir(PYNGRAPH_ROOT_DIR)
os.chdir(PYTHON_API_ROOT_DIR)
NGRAPH_LIBS = ["ngraph", "onnx_ngraph_frontend", "openvino"]
@ -40,13 +40,26 @@ packages = [
"ngraph.impl.op.util",
"ngraph.impl.passes",
"ngraph.frontend",
"openvino"
"openvino",
"openvino.opset1",
"openvino.opset2",
"openvino.opset3",
"openvino.opset4",
"openvino.opset5",
"openvino.opset6",
"openvino.opset7",
"openvino.opset8",
"openvino.utils",
"openvino.impl",
"openvino.impl.op",
"openvino.impl.op.util",
"openvino.impl.passes",
]
data_files = []
with open(os.path.join(PYNGRAPH_ROOT_DIR, "requirements.txt")) as req:
with open(os.path.join(PYTHON_API_ROOT_DIR, "requirements.txt")) as req:
requirements = req.read().splitlines()
cmdclass = {}
@ -143,6 +156,8 @@ class BuildCMakeExt(build_ext):
build_dir = pathlib.Path(self.build_temp)
extension_path = pathlib.Path(self.get_ext_fullpath(extension.name))
if extension.name == "pyopenvino":
extension_path = pathlib.Path(os.path.join(extension_path.parent.absolute(), "openvino"))
os.makedirs(build_dir, exist_ok=True)
os.makedirs(extension_path.parent.absolute(), exist_ok=True)
@ -152,7 +167,7 @@ class BuildCMakeExt(build_ext):
root_dir = OPENVINO_ROOT_DIR
bin_dir = os.path.join(OPENVINO_ROOT_DIR, "bin")
if os.environ.get("OpenVINO_DIR") is not None:
root_dir = PYNGRAPH_ROOT_DIR
root_dir = PYTHON_API_ROOT_DIR
bin_dir = build_dir
self.announce("Configuring cmake project", level=3)
@ -185,7 +200,7 @@ class InstallCMakeLibs(install_lib):
root_dir = os.path.join(OPENVINO_ROOT_DIR, "bin")
if os.environ.get("OpenVINO_DIR") is not None:
root_dir = pathlib.Path(PYNGRAPH_ROOT_DIR)
root_dir = pathlib.Path(PYTHON_API_ROOT_DIR)
lib_ext = ""
if "linux" in sys.platform:
@ -214,8 +229,8 @@ cmdclass["build_ext"] = BuildCMakeExt
cmdclass["install_lib"] = InstallCMakeLibs
setup(
name="ngraph-core",
description="nGraph - Intel's graph compiler and runtime for Neural Networks",
name="openvino",
description="OpenVINO - deploying pre-trained deep learning models",
version=__version__,
author="Intel Corporation",
url="https://github.com/openvinotoolkit/openvino",

View File

@ -16,7 +16,7 @@
namespace py = pybind11;
void regclass_pyngraph_AxisSet(py::module m) {
py::class_<ngraph::AxisSet, std::shared_ptr<ngraph::AxisSet>> axis_set(m, "AxisSet");
py::class_<ngraph::AxisSet, std::shared_ptr<ngraph::AxisSet>> axis_set(m, "AxisSet", py::module_local());
axis_set.doc() = "ngraph.impl.AxisSet wraps ngraph::AxisSet";
axis_set.def(py::init<const std::initializer_list<size_t>&>(), py::arg("axes"));
axis_set.def(py::init<const std::set<size_t>&>(), py::arg("axes"));

View File

@ -12,7 +12,9 @@
namespace py = pybind11;
void regclass_pyngraph_AxisVector(py::module m) {
py::class_<ngraph::AxisVector, std::shared_ptr<ngraph::AxisVector>> axis_vector(m, "AxisVector");
py::class_<ngraph::AxisVector, std::shared_ptr<ngraph::AxisVector>> axis_vector(m,
"AxisVector",
py::module_local());
axis_vector.doc() = "ngraph.impl.AxisVector wraps ngraph::AxisVector";
axis_vector.def(py::init<const std::initializer_list<size_t>&>(), py::arg("axes"));
axis_vector.def(py::init<const std::vector<size_t>&>(), py::arg("axes"));

View File

@ -12,7 +12,7 @@
namespace py = pybind11;
void regclass_pyngraph_Coordinate(py::module m) {
py::class_<ngraph::Coordinate, std::shared_ptr<ngraph::Coordinate>> coordinate(m, "Coordinate");
py::class_<ngraph::Coordinate, std::shared_ptr<ngraph::Coordinate>> coordinate(m, "Coordinate", py::module_local());
coordinate.doc() = "ngraph.impl.Coordinate wraps ngraph::Coordinate";
coordinate.def(py::init<const std::initializer_list<size_t>&>());
coordinate.def(py::init<const ngraph::Shape&>());

View File

@ -16,7 +16,9 @@
namespace py = pybind11;
void regclass_pyngraph_CoordinateDiff(py::module m) {
py::class_<ngraph::CoordinateDiff, std::shared_ptr<ngraph::CoordinateDiff>> coordinate_diff(m, "CoordinateDiff");
py::class_<ngraph::CoordinateDiff, std::shared_ptr<ngraph::CoordinateDiff>> coordinate_diff(m,
"CoordinateDiff",
py::module_local());
coordinate_diff.doc() = "ngraph.impl.CoordinateDiff wraps ngraph::CoordinateDiff";
coordinate_diff.def(py::init<const std::initializer_list<ptrdiff_t>&>());
coordinate_diff.def(py::init<const std::vector<ptrdiff_t>&>());

View File

@ -18,7 +18,7 @@ namespace py = pybind11;
void regclass_pyngraph_Dimension(py::module m) {
using value_type = ngraph::Dimension::value_type;
py::class_<ngraph::Dimension, std::shared_ptr<ngraph::Dimension>> dim(m, "Dimension");
py::class_<ngraph::Dimension, std::shared_ptr<ngraph::Dimension>> dim(m, "Dimension", py::module_local());
dim.doc() = "ngraph.impl.Dimension wraps ngraph::Dimension";
dim.def(py::init<>());
dim.def(py::init<value_type&>(),

View File

@ -16,7 +16,8 @@ namespace py = pybind11;
void regclass_pyngraph_FrontEnd(py::module m) {
py::class_<ngraph::frontend::FrontEnd, std::shared_ptr<ngraph::frontend::FrontEnd>> fem(m,
"FrontEnd",
py::dynamic_attr());
py::dynamic_attr(),
py::module_local());
fem.doc() = "ngraph.impl.FrontEnd wraps ngraph::frontend::FrontEnd";
fem.def(

View File

@ -17,7 +17,8 @@ void regclass_pyngraph_FrontEndManager(py::module m) {
py::class_<ngraph::frontend::FrontEndManager, std::shared_ptr<ngraph::frontend::FrontEndManager>> fem(
m,
"FrontEndManager",
py::dynamic_attr());
py::dynamic_attr(),
py::module_local());
fem.doc() = "ngraph.impl.FrontEndManager wraps ngraph::frontend::FrontEndManager";
fem.def(py::init<>());

View File

@ -14,7 +14,8 @@ namespace py = pybind11;
void regclass_pyngraph_InputModel(py::module m) {
py::class_<ngraph::frontend::InputModel, std::shared_ptr<ngraph::frontend::InputModel>> im(m,
"InputModel",
py::dynamic_attr());
py::dynamic_attr(),
py::module_local());
im.doc() = "ngraph.impl.InputModel wraps ngraph::frontend::InputModel";
im.def("get_place_by_tensor_name",

View File

@ -14,7 +14,10 @@
namespace py = pybind11;
void regclass_pyngraph_Place(py::module m) {
py::class_<ngraph::frontend::Place, std::shared_ptr<ngraph::frontend::Place>> place(m, "Place", py::dynamic_attr());
py::class_<ngraph::frontend::Place, std::shared_ptr<ngraph::frontend::Place>> place(m,
"Place",
py::dynamic_attr(),
py::module_local());
place.doc() = "ngraph.impl.Place wraps ngraph::frontend::Place";
place.def("is_input",

View File

@ -16,7 +16,7 @@ namespace py = pybind11;
static const char* CAPSULE_NAME = "ngraph_function";
void regclass_pyngraph_Function(py::module m) {
py::class_<ngraph::Function, std::shared_ptr<ngraph::Function>> function(m, "Function");
py::class_<ngraph::Function, std::shared_ptr<ngraph::Function>> function(m, "Function", py::module_local());
function.doc() = "ngraph.impl.Function wraps ngraph::Function";
function.def(py::init([](const ngraph::ResultVector& res,

View File

@ -36,7 +36,10 @@ using PyRTMap = std::map<std::string, std::shared_ptr<ngraph::Variant>>;
PYBIND11_MAKE_OPAQUE(PyRTMap);
void regclass_pyngraph_Node(py::module m) {
py::class_<ngraph::Node, std::shared_ptr<ngraph::Node>, PyNode> node(m, "Node", py::dynamic_attr());
py::class_<ngraph::Node, std::shared_ptr<ngraph::Node>, PyNode> node(m,
"Node",
py::dynamic_attr(),
py::module_local());
node.doc() = "ngraph.impl.Node wraps ngraph::Node";
node.def(
"__add__",

View File

@ -98,7 +98,7 @@ private:
} // namespace
void regclass_pyngraph_NodeFactory(py::module m) {
py::class_<NodeFactory> node_factory(m, "NodeFactory");
py::class_<NodeFactory> node_factory(m, "NodeFactory", py::module_local());
node_factory.doc() = "NodeFactory creates nGraph nodes";
node_factory.def(py::init());

View File

@ -14,7 +14,8 @@ namespace py = pybind11;
void regclass_pyngraph_Input(py::module m) {
py::class_<ngraph::Input<ngraph::Node>, std::shared_ptr<ngraph::Input<ngraph::Node>>> input(m,
"Input",
py::dynamic_attr());
py::dynamic_attr(),
py::module_local());
input.doc() = "ngraph.impl.Input wraps ngraph::Input<Node>";
input.def("get_node",

View File

@ -14,7 +14,8 @@ namespace py = pybind11;
void regclass_pyngraph_Output(py::module m) {
py::class_<ngraph::Output<ngraph::Node>, std::shared_ptr<ngraph::Output<ngraph::Node>>> output(m,
"Output",
py::dynamic_attr());
py::dynamic_attr(),
py::module_local());
output.doc() = "ngraph.impl.Output wraps ngraph::Output<Node>";
output.def("get_node",

View File

@ -61,7 +61,8 @@ void regclass_pyngraph_op_Constant(py::module m) {
py::class_<ngraph::op::Constant, std::shared_ptr<ngraph::op::Constant>, ngraph::Node> constant(
m,
"Constant",
py::buffer_protocol());
py::buffer_protocol(),
py::module_local());
constant.doc() = "ngraph.impl.op.Constant wraps ngraph::op::Constant";
constant.def(py::init<const ngraph::element::Type&, const ngraph::Shape&, const std::vector<char>&>());
constant.def(py::init<const ngraph::element::Type&, const ngraph::Shape&, const std::vector<ngraph::float16>&>());

View File

@ -16,7 +16,10 @@
namespace py = pybind11;
void regclass_pyngraph_op_Parameter(py::module m) {
py::class_<ngraph::op::Parameter, std::shared_ptr<ngraph::op::Parameter>, ngraph::Node> parameter(m, "Parameter");
py::class_<ngraph::op::Parameter, std::shared_ptr<ngraph::op::Parameter>, ngraph::Node> parameter(
m,
"Parameter",
py::module_local());
parameter.doc() = "ngraph.impl.op.Parameter wraps ngraph::op::Parameter";
parameter.def("__repr__", [](const ngraph::Node& self) {
std::string class_name = py::cast(self).get_type().attr("__name__").cast<std::string>();

View File

@ -15,6 +15,8 @@
namespace py = pybind11;
void regclass_pyngraph_op_Result(py::module m) {
py::class_<ngraph::op::Result, std::shared_ptr<ngraph::op::Result>, ngraph::Node> result(m, "Result");
py::class_<ngraph::op::Result, std::shared_ptr<ngraph::op::Result>, ngraph::Node> result(m,
"Result",
py::module_local());
result.doc() = "ngraph.impl.op.Result wraps ngraph::op::Result";
}

View File

@ -14,7 +14,7 @@ namespace py = pybind11;
void regclass_pyngraph_op_util_ArithmeticReduction(py::module m) {
py::class_<ngraph::op::util::ArithmeticReduction, std::shared_ptr<ngraph::op::util::ArithmeticReduction>>
arithmeticReduction(m, "ArithmeticReduction");
arithmeticReduction(m, "ArithmeticReduction", py::module_local());
// arithmeticReduction.def(py::init<const std::string&,
// const std::shared_ptr<ngraph::Node>&,
// const ngraph::AxisSet& >());

View File

@ -14,5 +14,5 @@ namespace py = pybind11;
void regclass_pyngraph_op_util_BinaryElementwiseArithmetic(py::module m) {
py::class_<ngraph::op::util::BinaryElementwiseArithmetic,
std::shared_ptr<ngraph::op::util::BinaryElementwiseArithmetic>>
binaryElementwiseArithmetic(m, "BinaryElementwiseArithmetic");
binaryElementwiseArithmetic(m, "BinaryElementwiseArithmetic", py::module_local());
}

View File

@ -14,5 +14,5 @@ namespace py = pybind11;
void regclass_pyngraph_op_util_BinaryElementwiseComparison(py::module m) {
py::class_<ngraph::op::util::BinaryElementwiseComparison,
std::shared_ptr<ngraph::op::util::BinaryElementwiseComparison>>
binaryElementwiseComparison(m, "BinaryElementwiseComparison");
binaryElementwiseComparison(m, "BinaryElementwiseComparison", py::module_local());
}

View File

@ -13,5 +13,5 @@ namespace py = pybind11;
void regclass_pyngraph_op_util_BinaryElementwiseLogical(py::module m) {
py::class_<ngraph::op::util::BinaryElementwiseLogical, std::shared_ptr<ngraph::op::util::BinaryElementwiseLogical>>
binaryElementwiseLogical(m, "BinaryElementwiseLogical");
binaryElementwiseLogical(m, "BinaryElementwiseLogical", py::module_local());
}

View File

@ -15,7 +15,8 @@ namespace py = pybind11;
void regclass_pyngraph_op_util_IndexReduction(py::module m) {
py::class_<ngraph::op::util::IndexReduction, std::shared_ptr<ngraph::op::util::IndexReduction>> indexReduction(
m,
"IndexRedection");
"IndexRedection",
py::module_local());
indexReduction.def("get_reduction_axis", &ngraph::op::util::IndexReduction::get_reduction_axis);
indexReduction.def("set_reduction_axis", &ngraph::op::util::IndexReduction::set_reduction_axis);

View File

@ -14,6 +14,7 @@ namespace py = pybind11;
void regclass_pyngraph_op_util_OpAnnotations(py::module m) {
py::class_<ngraph::op::util::OpAnnotations, std::shared_ptr<ngraph::op::util::OpAnnotations>> opAnnotations(
m,
"OpAnnotations");
"OpAnnotations",
py::module_local());
opAnnotations.def(py::init<>());
}

View File

@ -14,5 +14,5 @@ namespace py = pybind11;
void regclass_pyngraph_op_util_UnaryElementwiseArithmetic(py::module m) {
py::class_<ngraph::op::util::UnaryElementwiseArithmetic,
std::shared_ptr<ngraph::op::util::UnaryElementwiseArithmetic>>
unaryElementwiseArithmetic(m, "UnaryElementwiseArithmetic");
unaryElementwiseArithmetic(m, "UnaryElementwiseArithmetic", py::module_local());
}

View File

@ -20,7 +20,9 @@ namespace py = pybind11;
static const char* CAPSULE_NAME = "ngraph_partial_shape";
void regclass_pyngraph_PartialShape(py::module m) {
py::class_<ngraph::PartialShape, std::shared_ptr<ngraph::PartialShape>> shape(m, "PartialShape");
py::class_<ngraph::PartialShape, std::shared_ptr<ngraph::PartialShape>> shape(m,
"PartialShape",
py::module_local());
shape.doc() = "ngraph.impl.PartialShape wraps ngraph::PartialShape";
shape.def(py::init([](const std::vector<int64_t>& dimensions) {

View File

@ -32,7 +32,7 @@ public:
} // namespace
void regclass_pyngraph_passes_Manager(py::module m) {
py::class_<ManagerWrapper> manager(m, "Manager");
py::class_<ManagerWrapper> manager(m, "Manager", py::module_local());
manager.doc() = "ngraph.impl.passes.Manager wraps ngraph::pass::Manager using ManagerWrapper";
manager.def(py::init<>());

View File

@ -16,7 +16,7 @@
namespace py = pybind11;
void regclass_pyngraph_Shape(py::module m) {
py::class_<ngraph::Shape, std::shared_ptr<ngraph::Shape>> shape(m, "Shape");
py::class_<ngraph::Shape, std::shared_ptr<ngraph::Shape>> shape(m, "Shape", py::module_local());
shape.doc() = "ngraph.impl.Shape wraps ngraph::Shape";
shape.def(py::init<const std::initializer_list<size_t>&>(), py::arg("axis_lengths"));
shape.def(py::init<const std::vector<size_t>&>(), py::arg("axis_lengths"));

View File

@ -16,7 +16,7 @@
namespace py = pybind11;
void regclass_pyngraph_Strides(py::module m) {
py::class_<ngraph::Strides, std::shared_ptr<ngraph::Strides>> strides(m, "Strides");
py::class_<ngraph::Strides, std::shared_ptr<ngraph::Strides>> strides(m, "Strides", py::module_local());
strides.doc() = "ngraph.impl.Strides wraps ngraph::Strides";
strides.def(py::init<const std::initializer_list<size_t>&>(), py::arg("axis_strides"));
strides.def(py::init<const std::vector<size_t>&>(), py::arg("axis_strides"));

View File

@ -13,7 +13,7 @@
namespace py = pybind11;
void regclass_pyngraph_Type(py::module m) {
py::class_<ngraph::element::Type, std::shared_ptr<ngraph::element::Type>> type(m, "Type");
py::class_<ngraph::element::Type, std::shared_ptr<ngraph::element::Type>> type(m, "Type", py::module_local());
type.doc() = "ngraph.impl.Type wraps ngraph::element::Type";
type.attr("boolean") = ngraph::element::boolean;
type.attr("f16") = ngraph::element::f16;

View File

@ -11,7 +11,7 @@
namespace py = pybind11;
void regclass_pyngraph_Variant(py::module m) {
py::class_<ngraph::Variant, std::shared_ptr<ngraph::Variant>> variant_base(m, "Variant");
py::class_<ngraph::Variant, std::shared_ptr<ngraph::Variant>> variant_base(m, "Variant", py::module_local());
variant_base.doc() = "ngraph.impl.Variant wraps ngraph::Variant";
}

View File

@ -23,7 +23,7 @@ extern void regclass_pyngraph_VariantWrapper(py::module m, std::string typestrin
py::class_<ngraph::VariantWrapper<VT>,
std::shared_ptr<ngraph::VariantWrapper<VT>>,
ngraph::Variant>
variant_wrapper(m, pyclass_name);
variant_wrapper(m, pyclass_name, py::module_local());
variant_wrapper.doc() =
"ngraph.impl.Variant[typestring] wraps ngraph::VariantWrapper<typestring>";

View File

@ -1,6 +1,29 @@
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""openvino module namespace, exposing factory functions for all ops and other classes."""
# noqa: F401
from pkg_resources import get_distribution, DistributionNotFound
__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore # mypy issue #1422
try:
__version__ = get_distribution("openvino-core").version
except DistributionNotFound:
__version__ = "0.0.0.dev0"
from openvino.ie_api import BlobWrapper
from openvino.ie_api import infer
from openvino.ie_api import async_infer
from openvino.ie_api import get_result
from openvino.ie_api import blob_from_file
from openvino.impl import Dimension
from openvino.impl import Function
from openvino.impl import Node
from openvino.impl import PartialShape
from openvino.pyopenvino import Core
from openvino.pyopenvino import IENetwork
from openvino.pyopenvino import ExecutableNetwork
@ -22,11 +45,32 @@ from openvino.pyopenvino import ColorFormat
from openvino.pyopenvino import PreProcessChannel
from openvino.pyopenvino import Tensor
from openvino.ie_api import BlobWrapper
from openvino.ie_api import infer
from openvino.ie_api import async_infer
from openvino.ie_api import get_result
from openvino.ie_api import blob_from_file
from openvino import opset1
from openvino import opset2
from openvino import opset3
from openvino import opset4
from openvino import opset5
from openvino import opset6
from openvino import opset7
from openvino import opset8
# Extend Node class to support binary operators
Node.__add__ = opset8.add
Node.__sub__ = opset8.subtract
Node.__mul__ = opset8.multiply
Node.__div__ = opset8.divide
Node.__truediv__ = opset8.divide
Node.__radd__ = lambda left, right: opset8.add(right, left)
Node.__rsub__ = lambda left, right: opset8.subtract(right, left)
Node.__rmul__ = lambda left, right: opset8.multiply(right, left)
Node.__rdiv__ = lambda left, right: opset8.divide(right, left)
Node.__rtruediv__ = lambda left, right: opset8.divide(right, left)
Node.__eq__ = opset8.equal
Node.__ne__ = opset8.not_equal
Node.__lt__ = opset8.less
Node.__le__ = opset8.less_equal
Node.__gt__ = opset8.greater
Node.__ge__ = opset8.greater_equal
# Patching for Blob class
# flake8: noqa: F811

View File

@ -0,0 +1,16 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""openvino exceptions hierarchy. All exceptions are descendants of NgraphError."""
class NgraphError(Exception):
"""Base class for Ngraph exceptions."""
class UserInputError(NgraphError):
"""User provided unexpected input."""
class NgraphTypeError(NgraphError, TypeError):
"""Type mismatch error."""

View File

@ -0,0 +1,52 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""
Package: openvino.impl
Low level wrappers for the c++ api.
"""
# flake8: noqa
import os
import sys
if sys.platform == "win32":
# Installer, yum, pip installs openvino dlls to the different directories
# and those paths need to be visible to the openvino modules
#
# If you're using a custom installation of openvino,
# add the location of openvino dlls to your system PATH.
#
# looking for the libs in the pip installation path by default.
openvino_libs = [os.path.join(os.path.dirname(__file__), '..', '..', '..'),
os.path.join(os.path.dirname(__file__), '..', '..', 'openvino', 'libs')]
# setupvars.bat script set all libs paths to OPENVINO_LIB_PATHS environment variable.
openvino_libs_installer = os.getenv('OPENVINO_LIB_PATHS')
if openvino_libs_installer:
openvino_libs.extend(openvino_libs_installer.split(';'))
for lib in openvino_libs:
lib_path = os.path.join(os.path.dirname(__file__), lib)
if os.path.isdir(lib_path):
# On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH.
if (3, 8) <= sys.version_info:
os.add_dll_directory(os.path.abspath(lib_path))
else:
os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"]
from openvino.pyopenvino import Dimension
from openvino.pyopenvino import Function
from openvino.pyopenvino import Input
from openvino.pyopenvino import Output
from openvino.pyopenvino import Node
from openvino.pyopenvino import Type
from openvino.pyopenvino import PartialShape
from openvino.pyopenvino import Shape
from openvino.pyopenvino import Strides
from openvino.pyopenvino import CoordinateDiff
from openvino.pyopenvino import AxisSet
from openvino.pyopenvino import AxisVector
from openvino.pyopenvino import Coordinate
from openvino.pyopenvino import Output
from openvino.pyopenvino import util

View File

@ -0,0 +1,23 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""
Package: openvino.op
Low level wrappers for the c++ api in ov::op.
"""
# flake8: noqa
import numpy as np
from openvino.pyopenvino.op import Constant
"""Retrieve Constant inner data.
Internally uses PyBind11 Numpy's buffer protocol.
:return Numpy array containing internally stored constant data.
"""
Constant.get_data = lambda self: np.array(self, copy=True)
from openvino.pyopenvino.op import Parameter

View File

@ -0,0 +1,16 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""
Package: openvino.op.util
Low level wrappers for the c++ api in ov::op::util.
"""
# flake8: noqa
from openvino.pyopenvino.op.util import UnaryElementwiseArithmetic
from openvino.pyopenvino.op.util import BinaryElementwiseComparison
from openvino.pyopenvino.op.util import BinaryElementwiseArithmetic
from openvino.pyopenvino.op.util import BinaryElementwiseLogical
from openvino.pyopenvino.op.util import OpAnnotations
from openvino.pyopenvino.op.util import ArithmeticReduction
from openvino.pyopenvino.op.util import IndexReduction

View File

@ -0,0 +1,6 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# flake8: noqa
from openvino.pyopenvino.passes import Manager

View File

@ -0,0 +1,111 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset1.ops import atan
from openvino.opset1.ops import avg_pool
from openvino.opset1.ops import batch_norm_inference
from openvino.opset1.ops import binary_convolution
from openvino.opset1.ops import broadcast
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset1.ops import divide
from openvino.opset1.ops import elu
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset1.ops import gather
from openvino.opset1.ops import gather_tree
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset1.ops import hard_sigmoid
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset1.ops import lrn
from openvino.opset1.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset1.ops import negative
from openvino.opset1.ops import non_max_suppression
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset1.ops import proposal
from openvino.opset1.ops import range
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset1.ops import shape_of
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset1.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,117 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset1.ops import atan
from openvino.opset1.ops import avg_pool
from openvino.opset1.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset1.ops import broadcast
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset1.ops import divide
from openvino.opset1.ops import elu
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset1.ops import gather
from openvino.opset1.ops import gather_tree
from openvino.opset2.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset1.ops import hard_sigmoid
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset1.ops import lrn
from openvino.opset1.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset2.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset1.ops import non_max_suppression
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset1.ops import proposal
from openvino.opset1.ops import range
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset2.ops import roi_pooling
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset1.ops import shape_of
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset1.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

View File

@ -0,0 +1,179 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from functools import partial
from openvino.impl import Node, Shape
from openvino.impl.op import Constant, Parameter
from openvino.opset_utils import _get_node_factory
from openvino.utils.decorators import binary_op, nameable_op, unary_op
from openvino.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.utils.node_factory import NodeFactory
from openvino.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorSliceInputDesc,
TensorIteratorMergedInputDesc,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
TensorIteratorConcatOutputDesc,
)
from openvino.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
# Factory accessor bound to the "opset2" operation set; every op in this
# module is created through this factory.
_get_node_factory_opset2 = partial(_get_node_factory, "opset2")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def batch_to_space(
    data: NodeInput,
    block_shape: NodeInput,
    crops_begin: NodeInput,
    crops_end: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Create a BatchToSpace node.

    BatchToSpace moves data from the batch dimension of the input tensor back
    into spatial dimensions.

    @param data: Node producing the data tensor.
    @param block_shape: The sizes of the block of values to be moved.
    @param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`.
    @param crops_end: Specifies the amount to crop from the end along each axis of `data`.
    @param name: Optional output node name.
    @return The new node performing a BatchToSpace operation.
    """
    input_nodes = as_nodes(data, block_shape, crops_begin, crops_end)
    return _get_node_factory_opset2().create("BatchToSpace", input_nodes)
@unary_op
def gelu(node: NodeInput, name: Optional[str] = None) -> Node:
    r"""Apply the Gaussian Error Linear Unit function element-wise.

    GELU is computed as:

    \f[ f(x) = 0.5\cdot x\cdot(1 + erf( \dfrac{x}{\sqrt{2}}) \f]

    See [Gaussian Error Linear Unit (GELU)](https://arxiv.org/pdf/1606.08415.pdf>)
    for details.

    @param node: Input tensor. One of: input node, array or scalar.
    @param name: Optional output node name.
    @return The new node performing a GELU operation on its input data element-wise.
    """
    factory = _get_node_factory_opset2()
    return factory.create("Gelu", [node])
@nameable_op
def mvn(
    data: Node,
    across_channels: bool = False,
    normalize_variance: bool = False,
    eps: float = 1e-9,
    name: Optional[str] = None,
) -> Node:
    r"""Perform Mean Variance Normalization operation on data from input node.

    Computes MVN on the input tensor `data` (called `X`) using formula:

    \f[ Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}} \f]

    @param data: The node with data tensor.
    @param across_channels: Denotes if mean values are shared across channels.
    @param normalize_variance: Denotes whether to perform variance normalization.
    @param eps: The number added to the variance to avoid division by zero
                when normalizing the value. Scalar value.
    @param name: Optional output node name.
    @return The new node performing a MVN operation on input tensor.
    """
    return _get_node_factory_opset2().create(
        "MVN",
        [data],
        {"across_channels": across_channels, "normalize_variance": normalize_variance, "eps": eps},
    )
@nameable_op
def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node:
    """Create a ReorgYolo node.

    @param input: Input data
    @param stride: Stride to reorganize input by
    @param name: Optional name for output node.
    @return ReorgYolo node
    """
    attributes = {"stride": stride}
    return _get_node_factory_opset2().create("ReorgYolo", [input], attributes)
@nameable_op
def roi_pooling(
    input: NodeInput,
    coords: NodeInput,
    output_size: TensorShape,
    spatial_scale: NumericData,
    method: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces an ROIPooling operation.

    @param input: Input feature map {N, C, ...}
    @param coords: Coordinates of bounding boxes
    @param output_size: Height/Width of ROI output features (shape)
    @param spatial_scale: Ratio of input feature map over input image size (float)
    @param method: Method of pooling - string: "max" or "bilinear"
    @param name: Optional name for output node.
    @return ROIPooling node
    """
    # Normalize the pooling method so callers may pass any casing.
    method = method.lower()
    return _get_node_factory_opset2().create(
        "ROIPooling",
        as_nodes(input, coords),
        {"output_size": Shape(output_size), "spatial_scale": spatial_scale, "method": method},
    )
@nameable_op
def space_to_batch(
    data: NodeInput,
    block_shape: NodeInput,
    pads_begin: NodeInput,
    pads_end: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Create a SpaceToBatch node.

    SpaceToBatch permutes blocks of spatial data into the batch dimension: the
    output is a copy of the input tensor with values from the spatial block
    dimensions moved into the batch dimension.

    @param data: Node producing the data tensor.
    @param block_shape: The sizes of the block of values to be moved.
    @param pads_begin: Specifies the padding for the beginning along each axis of `data`.
    @param pads_end: Specifies the padding for the ending along each axis of `data`.
    @param name: Optional output node name.
    @return The new node performing a SpaceToBatch operation.
    """
    input_nodes = as_nodes(data, block_shape, pads_begin, pads_end)
    return _get_node_factory_opset2().create("SpaceToBatch", input_nodes)

View File

@ -0,0 +1,133 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset3.ops import assign
from openvino.opset1.ops import atan
from openvino.opset1.ops import avg_pool
from openvino.opset1.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset3.ops import broadcast
from openvino.opset3.ops import bucketize
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset3.ops import cum_sum
from openvino.opset3.ops import cum_sum as cumsum
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset1.ops import divide
from openvino.opset1.ops import elu
from openvino.opset3.ops import embedding_bag_offsets_sum
from openvino.opset3.ops import embedding_bag_packed_sum
from openvino.opset3.ops import embedding_segments_sum
from openvino.opset3.ops import extract_image_patches
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset1.ops import gather
from openvino.opset1.ops import gather_tree
from openvino.opset2.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset3.ops import gru_cell
from openvino.opset1.ops import hard_sigmoid
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset1.ops import lrn
from openvino.opset1.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset2.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset3.ops import non_max_suppression
from openvino.opset3.ops import non_zero
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset1.ops import proposal
from openvino.opset1.ops import range
from openvino.opset3.ops import read_value
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset3.ops import rnn_cell
from openvino.opset3.ops import roi_align
from openvino.opset2.ops import roi_pooling
from openvino.opset3.ops import scatter_elements_update
from openvino.opset3.ops import scatter_update
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset3.ops import shape_of
from openvino.opset3.ops import shuffle_channels
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset3.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

View File

@ -0,0 +1,634 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from functools import partial
from openvino.impl import Node, Shape
from openvino.impl.op import Constant, Parameter
from openvino.opset_utils import _get_node_factory
from openvino.utils.decorators import binary_op, nameable_op, unary_op
from openvino.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.utils.node_factory import NodeFactory
from openvino.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorSliceInputDesc,
TensorIteratorMergedInputDesc,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
TensorIteratorConcatOutputDesc,
)
from openvino.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
# Factory accessor bound to the "opset3" operation set; every op in this
# module is created through this factory.
_get_node_factory_opset3 = partial(_get_node_factory, "opset3")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
    """Create an Assign node which writes `new_value` into a variable.

    @param new_value: Node producing a value to be assigned to a variable.
    @param variable_id: Id of a variable to be updated.
    @param name: Optional name for output node.
    @return Assign node
    """
    attributes = {"variable_id": variable_id}
    return _get_node_factory_opset3().create("Assign", [as_node(new_value)], attributes)
@nameable_op
def broadcast(
    data: NodeInput,
    target_shape: NodeInput,
    axes_mapping: Optional[NodeInput] = None,
    broadcast_spec: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Broadcast the input node's values along specified axes to a desired shape.

    @param data: The node with input tensor data.
    @param target_shape: The node with a new shape we want to broadcast tensor to.
    @param axes_mapping: The node with a axis positions (0-based) in the result
                         that are being broadcast.
    @param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes
                           to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL.
    @param name: Optional new name for output node.
    @return New node with broadcast shape.
    """
    mode = broadcast_spec.upper()
    input_nodes = as_nodes(data, target_shape)
    # Only the EXPLICIT mode consumes the optional axes_mapping input.
    if mode == "EXPLICIT":
        input_nodes.append(as_node(axes_mapping))
    return _get_node_factory_opset3().create("Broadcast", input_nodes, {"mode": mode})
@nameable_op
def bucketize(
    data: Node,
    buckets: NodeInput,
    output_type: str = "i64",
    with_right_bound: bool = True,
    name: Optional[str] = None,
) -> Node:
    """Create a Bucketize node.

    @param data: Input data to bucketize
    @param buckets: 1-D of sorted unique boundaries for buckets
    @param output_type: Output tensor type, "i64" or "i32", defaults to i64
    @param with_right_bound: indicates whether bucket includes the right or left
                             edge of interval. default true = includes right edge
    @param name: Optional name for output node.
    @return Bucketize node
    """
    attributes = {"output_type": output_type, "with_right_bound": with_right_bound}
    return _get_node_factory_opset3().create("Bucketize", [data, as_node(buckets)], attributes)
@nameable_op
def cum_sum(
    arg: NodeInput,
    axis: NodeInput,
    exclusive: bool = False,
    reverse: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Construct a cumulative summation operation.

    @param arg: The tensor to be summed.
    @param axis: zero dimension tensor specifying axis position along which sum will be performed.
    @param exclusive: if set to true, the top element is not included
    @param reverse: if set to true, will perform the sums in reverse direction
    @param name: Optional name for output node.
    @return New node performing the operation
    """
    return _get_node_factory_opset3().create(
        "CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse}
    )
@nameable_op
def embedding_bag_offsets_sum(
    emb_table: Node,
    indices: NodeInput,
    offsets: NodeInput,
    default_index: Optional[NodeInput] = None,
    per_sample_weights: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs sums of bags of embeddings without the intermediate embeddings.

    @param emb_table: Tensor containing the embedding lookup table.
    @param indices: Tensor with indices.
    @param offsets: Tensor containing the starting index positions of each bag in indices.
    @param default_index: Scalar containing default index in embedding table to fill empty bags.
                          Required whenever `per_sample_weights` is provided, because the
                          operation's inputs are positional.
    @param per_sample_weights: Tensor with weights for each sample.
    @param name: Optional name for output node.
    @return The new node which performs EmbeddingBagOffsetsSum
    """
    inputs = [emb_table, as_node(indices), as_node(offsets)]
    # Optional inputs are positional: default_index (slot 3) must be present
    # whenever per_sample_weights (slot 4) is supplied. Wrap both with as_node
    # so raw numeric data (NodeInput) is accepted, consistent with the
    # handling of `indices` and `offsets` above.
    if per_sample_weights is not None:
        inputs.append(as_node(default_index))
        inputs.append(as_node(per_sample_weights))
    elif default_index is not None:
        inputs.append(as_node(default_index))

    return _get_node_factory_opset3().create("EmbeddingBagOffsetsSum", inputs, {})
@nameable_op
def embedding_bag_packed_sum(
    emb_table: NodeInput,
    indices: NodeInput,
    per_sample_weights: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Create an EmbeddingBagPackedSum node.

    The operation builds its output by replacing every index of the input
    tensor with the corresponding row of the embedding (weights) table.

    @param emb_table: Tensor containing the embedding lookup table.
    @param indices: Tensor with indices.
    @param per_sample_weights: Weights to be multiplied with embedding table.
    @param name: Optional name for output node.
    @return EmbeddingBagPackedSum node
    """
    input_nodes = [as_node(emb_table), as_node(indices)]
    # The weights input is optional and simply appended when present.
    if per_sample_weights is not None:
        input_nodes.append(as_node(per_sample_weights))

    return _get_node_factory_opset3().create("EmbeddingBagPackedSum", input_nodes, {})
@nameable_op
def embedding_segments_sum(
    emb_table: Node,
    indices: NodeInput,
    segment_ids: NodeInput,
    num_segments: Optional[NodeInput] = None,
    default_index: Optional[NodeInput] = None,
    per_sample_weights: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Create an EmbeddingSegmentsSum node.

    The operation builds its output by replacing every index of the input
    tensor with the corresponding row of the embedding (weights) table.

    @param emb_table: Tensor containing the embedding lookup table.
    @param indices: Tensor with indices.
    @param segment_ids: Tensor with indices into the output Tensor
    @param num_segments: Tensor with number of segments.
    @param default_index: Scalar containing default index in embedding table to fill empty bags.
    @param per_sample_weights: Weights to be multiplied with embedding table.
    @param name: Optional name for output node.
    @return EmbeddingSegmentsSum node
    """
    input_nodes = [as_node(emb_table), as_node(indices), as_node(segment_ids)]
    # Optional inputs are positional: a later optional input can only be
    # supplied together with every optional input that precedes it.
    if per_sample_weights is not None:
        input_nodes += [as_node(num_segments), as_node(default_index), as_node(per_sample_weights)]
    elif default_index is not None:
        input_nodes += [as_node(num_segments), as_node(default_index)]
    elif num_segments is not None:
        input_nodes.append(as_node(num_segments))

    return _get_node_factory_opset3().create("EmbeddingSegmentsSum", input_nodes, {})
@nameable_op
def extract_image_patches(
    image: NodeInput,
    sizes: TensorShape,
    strides: List[int],
    rates: TensorShape,
    auto_pad: str,
    name: Optional[str] = None,
) -> Node:
    """Create an ExtractImagePatches node.

    @param image: 4-D Input data to extract image patches.
    @param sizes: Patch size in the format of [size_rows, size_cols].
    @param strides: Patch movement stride in the format of [stride_rows, stride_cols]
    @param rates: Element selection rate for creating a patch.
    @param auto_pad: Padding type.
    @param name: Optional name for output node.
    @return ExtractImagePatches node
    """
    attributes = {"sizes": sizes, "strides": strides, "rates": rates, "auto_pad": auto_pad}
    return _get_node_factory_opset3().create("ExtractImagePatches", [as_node(image)], attributes)
@nameable_op
def gru_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: Optional[List[str]] = None,
    activations_alpha: Optional[List[float]] = None,
    activations_beta: Optional[List[float]] = None,
    clip: float = 0.0,
    linear_before_reset: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Perform GRUCell operation on the tensor from input node.

    GRUCell represents a single GRU Cell that computes the output
    using the formula described in the paper: https://arxiv.org/abs/1406.1078

    Note this class represents only single *cell* and not whole *layer*.

    @param X: The input tensor with shape: [batch_size, input_size].
    @param initial_hidden_state: The hidden state tensor at current time step with shape:
                                 [batch_size, hidden_size].
    @param W: The weights for matrix multiplication, gate order: zrh.
              Shape: [3*hidden_size, input_size].
    @param R: The recurrence weights for matrix multiplication.
              Shape: [3*hidden_size, hidden_size].
    @param B: The sum of biases (weight and recurrence).
              For linear_before_reset set True the shape is [4*hidden_size].
              Otherwise the shape is [3*hidden_size].
    @param hidden_size: The number of hidden units for recurrent cell.
                        Specifies hidden state size.
    @param activations: The vector of activation functions used inside recurrent cell.
                        Defaults to ["sigmoid", "tanh"] when omitted.
    @param activations_alpha: The vector of alpha parameters for activation functions in
                              order respective to activation list.
    @param activations_beta: The vector of beta parameters for activation functions in order
                             respective to activation list.
    @param clip: The value defining clipping range [-clip, clip] on input of
                 activation functions.
    @param linear_before_reset: Flag denotes if the layer behaves according to the modification
                                of GRUCell described in the formula in the ONNX documentation.
    @param name: Optional output node name.
    @return The new node performing a GRUCell operation on tensor from input node.
    """
    # Fill in spec defaults; mutable defaults cannot be used in the signature.
    if activations is None:
        activations = ["sigmoid", "tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []
    input_nodes = as_nodes(X, initial_hidden_state, W, R, B)
    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "linear_before_reset": linear_before_reset,
        "clip": clip,
    }
    return _get_node_factory_opset3().create("GRUCell", input_nodes, attributes)
@nameable_op
def non_max_suppression(
    boxes: NodeInput,
    scores: NodeInput,
    max_output_boxes_per_class: Optional[NodeInput] = None,
    iou_threshold: Optional[NodeInput] = None,
    score_threshold: Optional[NodeInput] = None,
    box_encoding: str = "corner",
    sort_result_descending: bool = True,
    output_type: str = "i64",
    name: Optional[str] = None,
) -> Node:
    """Create a NonMaxSuppression node.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes
                                       to be selected per class.
    @param iou_threshold: Tensor specifying intersection over union threshold
    @param score_threshold: Tensor specifying minimum score to consider box for the processing.
    @param box_encoding: Format of boxes data encoding.
    @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected
                                   boxes across batches or not.
    @param output_type: Output element type.
    @param name: Optional name for output node.
    @return The new node which performs NonMaxSuppression
    """
    # Optional thresholds default to zero-valued constants when omitted.
    if max_output_boxes_per_class is None:
        max_output_boxes_per_class = make_constant_node(0, np.int64)
    if iou_threshold is None:
        iou_threshold = make_constant_node(0, np.float32)
    if score_threshold is None:
        score_threshold = make_constant_node(0, np.float32)

    input_nodes = as_nodes(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold
    )
    return _get_node_factory_opset3().create(
        "NonMaxSuppression",
        input_nodes,
        {
            "box_encoding": box_encoding,
            "sort_result_descending": sort_result_descending,
            "output_type": output_type,
        },
    )
@nameable_op
def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = None,) -> Node:
    """Return the indices of the elements that are non-zero.

    @param data: Input data.
    @param output_type: Output tensor type.
    @param name: Optional name for output node.
    @return The new node which performs NonZero
    """
    attributes = {"output_type": output_type}
    return _get_node_factory_opset3().create("NonZero", [as_node(data)], attributes)
@nameable_op
def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
    """Return a node which produces the ReadValue operation.

    @param init_value: Node producing a value to be returned instead of an unassigned variable.
    @param variable_id: Id of a variable to be read.
    @param name: Optional name for output node.
    @return ReadValue node
    """
    return _get_node_factory_opset3().create(
        "ReadValue",
        [as_node(init_value)],
        {"variable_id": variable_id}
    )
@nameable_op
def rnn_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: Optional[List[str]],
    activations_alpha: Optional[List[float]],
    activations_beta: Optional[List[float]],
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Perform RNNCell operation on tensor from input node.

    It follows notation and equations defined as in ONNX standard:
    https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN

    Note this class represents only single *cell* and not whole RNN *layer*.

    @param X: The input tensor with shape: [batch_size, input_size].
    @param initial_hidden_state: The hidden state tensor at current time step with shape:
                                 [batch_size, hidden_size].
    @param W: The weight tensor with shape: [hidden_size, input_size].
    @param R: The recurrence weight tensor with shape: [hidden_size,
              hidden_size].
    @param B: The sum of biases (weight and recurrence) with shape: [hidden_size].
    @param hidden_size: The number of hidden units for recurrent cell.
                        Specifies hidden state size.
    @param activations: The vector of activation functions used inside recurrent cell.
                        Defaults to ["tanh"] when None is passed.
    @param activations_alpha: The vector of alpha parameters for activation functions in
                              order respective to activation list.
    @param activations_beta: The vector of beta parameters for activation functions in order
                             respective to activation list.
    @param clip: The value defining clipping range [-clip, clip] on input of
                 activation functions.
    @param name: Optional output node name.
    @return The new node performing a RNNCell operation on tensor from input node.
    """
    # Fill in spec defaults; mutable defaults cannot be used in the signature.
    if activations is None:
        activations = ["tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []
    input_nodes = as_nodes(X, initial_hidden_state, W, R, B)
    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }
    return _get_node_factory_opset3().create("RNNCell", input_nodes, attributes)
@nameable_op
def roi_align(
    data: NodeInput,
    rois: NodeInput,
    batch_indices: NodeInput,
    pooled_h: int,
    pooled_w: int,
    sampling_ratio: int,
    spatial_scale: float,
    mode: str,
    name: Optional[str] = None,
) -> Node:
    """Create a ROIAlign node.

    @param data: Input data.
    @param rois: RoIs (Regions of Interest) to pool over.
    @param batch_indices: Tensor with each element denoting the index of
                          the corresponding image in the batch.
    @param pooled_h: Height of the ROI output feature map.
    @param pooled_w: Width of the ROI output feature map.
    @param sampling_ratio: Number of bins over height and width to use to calculate
                           each output feature map element.
    @param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates.
    @param mode: Method to perform pooling to produce output feature map elements.
    @param name: Optional name for output node.
    @return The new node which performs ROIAlign
    """
    return _get_node_factory_opset3().create(
        "ROIAlign",
        as_nodes(data, rois, batch_indices),
        {
            "pooled_h": pooled_h,
            "pooled_w": pooled_w,
            "sampling_ratio": sampling_ratio,
            "spatial_scale": spatial_scale,
            "mode": mode,
        },
    )
@nameable_op
def scatter_elements_update(
    data: NodeInput,
    indices: NodeInput,
    updates: NodeInput,
    axis: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces a ScatterElementsUpdate operation.

    ScatterElementsUpdate creates a copy of the first input tensor with updated elements
    specified with second and third input tensors.

    For each entry in `updates`, the target index in `data` is obtained by combining
    the corresponding entry in `indices` with the index of the entry itself: the
    index-value for dimension equal to `axis` is obtained from the value of the
    corresponding entry in `indices` and the index-value for dimension not equal
    to `axis` is obtained from the index of the entry itself.

    @param data: The input tensor to be updated.
    @param indices: The tensor with indexes which will be updated.
    @param updates: The tensor with update values.
    @param axis: The axis for scatter.
    @param name: Optional name for output node.
    @return ScatterElementsUpdate node
    """
    return _get_node_factory_opset3().create(
        "ScatterElementsUpdate", as_nodes(data, indices, updates, axis)
    )
@nameable_op
def scatter_update(
    data: Node, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None
) -> Node:
    """Create a ScatterUpdate node.

    ScatterUpdate sets new values to slices from data addressed by indices.

    @param data: The input tensor to be updated.
    @param indices: The tensor with indexes which will be updated.
    @param updates: The tensor with update values.
    @param axis: The axis at which elements will be updated.
    @param name: Optional name for output node.
    @return ScatterUpdate node
    """
    input_nodes = as_nodes(data, indices, updates, axis)
    return _get_node_factory_opset3().create("ScatterUpdate", input_nodes)
@nameable_op
def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node:
    """Create a ShapeOf node producing a tensor with the shape of its input data.

    @param data: The tensor containing the input data.
    @param output_type: Output element type.
    @param name: Optional name for output node.
    @return ShapeOf node
    """
    attributes = {"output_type": output_type}
    return _get_node_factory_opset3().create("ShapeOf", [as_node(data)], attributes)
@nameable_op
def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = None) -> Node:
    """Perform permutation on data in the channel dimension of the input tensor.

    @param data: The node with input tensor.
    @param axis: Channel dimension index in the data tensor.
                 A negative value means that the index should be calculated
                 from the back of the input data shape.
    @param group: The channel dimension specified by the axis parameter
                  should be split into this number of groups.
    @param name: Optional output node name.
    @return The new node performing a permutation on data in the channel dimension
            of the input tensor.

    The operation is the equivalent with the following transformation of the input tensor
    `data` of shape [N, C, H, W]:

    `data_reshaped` = reshape(`data`, [N, group, C / group, H * W])

    `data_transposed` = transpose(`data_reshaped`, [0, 2, 1, 3])

    `output` = reshape(`data_transposed`, [N, C, H, W])

    For example:

    @code{.py}
        Inputs: tensor of shape [1, 6, 2, 2]

                data = [[[[ 0.,  1.], [ 2.,  3.]],
                         [[ 4.,  5.], [ 6.,  7.]],
                         [[ 8.,  9.], [10., 11.]],
                         [[12., 13.], [14., 15.]],
                         [[16., 17.], [18., 19.]],
                         [[20., 21.], [22., 23.]]]]

                axis = 1
                group = 3

        Output: tensor of shape [1, 6, 2, 2]

                output = [[[[ 0.,  1.], [ 2.,  3.]],
                           [[ 8.,  9.], [10., 11.]],
                           [[16., 17.], [18., 19.]],
                           [[ 4.,  5.], [ 6.,  7.]],
                           [[12., 13.], [14., 15.]],
                           [[20., 21.], [22., 23.]]]]
    @endcode
    """
    return _get_node_factory_opset3().create(
        "ShuffleChannels", [as_node(data)], {"axis": axis, "group": group}
    )
@nameable_op
def topk(
    data: NodeInput,
    k: NodeInput,
    axis: int,
    mode: str,
    sort: str,
    index_element_type: str = "i32",
    name: Optional[str] = None,
) -> Node:
    """Create a TopK node returning both values and indices.

    @param data: Input data.
    @param k: K.
    @param axis: TopK Axis.
    @param mode: Compute TopK largest ('max') or smallest ('min')
    @param sort: Order of output elements (sort by: 'none', 'index' or 'value')
    @param index_element_type: Type of output tensor with indices.
    @param name: Optional name for output node.
    @return The new node which performs TopK (both indices and values)
    """
    attributes = {
        "axis": axis,
        "mode": mode,
        "sort": sort,
        "index_element_type": index_element_type,
    }
    return _get_node_factory_opset3().create("TopK", as_nodes(data, k), attributes)

View File

@ -0,0 +1,143 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset4.ops import acosh
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset4.ops import asinh
from openvino.opset3.ops import assign
from openvino.opset1.ops import atan
from openvino.opset4.ops import atanh
from openvino.opset1.ops import avg_pool
from openvino.opset1.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset3.ops import broadcast
from openvino.opset3.ops import bucketize
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset4.ops import ctc_loss
from openvino.opset3.ops import cum_sum
from openvino.opset3.ops import cum_sum as cumsum
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset1.ops import divide
from openvino.opset1.ops import elu
from openvino.opset3.ops import embedding_bag_offsets_sum
from openvino.opset3.ops import embedding_bag_packed_sum
from openvino.opset3.ops import embedding_segments_sum
from openvino.opset3.ops import extract_image_patches
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset1.ops import gather
from openvino.opset1.ops import gather_tree
from openvino.opset2.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset3.ops import gru_cell
from openvino.opset1.ops import hard_sigmoid
from openvino.opset4.ops import hswish
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset1.ops import lrn
from openvino.opset4.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset4.ops import mish
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset2.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset4.ops import non_max_suppression
from openvino.opset3.ops import non_zero
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset4.ops import proposal
from openvino.opset1.ops import range
from openvino.opset3.ops import read_value
from openvino.opset4.ops import reduce_l1
from openvino.opset4.ops import reduce_l2
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset3.ops import rnn_cell
from openvino.opset3.ops import roi_align
from openvino.opset2.ops import roi_pooling
from openvino.opset3.ops import scatter_elements_update
from openvino.opset3.ops import scatter_update
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset3.ops import shape_of
from openvino.opset3.ops import shuffle_channels
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset4.ops import softplus
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset4.ops import swish
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset3.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

View File

@ -0,0 +1,409 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from functools import partial
from openvino.impl import Node, Shape
from openvino.impl.op import Constant, Parameter
from openvino.opset_utils import _get_node_factory
from openvino.utils.decorators import binary_op, nameable_op, unary_op
from openvino.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.utils.node_factory import NodeFactory
from openvino.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorSliceInputDesc,
TensorIteratorMergedInputDesc,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
TensorIteratorConcatOutputDesc,
)
from openvino.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
# Factory bound to the "opset4" operation set; used by every op builder below.
_get_node_factory_opset4 = partial(_get_node_factory, "opset4")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def ctc_loss(
    logits: NodeInput,
    logit_length: NodeInput,
    labels: NodeInput,
    label_length: NodeInput,
    blank_index: Optional[NodeInput] = None,
    preprocess_collapse_repeated: bool = False,
    ctc_merge_repeated: bool = True,
    unique: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs CTCLoss.

    @param logits: 3-D tensor of logits.
    @param logit_length: 1-D tensor of lengths for each object from a batch.
    @param labels: 2-D tensor of labels for which likelihood is estimated using logits.
    @param label_length: 1-D tensor of length for each label sequence.
    @param blank_index: Scalar used to mark a blank index.
    @param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation.
    @param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment.
    @param unique: Flag to find unique elements in a target.
    @param name: Optional name for the output node.
    @return The new node which performs CTCLoss
    """
    # blank_index is the only optional input; append it only when supplied.
    input_values = [logits, logit_length, labels, label_length]
    if blank_index is not None:
        input_values.append(blank_index)
    attributes = {
        "preprocess_collapse_repeated": preprocess_collapse_repeated,
        "ctc_merge_repeated": ctc_merge_repeated,
        "unique": unique,
    }
    return _get_node_factory_opset4().create("CTCLoss", as_nodes(*input_values), attributes)
@nameable_op
def non_max_suppression(
    boxes: NodeInput,
    scores: NodeInput,
    max_output_boxes_per_class: Optional[NodeInput] = None,
    iou_threshold: Optional[NodeInput] = None,
    score_threshold: Optional[NodeInput] = None,
    box_encoding: str = "corner",
    sort_result_descending: bool = True,
    output_type: str = "i64",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs NonMaxSuppression.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes
                                        to be selected per class.
    @param iou_threshold: Tensor specifying intersection over union threshold
    @param score_threshold: Tensor specifying minimum score to consider box for the processing.
    @param box_encoding: Format of boxes data encoding.
    @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected
                                   boxes across batches or not.
    @param output_type: Output element type.
    @return The new node which performs NonMaxSuppression
    """
    # Optional inputs fall back to zero-valued constants when not provided.
    if max_output_boxes_per_class is None:
        max_output_boxes_per_class = make_constant_node(0, np.int64)
    if iou_threshold is None:
        iou_threshold = make_constant_node(0, np.float32)
    if score_threshold is None:
        score_threshold = make_constant_node(0, np.float32)
    attributes = {
        "box_encoding": box_encoding,
        "sort_result_descending": sort_result_descending,
        "output_type": output_type,
    }
    return _get_node_factory_opset4().create(
        "NonMaxSuppression",
        as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold),
        attributes,
    )
@nameable_op
def softplus(data: NodeInput, name: Optional[str] = None) -> Node:
    """Apply SoftPlus operation on each element of input tensor.

    @param data: The tensor providing input data.
    @param name: Optional name for the output node.
    @return The new node with SoftPlus operation applied on each element.
    """
    factory = _get_node_factory_opset4()
    return factory.create("SoftPlus", as_nodes(data), {})
@nameable_op
def mish(data: NodeInput, name: Optional[str] = None,) -> Node:
    """Return a node which performs Mish.

    @param data: Tensor with input data floating point type.
    @param name: Optional name for the output node.
    @return The new node which performs Mish
    """
    factory = _get_node_factory_opset4()
    return factory.create("Mish", as_nodes(data), {})
@nameable_op
def hswish(data: NodeInput, name: Optional[str] = None,) -> Node:
    """Return a node which performs HSwish (hard version of Swish).

    @param data: Tensor with input data floating point type.
    @param name: Optional name for the output node.
    @return The new node which performs HSwish
    """
    factory = _get_node_factory_opset4()
    return factory.create("HSwish", as_nodes(data), {})
@nameable_op
def swish(
    data: NodeInput,
    beta: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)).

    @param data: Tensor with input data floating point type.
    @param beta: Scalar with beta value; defaults to a 1.0 constant when omitted.
    @param name: Optional name for the output node.
    @return The new node which performs Swish
    """
    # A missing beta input is replaced by a constant 1.0 (plain sigmoid-weighted x).
    beta_input = make_constant_node(1.0, np.float32) if beta is None else beta
    return _get_node_factory_opset4().create("Swish", as_nodes(data, beta_input), {})
@nameable_op
def acosh(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply hyperbolic inverse cosine function on the input node element-wise.

    @param node: One of: input node, array or scalar.
    @param name: Optional new name for output node.
    @return New node with arccosh operation applied on it.
    """
    factory = _get_node_factory_opset4()
    return factory.create("Acosh", [node])
@nameable_op
def asinh(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply hyperbolic inverse sinus function on the input node element-wise.

    @param node: One of: input node, array or scalar.
    @param name: Optional new name for output node.
    @return New node with arcsinh operation applied on it.
    """
    factory = _get_node_factory_opset4()
    return factory.create("Asinh", [node])
@nameable_op
def atanh(node: NodeInput, name: Optional[str] = None) -> Node:
    """Apply hyperbolic inverse tangent function on the input node element-wise.

    @param node: One of: input node, array or scalar.
    @param name: Optional new name for output node.
    @return New node with arctanh operation applied on it.
    """
    factory = _get_node_factory_opset4()
    return factory.create("Atanh", [node])
@nameable_op
def proposal(
    class_probs: Node,
    bbox_deltas: Node,
    image_shape: NodeInput,
    attrs: dict,
    name: Optional[str] = None,
) -> Node:
    """Filter bounding boxes and output only those with the highest prediction confidence.

    @param class_probs: 4D input floating point tensor with class prediction scores.
    @param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes
    @param image_shape: The 1D input tensor with 3 or 4 elements describing image shape.
    @param attrs: The dictionary containing key, value pairs for attributes.
    @param name: Optional name for the output node.

    * base_size The size of the anchor to which scale and ratio attributes are applied.
                Range of values: a positive unsigned integer number
                Default value: None
                Required: yes
    * pre_nms_topn The number of bounding boxes before the NMS operation.
                Range of values: a positive unsigned integer number
                Default value: None
                Required: yes
    * post_nms_topn The number of bounding boxes after the NMS operation.
                Range of values: a positive unsigned integer number
                Default value: None
                Required: yes
    * nms_thresh The minimum value of the proposal to be taken into consideration.
                Range of values: a positive floating-point number
                Default value: None
                Required: yes
    * feat_stride The step size to slide over boxes (in pixels).
                Range of values: a positive unsigned integer
                Default value: None
                Required: yes
    * min_size The minimum size of box to be taken into consideration.
                Range of values: a positive unsigned integer number
                Default value: None
                Required: yes
    * ratio The ratios for anchor generation.
                Range of values: a list of floating-point numbers
                Default value: None
                Required: yes
    * scale The scales for anchor generation.
                Range of values: a list of floating-point numbers
                Default value: None
                Required: yes
    * clip_before_nms The flag that specifies whether to perform clip bounding boxes before
                non-maximum suppression or not.
                Range of values: True or False
                Default value: True
                Required: no
    * clip_after_nms The flag that specifies whether to perform clip bounding boxes after
                non-maximum suppression or not.
                Range of values: True or False
                Default value: False
                Required: no
    * normalize The flag that specifies whether to perform normalization of output boxes to
                [0,1] interval or not.
                Range of values: True or False
                Default value: False
                Required: no
    * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding.
                Range of values: a positive floating-point number
                Default value: 1.0
                Required: no
    * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates
                before decoding.
                Range of values: a positive floating-point number
                Default value: 1.0
                Required: no
    * framework Specifies how the box coordinates are calculated.
                Range of values: "" (empty string) - calculate box coordinates like in Caffe*
                                 tensorflow - calculate box coordinates like in the TensorFlow*
                                              Object Detection API models
                Default value: "" (empty string)
                Required: no

    Example of attribute dictionary:

    @code{.py}
        # just required ones
        attrs = {
            'base_size': 85,
            'pre_nms_topn': 10,
            'post_nms_topn': 20,
            'nms_thresh': 0.34,
            'feat_stride': 16,
            'min_size': 32,
            'ratio': [0.1, 1.5, 2.0, 2.5],
            'scale': [2, 3, 3, 4],
        }
    @endcode

    Optional attributes which are absent from dictionary will be set with corresponding default.

    @return Node representing Proposal operation.
    """
    # Each tuple: (attribute name, required?, expected numpy dtype family, value validator).
    requirements = [
        ("base_size", True, np.unsignedinteger, is_positive_value),
        ("pre_nms_topn", True, np.unsignedinteger, is_positive_value),
        ("post_nms_topn", True, np.unsignedinteger, is_positive_value),
        ("nms_thresh", True, np.floating, is_positive_value),
        ("feat_stride", True, np.unsignedinteger, is_positive_value),
        ("min_size", True, np.unsignedinteger, is_positive_value),
        ("ratio", True, np.floating, None),
        ("scale", True, np.floating, None),
        ("clip_before_nms", False, np.bool_, None),
        ("clip_after_nms", False, np.bool_, None),
        ("normalize", False, np.bool_, None),
        ("box_size_scale", False, np.floating, is_positive_value),
        ("box_coordinate_scale", False, np.floating, is_positive_value),
        ("framework", False, np.str_, None),
    ]
    # Raises if a required attribute is missing or a value fails validation.
    check_valid_attributes("Proposal", attrs, requirements)
    return _get_node_factory_opset4().create(
        "Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs
    )
@nameable_op
def reduce_l1(
    node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None
) -> Node:
    """L1-reduction operation on input tensor, eliminating the specified reduction axes.

    @param node: The tensor we want to L1-reduce.
    @param reduction_axes: The axes to eliminate through the L1-norm operation.
    @param keep_dims: If set to True it holds axes that are used for reduction
    @param name: Optional name for output node.
    @return The new node performing L1-reduction operation.
    """
    return _get_node_factory_opset4().create(
        "ReduceL1", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
    )
@nameable_op
def reduce_l2(
    node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None
) -> Node:
    """L2-reduction operation on input tensor, eliminating the specified reduction axes.

    @param node: The tensor we want to L2-reduce.
    @param reduction_axes: The axes to eliminate through the L2-norm operation.
    @param keep_dims: If set to True it holds axes that are used for reduction
    @param name: Optional name for output node.
    @return The new node performing L2-reduction operation.
    """
    return _get_node_factory_opset4().create(
        "ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
    )
@nameable_op
def lstm_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    initial_cell_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: Optional[List[str]] = None,
    activations_alpha: Optional[List[float]] = None,
    activations_beta: Optional[List[float]] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs LSTMCell operation.

    @param X: The input tensor with shape: [batch_size, input_size].
    @param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size].
    @param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size].
    @param W: The weight tensor with shape: [4*hidden_size, input_size].
    @param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size].
    @param B: The bias tensor for gates with shape: [4*hidden_size].
    @param hidden_size: Specifies hidden state size.
    @param activations: The list of three activation functions for gates.
    @param activations_alpha: The list of alpha parameters for activation functions.
    @param activations_beta: The list of beta parameters for activation functions.
    @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    @param name: An optional name of the output node.
    @return The new node represents LSTMCell. Node outputs count: 2.
    """
    # Default activations when none are provided: sigmoid for gates, tanh otherwise.
    if activations is None:
        activations = ["sigmoid", "tanh", "tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []
    node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B)
    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }
    return _get_node_factory_opset4().create("LSTMCell", node_inputs, attributes)

View File

@ -0,0 +1,150 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset4.ops import acosh
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset4.ops import asinh
from openvino.opset3.ops import assign
from openvino.opset1.ops import atan
from openvino.opset4.ops import atanh
from openvino.opset1.ops import avg_pool
from openvino.opset5.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset3.ops import broadcast
from openvino.opset3.ops import bucketize
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset4.ops import ctc_loss
from openvino.opset3.ops import cum_sum
from openvino.opset3.ops import cum_sum as cumsum
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset1.ops import divide
from openvino.opset1.ops import elu
from openvino.opset3.ops import embedding_bag_offsets_sum
from openvino.opset3.ops import embedding_bag_packed_sum
from openvino.opset3.ops import embedding_segments_sum
from openvino.opset3.ops import extract_image_patches
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset1.ops import gather
from openvino.opset5.ops import gather_nd
from openvino.opset1.ops import gather_tree
from openvino.opset2.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset3.ops import gru_cell
from openvino.opset5.ops import gru_sequence
from openvino.opset1.ops import hard_sigmoid
from openvino.opset5.ops import hsigmoid
from openvino.opset4.ops import hswish
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset5.ops import log_softmax
from openvino.opset5.ops import loop
from openvino.opset1.ops import lrn
from openvino.opset4.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset4.ops import mish
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset2.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset5.ops import non_max_suppression
from openvino.opset3.ops import non_zero
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset4.ops import proposal
from openvino.opset1.ops import range
from openvino.opset3.ops import read_value
from openvino.opset4.ops import reduce_l1
from openvino.opset4.ops import reduce_l2
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset3.ops import rnn_cell
from openvino.opset5.ops import rnn_sequence
from openvino.opset3.ops import roi_align
from openvino.opset2.ops import roi_pooling
from openvino.opset5.ops import round
from openvino.opset3.ops import scatter_elements_update
from openvino.opset3.ops import scatter_update
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset3.ops import shape_of
from openvino.opset3.ops import shuffle_channels
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset4.ops import softplus
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset4.ops import swish
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset3.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

View File

@ -0,0 +1,427 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from functools import partial
from openvino.impl import Node, Shape
from openvino.impl.op import Constant, Parameter
from openvino.opset_utils import _get_node_factory
from openvino.utils.decorators import binary_op, nameable_op, unary_op
from openvino.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.utils.node_factory import NodeFactory
from openvino.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorSliceInputDesc,
TensorIteratorMergedInputDesc,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
TensorIteratorConcatOutputDesc,
)
from openvino.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
# Factory bound to the "opset5" operation set; used by every op builder below.
_get_node_factory_opset5 = partial(_get_node_factory, "opset5")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def batch_norm_inference(
    data: NodeInput,
    gamma: NodeInput,
    beta: NodeInput,
    mean: NodeInput,
    variance: NodeInput,
    epsilon: float,
    name: Optional[str] = None,
) -> Node:
    """Normalize an input tensor by mean and variance, applying scale and offset.

    @param data: The input tensor with data for normalization.
    @param gamma: The scalar scaling for normalized value.
    @param beta: The bias added to the scaled normalized value.
    @param mean: The value for mean normalization.
    @param variance: The value for variance normalization.
    @param epsilon: The number to be added to the variance to avoid division
                    by zero when normalizing a value.
    @param name: The optional name of the output node.
    @return: The new node which performs BatchNormInference.
    """
    inputs = as_nodes(data, gamma, beta, mean, variance)
    return _get_node_factory_opset5().create("BatchNormInference", inputs, {"epsilon": epsilon})
@nameable_op
def gather_nd(
    data: NodeInput,
    indices: NodeInput,
    batch_dims: Optional[int] = 0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs GatherND.

    @param data: N-D tensor with data for gathering
    @param indices: K-D tensor of tuples with indices by which data is gathered
    @param batch_dims: Scalar value of batch dimensions
    @param name: Optional name for the output node.
    @return: The new node which performs GatherND
    """
    return _get_node_factory_opset5().create(
        "GatherND", as_nodes(data, indices), {"batch_dims": batch_dims}
    )
@nameable_op
def log_softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
    """Apply LogSoftmax operation on each element of input tensor.

    @param data: The tensor providing input data.
    @param axis: An axis along which LogSoftmax should be calculated
    @param name: Optional name for the output node.
    @return: The new node with LogSoftmax operation applied on each element.
    """
    attributes = {"axis": axis}
    return _get_node_factory_opset5().create("LogSoftmax", [as_node(data)], attributes)
@nameable_op
def non_max_suppression(
    boxes: NodeInput,
    scores: NodeInput,
    max_output_boxes_per_class: Optional[NodeInput] = None,
    iou_threshold: Optional[NodeInput] = None,
    score_threshold: Optional[NodeInput] = None,
    soft_nms_sigma: Optional[NodeInput] = None,
    box_encoding: str = "corner",
    sort_result_descending: bool = True,
    output_type: str = "i64",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs NonMaxSuppression.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes
                                        to be selected per class.
    @param iou_threshold: Tensor specifying intersection over union threshold
    @param score_threshold: Tensor specifying minimum score to consider box for the processing.
    @param soft_nms_sigma: Tensor specifying the sigma parameter for Soft-NMS.
    @param box_encoding: Format of boxes data encoding.
    @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected
                                   boxes across batches or not.
    @param output_type: Output element type.
    @return: The new node which performs NonMaxSuppression
    """
    # Missing threshold inputs fall back to zero-valued constants.
    if max_output_boxes_per_class is None:
        max_output_boxes_per_class = make_constant_node(0, np.int64)
    if iou_threshold is None:
        iou_threshold = make_constant_node(0, np.float32)
    if score_threshold is None:
        score_threshold = make_constant_node(0, np.float32)
    # soft_nms_sigma is the only input that may be omitted entirely.
    input_values = [boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold]
    if soft_nms_sigma is not None:
        input_values.append(soft_nms_sigma)
    attributes = {
        "box_encoding": box_encoding,
        "sort_result_descending": sort_result_descending,
        "output_type": output_type,
    }
    return _get_node_factory_opset5().create(
        "NonMaxSuppression", as_nodes(*input_values), attributes
    )
@nameable_op
def round(data: NodeInput, mode: str = "half_to_even", name: Optional[str] = None) -> Node:
    """Apply Round operation on each element of input tensor.

    @param data: The tensor providing input data.
    @param mode: Rule to round halfway cases. If set to 'half_to_even' then halfs round to the nearest even
        integer or rounding in such a way that the result heads away from zero if `mode` attribute is
        'half_away_from_zero`.
    @param name: An optional name of the output node.
    @return: The new node with Round operation applied on each element.
    """
    # The op attribute expects the upper-case spelling of the mode.
    attributes = {"mode": mode.upper()}
    return _get_node_factory_opset5().create("Round", as_nodes(data), attributes)
@nameable_op
def lstm_sequence(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    initial_cell_state: NodeInput,
    sequence_lengths: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    direction: str,
    activations: List[str] = None,
    activations_alpha: List[float] = None,
    activations_beta: List[float] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs LSTMSequence operation.

    @param X: The input tensor. Shape: [batch_size, seq_length, input_size].
    @param initial_hidden_state: The hidden state tensor.
                                 Shape: [batch_size, num_directions, hidden_size].
    @param initial_cell_state: The cell state tensor.
                               Shape: [batch_size, num_directions, hidden_size].
    @param sequence_lengths: Specifies real sequence lengths for each batch element.
                             Shape: [batch_size]. Integer type.
    @param W: Tensor with weights for matrix multiplication operation with input portion of data.
              Expected format: fico. Shape: [num_directions, 4*hidden_size, input_size].
    @param R: The tensor with weights for matrix multiplication operation with hidden state.
              Expected format: fico. Shape: [num_directions, 4*hidden_size, hidden_size].
    @param B: The sum of biases (weight and recurrence). Expected format: fico.
              Shape: [num_directions, 4*hidden_size].
    @param hidden_size: Specifies hidden state size.
    @param direction: Specifies if the RNN is forward, reverse, or bidirectional.
    @param activations: The list of three activation functions for gates.
    @param activations_alpha: The list of alpha parameters for activation functions.
    @param activations_beta: The list of beta parameters for activation functions.
    @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    @param name: An optional name of the output node.
    @return: The new node represents LSTMSequence. Node outputs count: 3.
    """
    # Substitute defaults only when the caller passed None (an explicit empty
    # list is forwarded unchanged).
    activations = ["sigmoid", "tanh", "tanh"] if activations is None else activations
    activations_alpha = [] if activations_alpha is None else activations_alpha
    activations_beta = [] if activations_beta is None else activations_beta

    attributes = {
        "hidden_size": hidden_size,
        "direction": direction.lower(),
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }
    inputs = as_nodes(X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B)
    return _get_node_factory_opset5().create("LSTMSequence", inputs, attributes)
@nameable_op
def hsigmoid(data: NodeInput, name: Optional[str] = None) -> Node:
    """Return a node which performs HSigmoid.

    @param data: Tensor with input data floating point type.
    @param name: An optional name of the output node.
    @return: The new node which performs HSigmoid.
    """
    # @nameable_op was missing here, unlike every other op in this module, so
    # the `name` argument was accepted but silently ignored.
    return _get_node_factory_opset5().create("HSigmoid", as_nodes(data), {})
@nameable_op
def gru_sequence(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    sequence_lengths: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    direction: str,
    activations: List[str] = None,
    activations_alpha: List[float] = None,
    activations_beta: List[float] = None,
    clip: float = 0.0,
    linear_before_reset: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs GRUSequence operation.

    @param X: The input tensor. Shape: [batch_size, seq_length, input_size].
    @param initial_hidden_state: The hidden state tensor.
                                 Shape: [batch_size, num_directions, hidden_size].
    @param sequence_lengths: Specifies real sequence lengths for each batch element.
                             Shape: [batch_size]. Integer type.
    @param W: Tensor with weights for matrix multiplication operation with input portion of data.
              Shape: [num_directions, 3*hidden_size, input_size].
    @param R: The tensor with weights for matrix multiplication operation with hidden state.
              Shape: [num_directions, 3*hidden_size, hidden_size].
    @param B: The sum of biases (weight and recurrence).
              For linear_before_reset set True the shape is [num_directions, 4*hidden_size].
              Otherwise the shape is [num_directions, 3*hidden_size].
    @param hidden_size: Specifies hidden state size.
    @param direction: Specifies if the RNN is forward, reverse, or bidirectional.
    @param activations: The list of three activation functions for gates.
    @param activations_alpha: The list of alpha parameters for activation functions.
    @param activations_beta: The list of beta parameters for activation functions.
    @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    @param linear_before_reset: Flag denotes if the layer behaves according to the modification
                                of GRU described in the formula in the ONNX documentation.
    @param name: An optional name of the output node.
    @return: The new node represents GRUSequence. Node outputs count: 2.
    """
    # Substitute defaults only when the caller passed None (an explicit empty
    # list is forwarded unchanged).
    activations = ["sigmoid", "tanh"] if activations is None else activations
    activations_alpha = [] if activations_alpha is None else activations_alpha
    activations_beta = [] if activations_beta is None else activations_beta

    attributes = {
        "hidden_size": hidden_size,
        "direction": direction.lower(),
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "linear_before_reset": linear_before_reset,
        "clip": clip,
    }
    inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B)
    return _get_node_factory_opset5().create("GRUSequence", inputs, attributes)
@nameable_op
def rnn_sequence(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    sequence_lengths: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    direction: str,
    activations: List[str] = None,
    activations_alpha: List[float] = None,
    activations_beta: List[float] = None,
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs RNNSequence operation.

    @param X: The input tensor. Shape: [batch_size, seq_length, input_size].
    @param initial_hidden_state: The hidden state tensor.
                                 Shape: [batch_size, num_directions, hidden_size].
    @param sequence_lengths: Specifies real sequence lengths for each batch element.
                             Shape: [batch_size]. Integer type.
    @param W: Tensor with weights for matrix multiplication operation with input portion of data.
              Shape: [num_directions, hidden_size, input_size].
    @param R: The tensor with weights for matrix multiplication operation with hidden state.
              Shape: [num_directions, hidden_size, hidden_size].
    @param B: The sum of biases (weight and recurrence).
              Shape: [num_directions, hidden_size].
    @param hidden_size: Specifies hidden state size.
    @param direction: Specifies if the RNN is forward, reverse, or bidirectional.
    @param activations: The list of three activation functions for gates.
    @param activations_alpha: The list of alpha parameters for activation functions.
    @param activations_beta: The list of beta parameters for activation functions.
    @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
    @param name: An optional name of the output node.
    @return: The new node represents RNNSequence. Node outputs count: 2.
    """
    # Substitute defaults only when the caller passed None (an explicit empty
    # list is forwarded unchanged).
    activations = ["tanh"] if activations is None else activations
    activations_alpha = [] if activations_alpha is None else activations_alpha
    activations_beta = [] if activations_beta is None else activations_beta

    attributes = {
        "hidden_size": hidden_size,
        "direction": direction.lower(),
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }
    node_inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B)
    return _get_node_factory_opset5().create("RNNSequence", node_inputs, attributes)
@nameable_op
def loop(
    trip_count: NodeInput,
    execution_condition: NodeInput,
    inputs: List[Node],
    graph_body: GraphBody,
    slice_input_desc: List[TensorIteratorSliceInputDesc],
    merged_input_desc: List[TensorIteratorMergedInputDesc],
    invariant_input_desc: List[TensorIteratorInvariantInputDesc],
    body_output_desc: List[TensorIteratorBodyOutputDesc],
    concat_output_desc: List[TensorIteratorConcatOutputDesc],
    body_condition_output_idx: int,
    current_iteration_input_idx: int = -1,
    name: Optional[str] = None,
) -> Node:
    """Perform recurrent execution of the network described in the body, iterating through the data.

    @param trip_count: A scalar or 1D tensor with 1 element specifying
                       maximum number of iterations.
    @param execution_condition: A scalar or 1D tensor with 1 element
                                specifying whether to execute the first iteration or not.
    @param inputs: The provided to TensorIterator operator.
    @param graph_body: The graph representing the body we execute.
    @param slice_input_desc: The descriptors describing sliced inputs, that is nodes
                             representing tensors we iterate through, processing single
                             data slice in one iteration.
    @param merged_input_desc: The descriptors describing merged inputs, that is nodes
                              representing variables with initial value at first iteration,
                              which may be changing through iterations.
    @param invariant_input_desc: The descriptors describing invariant inputs, that is nodes
                                 representing variable with persistent value through all
                                 iterations.
    @param body_output_desc: The descriptors describing body outputs from specified
                             iteration.
    @param concat_output_desc: The descriptors describing specified output values through
                               all the iterations concatenated into one node.
    @param body_condition_output_idx: Determines the purpose of the corresponding result in
                                      the graph_body. This result will determine the dynamic
                                      exit condition. If the value of this result is False,
                                      then iterations stop.
    @param current_iteration_input_idx: Determines the purpose of the corresponding parameter
                                        in the graph_body. This parameter will be used as
                                        an iteration counter. Optional.
    @return: The new node which performs Loop.
    """
    # Serialize each group of descriptors separately, then assemble the
    # attribute dictionary the node factory expects.
    input_descriptions = {
        "slice_input_desc": [desc.serialize() for desc in slice_input_desc],
        "merged_input_desc": [desc.serialize() for desc in merged_input_desc],
        "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc],
    }
    output_descriptions = {
        "body_output_desc": [desc.serialize() for desc in body_output_desc],
        "concat_output_desc": [desc.serialize() for desc in concat_output_desc],
    }
    special_body_ports = {
        "body_condition_output_idx": body_condition_output_idx,
        "current_iteration_input_idx": current_iteration_input_idx,
    }
    attributes = {
        "body": graph_body.serialize(),
        "input_descriptions": input_descriptions,
        "output_descriptions": output_descriptions,
        "special_body_ports": special_body_ports,
    }
    node_inputs = as_nodes(trip_count, execution_condition, *inputs)
    return _get_node_factory_opset5().create("Loop", node_inputs, attributes)

View File

@ -0,0 +1,152 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset4.ops import acosh
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset4.ops import asinh
from openvino.opset6.ops import assign
from openvino.opset1.ops import atan
from openvino.opset4.ops import atanh
from openvino.opset1.ops import avg_pool
from openvino.opset5.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset3.ops import broadcast
from openvino.opset3.ops import bucketize
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset6.ops import ctc_greedy_decoder_seq_len
from openvino.opset4.ops import ctc_loss
from openvino.opset3.ops import cum_sum
from openvino.opset3.ops import cum_sum as cumsum
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset1.ops import divide
from openvino.opset1.ops import elu
from openvino.opset3.ops import embedding_bag_offsets_sum
from openvino.opset3.ops import embedding_bag_packed_sum
from openvino.opset3.ops import embedding_segments_sum
from openvino.opset3.ops import extract_image_patches
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset1.ops import gather
from openvino.opset6.ops import gather_elements
from openvino.opset5.ops import gather_nd
from openvino.opset1.ops import gather_tree
from openvino.opset2.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset3.ops import gru_cell
from openvino.opset5.ops import gru_sequence
from openvino.opset1.ops import hard_sigmoid
from openvino.opset5.ops import hsigmoid
from openvino.opset4.ops import hswish
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset5.ops import log_softmax
from openvino.opset5.ops import loop
from openvino.opset1.ops import lrn
from openvino.opset4.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset4.ops import mish
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset6.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset5.ops import non_max_suppression
from openvino.opset3.ops import non_zero
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset4.ops import proposal
from openvino.opset1.ops import range
from openvino.opset6.ops import read_value
from openvino.opset4.ops import reduce_l1
from openvino.opset4.ops import reduce_l2
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset3.ops import rnn_cell
from openvino.opset5.ops import rnn_sequence
from openvino.opset3.ops import roi_align
from openvino.opset2.ops import roi_pooling
from openvino.opset5.ops import round
from openvino.opset3.ops import scatter_elements_update
from openvino.opset3.ops import scatter_update
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset3.ops import shape_of
from openvino.opset3.ops import shuffle_channels
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset4.ops import softplus
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset4.ops import swish
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset3.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

View File

@ -0,0 +1,163 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from functools import partial
from openvino.impl import Node, Shape
from openvino.impl.op import Constant, Parameter
from openvino.opset_utils import _get_node_factory
from openvino.utils.decorators import binary_op, nameable_op, unary_op
from openvino.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.utils.node_factory import NodeFactory
from openvino.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorSliceInputDesc,
TensorIteratorMergedInputDesc,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
TensorIteratorConcatOutputDesc,
)
from openvino.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
# Node factory bound to "opset6": every op helper below creates nodes from that opset.
_get_node_factory_opset6 = partial(_get_node_factory, "opset6")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def ctc_greedy_decoder_seq_len(
    data: NodeInput,
    sequence_length: NodeInput,
    blank_index: Optional[NodeInput] = None,
    merge_repeated: bool = True,
    classes_index_type: str = "i32",
    sequence_length_type: str = "i32",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs CTCGreedyDecoderSeqLen.

    @param data: The input 3D tensor. Shape: [batch_size, seq_length, num_classes]
    @param sequence_length: Input 1D tensor with sequence length. Shape: [batch_size]
    @param blank_index: Scalar or 1D tensor specifying the class index to use for the blank
                        class. Optional parameter. Default value is num_classes-1.
    @param merge_repeated: Flag for merging repeated labels during the CTC calculation.
    @param classes_index_type: Element type of the classes_index output ("i32" or "i64").
    @param sequence_length_type: Element type of the sequence_length output ("i32" or "i64").
    @param name: An optional name of the output node.
    @return: The new node which performs CTCGreedyDecoderSeqLen.
    """
    # blank_index is an optional third input of the operation.
    if blank_index is None:
        inputs = as_nodes(data, sequence_length)
    else:
        inputs = as_nodes(data, sequence_length, blank_index)
    return _get_node_factory_opset6().create(
        "CTCGreedyDecoderSeqLen",
        inputs,
        {
            "merge_repeated": merge_repeated,
            "classes_index_type": classes_index_type,
            "sequence_length_type": sequence_length_type,
        },
    )
@nameable_op
def gather_elements(
    data: NodeInput,
    indices: NodeInput,
    axis: Optional[int] = 0,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs GatherElements.

    @param data: N-D tensor with data for gathering
    @param indices: N-D tensor with indices by which data is gathered
    @param axis: axis along which elements are gathered
    @param name: An optional name of the output node.
    @return: The new node which performs GatherElements
    """
    return _get_node_factory_opset6().create(
        "GatherElements", as_nodes(data, indices), {"axis": axis}
    )
@nameable_op
def mvn(
    data: Node,
    axes: Node,
    normalize_variance: bool,
    eps: float,
    eps_mode: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs MeanVarianceNormalization (MVN).

    @param data: The node with data tensor.
    @param axes: The node with axes to reduce on.
    @param normalize_variance: Denotes whether to perform variance normalization.
    @param eps: The number added to the variance to avoid division by zero
                when normalizing the value. Scalar value.
    @param eps_mode: how eps is applied (`inside_sqrt` or `outside_sqrt`)
    @param name: Optional output node name.
    @return The new node performing a MVN operation on input tensor.
    """
    return _get_node_factory_opset6().create(
        "MVN",
        as_nodes(data, axes),
        {"normalize_variance": normalize_variance, "eps": eps, "eps_mode": eps_mode},
    )
@nameable_op
def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
    """Return a node which produces the Assign operation.

    @param new_value: Node producing a value to be assigned to a variable.
    @param variable_id: Id of a variable to be updated.
    @param name: Optional name for output node.
    @return Assign node
    """
    attributes = {"variable_id": variable_id}
    return _get_node_factory_opset6().create("Assign", [as_node(new_value)], attributes)
@nameable_op
def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
    """Return a node which produces the ReadValue operation.

    @param init_value: Node producing a value to be returned instead of an unassigned variable.
    @param variable_id: Id of a variable to be read.
    @param name: Optional name for output node.
    @return ReadValue node
    """
    return _get_node_factory_opset6().create(
        "ReadValue",
        [as_node(init_value)],
        {"variable_id": variable_id}
    )

View File

@ -0,0 +1,156 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset4.ops import acosh
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset4.ops import asinh
from openvino.opset3.ops import assign
from openvino.opset1.ops import atan
from openvino.opset4.ops import atanh
from openvino.opset1.ops import avg_pool
from openvino.opset5.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset3.ops import broadcast
from openvino.opset3.ops import bucketize
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset6.ops import ctc_greedy_decoder_seq_len
from openvino.opset4.ops import ctc_loss
from openvino.opset3.ops import cum_sum
from openvino.opset3.ops import cum_sum as cumsum
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset7.ops import dft
from openvino.opset1.ops import divide
from openvino.opset7.ops import einsum
from openvino.opset1.ops import elu
from openvino.opset3.ops import embedding_bag_offsets_sum
from openvino.opset3.ops import embedding_bag_packed_sum
from openvino.opset3.ops import embedding_segments_sum
from openvino.opset3.ops import extract_image_patches
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset7.ops import gather
from openvino.opset6.ops import gather_elements
from openvino.opset5.ops import gather_nd
from openvino.opset1.ops import gather_tree
from openvino.opset7.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset3.ops import gru_cell
from openvino.opset5.ops import gru_sequence
from openvino.opset1.ops import hard_sigmoid
from openvino.opset5.ops import hsigmoid
from openvino.opset4.ops import hswish
from openvino.opset7.ops import idft
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset5.ops import log_softmax
from openvino.opset5.ops import loop
from openvino.opset1.ops import lrn
from openvino.opset4.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset4.ops import mish
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset6.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset5.ops import non_max_suppression
from openvino.opset3.ops import non_zero
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset4.ops import proposal
from openvino.opset1.ops import range
from openvino.opset3.ops import read_value
from openvino.opset4.ops import reduce_l1
from openvino.opset4.ops import reduce_l2
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset3.ops import rnn_cell
from openvino.opset5.ops import rnn_sequence
from openvino.opset3.ops import roi_align
from openvino.opset2.ops import roi_pooling
from openvino.opset7.ops import roll
from openvino.opset5.ops import round
from openvino.opset3.ops import scatter_elements_update
from openvino.opset3.ops import scatter_update
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset3.ops import shape_of
from openvino.opset3.ops import shuffle_channels
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset4.ops import softplus
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset4.ops import swish
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset3.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

View File

@ -0,0 +1,166 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from functools import partial
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from openvino.impl import Node, Shape
from openvino.impl.op import Constant, Parameter
from openvino.opset_utils import _get_node_factory
from openvino.utils.decorators import binary_op, nameable_op, unary_op
from openvino.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.utils.node_factory import NodeFactory
from openvino.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorSliceInputDesc,
TensorIteratorMergedInputDesc,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
TensorIteratorConcatOutputDesc,
)
from openvino.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
# Node factory bound to "opset7": every op helper below creates nodes from that opset.
_get_node_factory_opset7 = partial(_get_node_factory, "opset7")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def einsum(
    inputs: List[Node],
    equation: str
) -> Node:
    """Return a node which performs Einsum operation.

    @param inputs: The list of input nodes
    @param equation: Einsum equation
    @return: The new node performing Einsum operation on the inputs
    """
    return _get_node_factory_opset7().create(
        "Einsum", as_nodes(*inputs), {"equation": equation}
    )
@nameable_op
def gelu(
    data: Node,
    approximation_mode: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs Gelu activation function.

    @param data: The node with data tensor.
    @param approximation_mode: defines which approximation to use ('tanh' or 'erf')
    @param name: Optional output node name.
    @return The new node performing a Gelu activation with the input tensor.
    """
    return _get_node_factory_opset7().create(
        "Gelu", as_nodes(data), {"approximation_mode": approximation_mode}
    )
@nameable_op
def roll(
    data: NodeInput,
    shift: NodeInput,
    axes: NodeInput,
) -> Node:
    """Return a node which performs Roll operation.

    @param data: The node with data tensor.
    @param shift: The node with the tensor with numbers of places by which elements are shifted.
    @param axes: The node with the tensor with axes along which elements are shifted.
    @return The new node performing a Roll operation on the input tensor.
    """
    return _get_node_factory_opset7().create("Roll", as_nodes(data, shift, axes))
@nameable_op
def gather(
    data: NodeInput,
    indices: NodeInput,
    axis: NodeInput,
    batch_dims: Optional[int] = 0,
) -> Node:
    """Return a node which performs Gather.

    @param data: N-D tensor with data for gathering
    @param indices: N-D tensor with indices by which data is gathered
    @param axis: axis along which elements are gathered
    @param batch_dims: number of batch dimensions
    @return: The new node which performs Gather
    """
    return _get_node_factory_opset7().create(
        "Gather", as_nodes(data, indices, axis), {"batch_dims": batch_dims}
    )
@nameable_op
def dft(
    data: NodeInput,
    axes: NodeInput,
    signal_size: Optional[NodeInput] = None,
) -> Node:
    """Return a node which performs DFT operation.

    @param data: Tensor with transformed data.
    @param axes: Tensor with axes to transform.
    @param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'.
    @return: The new node which performs DFT operation on the input data tensor.
    """
    # @nameable_op was missing here, unlike idft and every other op in this
    # module; added for consistency.
    # signal_size is an optional third input of the operation.
    if signal_size is None:
        inputs = as_nodes(data, axes)
    else:
        inputs = as_nodes(data, axes, signal_size)
    return _get_node_factory_opset7().create("DFT", inputs)
@nameable_op
def idft(
    data: NodeInput,
    axes: NodeInput,
    signal_size: Optional[NodeInput] = None,
) -> Node:
    """Return a node which performs IDFT operation.

    @param data: Tensor with transformed data.
    @param axes: Tensor with axes to transform.
    @param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'.
    @return: The new node which performs IDFT operation on the input data tensor.
    """
    # signal_size is an optional third input of the operation.
    raw_inputs = [data, axes] if signal_size is None else [data, axes, signal_size]
    return _get_node_factory_opset7().create("IDFT", as_nodes(*raw_inputs))

View File

@ -0,0 +1,161 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset4.ops import acosh
from openvino.opset8.ops import adaptive_avg_pool
from openvino.opset8.ops import adaptive_max_pool
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset4.ops import asinh
from openvino.opset3.ops import assign
from openvino.opset1.ops import atan
from openvino.opset4.ops import atanh
from openvino.opset1.ops import avg_pool
from openvino.opset5.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset3.ops import broadcast
from openvino.opset3.ops import bucketize
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset6.ops import ctc_greedy_decoder_seq_len
from openvino.opset4.ops import ctc_loss
from openvino.opset3.ops import cum_sum
from openvino.opset3.ops import cum_sum as cumsum
from openvino.opset8.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset7.ops import dft
from openvino.opset1.ops import divide
from openvino.opset7.ops import einsum
from openvino.opset1.ops import elu
from openvino.opset3.ops import embedding_bag_offsets_sum
from openvino.opset3.ops import embedding_bag_packed_sum
from openvino.opset3.ops import embedding_segments_sum
from openvino.opset3.ops import extract_image_patches
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset8.ops import gather
from openvino.opset6.ops import gather_elements
from openvino.opset5.ops import gather_nd
from openvino.opset1.ops import gather_tree
from openvino.opset7.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset3.ops import gru_cell
from openvino.opset5.ops import gru_sequence
from openvino.opset1.ops import hard_sigmoid
from openvino.opset5.ops import hsigmoid
from openvino.opset4.ops import hswish
from openvino.opset7.ops import idft
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset5.ops import log_softmax
from openvino.opset5.ops import loop
from openvino.opset1.ops import lrn
from openvino.opset4.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset8.ops import matrix_nms
from openvino.opset8.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset4.ops import mish
from openvino.opset1.ops import mod
from openvino.opset8.ops import multiclass_nms
from openvino.opset1.ops import multiply
from openvino.opset6.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset5.ops import non_max_suppression
from openvino.opset3.ops import non_zero
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset4.ops import proposal
from openvino.opset1.ops import range
from openvino.opset8.ops import random_uniform
from openvino.opset3.ops import read_value
from openvino.opset4.ops import reduce_l1
from openvino.opset4.ops import reduce_l2
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset3.ops import rnn_cell
from openvino.opset5.ops import rnn_sequence
from openvino.opset3.ops import roi_align
from openvino.opset2.ops import roi_pooling
from openvino.opset7.ops import roll
from openvino.opset5.ops import round
from openvino.opset3.ops import scatter_elements_update
from openvino.opset3.ops import scatter_update
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset3.ops import shape_of
from openvino.opset3.ops import shuffle_channels
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset4.ops import softplus
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset4.ops import swish
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset3.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split

View File

@ -0,0 +1,369 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from functools import partial
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from openvino.impl import Node, Shape
from openvino.impl.op import Constant, Parameter
from openvino.opset_utils import _get_node_factory
from openvino.utils.decorators import binary_op, nameable_op, unary_op
from openvino.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.utils.node_factory import NodeFactory
from openvino.utils.tensor_iterator_types import (
GraphBody,
TensorIteratorSliceInputDesc,
TensorIteratorMergedInputDesc,
TensorIteratorInvariantInputDesc,
TensorIteratorBodyOutputDesc,
TensorIteratorConcatOutputDesc,
)
from openvino.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
_get_node_factory_opset8 = partial(_get_node_factory, "opset8")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def deformable_convolution(
    data: NodeInput,
    offsets: NodeInput,
    filters: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    dilations: List[int],
    mask: Optional[NodeInput] = None,
    auto_pad: str = "EXPLICIT",
    group: int = 1,
    deformable_group: int = 1,
    bilinear_interpolation_pad: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Create an opset8 DeformableConvolution node.

    @param data: The node providing data batch tensor.
    @param offsets: The node providing offset tensor.
    @param filters: The node providing filters tensor.
    @param strides: The distance (in pixels) to slide the filter on the feature map over the axes.
    @param pads_begin: The number of pixels to add to the beginning along each axis.
    @param pads_end: The number of pixels to add to the end along each axis.
    @param dilations: The distance in width and height between elements (weights) in the filter.
    @param mask: The node providing modulation scalar (mask) tensor.
    @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
    @param group: The number of groups which both output and input should be split into.
    @param deformable_group: The number of groups which deformable values and output should be
                             split into along the channel axis.
    @param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation
                                       execution.
    @param name: The optional new name for output node.
    @return New node performing deformable convolution operation.
    """
    # The modulation mask is an optional fourth input of the operator.
    node_inputs = (
        as_nodes(data, offsets, filters)
        if mask is None
        else as_nodes(data, offsets, filters, mask)
    )
    attributes = {
        "strides": strides,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "dilations": dilations,
        "auto_pad": auto_pad,
        "group": group,
        "deformable_group": deformable_group,
        "bilinear_interpolation_pad": bilinear_interpolation_pad,
    }
    return _get_node_factory_opset8().create("DeformableConvolution", node_inputs, attributes)
@nameable_op
def adaptive_avg_pool(
    data: NodeInput,
    output_shape: NodeInput
) -> Node:
    """Create an AdaptiveAvgPool node.

    @param data: The node providing the input tensor.
    @param output_shape: The node describing the shape of spatial dimensions after the operation.
    @return: The new node performing AdaptiveAvgPool operation on the data
    """
    return _get_node_factory_opset8().create("AdaptiveAvgPool", as_nodes(data, output_shape))
@nameable_op
def adaptive_max_pool(
    data: NodeInput,
    output_shape: NodeInput,
    index_element_type: str = "i64"
) -> Node:
    """Create an AdaptiveMaxPool node.

    @param data: The node providing the input tensor.
    @param output_shape: The node describing the shape of spatial dimensions after the operation.
    @param index_element_type: Type of the indices output.
    @return: The new node performing AdaptiveMaxPool operation on the data
    """
    return _get_node_factory_opset8().create(
        "AdaptiveMaxPool",
        as_nodes(data, output_shape),
        {"index_element_type": index_element_type},
    )
@nameable_op
def multiclass_nms(
    boxes: NodeInput,
    scores: NodeInput,
    sort_result_type: str = "none",
    sort_result_across_batch: bool = False,
    output_type: str = "i64",
    iou_threshold: float = 0.0,
    score_threshold: float = 0.0,
    nms_top_k: int = -1,
    keep_top_k: int = -1,
    background_class: int = -1,
    nms_eta: float = 1.0,
    normalized: bool = True
) -> Node:
    """Create a MulticlassNms node performing multi-class non-maximum suppression.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param sort_result_type: Specifies the order of output elements, possible values:
                             'class': sort selected boxes by class id (ascending)
                             'score': sort selected boxes by score (descending)
                             'none': do not guarantee the order.
    @param sort_result_across_batch: Specifies whether selected boxes need to be sorted
                                     across batches or not.
    @param output_type: Specifies the output tensor type, possible values: 'i64', 'i32'.
    @param iou_threshold: Specifies the intersection-over-union threshold.
    @param score_threshold: Specifies the minimum score to consider a box for processing.
    @param nms_top_k: Specifies the maximum number of boxes to be selected per class,
                      -1 meaning to keep all boxes.
    @param keep_top_k: Specifies the maximum number of boxes to be selected per batch element,
                       -1 meaning to keep all boxes.
    @param background_class: Specifies the background class id, -1 meaning to keep all classes.
    @param nms_eta: Specifies the eta parameter for adaptive NMS, in the closed range [0, 1.0].
    @param normalized: Specifies whether boxes are normalized or not.
    @return: The new node which performs MulticlassNms
    """
    attributes = dict(
        sort_result_type=sort_result_type,
        sort_result_across_batch=sort_result_across_batch,
        output_type=output_type,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        nms_top_k=nms_top_k,
        keep_top_k=keep_top_k,
        background_class=background_class,
        nms_eta=nms_eta,
        normalized=normalized,
    )
    return _get_node_factory_opset8().create("MulticlassNms", as_nodes(boxes, scores), attributes)
@nameable_op
def matrix_nms(
    boxes: NodeInput,
    scores: NodeInput,
    sort_result_type: str = "none",
    sort_result_across_batch: bool = False,
    output_type: str = "i64",
    score_threshold: float = 0.0,
    nms_top_k: int = -1,
    keep_top_k: int = -1,
    background_class: int = -1,
    decay_function: str = "linear",
    gaussian_sigma: float = 2.0,
    post_threshold: float = 0.0,
    normalized: bool = True
) -> Node:
    """Create a MatrixNms node performing matrix non-maximum suppression.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param sort_result_type: Specifies the order of output elements, possible values:
                             'class': sort selected boxes by class id (ascending)
                             'score': sort selected boxes by score (descending)
                             'none': do not guarantee the order.
    @param sort_result_across_batch: Specifies whether selected boxes need to be sorted
                                     across batches or not.
    @param output_type: Specifies the output tensor type, possible values: 'i64', 'i32'.
    @param score_threshold: Specifies the minimum score to consider a box for processing.
    @param nms_top_k: Specifies the maximum number of boxes to be selected per class,
                      -1 meaning to keep all boxes.
    @param keep_top_k: Specifies the maximum number of boxes to be selected per batch element,
                       -1 meaning to keep all boxes.
    @param background_class: Specifies the background class id, -1 meaning to keep all classes.
    @param decay_function: Specifies the decay function used to decay scores, possible values:
                           'gaussian', 'linear'.
    @param gaussian_sigma: Specifies the gaussian_sigma parameter for the gaussian decay_function.
    @param post_threshold: Specifies the threshold to filter out boxes with low confidence score
                           after decaying.
    @param normalized: Specifies whether boxes are normalized or not.
    @return: The new node which performs MatrixNms
    """
    attributes = dict(
        sort_result_type=sort_result_type,
        sort_result_across_batch=sort_result_across_batch,
        output_type=output_type,
        score_threshold=score_threshold,
        nms_top_k=nms_top_k,
        keep_top_k=keep_top_k,
        background_class=background_class,
        decay_function=decay_function,
        gaussian_sigma=gaussian_sigma,
        post_threshold=post_threshold,
        normalized=normalized,
    )
    return _get_node_factory_opset8().create("MatrixNms", as_nodes(boxes, scores), attributes)
@nameable_op
def gather(
    data: NodeInput,
    indices: NodeInput,
    axis: NodeInput,
    batch_dims: Optional[int] = 0,
) -> Node:
    """Create an opset8 Gather node, which supports negative indices.

    @param data: N-D tensor with data for gathering.
    @param indices: N-D tensor with indices by which data is gathered. Negative indices
                    indicate reverse indexing from the end.
    @param axis: axis along which elements are gathered.
    @param batch_dims: number of batch dimensions.
    @return: The new node which performs Gather
    """
    return _get_node_factory_opset8().create(
        "Gather",
        as_nodes(data, indices, axis),
        {"batch_dims": batch_dims},
    )
@nameable_op
def max_pool(
    data: NodeInput,
    strides: List[int],
    dilations: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    kernel_shape: TensorShape,
    rounding_type: str = "floor",
    auto_pad: Optional[str] = None,
    index_element_type: Optional[str] = "i64",
    axis: Optional[int] = 0,
    name: Optional[str] = None,
) -> Node:
    """Perform max pooling operation and return both values and indices of the selected elements.

    @param data: The node providing input data.
    @param strides: The distance (in pixels) to slide the filter on the feature map
                    over the axes.
    @param dilations: The dilation of filter elements (distance between elements).
    @param pads_begin: The number of pixels to add at the beginning along each axis.
    @param pads_end: The number of pixels to add at the end along each axis.
    @param kernel_shape: The pooling operation kernel shape.
    @param rounding_type: Determines used rounding schema when computing output shape.
                          Acceptable values are: ['floor', 'ceil']. Defaults to 'floor'.
    @param auto_pad: Determines how the padding is calculated. Acceptable values:
                     [None, 'same_upper', 'same_lower', 'valid']. Defaults to None.
    @param index_element_type: The data type used for the indices output of this operator.
                               Defaults to i64.
    @param axis: The first dimension in the data shape used to determine the maximum
                 returned index value. The value is the product of all dimensions
                 starting at the provided axis. Defaults to 0.
    @param name: The optional name for the created output node.
    @return The new node performing max pooling operation.
    """
    # A missing auto_pad means explicit padding; the backend expects upper-case enums.
    attributes = {
        "strides": strides,
        "dilations": dilations,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        "kernel": kernel_shape,
        "rounding_type": rounding_type.upper(),
        "auto_pad": ("explicit" if auto_pad is None else auto_pad).upper(),
        "index_element_type": index_element_type,
        "axis": axis,
    }
    return _get_node_factory_opset8().create("MaxPool", [as_node(data)], attributes)
@nameable_op
def random_uniform(
    output_shape: NodeInput,
    min_val: NodeInput,
    max_val: NodeInput,
    output_type: str,
    global_seed: int = 0,
    op_seed: int = 0
) -> Node:
    """Create a RandomUniform node generating a sequence of uniformly distributed random values.

    @param output_shape: Tensor with shape of the output tensor.
    @param min_val: Tensor with the lower bound on the range of random values to generate.
    @param max_val: Tensor with the upper bound on the range of random values to generate.
    @param output_type: Specifies the output tensor type, possible values:
                        'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'.
    @param global_seed: Specifies global seed value. Required to be a positive integer or 0.
    @param op_seed: Specifies operational seed value. Required to be a positive integer or 0.
    @return The new node which performs generation of random values from uniform distribution.
    """
    inputs = as_nodes(output_shape, min_val, max_val)
    # Both seeds must be non-negative; reject anything else up front.
    if global_seed < 0:
        raise RuntimeError("global_seed should be positive or 0. Got: {}".format(global_seed))
    if op_seed < 0:
        raise RuntimeError("op_seed should be positive or 0. Got: {}".format(op_seed))
    return _get_node_factory_opset8().create(
        "RandomUniform",
        inputs,
        {"output_type": output_type, "global_seed": global_seed, "op_seed": op_seed},
    )

View File

@ -0,0 +1,21 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from typing import Optional
import numpy as np
from openvino.impl import Node
from openvino.utils.decorators import nameable_op
from openvino.utils.node_factory import NodeFactory
from openvino.utils.types import (
as_node,
NodeInput,
)
def _get_node_factory(opset_version: Optional[str] = None) -> NodeFactory:
    """Return a NodeFactory configured for the given opset, or the default one when unset."""
    return NodeFactory(opset_version) if opset_version else NodeFactory()

View File

@ -0,0 +1,4 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Generic utilities. Factor related functions out to separate files."""

View File

@ -0,0 +1,36 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import List
import openvino as ng
from openvino.impl import AxisSet, Node
from openvino.utils.types import NodeInput, TensorShape, get_dtype, make_constant_node
log = logging.getLogger(__name__)
def get_broadcast_axes(
    output_shape: TensorShape, input_shape: TensorShape, axis: int = None
) -> AxisSet:
    """Generate a list of broadcast axes for openvino broadcast.

    Informally, a broadcast "adds" axes to the input tensor, replicating elements
    from the input tensor as needed to fill the new dimensions. This function
    calculates which of the output axes are added in this way.

    @param output_shape: The new shape for the output tensor.
    @param input_shape: The shape of input tensor.
    @param axis: The axis along which we want to replicate elements.
    @return The indices of added axes.
    """
    remaining_axes = list(range(len(output_shape)))
    # The input shape is matched against a contiguous run of output axes that
    # starts at `output_begin`; everything outside that run is a broadcast axis.
    output_begin = axis if axis is not None else len(output_shape) - len(input_shape)
    for position in reversed(range(output_begin, output_begin + len(input_shape))):
        del remaining_axes[position]
    return AxisSet(set(remaining_axes))

View File

@ -0,0 +1,52 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from functools import wraps
from typing import Any, Callable
from openvino.impl import Node
from openvino.utils.types import NodeInput, as_node, as_nodes
def _set_node_friendly_name(node: Node, **kwargs: Any) -> Node:
    """Assign the user-provided "name" kwarg (when present) as the node's friendly name."""
    try:
        node.friendly_name = kwargs["name"]
    except KeyError:
        pass
    return node
def nameable_op(node_factory_function: Callable) -> Callable:
    """Set the name to the openvino operator returned by the wrapped function."""
    @wraps(node_factory_function)
    def wrapper(*args: Any, **kwargs: Any) -> Node:
        # Build the node, then apply the optional "name" kwarg as its friendly name.
        return _set_node_friendly_name(node_factory_function(*args, **kwargs), **kwargs)
    return wrapper
def unary_op(node_factory_function: Callable) -> Callable:
    """Convert the first input value to a Constant Node if a numeric value is detected."""
    @wraps(node_factory_function)
    def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node:
        # Coerce the single operand into a Node before delegating to the factory.
        result = node_factory_function(as_node(input_value), *args, **kwargs)
        return _set_node_friendly_name(result, **kwargs)
    return wrapper
def binary_op(node_factory_function: Callable) -> Callable:
    """Convert the first two input values to Constant Nodes if numeric values are detected."""
    @wraps(node_factory_function)
    def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:
        # Coerce both operands into Nodes before delegating to the factory.
        left_node, right_node = as_nodes(left, right)
        result = node_factory_function(left_node, right_node, *args, **kwargs)
        return _set_node_friendly_name(result, **kwargs)
    return wrapper

View File

@ -0,0 +1,136 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Helper functions for validating user input."""
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
import numpy as np
from openvino.exceptions import UserInputError
log = logging.getLogger(__name__)
def assert_list_of_ints(value_list: Iterable[int], message: str) -> None:
    """Verify that the provided value is an iterable of integers.

    @param value_list: The value to validate.
    @param message: The message logged and attached to the raised error on failure.
    :raises UserInputError: When the value is not iterable or contains a non-int element.
    """
    try:
        # A non-iterable value raises TypeError here as well, which counts as a failure.
        all_ints = all(isinstance(item, int) for item in value_list)
    except TypeError:
        all_ints = False
    if not all_ints:
        log.warning(message)
        raise UserInputError(message, value_list)
def _check_value(op_name, attr_key, value, val_type, cond=None):
# type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool
"""Check whether provided value satisfies specified criteria.
@param op_name: The operator name which attributes are checked.
@param attr_key: The attribute name.
@param value: The value to check.
@param val_type: Required value type.
@param cond: The optional function running additional checks.
:raises UserInputError:
@return True if attribute satisfies all criterias. Otherwise False.
"""
if not np.issubdtype(type(value), val_type):
raise UserInputError(
'{} operator attribute "{}" value must by of type {}.'.format(
op_name, attr_key, val_type
)
)
if cond is not None and not cond(value):
raise UserInputError(
'{} operator attribute "{}" value does not satisfy provided condition.'.format(
op_name, attr_key
)
)
return True
def check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False):
    # type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool
    """Check whether specified attribute satisfies given criteria.

    @param op_name: The operator name which attributes are checked.
    @param attr_dict: Dictionary containing key-value attributes to check.
    @param attr_key: Key value for validated attribute.
    @param val_type: Value type for validated attribute.
    @param cond: Any callable which accepts an attribute value and returns True or False.
    @param required: Whether the attribute key must be present in the provided dictionary.

    :raises UserInputError:
    @return True if attribute satisfies all criterias. Otherwise False.
    """
    if attr_key not in attr_dict:
        if required:
            raise UserInputError(
                'Provided dictionary is missing {} operator required attribute "{}"'.format(
                    op_name, attr_key
                )
            )
        # Optional attribute absent: nothing to validate.
        return True
    attr_value = attr_dict[attr_key]
    # Scalars are checked directly; iterables are checked element by element.
    if np.isscalar(attr_value):
        return _check_value(op_name, attr_key, attr_value, val_type, cond)
    result = True
    for element in attr_value:
        result = result and _check_value(op_name, attr_key, element, val_type, cond)
    return result
def check_valid_attributes(
    op_name, # type: str
    attributes, # type: Dict[str, Any]
    requirements, # type: List[Tuple[str, bool, Type, Optional[Callable]]]
):
    # type: (...) -> bool
    """Perform attributes validation according to specified type, value criteria.

    @param op_name: The operator name which attributes are checked.
    @param attributes: The dictionary with user provided attributes to check.
    @param requirements: The list of tuples describing attributes' requirements. The tuple should
                         contain following values:
                         (attr_name: str,
                          is_required: bool,
                          value_type: Type,
                          value_condition: Callable)

    :raises UserInputError:
    @return True if all attributes satisfies criterias. Otherwise False.
    """
    # Delegate each requirement tuple to the single-attribute checker; it raises on failure.
    for attr_name, is_required, value_type, condition in requirements:
        check_valid_attribute(op_name, attributes, attr_name, value_type, condition, is_required)
    return True
def is_positive_value(x): # type: (Any) -> bool
    """Determine whether the specified x is a positive value.

    @param x: The value to check.
    @return True if the specified x is positive value, False otherwise.
    """
    return 0 < x
def is_non_negative_value(x): # type: (Any) -> bool
    """Determine whether the specified x is a non-negative value.

    @param x: The value to check.
    @return True if the specified x is non-negative value, False otherwise.
    """
    return 0 <= x

View File

@ -0,0 +1,167 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
from functools import partial
from typing import Any, Dict, List, Optional, Union
from openvino.pyopenvino import NodeFactory as _NodeFactory
from openvino.impl import Node, Output
from openvino.exceptions import UserInputError
DEFAULT_OPSET = "opset8"
class NodeFactory(object):
    """Factory front-end to create node objects.

    Wraps the pyopenvino ``_NodeFactory`` and decorates the nodes it produces with
    Python-level attribute getters/setters and a lazily populated attribute cache.
    """
    def __init__(self, opset_version: str = DEFAULT_OPSET) -> None:
        """Create the NodeFactory object.
        @param opset_version: The opset version the factory will use to produce ops from.
        """
        self.factory = _NodeFactory(opset_version)
    def create(
        self,
        op_type_name: str,
        arguments: Optional[List[Union[Node, Output]]] = None,
        attributes: Optional[Dict[str, Any]] = None,
    ) -> Node:
        """Create node object from provided description.
        The user does not have to provide all node's attributes, but only required ones.
        @param op_type_name: The operator type name.
        @param arguments: The operator arguments.
        @param attributes: The operator attributes.
        :raises UserInputError: When attributes are provided without any arguments.
        @return Node object representing requested operator with attributes set.
        """
        # No arguments and no attributes: build the op in its default state.
        # The cache helper members are still attached so accessors stay uniform.
        if arguments is None and attributes is None:
            node = self.factory.create(op_type_name)
            node._attr_cache = {}
            node._attr_cache_valid = False
            return node
        # Attributes without arguments is a user error — there is no node to attach them to.
        if arguments is None and attributes is not None:
            raise UserInputError(
                'Error: cannot create "{}" op without arguments.'.format(
                    op_type_name
                )
            )
        if attributes is None:
            attributes = {}
        assert arguments is not None
        # Normalize Node arguments into Output objects before calling the backend factory.
        arguments = self._arguments_as_outputs(arguments)
        node = self.factory.create(op_type_name, arguments, attributes)
        # Currently we don't support any attribute getters & setters for TensorIterator node.
        if node.get_type_name() == "TensorIterator":
            return node
        # Set getters and setters for each node's attribute:
        #   node.get_attribute_name()
        #   node.set_attribute_name()
        # For compound (with more than one level of nesting) attributes of form ie.:
        # node.class_member_name.some_metric.attr_name:
        #   node.get_some_metric_attr_name()
        #   node.set_some_metric_attr_name()
        # Please see test_dyn_attributes.py for more usage examples.
        all_attributes = node.get_attributes()
        for attr_name in all_attributes.keys():
            # partial() binds (node, attr_name) so the accessor works as a no-arg
            # getter / one-arg setter attached directly on the node instance.
            setattr(
                node,
                self._normalize_attr_name_getter(attr_name),
                partial(NodeFactory._get_node_attr_value, node, attr_name),
            )
            setattr(
                node,
                self._normalize_attr_name_setter(attr_name),
                partial(NodeFactory._set_node_attr_value, node, attr_name),
            )
        # Setup helper members for caching attribute values.
        # The cache would be lazily populated at first access attempt.
        node._attr_cache = {}
        node._attr_cache_valid = False
        return node
    @staticmethod
    def _arguments_as_outputs(arguments: List[Union[Node, Output]]) -> List[Output]:
        # Convert a mixed list of Node/Output arguments into a flat list of Outputs.
        outputs = []
        for argument in arguments:
            if issubclass(type(argument), Output):
                outputs.append(argument)
            else:
                # Passing whole Nodes is deprecated; expand each Node into all of its outputs.
                log.warning("Op arguments were passed as Node, please avoid passing arguments in "
                            "this manner, and pass Output(s) instead, because accepting Nodes will "
                            "be deprecated in a future release.")
                outputs.extend(argument.outputs())
        return outputs
    @staticmethod
    def _normalize_attr_name(attr_name: str, prefix: str) -> str:
        """Normalize attribute name.
        @param attr_name: The attribute name.
        @param prefix: The prefix to attach to attribute name.
        @return The modified attribute name.
        """
        # Trim first part of the name if there is only one level of attribute hierarchy.
        if attr_name.count(".") == 1:
            attr_name = attr_name[attr_name.find(".") + 1:]
        return prefix + attr_name.replace(".", "_")
    @classmethod
    def _normalize_attr_name_getter(cls, attr_name: str) -> str:
        """Normalize attribute name to be suitable for getter function name.
        @param attr_name: The attribute name to normalize
        @return The appropriate getter function name.
        """
        return cls._normalize_attr_name(attr_name, "get_")
    @classmethod
    def _normalize_attr_name_setter(cls, attr_name: str) -> str:
        """Normalize attribute name to be suitable for setter function name.
        @param attr_name: The attribute name to normalize
        @return The appropriate setter function name.
        """
        return cls._normalize_attr_name(attr_name, "set_")
    @staticmethod
    def _get_node_attr_value(node: Node, attr_name: str) -> Any:
        """Get provided node attribute value.
        @param node: The node we retrieve attribute value from.
        @param attr_name: The attribute name.
        @return The node attribute value.
        """
        # Lazily populate the cache on first access; subsequent reads are dict lookups.
        if not node._attr_cache_valid:
            node._attr_cache = node.get_attributes()
            node._attr_cache_valid = True
        return node._attr_cache[attr_name]
    @staticmethod
    def _set_node_attr_value(node: Node, attr_name: str, value: Any) -> None:
        """Set the node attribute value.
        @param node: The node we change attribute value for.
        @param attr_name: The attribute name.
        @param value: The new attribute value.
        """
        node.set_attribute(attr_name, value)
        # NOTE(review): the cache entry is written even when the cache has not been
        # populated yet (_attr_cache_valid is False). Reads go through
        # _get_node_attr_value, which repopulates the whole cache, so this looks
        # benign — confirm nothing reads _attr_cache directly.
        node._attr_cache[attr_name] = value

View File

@ -0,0 +1,23 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from typing import Iterable, Optional
from openvino.impl import Node
def get_reduction_axes(node: Node, reduction_axes: Optional[Iterable[int]]) -> Iterable[int]:
    """Get reduction axes if it is None and convert it to set if its type is different.

    If reduction_axes is None we default to reduce all axes.

    @param node: The node we fill reduction axes for.
    @param reduction_axes: The collection of indices of axes to reduce. May be None.
    @return Set filled with indices of axes we want to reduce.
    """
    if reduction_axes is None:
        # Default: reduce over every axis of the node's output shape.
        return set(range(len(node.shape)))
    # Normalize any iterable (list, tuple, generator, ...) to a set.
    # isinstance is the idiomatic type check (was: `type(...) is not set`).
    if not isinstance(reduction_axes, set):
        reduction_axes = set(reduction_axes)
    return reduction_axes

View File

@ -0,0 +1,154 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Helper classes for aggregating TensorIterator input/output descriptor attributes."""
from typing import List
from openvino.impl import Node
from openvino.impl.op import Parameter
class GraphBody(object):
    """Class containing graph parameters and results."""

    def __init__(self, parameters: List[Parameter], results: List[Node],) -> None:
        self.parameters = parameters
        self.results = results

    def serialize(self) -> dict:
        """Serialize GraphBody as a dictionary."""
        return {"parameters": self.parameters, "results": self.results}
class TensorIteratorInputDesc(object):
    """Represents a generic input descriptor for TensorIterator operator."""

    def __init__(self, input_idx: int, body_parameter_idx: int,) -> None:
        self.input_idx = input_idx
        self.body_parameter_idx = body_parameter_idx

    def serialize(self) -> dict:
        """Serialize TensorIteratorInputDesc as a dictionary."""
        return {"input_idx": self.input_idx, "body_parameter_idx": self.body_parameter_idx}
class TensorIteratorSliceInputDesc(TensorIteratorInputDesc):
    """Represents a TensorIterator graph body input formed from slices of TensorIterator input."""

    def __init__(
        self,
        input_idx: int,
        body_parameter_idx: int,
        start: int,
        stride: int,
        part_size: int,
        end: int,
        axis: int,
    ) -> None:
        super().__init__(input_idx, body_parameter_idx)
        self.start = start
        self.stride = stride
        self.part_size = part_size
        self.end = end
        self.axis = axis

    def serialize(self) -> dict:
        """Serialize TensorIteratorSliceInputDesc as a dictionary."""
        # Extend the generic descriptor dict with the slicing parameters.
        output = super().serialize()
        output.update(
            start=self.start,
            stride=self.stride,
            part_size=self.part_size,
            end=self.end,
            axis=self.axis,
        )
        return output
class TensorIteratorMergedInputDesc(TensorIteratorInputDesc):
    """Represents a TensorIterator graph body input with initial value in the first iteration.

    Later on, this input value is computed inside graph body.
    """

    def __init__(self, input_idx: int, body_parameter_idx: int, body_value_idx: int,) -> None:
        super().__init__(input_idx, body_parameter_idx)
        self.body_value_idx = body_value_idx

    def serialize(self) -> dict:
        """Serialize TensorIteratorMergedInputDesc as a dictionary."""
        output = super().serialize()
        output.update(body_value_idx=self.body_value_idx)
        return output
class TensorIteratorInvariantInputDesc(TensorIteratorInputDesc):
    """Represents a TensorIterator graph body input that has invariant value during iteration."""

    def __init__(self, input_idx: int, body_parameter_idx: int,) -> None:
        # Invariant inputs carry no state beyond the generic descriptor.
        super().__init__(input_idx, body_parameter_idx)
class TensorIteratorOutputDesc(object):
    """Represents a generic output descriptor for TensorIterator operator."""

    def __init__(self, body_value_idx: int, output_idx: int,) -> None:
        self.body_value_idx = body_value_idx
        self.output_idx = output_idx

    def serialize(self) -> dict:
        """Serialize TensorIteratorOutputDesc as a dictionary."""
        return {"body_value_idx": self.body_value_idx, "output_idx": self.output_idx}
class TensorIteratorBodyOutputDesc(TensorIteratorOutputDesc):
    """Represents an output from a specific iteration."""

    def __init__(self, body_value_idx: int, output_idx: int, iteration: int,) -> None:
        super().__init__(body_value_idx, output_idx)
        self.iteration = iteration

    def serialize(self) -> dict:
        """Serialize TensorIteratorBodyOutputDesc as a dictionary."""
        output = super().serialize()
        output.update(iteration=self.iteration)
        return output
class TensorIteratorConcatOutputDesc(TensorIteratorOutputDesc):
    """Represents an output produced by concatenation of output from each iteration."""

    def __init__(
        self,
        body_value_idx: int,
        output_idx: int,
        start: int,
        stride: int,
        part_size: int,
        end: int,
        axis: int,
    ) -> None:
        super().__init__(body_value_idx, output_idx)
        self.start = start
        self.stride = stride
        self.part_size = part_size
        self.end = end
        self.axis = axis

    def serialize(self) -> dict:
        """Serialize TensorIteratorConcatOutputDesc as a dictionary."""
        # Extend the generic descriptor dict with the concatenation parameters.
        output = super().serialize()
        output.update(
            start=self.start,
            stride=self.stride,
            part_size=self.part_size,
            end=self.end,
            axis=self.axis,
        )
        return output

View File

@ -0,0 +1,146 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Functions related to converting between Python and numpy types and openvino types."""
import logging
from typing import List, Union
import numpy as np
from openvino.exceptions import NgraphTypeError
from openvino.impl import Node, Shape, Output
from openvino.impl import Type as NgraphType
from openvino.impl.op import Constant
log = logging.getLogger(__name__)

# Type aliases used throughout this module's conversion helpers.
TensorShape = List[int]  # plain list of dimension sizes
NumericData = Union[int, float, np.ndarray]  # scalar or ndarray payload
NumericType = Union[type, np.dtype]  # Python type or numpy dtype
ScalarData = Union[int, float]
NodeInput = Union[Node, NumericData]  # anything convertible to a node input
# Maps ngraph element types to the corresponding numpy/Python scalar types.
# Builtin `bool` replaces the deprecated `np.bool` alias (removed in numpy
# 1.24); `np.bool` was literally an alias for builtin bool, so lookups are
# unchanged. Note: bf16 has no numpy equivalent, so its raw 16-bit storage
# is mapped to np.uint16.
openvino_to_numpy_types_map = [
    (NgraphType.boolean, bool),
    (NgraphType.f16, np.float16),
    (NgraphType.f32, np.float32),
    (NgraphType.f64, np.float64),
    (NgraphType.i8, np.int8),
    (NgraphType.i16, np.int16),
    (NgraphType.i32, np.int32),
    (NgraphType.i64, np.int64),
    (NgraphType.u8, np.uint8),
    (NgraphType.u16, np.uint16),
    (NgraphType.u32, np.uint32),
    (NgraphType.u64, np.uint64),
    (NgraphType.bf16, np.uint16),
]
# Maps ngraph element-type name strings to numpy/Python scalar types.
# Builtin `bool` replaces the deprecated `np.bool` alias (removed in numpy
# 1.24); `np.bool` was literally an alias for builtin bool, so behavior is
# unchanged.
openvino_to_numpy_types_str_map = [
    ("boolean", bool),
    ("f16", np.float16),
    ("f32", np.float32),
    ("f64", np.float64),
    ("i8", np.int8),
    ("i16", np.int16),
    ("i32", np.int32),
    ("i64", np.int64),
    ("u8", np.uint8),
    ("u16", np.uint16),
    ("u32", np.uint32),
    ("u64", np.uint64),
]
def get_element_type(data_type: NumericType) -> NgraphType:
    """Return an ngraph element type for a Python type or numpy.dtype.

    @param data_type: Python builtin type (int/float) or numpy dtype/scalar type.
    @return The matching NgraphType.
    @raises NgraphTypeError: If no known mapping exists for data_type.
    """
    if data_type is int:
        log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
        return NgraphType.i32
    if data_type is float:
        log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.")
        return NgraphType.f32
    ng_type = next(
        (ng_type for (ng_type, np_type) in openvino_to_numpy_types_map if np_type == data_type), None
    )
    if ng_type:
        return ng_type
    # Format the message eagerly: unlike logging calls, exceptions do not
    # apply printf-style ("%s", arg) formatting to their arguments.
    raise NgraphTypeError("Unidentified data type {}".format(data_type))
def get_element_type_str(data_type: NumericType) -> str:
    """Return an ngraph element type string representation for a Python type or numpy dtype.

    @param data_type: Python builtin type (int/float) or numpy dtype/scalar type.
    @return The matching element type name (e.g. "f32").
    @raises NgraphTypeError: If no known mapping exists for data_type.
    """
    if data_type is int:
        log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
        return "i32"
    if data_type is float:
        log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.")
        return "f32"
    ng_type = next(
        (ng_type for (ng_type, np_type) in openvino_to_numpy_types_str_map if np_type == data_type),
        None,
    )
    if ng_type:
        return ng_type
    # Format the message eagerly: exceptions do not apply printf-style args.
    raise NgraphTypeError("Unidentified data type {}".format(data_type))
def get_dtype(ngraph_type: NgraphType) -> np.dtype:
    """Return a numpy.dtype for an ngraph element type.

    @param ngraph_type: The ngraph element type to convert.
    @return The matching numpy dtype.
    @raises NgraphTypeError: If no known mapping exists for ngraph_type.
    """
    np_type = next(
        (np_type for (ng_type, np_type) in openvino_to_numpy_types_map if ng_type == ngraph_type),
        None,
    )
    if np_type:
        return np.dtype(np_type)
    # Format the message eagerly: exceptions do not apply printf-style args.
    raise NgraphTypeError("Unidentified data type {}".format(ngraph_type))
def get_ndarray(data: NumericData) -> np.ndarray:
    """Wrap data into a numpy ndarray."""
    # Genuine ndarrays pass through untouched; anything else is wrapped.
    return data if type(data) == np.ndarray else np.array(data)
def get_shape(data: NumericData) -> TensorShape:
    """Return a shape of NumericData."""
    data_kind = type(data)
    if data_kind == np.ndarray:
        return data.shape  # type: ignore
    if data_kind == list:
        return [len(data)]  # type: ignore
    # Scalars have an empty shape.
    return []
def make_constant_node(value: NumericData, dtype: NumericType = None) -> Constant:
    """Return an ngraph Constant node with the specified value."""
    ndarray = get_ndarray(value)
    # An explicit dtype wins; otherwise derive the element type from the data.
    element_type = get_element_type(dtype if dtype else ndarray.dtype)
    return Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist())
def as_node(input_value: NodeInput) -> Node:
    """Return input values as nodes. Scalars will be converted to Constant nodes."""
    # Nodes and Outputs pass through untouched; everything else becomes a Constant.
    if isinstance(input_value, (Node, Output)):
        return input_value
    return make_constant_node(input_value)
def as_nodes(*input_values: NodeInput) -> List[Node]:
    """Return input values as nodes. Scalars will be converted to Constant nodes."""
    return list(map(as_node, input_values))

View File

@ -8,6 +8,7 @@
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
PYBIND11_MAKE_OPAQUE(Containers::PyInputsDataMap);
PYBIND11_MAKE_OPAQUE(Containers::PyConstInputsDataMap);
PYBIND11_MAKE_OPAQUE(Containers::PyOutputsDataMap);
PYBIND11_MAKE_OPAQUE(Containers::PyResults);
@ -16,6 +17,14 @@ namespace py = pybind11;
namespace Containers {
// Registers the opaque PyInputsDataMap (name -> shared_ptr<InputInfo>) as a
// dict-like Python type and adds a keys() method returning a key iterator.
void regclass_PyInputsDataMap(py::module m) {
auto py_inputs_data_map = py::bind_map<PyInputsDataMap>(m, "PyInputsDataMap");
// bind_map does not expose keys(); provide it for dict-style iteration.
py_inputs_data_map.def("keys", [](PyInputsDataMap& self) {
return py::make_key_iterator(self.begin(), self.end());
});
}
void regclass_PyConstInputsDataMap(py::module m) {
auto py_const_inputs_data_map = py::bind_map<PyConstInputsDataMap>(m, "PyConstInputsDataMap");

View File

@ -13,6 +13,8 @@
namespace py = pybind11;
namespace Containers {
using PyInputsDataMap = std::map<std::string, std::shared_ptr<InferenceEngine::InputInfo>>;
using PyConstInputsDataMap =
std::map<std::string, std::shared_ptr<const InferenceEngine::InputInfo>>;
@ -22,6 +24,7 @@ namespace Containers {
using PyResults =
std::map<std::string, std::shared_ptr<const InferenceEngine::Blob>>;
void regclass_PyInputsDataMap(py::module m);
void regclass_PyConstInputsDataMap(py::module m);
void regclass_PyOutputsDataMap(py::module m);
void regclass_PyResults(py::module m);

View File

@ -10,20 +10,17 @@
#include <ie_input_info.hpp>
#include "ngraph/function.hpp"
#include "openvino/core/function.hpp"
#include "pyopenvino/core/containers.hpp"
#include "pyopenvino/core/ie_input_info.hpp"
// using PyInputsDataMap = std::map<std::string, std::shared_ptr<InferenceEngine::InputInfo>>;
//
// PYBIND11_MAKE_OPAQUE(PyInputsDataMap);
namespace py = pybind11;
void regclass_IENetwork(py::module m) {
py::class_<InferenceEngine::CNNNetwork, std::shared_ptr<InferenceEngine::CNNNetwork>> cls(m, "IENetwork");
cls.def(py::init());
cls.def(py::init([](std::shared_ptr<ngraph::Function>& function) {
cls.def(py::init([](std::shared_ptr<ov::Function>& function) {
InferenceEngine::CNNNetwork cnnNetwork(function);
return std::make_shared<InferenceEngine::CNNNetwork>(cnnNetwork);
}));
@ -82,14 +79,8 @@ void regclass_IENetwork(py::module m) {
&InferenceEngine::CNNNetwork::getBatchSize,
&InferenceEngine::CNNNetwork::setBatchSize);
// auto py_inputs_data_map = py::bind_map<PyInputsDataMap>(m, "PyInputsDataMap");
// py_inputs_data_map.def("keys", [](PyInputsDataMap& self) {
// return py::make_key_iterator(self.begin(), self.end());
// });
cls.def_property_readonly("input_info", [](InferenceEngine::CNNNetwork& self) {
std::map<std::string, std::shared_ptr<InferenceEngine::InputInfo>> inputs;
Containers::PyInputsDataMap inputs;
const InferenceEngine::InputsDataMap& inputsInfo = self.getInputsInfo();
for (auto& in : inputsInfo) {
inputs[in.first] = in.second;

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/axis_set.hpp" // ov::AxisSet
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <iterator>
#include <sstream>
#include <string>
#include "pyopenvino/graph/axis_set.hpp"
namespace py = pybind11;
// Registers ov::AxisSet as openvino.impl.AxisSet with set-like constructors,
// __len__, __iter__, and __repr__.
void regclass_graph_AxisSet(py::module m) {
py::class_<ov::AxisSet, std::shared_ptr<ov::AxisSet>> axis_set(m, "AxisSet");
axis_set.doc() = "openvino.impl.AxisSet wraps ov::AxisSet";
// Constructible from initializer lists, sets, vectors, or another AxisSet.
axis_set.def(py::init<const std::initializer_list<size_t>&>(), py::arg("axes"));
axis_set.def(py::init<const std::set<size_t>&>(), py::arg("axes"));
axis_set.def(py::init<const std::vector<size_t>&>(), py::arg("axes"));
axis_set.def(py::init<const ov::AxisSet&>(), py::arg("axes"));
axis_set.def("__len__", [](const ov::AxisSet& v) {
return v.size();
});
axis_set.def(
"__iter__",
[](ov::AxisSet& v) {
return py::make_iterator(v.begin(), v.end());
},
py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */
axis_set.def("__repr__", [](const ov::AxisSet& self) -> std::string {
// Join elements with ", "; substr drops the trailing separator
// (on an empty set substr(0, npos-like) simply yields "").
std::stringstream data_ss;
std::copy(self.begin(), self.end(), std::ostream_iterator<int>(data_ss, ", "));
std::string data_str = data_ss.str();
return "<AxisSet {" + data_str.substr(0, data_str.size() - 2) + "}>";
});
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_AxisSet(py::module m);

View File

@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/axis_vector.hpp" // ov::AxisVector
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "pyopenvino/graph/axis_vector.hpp"
namespace py = pybind11;
// Registers ov::AxisVector as openvino.impl.AxisVector with constructors
// taking initializer lists, vectors, or another AxisVector.
void regclass_graph_AxisVector(py::module m) {
py::class_<ov::AxisVector, std::shared_ptr<ov::AxisVector>> axis_vector(m, "AxisVector");
axis_vector.doc() = "openvino.impl.AxisVector wraps ov::AxisVector";
axis_vector.def(py::init<const std::initializer_list<size_t>&>(), py::arg("axes"));
axis_vector.def(py::init<const std::vector<size_t>&>(), py::arg("axes"));
axis_vector.def(py::init<const ov::AxisVector&>(), py::arg("axes"));
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_AxisVector(py::module m);

View File

@ -0,0 +1,21 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/coordinate.hpp" // ov::Coordinate
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "pyopenvino/graph/coordinate.hpp"
namespace py = pybind11;
// Registers ov::Coordinate as openvino.impl.Coordinate with constructors
// taking initializer lists, a Shape, vectors, or another Coordinate.
void regclass_graph_Coordinate(py::module m) {
py::class_<ov::Coordinate, std::shared_ptr<ov::Coordinate>> coordinate(m, "Coordinate");
coordinate.doc() = "openvino.impl.Coordinate wraps ov::Coordinate";
coordinate.def(py::init<const std::initializer_list<size_t>&>());
coordinate.def(py::init<const ov::Shape&>());
coordinate.def(py::init<const std::vector<size_t>&>());
coordinate.def(py::init<const ov::Coordinate&>());
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_Coordinate(py::module m);

View File

@ -0,0 +1,37 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/coordinate_diff.hpp" // ov::CoordinateDiff
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <iterator>
#include <sstream>
#include <string>
#include "pyopenvino/graph/coordinate_diff.hpp"
namespace py = pybind11;
// Registers ov::CoordinateDiff as openvino.impl.CoordinateDiff with
// constructors plus __str__/__repr__ string representations.
void regclass_graph_CoordinateDiff(py::module m) {
py::class_<ov::CoordinateDiff, std::shared_ptr<ov::CoordinateDiff>> coordinate_diff(m, "CoordinateDiff");
coordinate_diff.doc() = "openvino.impl.CoordinateDiff wraps ov::CoordinateDiff";
coordinate_diff.def(py::init<const std::initializer_list<ptrdiff_t>&>());
coordinate_diff.def(py::init<const std::vector<ptrdiff_t>&>());
coordinate_diff.def(py::init<const ov::CoordinateDiff&>());
coordinate_diff.def("__str__", [](const ov::CoordinateDiff& self) -> std::string {
// Join elements with ", " and strip the trailing separator.
std::stringstream stringstream;
std::copy(self.begin(), self.end(), std::ostream_iterator<int>(stringstream, ", "));
std::string string = stringstream.str();
return string.substr(0, string.size() - 2);
});
coordinate_diff.def("__repr__", [](const ov::CoordinateDiff& self) -> std::string {
// Build "<ClassName: (elements)>" by delegating to the Python-side __str__.
std::string class_name = py::cast(self).get_type().attr("__name__").cast<std::string>();
std::string shape_str = py::cast(self).attr("__str__")().cast<std::string>();
return "<" + class_name + ": (" + shape_str + ")>";
});
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_CoordinateDiff(py::module m);

View File

@ -0,0 +1,343 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// These are not used here, but needed in order to not violate ODR, since
// these are included in other translation units, and specialize some types.
// Related: https://github.com/pybind/pybind11/issues/1055
#include "dict_attribute_visitor.hpp"
#include <pybind11/numpy.h>
#include <pybind11/stl.h>
#include "openvino/op/loop.hpp"
#include "openvino/op/util/sub_graph_base.hpp"
namespace py = pybind11;
// Visitor that fills a node's attributes from a Python dict. `variables` is a
// shared registry used to resolve/create ov::op::util::Variable by id.
util::DictAttributeDeserializer::DictAttributeDeserializer(
const py::dict& attributes,
std::unordered_map<std::string, std::shared_ptr<ov::op::util::Variable>>& variables)
: m_attributes(attributes),
m_variables(variables) {}
// Generic-adapter entry point: dispatches on the concrete adapter type to
// deserialize complex attributes (sub-graph input/output descriptors, Loop
// special body ports, Variables) from the Python dict. Unknown adapter types
// are a hard error.
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) {
if (m_attributes.contains(name)) {
// --- SubGraphOp input descriptors (slice / merged / invariant) ---
if (const auto& a = ov::as_type<
ov::AttributeAdapter<std::vector<std::shared_ptr<ov::op::util::SubGraphOp::InputDescription>>>>(
&adapter)) {
std::vector<std::shared_ptr<ov::op::util::SubGraphOp::InputDescription>> input_descs;
const py::dict& input_desc = m_attributes[name.c_str()].cast<py::dict>();
const auto& merged_input_desc = input_desc["merged_input_desc"].cast<py::list>();
const auto& slice_input_desc = input_desc["slice_input_desc"].cast<py::list>();
const auto& invariant_input_desc = input_desc["invariant_input_desc"].cast<py::list>();
for (py::handle h : slice_input_desc) {
const py::dict& desc = h.cast<py::dict>();
auto slice_in = std::make_shared<ov::op::util::SubGraphOp::SliceInputDescription>(
desc["input_idx"].cast<int64_t>(),
desc["body_parameter_idx"].cast<int64_t>(),
desc["start"].cast<int64_t>(),
desc["stride"].cast<int64_t>(),
desc["part_size"].cast<int64_t>(),
desc["end"].cast<int64_t>(),
desc["axis"].cast<int64_t>());
input_descs.push_back(slice_in);
}
for (py::handle h : merged_input_desc) {
const py::dict& desc = h.cast<py::dict>();
auto merged_in = std::make_shared<ov::op::util::SubGraphOp::MergedInputDescription>(
desc["input_idx"].cast<int64_t>(),
desc["body_parameter_idx"].cast<int64_t>(),
desc["body_value_idx"].cast<int64_t>());
input_descs.push_back(merged_in);
}
for (py::handle h : invariant_input_desc) {
const py::dict& desc = h.cast<py::dict>();
auto invariant_in = std::make_shared<ov::op::util::SubGraphOp::InvariantInputDescription>(
desc["input_idx"].cast<int64_t>(),
desc["body_parameter_idx"].cast<int64_t>());
input_descs.push_back(invariant_in);
}
a->set(input_descs);
// --- SubGraphOp output descriptors (body / concat) ---
} else if (const auto& a = ov::as_type<
ov::AttributeAdapter<std::vector<std::shared_ptr<ov::op::util::SubGraphOp::OutputDescription>>>>(
&adapter)) {
std::vector<std::shared_ptr<ov::op::util::SubGraphOp::OutputDescription>> output_descs;
const py::dict& output_desc = m_attributes[name.c_str()].cast<py::dict>();
const auto& body_output_desc = output_desc["body_output_desc"].cast<py::list>();
const auto& concat_output_desc = output_desc["concat_output_desc"].cast<py::list>();
for (py::handle h : body_output_desc) {
const py::dict& desc = h.cast<py::dict>();
auto body_output = std::make_shared<ov::op::util::SubGraphOp::BodyOutputDescription>(
desc["body_value_idx"].cast<int64_t>(),
desc["output_idx"].cast<int64_t>(),
desc["iteration"].cast<int64_t>());
output_descs.push_back(body_output);
}
for (py::handle h : concat_output_desc) {
const py::dict& desc = h.cast<py::dict>();
auto concat_output = std::make_shared<ov::op::util::SubGraphOp::ConcatOutputDescription>(
desc["body_value_idx"].cast<int64_t>(),
desc["output_idx"].cast<int64_t>(),
desc["start"].cast<int64_t>(),
desc["stride"].cast<int64_t>(),
desc["part_size"].cast<int64_t>(),
desc["end"].cast<int64_t>(),
desc["axis"].cast<int64_t>());
output_descs.push_back(concat_output);
}
a->set(output_descs);
// --- Loop special body ports ---
} else if (const auto& a = ov::as_type<ov::AttributeAdapter<ov::op::v5::Loop::SpecialBodyPorts>>(&adapter)) {
ov::op::v5::Loop::SpecialBodyPorts special_body_ports;
const py::dict& special_ports_dict = m_attributes[name.c_str()].cast<py::dict>();
special_body_ports.body_condition_output_idx =
special_ports_dict["body_condition_output_idx"].cast<int64_t>();
special_body_ports.current_iteration_input_idx =
special_ports_dict["current_iteration_input_idx"].cast<int64_t>();
a->set(special_body_ports);
// --- Variable reference: look up by id, creating a fully-dynamic one on first use ---
} else if (const auto& a =
ov::as_type<ov::AttributeAdapter<std::shared_ptr<ov::op::util::Variable>>>(&adapter)) {
std::string variable_id = m_attributes[name.c_str()].cast<std::string>();
if (!m_variables.count(variable_id)) {
m_variables[variable_id] = std::make_shared<ov::op::util::Variable>(
ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_id});
}
a->set(m_variables[variable_id]);
} else {
NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name);
}
}
}
// Scalar-attribute overloads: each copies the dict entry for `name` (if
// present) into the adapter, cast to the adapter's value type. Absent keys
// are silently skipped so defaults remain in effect.
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<bool>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<bool>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<std::string>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::string>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<int8_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<int8_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<int16_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<int16_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<int32_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<int32_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<int64_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<int64_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<uint8_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<uint8_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<uint16_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<uint16_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<uint32_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<uint32_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<uint64_t>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<uint64_t>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<float>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<float>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor<double>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<double>());
}
}
// Vector-attribute overloads: same pattern as the scalar overloads, casting
// the dict entry to std::vector<T>. Absent keys are silently skipped.
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<std::string>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<std::string>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int8_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<int8_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int16_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<int16_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int32_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<int32_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int64_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<int64_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint8_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<uint8_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint16_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<uint16_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint32_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<uint32_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint64_t>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<uint64_t>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<float>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<float>>());
}
}
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<double>>& adapter) {
if (m_attributes.contains(name)) {
adapter.set(m_attributes[name.c_str()].cast<std::vector<double>>());
}
}
// Sub-graph body overload: only the "body" attribute is supported. Rebuilds
// an ov::Function from the serialized {"results": ..., "parameters": ...}
// dict; any other name with this adapter type is a hard error.
void util::DictAttributeDeserializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::shared_ptr<ov::Function>>& adapter) {
if (m_attributes.contains(name)) {
if (name == "body") {
const py::dict& body_attrs = m_attributes[name.c_str()].cast<py::dict>();
const auto& body_outputs = as_output_vector(body_attrs["results"].cast<ov::NodeVector>());
const auto& body_parameters = body_attrs["parameters"].cast<ov::ParameterVector>();
auto body = std::make_shared<ov::Function>(body_outputs, body_parameters);
adapter.set(body);
} else {
NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name);
}
}
}
// Visitor that collects a node's attributes into m_attributes (a py::dict)
// by immediately visiting the node in the constructor.
util::DictAttributeSerializer::DictAttributeSerializer(const std::shared_ptr<ov::Node>& node) {
node->visit_attributes(*this);
}
// Generic adapters are not serializable to a Python dict.
// NOTE(review): the check fires only when `name` is already present in
// m_attributes, so an unsupported adapter whose name was never recorded
// passes silently — confirm whether the condition was meant to be inverted.
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) {
if (m_attributes.contains(name)) {
NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name);
}
}
// Scalar-attribute overloads: each stores the adapter's current value into
// the dict under `name`; pybind11 converts the C++ value to a Python object.
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<bool>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<std::string>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<int8_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<int16_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<int32_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<int64_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<uint8_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<uint16_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<uint32_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<uint64_t>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<float>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor<double>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
// Vector-attribute overloads: same pattern as the scalar overloads; the
// std::vector<T> value becomes a Python list in the dict.
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<std::string>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int8_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int16_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int32_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<int64_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint8_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint16_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint32_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<uint64_t>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<float>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}
void util::DictAttributeSerializer::on_adapter(const std::string& name,
ov::ValueAccessor<std::vector<double>>& adapter) {
m_attributes[name.c_str()] = adapter.get();
}

View File

@ -0,0 +1,131 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include <string>
#include <vector>
#include "openvino/core/attribute_visitor.hpp"
#include "openvino/core/function.hpp"
#include "openvino/core/node.hpp"
#include "openvino/op/util/variable.hpp"
#include <pybind11/pybind11.h>
namespace py = pybind11;
namespace util
{
/// Attribute visitor that populates a node's attributes from a python dict.
///
/// Visiting a node with this visitor reads each attribute value from the
/// supplied dict (keyed by attribute name) and writes it into the node
/// through the matching ValueAccessor overload.
/// NOTE(review): the Variable map presumably resolves/registers variables for
/// stateful ops (e.g. ReadValue/Assign) -- confirm against the .cpp.
class DictAttributeDeserializer : public ov::AttributeVisitor
{
public:
    /// \param attributes python dict of attribute name -> python value;
    ///        held by reference, so it must outlive this visitor.
    /// \param variables  shared registry of variables keyed by string id;
    ///        held by reference and may be mutated during visitation.
    DictAttributeDeserializer(
        const py::dict& attributes,
        std::unordered_map<std::string, std::shared_ptr<ov::op::util::Variable>>& variables);

    /// Generic fallback for attribute types without a dedicated overload.
    void on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) override;
    // Scalar attribute overloads.
    void on_adapter(const std::string& name, ov::ValueAccessor<bool>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::string>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int8_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int16_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int32_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int64_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint8_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint16_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint32_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint64_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<float>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<double>& adapter) override;
    // Vector attribute overloads.
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<std::string>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int8_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int16_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int32_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int64_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint8_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint16_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint32_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint64_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<float>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<double>>& adapter) override;
    /// Sub-graph attribute (e.g. bodies of control-flow ops).
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::shared_ptr<ov::Function>>& adapter) override;

protected:
    const py::dict& m_attributes;  // borrowed source dict (not owned)
    std::unordered_map<std::string, std::shared_ptr<ov::op::util::Variable>>& m_variables;
};
/// Attribute visitor that captures a node's attributes into a python dict.
///
/// The constructor visits all attributes of the given node; each overload
/// stores the visited value in m_attributes under the attribute's name.
/// The collected dict is then queried via get_attribute()/get_attributes().
class DictAttributeSerializer : public ov::AttributeVisitor
{
public:
    /// Visits every attribute of `node`, filling m_attributes.
    explicit DictAttributeSerializer(const std::shared_ptr<ov::Node>& node);

    /// Generic fallback for attribute types without a dedicated overload.
    void on_adapter(const std::string& name, ov::ValueAccessor<void>& adapter) override;
    // Scalar attribute overloads.
    void on_adapter(const std::string& name, ov::ValueAccessor<bool>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::string>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int8_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int16_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int32_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<int64_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint8_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint16_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint32_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<uint64_t>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<float>& adapter) override;
    void on_adapter(const std::string& name, ov::ValueAccessor<double>& adapter) override;
    // Vector attribute overloads.
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<std::string>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int8_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int16_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int32_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<int64_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint8_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint16_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint32_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<uint64_t>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<float>>& adapter) override;
    void on_adapter(const std::string& name,
                    ov::ValueAccessor<std::vector<double>>& adapter) override;

    /// Returns the captured attribute `name` cast to T.
    /// Raises (via NGRAPH_CHECK) if the node had no attribute of that name.
    template <typename T>
    T get_attribute(const std::string& name)
    {
        NGRAPH_CHECK(m_attributes.contains(name),
                     "Couldn't find attribute \"",
                     name,
                     "\" in serialized node attribute dictionary.");
        return m_attributes[name.c_str()].cast<T>();
    }

    /// Returns the whole attribute dict (a handle to the same python object).
    py::dict get_attributes() const { return m_attributes; }

protected:
    py::dict m_attributes;  // attribute name -> python value
};
} // namespace util

View File

@ -0,0 +1,209 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/dimension.hpp" // ov::Dimension
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <iterator>
#include <sstream>
#include <string>
#include "pyopenvino/graph/dimension.hpp"
namespace py = pybind11;
// Registers the python binding for ov::Dimension (a possibly-dynamic
// dimension of a PartialShape) under the given module.
//
// Fixes vs. previous revision: the `same_scheme` docstring was a copy-paste
// of `get_max_length`'s, and the `refines` docstring labelled its return
// value "relaxes".
void regclass_graph_Dimension(py::module m) {
    using value_type = ov::Dimension::value_type;

    py::class_<ov::Dimension, std::shared_ptr<ov::Dimension>> dim(m, "Dimension");
    dim.doc() = "openvino.impl.Dimension wraps ov::Dimension";
    // Default-constructed Dimension is fully dynamic.
    dim.def(py::init<>());
    dim.def(py::init<value_type&>(),
            py::arg("dimension"),
            R"(
                Construct a static dimension.
                Parameters
                ----------
                dimension : int
                    Value of the dimension.
            )");
    dim.def(py::init<value_type&, value_type&>(),
            py::arg("min_dimension"),
            py::arg("max_dimension"),
            R"(
                Construct a dynamic dimension with bounded range.
                Parameters
                ----------
                min_dimension : int
                    The lower inclusive limit for the dimension.
                max_dimension : int
                    The upper inclusive limit for the dimension.
            )");
    dim.def_static("dynamic", &ov::Dimension::dynamic);

    dim.def_property_readonly("is_dynamic",
                              &ov::Dimension::is_dynamic,
                              R"(
                Check if Dimension is dynamic.
                Returns
                ----------
                is_dynamic : bool
                    True if dynamic, else False.
            )");
    dim.def_property_readonly("is_static",
                              &ov::Dimension::is_static,
                              R"(
                Check if Dimension is static.
                Returns
                ----------
                is_static : bool
                    True if static, else False.
            )");

    // Equality against another Dimension and against a plain integer.
    dim.def(
        "__eq__",
        [](const ov::Dimension& a, const ov::Dimension& b) {
            return a == b;
        },
        py::is_operator());
    dim.def(
        "__eq__",
        [](const ov::Dimension& a, const int64_t& b) {
            return a == b;
        },
        py::is_operator());

    dim.def("__len__", &ov::Dimension::get_length);
    dim.def("get_length",
            &ov::Dimension::get_length,
            R"(
                Return this dimension as integer.
                This dimension must be static and non-negative.
                Returns
                ----------
                get_length : int
                    Value of the dimension.
            )");
    dim.def("get_min_length",
            &ov::Dimension::get_min_length,
            R"(
                Return this dimension's min_dimension as integer.
                This dimension must be dynamic and non-negative.
                Returns
                ----------
                get_min_length : int
                    Value of the dimension.
            )");
    dim.def("get_max_length",
            &ov::Dimension::get_max_length,
            R"(
                Return this dimension's max_dimension as integer.
                This dimension must be dynamic and non-negative.
                Returns
                ----------
                get_max_length : int
                    Value of the dimension.
            )");

    dim.def("same_scheme",
            &ov::Dimension::same_scheme,
            py::arg("dim"),
            R"(
                Check whether this dimension and the argument have the same scheme.
                Parameters
                ----------
                dim : Dimension
                    The other dimension to compare this dimension to.
                Returns
                ----------
                same_scheme : bool
                    True if this dimension and dim are both dynamic,
                    or if they are both static and equal, otherwise False.
            )");
    dim.def("compatible",
            &ov::Dimension::compatible,
            py::arg("d"),
            R"(
                Check whether this dimension is capable of being merged
                with the argument dimension.
                Parameters
                ----------
                d : Dimension
                    The dimension to compare this dimension with.
                Returns
                ----------
                compatible : bool
                    True if this dimension is compatible with d, else False.
            )");
    dim.def("relaxes",
            &ov::Dimension::relaxes,
            py::arg("d"),
            R"(
                Check whether this dimension is a relaxation of the argument.
                This dimension relaxes (or is a relaxation of) d if:
                (1) this and d are static and equal
                (2) this dimension contains d dimension
                this.relaxes(d) is equivalent to d.refines(this).
                Parameters
                ----------
                d : Dimension
                    The dimension to compare this dimension with.
                Returns
                ----------
                relaxes : bool
                    True if this dimension relaxes d, else False.
            )");
    dim.def("refines",
            &ov::Dimension::refines,
            py::arg("d"),
            R"(
                Check whether this dimension is a refinement of the argument.
                This dimension refines (or is a refinement of) d if:
                (1) this and d are static and equal
                (2) d dimension contains this dimension
                this.refines(d) is equivalent to d.relaxes(this).
                Parameters
                ----------
                d : Dimension
                    The dimension to compare this dimension with.
                Returns
                ----------
                refines : bool
                    True if this dimension refines d, else False.
            )");

    // Human-readable representations delegate to ov::Dimension's stream output.
    dim.def("__str__", [](const ov::Dimension& self) -> std::string {
        std::stringstream ss;
        ss << self;
        return ss.str();
    });
    dim.def("__repr__", [](const ov::Dimension& self) -> std::string {
        return "<Dimension: " + py::cast(self).attr("__str__")().cast<std::string>() + ">";
    });
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_Dimension(py::module m);

View File

@ -0,0 +1,314 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/function.hpp" // ov::Function
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "openvino/op/parameter.hpp" // ov::op::v0::Parameter
#include "openvino/op/sink.hpp"
#include "pyopenvino/graph/function.hpp"
namespace py = pybind11;
static const char* CAPSULE_NAME = "ngraph_function";
// Registers the python binding for ov::Function (a model: results + sinks +
// parameters) under the given module.
//
// Fixes vs. previous revision: the sink-check error message contained a
// never-substituted "{}" placeholder; several docstrings carried copy-paste
// labels ("results : Node", "get_output_op : Type", "function parameters"
// under get_results).
void regclass_graph_Function(py::module m) {
    py::class_<ov::Function, std::shared_ptr<ov::Function>> function(m, "Function", py::module_local());
    function.doc() = "openvino.impl.Function wraps ov::Function";

    // Constructor accepting sinks as generic Nodes: each node is verified to
    // actually be an ov::op::Sink before the Function is created.
    function.def(py::init([](const ov::ResultVector& res,
                             const std::vector<std::shared_ptr<ov::Node>>& nodes,
                             const ov::ParameterVector& params,
                             const std::string& name) {
                     ov::SinkVector sinks;
                     for (const auto& node : nodes) {
                         auto sink = std::dynamic_pointer_cast<ov::op::Sink>(node);
                         // Report which node failed the check instead of a raw "{}".
                         NGRAPH_CHECK(sink != nullptr,
                                      "Node ",
                                      node->get_friendly_name(),
                                      " is not an instance of Sink");
                         sinks.push_back(sink);
                     }
                     return std::make_shared<ov::Function>(res, sinks, params, name);
                 }),
                 py::arg("results"),
                 py::arg("sinks"),
                 py::arg("parameters"),
                 py::arg("name"),
                 R"(
                    Create user-defined Function which is a representation of a model.
                    Parameters
                    ----------
                    results : List[op.Result]
                        List of results.
                    sinks : List[Node]
                        List of Nodes to be used as Sinks (e.g. Assign ops).
                    parameters : List[op.Parameter]
                        List of parameters.
                    name : str
                        String to set as function's friendly name.
                 )");

    function.def(py::init<const std::vector<std::shared_ptr<ov::Node>>&,
                          const std::vector<std::shared_ptr<ov::op::v0::Parameter>>&,
                          const std::string&>(),
                 py::arg("results"),
                 py::arg("parameters"),
                 py::arg("name"),
                 R"(
                    Create user-defined Function which is a representation of a model.
                    Parameters
                    ----------
                    results : List[Node]
                        List of Nodes to be used as results.
                    parameters : List[op.Parameter]
                        List of parameters.
                    name : str
                        String to set as function's friendly name.
                 )");

    function.def(py::init<const std::shared_ptr<ov::Node>&,
                          const std::vector<std::shared_ptr<ov::op::v0::Parameter>>&,
                          const std::string&>(),
                 py::arg("result"),
                 py::arg("parameters"),
                 py::arg("name"),
                 R"(
                    Create user-defined Function which is a representation of a model.
                    Parameters
                    ----------
                    result : Node
                        Node to be used as result.
                    parameters : List[op.Parameter]
                        List of parameters.
                    name : str
                        String to set as function's friendly name.
                 )");

    function.def("get_output_size",
                 &ov::Function::get_output_size,
                 R"(
                    Return the number of outputs for the function.
                    Returns
                    ----------
                    get_output_size : int
                        Number of outputs.
                 )");
    function.def("get_ops",
                 &ov::Function::get_ops,
                 R"(
                    Return ops used in the function.
                    Returns
                    ----------
                    get_ops : List[Node]
                        List of Nodes representing ops used in function.
                 )");
    function.def("get_ordered_ops",
                 &ov::Function::get_ordered_ops,
                 R"(
                    Return ops used in the function in topological order.
                    Returns
                    ----------
                    get_ordered_ops : List[Node]
                        List of sorted Nodes representing ops used in function.
                 )");
    function.def("get_output_op",
                 &ov::Function::get_output_op,
                 py::arg("i"),
                 R"(
                    Return the op that generates output i
                    Parameters
                    ----------
                    i : int
                        output index
                    Returns
                    ----------
                    get_output_op : Node
                        Node object that generates output i
                 )");
    function.def("get_output_element_type",
                 &ov::Function::get_output_element_type,
                 py::arg("i"),
                 R"(
                    Return the element type of output i
                    Parameters
                    ----------
                    i : int
                        output index
                    Returns
                    ----------
                    get_output_element_type : Type
                        Type object of output i
                 )");
    function.def("get_output_shape",
                 &ov::Function::get_output_shape,
                 py::arg("i"),
                 R"(
                    Return the shape of element i
                    Parameters
                    ----------
                    i : int
                        element index
                    Returns
                    ----------
                    get_output_shape : Shape
                        Shape object of element i
                 )");
    function.def("get_output_partial_shape",
                 &ov::Function::get_output_partial_shape,
                 py::arg("i"),
                 R"(
                    Return the partial shape of element i
                    Parameters
                    ----------
                    i : int
                        element index
                    Returns
                    ----------
                    get_output_partial_shape : PartialShape
                        PartialShape object of element i
                 )");
    function.def("get_parameters",
                 &ov::Function::get_parameters,
                 R"(
                    Return the function parameters.
                    Returns
                    ----------
                    get_parameters : ParameterVector
                        ParameterVector containing function parameters.
                 )");
    function.def("get_results",
                 &ov::Function::get_results,
                 R"(
                    Return a list of function outputs.
                    Returns
                    ----------
                    get_results : ResultVector
                        ResultVector containing function results.
                 )");
    function.def("get_result",
                 &ov::Function::get_result,
                 R"(
                    Return single result.
                    Returns
                    ----------
                    get_result : Node
                        Node object representing result.
                 )");
    function.def("get_name",
                 &ov::Function::get_name,
                 R"(
                    Get the unique name of the function.
                    Returns
                    ----------
                    get_name : str
                        String with a name of the function.
                 )");
    function.def("get_friendly_name",
                 &ov::Function::get_friendly_name,
                 R"(
                    Gets the friendly name for a function. If no
                    friendly name has been set via set_friendly_name
                    then the function's unique name is returned.
                    Returns
                    ----------
                    get_friendly_name : str
                        String with a friendly name of the function.
                 )");
    function.def("set_friendly_name",
                 &ov::Function::set_friendly_name,
                 py::arg("name"),
                 R"(
                    Sets a friendly name for a function. This does
                    not overwrite the unique name of the function and
                    is retrieved via get_friendly_name(). Used mainly
                    for debugging.
                    Parameters
                    ----------
                    name : str
                        String to set as the friendly name.
                 )");
    function.def("is_dynamic",
                 &ov::Function::is_dynamic,
                 R"(
                    Returns true if any of the op's defined in the function
                    contains partial shape.
                    Returns
                    ----------
                    is_dynamic : bool
                 )");

    // repr: "<Function: 'name' (shape0, shape1, ...)>"
    function.def("__repr__", [](const ov::Function& self) {
        std::string class_name = py::cast(self).get_type().attr("__name__").cast<std::string>();
        std::stringstream shapes_ss;
        for (size_t i = 0; i < self.get_output_size(); ++i) {
            if (i > 0) {
                shapes_ss << ", ";
            }
            shapes_ss << self.get_output_partial_shape(i);
        }
        return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>";
    });

    // Capsule bridge used to pass Functions between the old and new python APIs.
    function.def_static("from_capsule", [](py::object* capsule) {
        // get the underlying PyObject* which is a PyCapsule pointer
        auto* pybind_capsule_ptr = capsule->ptr();
        // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME
        auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME);
        auto* ngraph_function = static_cast<std::shared_ptr<ov::Function>*>(capsule_ptr);
        if (ngraph_function && *ngraph_function) {
            return *ngraph_function;
        } else {
            throw std::runtime_error("The provided capsule does not contain an ov::Function");
        }
    });
    function.def_static("to_capsule", [](std::shared_ptr<ov::Function>& ngraph_function) {
        // create a shared pointer on the heap before putting it in the capsule
        // this secures the lifetime of the object transferred by the capsule
        auto* sp_copy = new std::shared_ptr<ov::Function>(ngraph_function);

        // a destructor callback that will delete the heap allocated shared_ptr
        // when the capsule is destructed
        auto sp_deleter = [](PyObject* capsule) {
            auto* capsule_ptr = PyCapsule_GetPointer(capsule, CAPSULE_NAME);
            auto* function_sp = static_cast<std::shared_ptr<ov::Function>*>(capsule_ptr);
            if (function_sp) {
                delete function_sp;
            }
        };

        // put the shared_ptr in a new capsule under the same name as in "from_capsule"
        auto pybind_capsule = py::capsule(sp_copy, CAPSULE_NAME, sp_deleter);

        return pybind_capsule;
    });

    function.def_property_readonly("name", &ov::Function::get_name);
    function.def_property("friendly_name", &ov::Function::get_friendly_name, &ov::Function::set_friendly_name);
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_Function(py::module m);

View File

@ -0,0 +1,306 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/node.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
#include "dict_attribute_visitor.hpp"
#include "openvino/core/variant.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/subtract.hpp"
#include "pyopenvino/graph/node.hpp"
#include "pyopenvino/graph/rt_map.hpp"
#include "pyopenvino/graph/variant.hpp"
// Trampoline class allowing python classes to derive from ov::Node:
// pure-virtual methods are dispatched to the python override via pybind11.
class PyNode : public ov::Node {
public:
    // Required override: delegates node cloning to the python side.
    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& inputs) const override {
        PYBIND11_OVERRIDE_PURE(std::shared_ptr<ov::Node>, ov::Node, clone_with_new_inputs, inputs);
    }

    // Required override: delegates type-info lookup to the python side.
    // (trailing comma in the macro marks an empty argument list)
    const type_info_t& get_type_info() const override {
        PYBIND11_OVERRIDE_PURE(type_info_t&, ov::Node, get_type_info, );
    }
};
namespace py = pybind11;
using PyRTMap = std::map<std::string, std::shared_ptr<ov::Variant>>;
PYBIND11_MAKE_OPAQUE(PyRTMap);
// Registers the python binding for ov::Node under the given module.
//
// Fixes vs. previous revision: several docstring Returns labels were
// copy-paste leftovers ("get_element_type" under get_output_size, "get_name"
// under get_friendly_name, "input"/"inputs" under output/outputs), a typo
// ("repesenting"), a duplicated set_arguments(OutputVector) registration,
// and operator lambdas took shared_ptr by value (needless refcount churn).
void regclass_graph_Node(py::module m) {
    py::class_<ov::Node, std::shared_ptr<ov::Node>, PyNode> node(m, "Node", py::dynamic_attr());
    node.doc() = "openvino.impl.Node wraps ov::Node";

    // Arithmetic dunders build the corresponding opset1 elementwise ops.
    node.def(
        "__add__",
        [](const std::shared_ptr<ov::Node>& a, const std::shared_ptr<ov::Node>& b) {
            return std::make_shared<ov::op::v1::Add>(a, b);
        },
        py::is_operator());
    node.def(
        "__sub__",
        [](const std::shared_ptr<ov::Node>& a, const std::shared_ptr<ov::Node>& b) {
            return std::make_shared<ov::op::v1::Subtract>(a, b);
        },
        py::is_operator());
    node.def(
        "__mul__",
        [](const std::shared_ptr<ov::Node>& a, const std::shared_ptr<ov::Node>& b) {
            return std::make_shared<ov::op::v1::Multiply>(a, b);
        },
        py::is_operator());
    node.def(
        "__div__",
        [](const std::shared_ptr<ov::Node>& a, const std::shared_ptr<ov::Node>& b) {
            return std::make_shared<ov::op::v1::Divide>(a, b);
        },
        py::is_operator());
    node.def(
        "__truediv__",
        [](const std::shared_ptr<ov::Node>& a, const std::shared_ptr<ov::Node>& b) {
            return std::make_shared<ov::op::v1::Divide>(a, b);
        },
        py::is_operator());

    // repr: "<TypeName: 'friendly_name' (shape0, shape1, ...)>"
    node.def("__repr__", [](const ov::Node& self) {
        std::string type_name = self.get_type_name();
        std::stringstream shapes_ss;
        for (size_t i = 0; i < self.get_output_size(); ++i) {
            if (i > 0) {
                shapes_ss << ", ";
            }
            shapes_ss << self.get_output_partial_shape(i);
        }
        return "<" + type_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>";
    });

    node.def("get_element_type",
             &ov::Node::get_element_type,
             R"(
                Checks that there is exactly one output and returns it's element type.
                Returns
                ----------
                get_element_type : Type
                    Type of the output.
             )");
    node.def("get_output_size",
             &ov::Node::get_output_size,
             R"(
                Returns the number of outputs from the node.
                Returns
                ----------
                get_output_size : int
                    Number of outputs.
             )");
    node.def("get_output_element_type",
             &ov::Node::get_output_element_type,
             py::arg("i"),
             R"(
                Returns the element type for output i
                Parameters
                ----------
                i : int
                    Index of the output.
                Returns
                ----------
                get_output_element_type : Type
                    Type of the output i
             )");
    node.def("get_output_shape",
             &ov::Node::get_output_shape,
             py::arg("i"),
             R"(
                Returns the shape for output i
                Parameters
                ----------
                i : int
                    Index of the output.
                Returns
                ----------
                get_output_shape : Shape
                    Shape of the output i
             )");
    node.def("get_output_partial_shape",
             &ov::Node::get_output_partial_shape,
             py::arg("i"),
             R"(
                Returns the partial shape for output i
                Parameters
                ----------
                i : int
                    Index of the output.
                Returns
                ----------
                get_output_partial_shape : PartialShape
                    PartialShape of the output i
             )");
    node.def("get_type_name",
             &ov::Node::get_type_name,
             R"(
                Returns Type's name from the node.
                Returns
                ----------
                get_type_name : str
                    String representing Type's name.
             )");
    node.def("get_name",
             &ov::Node::get_name,
             R"(
                Get the unique name of the node
                Returns
                ----------
                get_name : str
                    Unique name of the node.
             )");
    node.def("get_friendly_name",
             &ov::Node::get_friendly_name,
             R"(
                Gets the friendly name for a node. If no friendly name has
                been set via set_friendly_name then the node's unique name
                is returned.
                Returns
                ----------
                get_friendly_name : str
                    Friendly name of the node.
             )");
    node.def("get_type_info", &ov::Node::get_type_info);
    node.def("set_friendly_name",
             &ov::Node::set_friendly_name,
             py::arg("name"),
             R"(
                Sets a friendly name for a node. This does not overwrite the unique name
                of the node and is retrieved via get_friendly_name(). Used mainly for
                debugging. The friendly name may be set exactly once.
                Parameters
                ----------
                name : str
                    Friendly name to set.
             )");
    node.def("input",
             (ov::Input<ov::Node>(ov::Node::*)(size_t)) & ov::Node::input,
             py::arg("input_index"),
             R"(
                A handle to the input_index input of this node.
                Parameters
                ----------
                input_index : int
                    Index of Input.
                Returns
                ----------
                input : Input
                    Input of this node.
             )");
    node.def("inputs",
             (std::vector<ov::Input<ov::Node>>(ov::Node::*)()) & ov::Node::inputs,
             R"(
                A list containing a handle for each of this node's inputs, in order.
                Returns
                ----------
                inputs : List[Input]
                    List of node's inputs.
             )");
    node.def("output",
             (ov::Output<ov::Node>(ov::Node::*)(size_t)) & ov::Node::output,
             py::arg("output_index"),
             R"(
                A handle to the output_index output of this node.
                Parameters
                ----------
                output_index : int
                    Index of Output.
                Returns
                ----------
                output : Output
                    Output of this node.
             )");
    node.def("outputs",
             (std::vector<ov::Output<ov::Node>>(ov::Node::*)()) & ov::Node::outputs,
             R"(
                A list containing a handle for each of this node's outputs, in order.
                Returns
                ----------
                outputs : List[Output]
                    List of node's outputs.
             )");
    node.def("get_rt_info",
             (PyRTMap & (ov::Node::*)()) & ov::Node::get_rt_info,
             py::return_value_policy::reference_internal,
             R"(
                Returns PyRTMap which is a dictionary of user defined runtime info.
                Returns
                ----------
                get_rt_info : PyRTMap
                    A dictionary of user defined data.
             )");
    node.def("get_version",
             &ov::Node::get_version,
             R"(
                Returns operation's version of the node.
                Returns
                ----------
                get_version : int
                    Operation version.
             )");

    node.def("set_argument", &ov::Node::set_argument);
    // Two overloads: replace all arguments either from nodes or from outputs.
    node.def("set_arguments", [](const std::shared_ptr<ov::Node>& self, const ov::NodeVector& args) {
        self->set_arguments(args);
    });
    node.def("set_arguments", [](const std::shared_ptr<ov::Node>& self, const ov::OutputVector& args) {
        self->set_arguments(args);
    });

    node.def_property_readonly("shape", &ov::Node::get_shape);
    node.def_property_readonly("name", &ov::Node::get_name);
    node.def_property_readonly("rt_info",
                               (PyRTMap & (ov::Node::*)()) & ov::Node::get_rt_info,
                               py::return_value_policy::reference_internal);
    node.def_property_readonly("version", &ov::Node::get_version);
    node.def_property_readonly("type_info", &ov::Node::get_type_info);
    node.def_property("friendly_name", &ov::Node::get_friendly_name, &ov::Node::set_friendly_name);

    // Serialize all node attributes into a python dict.
    node.def("get_attributes", [](const std::shared_ptr<ov::Node>& self) {
        util::DictAttributeSerializer dict_serializer(self);
        return dict_serializer.get_attributes();
    });
    // Set a single attribute by name: route a one-entry dict through the
    // deserializing attribute visitor.
    node.def("set_attribute", [](std::shared_ptr<ov::Node>& self, const std::string& atr_name, py::object value) {
        py::dict attr_dict;
        attr_dict[atr_name.c_str()] = value;
        std::unordered_map<std::string, std::shared_ptr<ov::op::util::Variable>> variables;
        util::DictAttributeDeserializer dict_deserializer(attr_dict, variables);
        self->visit_attributes(dict_deserializer);
    });
    node.def("validate", [](const std::shared_ptr<ov::Node>& self) {
        return self->constructor_validate_and_infer_types();
    });
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_Node(py::module m);

View File

@ -0,0 +1,119 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "node_factory.hpp"
#include <pybind11/numpy.h>
#include <pybind11/stl.h>
#include <algorithm>
#include <cctype>
#include <functional>
#include <locale>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "dict_attribute_visitor.hpp"
#include "ngraph/check.hpp"
#include "ngraph/log.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/node.hpp"
#include "openvino/op/util/op_types.hpp"
#include "openvino/op/util/variable.hpp"
#include "openvino/opsets/opset.hpp"
namespace py = pybind11;
namespace {
// Factory that creates operator nodes by name from a chosen opset
// (defaults to opset8). Backs the python NodeFactory binding below.
class NodeFactory {
public:
    NodeFactory() {}
    // opset_name is matched case-insensitively against "opset1".."opset8".
    NodeFactory(const std::string& opset_name) : m_opset(get_opset(opset_name)) {}

    // Create a fully-initialized node: set arguments, apply attributes from
    // the python dict via DictAttributeDeserializer, then validate.
    std::shared_ptr<ov::Node> create(const std::string op_type_name,
                                     const ov::OutputVector& arguments,
                                     const py::dict& attributes = py::dict()) {
        std::shared_ptr<ov::Node> op_node = std::shared_ptr<ov::Node>(m_opset.create(op_type_name));

        NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name);
        NGRAPH_CHECK(!ov::op::util::is_constant(op_node),
                     "Currently NodeFactory doesn't support Constant node: ",
                     op_type_name);

        util::DictAttributeDeserializer visitor(attributes, m_variables);

        op_node->set_arguments(arguments);
        op_node->visit_attributes(visitor);
        op_node->constructor_validate_and_infer_types();

        return op_node;
    }

    // Create an "empty" node: no arguments or attributes are set, so the
    // caller must assign them and call validate() before using the op.
    std::shared_ptr<ov::Node> create(const std::string op_type_name) {
        std::shared_ptr<ov::Node> op_node = std::shared_ptr<ov::Node>(m_opset.create(op_type_name));

        NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name);
        NGRAPH_CHECK(!ov::op::util::is_constant(op_node),
                     "Currently NodeFactory doesn't support Constant node: ",
                     op_type_name);

        NGRAPH_WARN << "Empty op created! Please assign inputs and attributes and run validate() before op is used.";

        return op_node;
    }

private:
    // Map a (lower-cased) opset name to the corresponding ov::OpSet;
    // throws ngraph_error for unknown names.
    const ov::OpSet& get_opset(std::string opset_ver) {
        std::locale loc;
        std::transform(opset_ver.begin(), opset_ver.end(), opset_ver.begin(), [&loc](char c) {
            return std::tolower(c, loc);
        });

        using OpsetFunction = std::function<const ov::OpSet&()>;
        static const std::map<std::string, OpsetFunction> s_opsets{
            {"opset1", OpsetFunction(ov::get_opset1)},
            {"opset2", OpsetFunction(ov::get_opset2)},
            {"opset3", OpsetFunction(ov::get_opset3)},
            {"opset4", OpsetFunction(ov::get_opset4)},
            {"opset5", OpsetFunction(ov::get_opset5)},
            {"opset6", OpsetFunction(ov::get_opset6)},
            {"opset7", OpsetFunction(ov::get_opset7)},
            {"opset8", OpsetFunction(ov::get_opset8)},
        };

        auto it = s_opsets.find(opset_ver);
        if (it == s_opsets.end()) {
            throw ngraph::ngraph_error("Unsupported opset version requested.");
        }
        return it->second();
    }

    const ov::OpSet& m_opset = ov::get_opset8();
    // Variable registry shared across created nodes (used by the attribute
    // deserializer; presumably for stateful ops -- confirm with callers).
    std::unordered_map<std::string, std::shared_ptr<ov::op::util::Variable>> m_variables;
};
} // namespace
// Registers the python binding for NodeFactory under the given module.
void regclass_graph_NodeFactory(py::module m) {
    py::class_<NodeFactory> factory(m, "NodeFactory");
    factory.doc() = "NodeFactory creates nGraph nodes";

    // Default factory (opset8) and factory for an explicit opset name.
    factory.def(py::init());
    factory.def(py::init<std::string>());

    // Overload order matters for pybind resolution: name-only first,
    // then name + arguments + attributes.
    factory.def("create", [](NodeFactory& factory_instance, const std::string name) {
        return factory_instance.create(name);
    });
    factory.def(
        "create",
        [](NodeFactory& factory_instance,
           const std::string name,
           const ov::OutputVector& arguments,
           const py::dict& attributes) {
            return factory_instance.create(name, arguments, attributes);
        });

    factory.def("__repr__", [](const NodeFactory&) {
        return "<NodeFactory>";
    });
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_NodeFactory(py::module m);

View File

@ -0,0 +1,78 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/node_input.hpp"
#include <pybind11/stl.h>
#include "dict_attribute_visitor.hpp"
#include "pyopenvino/graph/node_input.hpp"
namespace py = pybind11;
// Registers the python binding for ov::Input<ov::Node> under the given module.
void regclass_graph_Input(py::module m) {
    py::class_<ov::Input<ov::Node>, std::shared_ptr<ov::Input<ov::Node>>> node_input(m,
                                                                                     "Input",
                                                                                     py::dynamic_attr());
    node_input.doc() = "openvino.impl.Input wraps ov::Input<Node>";

    node_input.def("get_node",
                   &ov::Input<ov::Node>::get_node,
                   R"(
                Get node referenced by this input handle.
                Returns
                ----------
                get_node : Node
                    Node object referenced by this input handle.
             )");
    node_input.def("get_index",
                   &ov::Input<ov::Node>::get_index,
                   R"(
                The index of the input referred to by this input handle.
                Returns
                ----------
                get_index : int
                    Index value as integer.
             )");
    node_input.def("get_element_type",
                   &ov::Input<ov::Node>::get_element_type,
                   R"(
                The element type of the input referred to by this input handle.
                Returns
                ----------
                get_element_type : Type
                    Type of the input.
             )");
    node_input.def("get_shape",
                   &ov::Input<ov::Node>::get_shape,
                   R"(
                The shape of the input referred to by this input handle.
                Returns
                ----------
                get_shape : Shape
                    Shape of the input.
             )");
    node_input.def("get_partial_shape",
                   &ov::Input<ov::Node>::get_partial_shape,
                   R"(
                The partial shape of the input referred to by this input handle.
                Returns
                ----------
                get_partial_shape : PartialShape
                    PartialShape of the input.
             )");
    node_input.def("get_source_output",
                   &ov::Input<ov::Node>::get_source_output,
                   R"(
                A handle to the output that is connected to this input.
                Returns
                ----------
                get_source_output : Output
                    Output that is connected to the input.
             )");
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_graph_Input(py::module m);

View File

@ -0,0 +1,78 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/core/node_output.hpp"
#include <pybind11/stl.h>
#include "dict_attribute_visitor.hpp"
#include "pyopenvino/graph/node_output.hpp"
namespace py = pybind11;
void regclass_graph_Output(py::module m) {
    // Bind ov::Output<ov::Node> as "Output". py::dynamic_attr() lets Python
    // code attach arbitrary attributes to Output instances.
    py::class_<ov::Output<ov::Node>, std::shared_ptr<ov::Output<ov::Node>>> output_cls(m,
                                                                                       "Output",
                                                                                       py::dynamic_attr());
    output_cls.doc() = "openvino.impl.Output wraps ov::Output<Node>";

    // Read-only accessors mirroring the C++ ov::Output<Node> interface.
    output_cls.def("get_node",
                   &ov::Output<ov::Node>::get_node,
                   R"(
                Get node referenced by this output handle.

                Returns
                ----------
                get_node : Node
                    Node object referenced by this output handle.
               )");

    output_cls.def("get_index",
                   &ov::Output<ov::Node>::get_index,
                   R"(
                The index of the output referred to by this output handle.

                Returns
                ----------
                get_index : int
                    Index value as integer.
               )");

    output_cls.def("get_element_type",
                   &ov::Output<ov::Node>::get_element_type,
                   R"(
                The element type of the output referred to by this output handle.

                Returns
                ----------
                get_element_type : Type
                    Type of the output.
               )");

    output_cls.def("get_shape",
                   &ov::Output<ov::Node>::get_shape,
                   R"(
                The shape of the output referred to by this output handle.

                Returns
                ----------
                get_shape : Shape
                    Shape of the output.
               )");

    output_cls.def("get_partial_shape",
                   &ov::Output<ov::Node>::get_partial_shape,
                   R"(
                The partial shape of the output referred to by this output handle.

                Returns
                ----------
                get_partial_shape : PartialShape
                    PartialShape of the output.
               )");

    output_cls.def("get_target_inputs",
                   &ov::Output<ov::Node>::get_target_inputs,
                   R"(
                A set containing handles for all inputs targeted by the output
                referenced by this output handle.

                Returns
                ----------
                get_target_inputs : Set[Input]
                    Set of Inputs.
               )");
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
// Registers the "Output" class (ov::Output<ov::Node>) in the given pybind11 module.
void regclass_graph_Output(py::module m);

View File

@ -0,0 +1,142 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/constant.hpp"
#include <pybind11/buffer_info.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <stdexcept>
#include <vector>
#include "openvino/core/shape.hpp"
#include "pyopenvino/graph/ops/constant.hpp"
namespace py = pybind11;
// Translate the row-major element strides of `s` into byte strides for an
// element of type T, as required by the Python buffer protocol.
template <typename T>
std::vector<ssize_t> _get_byte_strides(const ov::Shape& s) {
    const std::vector<size_t> element_strides = ov::row_major_strides(s);
    std::vector<ssize_t> byte_strides(element_strides.size());
    for (size_t i = 0; i < element_strides.size(); ++i) {
        byte_strides[i] = static_cast<ssize_t>(element_strides[i]) * static_cast<ssize_t>(sizeof(T));
    }
    return byte_strides;
}
// Build a py::buffer_info describing the constant's data as a dense,
// row-major array of T. The data pointer is borrowed, not copied, so the
// buffer is only valid while the Constant is alive.
template <typename T>
py::buffer_info _get_buffer_info(const ov::op::v0::Constant& c) {
    const ov::Shape shape = c.get_shape();
    const auto item_size = static_cast<ssize_t>(c.get_element_type().size());
    const auto rank = static_cast<ssize_t>(shape.size());
    std::vector<ssize_t> dims{shape.begin(), shape.end()};
    return py::buffer_info(const_cast<void*>(c.get_data_ptr()),  // pointer to buffer (borrowed)
                           item_size,                            // size of one scalar
                           py::format_descriptor<T>::format(),   // struct-style format descriptor
                           rank,                                 // number of dimensions
                           dims,                                 // buffer dimensions
                           _get_byte_strides<T>(shape));         // strides (in bytes) per index
}
// Specialization for float16: pybind11 has no format_descriptor for half
// precision, so the buffer is described with struct format 'H' (uint16) and
// consumers see the raw IEEE-754 half bit patterns, not float values.
// NOTE(review): struct format 'e' would describe a true float16 buffer to
// NumPy — confirm consumers rely on the uint16 view before changing.
template <>
py::buffer_info _get_buffer_info<ov::float16>(const ov::op::v0::Constant& c) {
    ov::Shape shape = c.get_shape();
    return py::buffer_info(const_cast<void*>(c.get_data_ptr()), /* Pointer to buffer */
                           static_cast<ssize_t>(c.get_element_type().size()), /* Size of one scalar */
                           std::string(1, 'H'), /* Python struct-style format descriptor */
                           static_cast<ssize_t>(shape.size()), /* Number of dimensions */
                           std::vector<ssize_t>{shape.begin(), shape.end()}, /* Buffer dimensions */
                           _get_byte_strides<ov::float16>(shape) /* Strides (in bytes) for each index */
    );
}
// Copy the constant's data out as a 1-D NumPy array of T.
// cast_vector<T>() copies into a std::vector; py::array(count, ptr) then
// copies that buffer again into an array owned by Python.
template <typename T>
py::array _cast_vector(const ov::op::v0::Constant& self) {
    const std::vector<T> values = self.cast_vector<T>();
    return py::array(values.size(), values.data());
}
// Registers ov::op::v0::Constant as "Constant" with buffer-protocol support,
// so NumPy can view the constant's data in place (zero-copy) in addition to
// the copying get_vector() accessor.
void regclass_graph_op_Constant(py::module m) {
    py::class_<ov::op::v0::Constant, std::shared_ptr<ov::op::v0::Constant>, ov::Node> constant(m,
                                                                                               "Constant",
                                                                                               py::buffer_protocol());
    constant.doc() = "openvino.impl.op.Constant wraps ov::op::v0::Constant";
    // One constructor per supported storage type:
    // Constant(element_type, shape, flat_data_vector).
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<char>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<ov::float16>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<float>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<double>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<int8_t>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<int16_t>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<int32_t>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<int64_t>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<uint8_t>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<uint16_t>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<uint32_t>&>());
    constant.def(py::init<const ov::element::Type&, const ov::Shape&, const std::vector<uint64_t>&>());
    constant.def("get_value_strings", &ov::op::v0::Constant::get_value_strings);
    // Copy the data out as a NumPy array whose dtype matches the element type.
    constant.def("get_vector", [](const ov::op::v0::Constant& self) {
        auto element_type = self.get_element_type();
        if (element_type == ov::element::boolean) {
            // boolean is stored as one char per element.
            return _cast_vector<char>(self);
        } else if (element_type == ov::element::f16) {
            return _cast_vector<ov::float16>(self);
        } else if (element_type == ov::element::f32) {
            return _cast_vector<float>(self);
        } else if (element_type == ov::element::f64) {
            return _cast_vector<double>(self);
        } else if (element_type == ov::element::i8) {
            return _cast_vector<int8_t>(self);
        } else if (element_type == ov::element::i16) {
            return _cast_vector<int16_t>(self);
        } else if (element_type == ov::element::i32) {
            return _cast_vector<int32_t>(self);
        } else if (element_type == ov::element::i64) {
            return _cast_vector<int64_t>(self);
        } else if (element_type == ov::element::u8 || element_type == ov::element::u1) {
            // u1 (1-bit) data is exposed through its packed uint8 storage.
            return _cast_vector<uint8_t>(self);
        } else if (element_type == ov::element::u16) {
            return _cast_vector<uint16_t>(self);
        } else if (element_type == ov::element::u32) {
            return _cast_vector<uint32_t>(self);
        } else if (element_type == ov::element::u64) {
            return _cast_vector<uint64_t>(self);
        } else {
            throw std::runtime_error("Unsupported data type!");
        }
    });

    // Provide buffer access
    // (zero-copy view of the constant's data; valid only while it is alive).
    constant.def_buffer([](const ov::op::v0::Constant& self) -> py::buffer_info {
        auto element_type = self.get_element_type();
        if (element_type == ov::element::boolean) {
            return _get_buffer_info<char>(self);
        } else if (element_type == ov::element::f16) {
            // Exposed via the uint16 ('H') specialization of _get_buffer_info.
            return _get_buffer_info<ov::float16>(self);
        } else if (element_type == ov::element::f32) {
            return _get_buffer_info<float>(self);
        } else if (element_type == ov::element::f64) {
            return _get_buffer_info<double>(self);
        } else if (element_type == ov::element::i8) {
            return _get_buffer_info<int8_t>(self);
        } else if (element_type == ov::element::i16) {
            return _get_buffer_info<int16_t>(self);
        } else if (element_type == ov::element::i32) {
            return _get_buffer_info<int32_t>(self);
        } else if (element_type == ov::element::i64) {
            return _get_buffer_info<int64_t>(self);
        } else if (element_type == ov::element::u8 || element_type == ov::element::u1) {
            // u1 (1-bit) data is exposed through its packed uint8 storage.
            return _get_buffer_info<uint8_t>(self);
        } else if (element_type == ov::element::u16) {
            return _get_buffer_info<uint16_t>(self);
        } else if (element_type == ov::element::u32) {
            return _get_buffer_info<uint32_t>(self);
        } else if (element_type == ov::element::u64) {
            return _get_buffer_info<uint64_t>(self);
        } else {
            throw std::runtime_error("Unsupported data type!");
        }
    });
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
// Registers the "Constant" class (ov::op::v0::Constant) in the given pybind11 module.
void regclass_graph_op_Constant(py::module m);

View File

@ -0,0 +1,38 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/parameter.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include "openvino/core/node.hpp"
#include "openvino/core/partial_shape.hpp" // ov::PartialShape
#include "pyopenvino/graph/ops/parameter.hpp"
namespace py = pybind11;
void regclass_graph_op_Parameter(py::module m) {
    // Bind ov::op::v0::Parameter (a graph input placeholder) as "Parameter".
    py::class_<ov::op::v0::Parameter, std::shared_ptr<ov::op::v0::Parameter>, ov::Node> param_cls(m, "Parameter");
    param_cls.doc() = "openvino.impl.op.Parameter wraps ov::op::v0::Parameter";

    // Human-readable representation: <Parameter: 'name' (shape, type)>.
    param_cls.def("__repr__", [](const ov::Node& self) {
        py::object py_obj = py::cast(self);
        const std::string cls_name = py_obj.get_type().attr("__name__").cast<std::string>();
        const std::string shape_repr = py::cast(self.get_output_partial_shape(0)).attr("__str__")().cast<std::string>();
        const std::string type_repr = self.get_element_type().c_type_string();
        return "<" + cls_name + ": '" + self.get_friendly_name() + "' (" + shape_repr + ", " + type_repr + ")>";
    });

    // Construct from element type plus a static or a partial shape.
    param_cls.def(py::init<const ov::element::Type&, const ov::Shape&>());
    param_cls.def(py::init<const ov::element::Type&, const ov::PartialShape&>());
    // parameter.def_property_readonly("description", &ov::op::v0::Parameter::description);

    // Both overloads are registered; pybind11 tries them in registration
    // order, so the const accessor (registered first) handles calls.
    param_cls.def(
        "get_partial_shape",
        (const ov::PartialShape& (ov::op::v0::Parameter::*)() const) & ov::op::v0::Parameter::get_partial_shape);
    param_cls.def("get_partial_shape",
                  (ov::PartialShape & (ov::op::v0::Parameter::*)()) & ov::op::v0::Parameter::get_partial_shape);
    param_cls.def("set_partial_shape", &ov::op::v0::Parameter::set_partial_shape);
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
// Registers the "Parameter" class (ov::op::v0::Parameter) in the given pybind11 module.
void regclass_graph_op_Parameter(py::module m);

View File

@ -0,0 +1,21 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/result.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include "openvino/core/node.hpp"
#include "pyopenvino/graph/ops/result.hpp"
namespace py = pybind11;
void regclass_graph_op_Result(py::module m) {
    // Bind ov::op::v0::Result (marks a graph output) as "Result"; it adds
    // no methods beyond what the Node base class already exposes.
    py::class_<ov::op::v0::Result, std::shared_ptr<ov::op::v0::Result>, ov::Node> result_cls(m, "Result");
    result_cls.doc() = "openvino.impl.op.Result wraps ov::op::v0::Result";
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
// Registers the "Result" class (ov::op::v0::Result) in the given pybind11 module.
void regclass_graph_op_Result(py::module m);

View File

@ -0,0 +1,27 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/util/arithmetic_reduction.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "openvino/op/op.hpp"
#include "pyopenvino/graph/ops/util/arithmetic_reduction.hpp"
namespace py = pybind11;
void regclass_graph_op_util_ArithmeticReduction(py::module m) {
    // Bind the ArithmeticReduction base class shared by reduction ops.
    py::class_<ov::op::util::ArithmeticReduction, std::shared_ptr<ov::op::util::ArithmeticReduction>> reduction_cls(
        m,
        "ArithmeticReduction");
    // arithmeticReduction.def(py::init<const std::string&,
    //                                  const std::shared_ptr<ov::Node>&,
    //                                  const ov::AxisSet& >());

    // Expose explicit accessors plus a Python property built on the same pair.
    reduction_cls.def("get_reduction_axes", &ov::op::util::ArithmeticReduction::get_reduction_axes);
    reduction_cls.def("set_reduction_axes", &ov::op::util::ArithmeticReduction::set_reduction_axes);
    reduction_cls.def_property("reduction_axes",
                               &ov::op::util::ArithmeticReduction::get_reduction_axes,
                               &ov::op::util::ArithmeticReduction::set_reduction_axes);
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
// Registers the "ArithmeticReduction" base class in the given pybind11 module.
void regclass_graph_op_util_ArithmeticReduction(py::module m);

View File

@ -0,0 +1,17 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/util/binary_elementwise_arithmetic.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp"
namespace py = pybind11;
void regclass_graph_op_util_BinaryElementwiseArithmetic(py::module m) {
    // Expose the base class only; concrete elementwise arithmetic ops
    // (Add, Multiply, ...) derive from it and are registered elsewhere.
    py::class_<ov::op::util::BinaryElementwiseArithmetic, std::shared_ptr<ov::op::util::BinaryElementwiseArithmetic>>
        base_cls(m, "BinaryElementwiseArithmetic");
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
// Registers the "BinaryElementwiseArithmetic" base class in the given pybind11 module.
void regclass_graph_op_util_BinaryElementwiseArithmetic(py::module m);

View File

@ -0,0 +1,17 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/util/binary_elementwise_comparison.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp"
namespace py = pybind11;
void regclass_graph_op_util_BinaryElementwiseComparison(py::module m) {
    // Expose the base class only; concrete elementwise comparison ops
    // (Equal, Less, ...) derive from it and are registered elsewhere.
    py::class_<ov::op::util::BinaryElementwiseComparison, std::shared_ptr<ov::op::util::BinaryElementwiseComparison>>
        base_cls(m, "BinaryElementwiseComparison");
}

View File

@ -0,0 +1,11 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
// Registers the "BinaryElementwiseComparison" base class in the given pybind11 module.
void regclass_graph_op_util_BinaryElementwiseComparison(py::module m);

Some files were not shown because too many files have changed in this diff Show More