[PYTHON] Fix style in python doc strings (#10606)

* Fix style in python doc strings

* New line quotes
Jan Iwaszkiewicz 2022-02-25 11:02:04 +01:00 committed by GitHub
parent 14d11a8998
commit 54f39294de
18 changed files with 254 additions and 252 deletions


@@ -132,7 +132,7 @@ class InferRequest(InferRequestBase):
:param inputs: Data to be set on input tensors.
:type inputs: Union[Dict[keys, values], List[values]], optional
- :param userdata: Any data that will be passed inside callback call.
+ :param userdata: Any data that will be passed inside the callback.
:type userdata: Any
"""
super().start_async(
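For context, a minimal sketch of how the parameters documented above are typically used, assuming the 2022.1 `openvino.runtime` API; the model path and input shape are hypothetical:

    # Sketch only (not part of the diff): asynchronous inference with `inputs` and `userdata`.
    import numpy as np
    from openvino.runtime import Core

    core = Core()
    compiled = core.compile_model(core.read_model("model.xml"), "CPU")  # hypothetical model file
    request = compiled.create_infer_request()
    input_data = np.zeros((1, 3, 224, 224), dtype=np.float32)           # hypothetical input shape
    request.start_async(inputs={0: input_data}, userdata={"frame": 0})
    request.wait()                                                      # block until the request completes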
@@ -164,8 +164,8 @@ class CompiledModel(CompiledModelBase):
Blocks all methods of CompiledModel while request is running.
Method creates new temporary InferRequest and run inference on it.
- It is advised to use dedicated InferRequest class for performance,
- optimizing workflows and creating advanced pipelines.
+ It is advised to use a dedicated InferRequest class for performance,
+ optimizing workflows, and creating advanced pipelines.
The allowed types of keys in the `inputs` dictionary are:
@@ -188,7 +188,10 @@ class CompiledModel(CompiledModelBase):
)
def __call__(self, inputs: Union[dict, list] = None) -> dict:
- """Callable infer wrapper for CompiledModel. Look at `infer_new_request` for reference."""
+ """Callable infer wrapper for CompiledModel.
+
+ Take a look at `infer_new_request` for reference.
+ """
return self.infer_new_request(inputs)
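A short sketch of the callable wrapper described above, reusing `compiled` and `input_data` from the previous sketch and assuming results are keyed by the model outputs:

    # Sketch only: calling the CompiledModel directly is a wrapper over infer_new_request().
    results = compiled([input_data])                     # equivalent to the line below
    results = compiled.infer_new_request([input_data])
    first_output = results[compiled.output(0)]           # look up one output tensor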
@@ -245,7 +248,7 @@ class Core(CoreBase):
"""Core class represents OpenVINO runtime Core entity.
User applications can create several Core class instances, but in this
- case the underlying plugins are created multiple times and not shared
+ case, the underlying plugins are created multiple times and not shared
between several Core instances. The recommended way is to have a single
Core instance per application.
"""


@@ -43,7 +43,7 @@ def absolute(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
- returns New node with Abs operation applied on it.
+ :return: New node with Abs operation applied on it.
"""
return _get_node_factory_opset1().create("Abs", [node])
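The opset1 helpers touched below follow one pattern: they accept a node, array or scalar plus an optional name and return a new Node. A sketch, assuming the 2022.1 import path:

    # Sketch only: usage of a unary opset1 helper.
    import numpy as np
    import openvino.runtime.opset1 as ops  # assumed import path

    abs_node = ops.absolute(np.array([-1.5, 2.0, -3.0], dtype=np.float32), name="abs_example")
    print(abs_node.get_type_name())        # -> "Abs"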
@@ -54,7 +54,7 @@ def acos(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
- returns New node with arccos operation applied on it.
+ :return: New node with arccos operation applied on it.
"""
return _get_node_factory_opset1().create("Acos", [node])
@@ -78,7 +78,7 @@ def asin(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
- returns New node with arcsin operation applied on it.
+ :return: New node with arcsin operation applied on it.
"""
return _get_node_factory_opset1().create("Asin", [node])
@@ -89,7 +89,7 @@ def atan(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
- returns New node with arctan operation applied on it.
+ :return: New node with arctan operation applied on it.
"""
return _get_node_factory_opset1().create("Atan", [node])
@@ -120,7 +120,7 @@ def avg_pool(
[None, 'same_upper', 'same_lower', 'valid']
:param name: Optional name for the new output node.
- returns New node with AvgPool operation applied on its data.
+ :return: New node with AvgPool operation applied on its data.
"""
if auto_pad is None:
auto_pad = "explicit"
@@ -159,7 +159,7 @@ def batch_norm_inference(
:param epsilon: The number to be added to the variance to avoid division
by zero when normalizing a value.
:param name: The optional name of the output node.
- returns The new node which performs BatchNormInference.
+ :return: The new node which performs BatchNormInference.
"""
inputs = as_nodes(gamma, beta, data, mean, variance)
return _get_node_factory_opset1().create("BatchNormInference", inputs, {"epsilon": epsilon})
@@ -190,7 +190,7 @@ def binary_convolution(
:param pad_value: Floating-point value used to fill pad area.
:param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
:param name: The optional new name for output node.
- returns New node performing binary convolution operation.
+ :return: New node performing binary convolution operation.
"""
return _get_node_factory_opset1().create(
"BinaryConvolution",
@@ -224,7 +224,7 @@ def broadcast(
:param mode: The type of broadcasting that specifies mapping of input tensor axes
to output shape axes. Range of values: NUMPY, EXPLICIT.
:param name: Optional new name for output node.
- returns New node with broadcast shape.
+ :return: New node with broadcast shape.
"""
inputs = as_nodes(data, target_shape)
if mode.upper() == "EXPLICIT":
@@ -247,7 +247,7 @@ def ctc_greedy_decoder(
:param sequence_mask: The tensor with sequence masks for each sequence in the batch.
:param merge_repeated: The flag for merging repeated labels during the CTC calculation.
:param name: Optional name for output node.
- returns The new node performing an CTCGreedyDecoder operation on input tensor.
+ :return: The new node performing an CTCGreedyDecoder operation on input tensor.
"""
node_inputs = as_nodes(data, sequence_mask)
return _get_node_factory_opset1().create(
@@ -261,7 +261,7 @@ def ceiling(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: The node providing data to ceiling operation.
:param name: Optional name for output node.
- returns The node performing element-wise ceiling.
+ :return: The node performing element-wise ceiling.
"""
return _get_node_factory_opset1().create("Ceiling", [node])
@@ -276,7 +276,7 @@ def clamp(
:param min_value: The lower bound of the <min_value;max_value> range. Scalar value.
:param max_value: The upper bound of the <min_value;max_value> range. Scalar value.
:param name: Optional output node name.
- returns The new node performing a clamp operation on its input data element-wise.
+ :return: The new node performing a clamp operation on its input data element-wise.
Performs a clipping operation on an input value between a pair of boundary values.
@@ -306,7 +306,7 @@ def concat(nodes: List[NodeInput], axis: int, name: Optional[str] = None) -> Nod
:param nodes: The nodes we want concatenate into single new node.
:param axis: The axis along which we want to concatenate input nodes.
:param name: The optional new name for output node.
- returns Return new node that is a concatenation of input nodes.
+ :return: Return new node that is a concatenation of input nodes.
"""
return _get_node_factory_opset1().create("Concat", as_nodes(*nodes), {"axis": axis})
@@ -322,7 +322,7 @@ def constant(
:param value: One of: array of values or scalar to initialize node with.
:param dtype: The data type of provided data.
:param name: Optional name for output node.
- returns The Constant node initialized with provided data.
+ :return: The Constant node initialized with provided data.
"""
return make_constant_node(value, dtype)
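constant() and concat() from the hunks above compose naturally when a graph is built by hand; a small sketch continuing the assumed `ops` import:

    # Sketch only: a tiny sub-graph built from constants.
    c1 = ops.constant(np.array([[1, 2]], dtype=np.int32))
    c2 = ops.constant(np.array([[3, 4]], dtype=np.int32))
    joined = ops.concat([c1, c2], axis=0)  # a 2x2 tensor once evaluated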
@@ -336,7 +336,7 @@ def convert(
:param data: Node which produces the input tensor.
:param destination_type: Provides the target type for the conversion.
:param name: Optional name for the output node.
- returns New node performing the conversion operation.
+ :return: New node performing the conversion operation.
"""
if not isinstance(destination_type, str):
destination_type = get_element_type_str(destination_type)
@@ -352,7 +352,7 @@ def convert_like(data: NodeInput, like: NodeInput, name: Optional[str] = None) -
:param data: Node which produces the input tensor
:param like: Node which provides the target type information for the conversion
:param name: Optional name for the output node.
- returns New node performing the conversion operation.
+ :return: New node performing the conversion operation.
"""
return _get_node_factory_opset1().create("ConvertLike", [data, like])
@@ -378,7 +378,7 @@ def convolution(
:param dilations: The data batch dilation strides.
:param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
:param name: The optional new name for output node.
- returns New node performing batched convolution operation.
+ :return: New node performing batched convolution operation.
"""
return _get_node_factory_opset1().create(
"Convolution",
@@ -419,7 +419,7 @@ def convolution_backprop_data(
in the filter.
:param name: The node name.
- returns The node object representing ConvolutionBackpropData operation.
+ :return: The node object representing ConvolutionBackpropData operation.
"""
spatial_dim_count = len(strides)
if pads_begin is None:
@@ -456,7 +456,7 @@ def cos(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
- returns New node with cos operation applied on it.
+ :return: New node with cos operation applied on it.
"""
return _get_node_factory_opset1().create("Cos", [node])
@@ -467,7 +467,7 @@ def cosh(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
- returns New node with cosh operation applied on it.
+ :return: New node with cosh operation applied on it.
"""
return _get_node_factory_opset1().create("Cosh", [node])
@@ -499,7 +499,7 @@ def deformable_convolution(
:param deformable_group: The number of groups which deformable values and output should be split
into along the channel axis.
:param name: The optional new name for output node.
- returns New node performing deformable convolution operation.
+ :return: New node performing deformable convolution operation.
"""
return _get_node_factory_opset1().create(
"DeformableConvolution",
@@ -548,7 +548,7 @@ def deformable_psroi_pooling(
:param part_size: The number of parts the output tensor spatial dimensions are divided into.
:param offsets: Optional node. 4D input blob with transformation values (offsets).
:param name: The optional new name for output node.
- returns New node performing DeformablePSROIPooling operation.
+ :return: New node performing DeformablePSROIPooling operation.
"""
node_inputs = as_nodes(feature_maps, coords)
if offsets is not None:
@@ -592,7 +592,7 @@ def depth_to_space(node: Node, mode: str, block_size: int = 1, name: str = None)
:param block_size: The size of the spatial block of values describing
how the tensor's data is to be rearranged.
:param name: Optional output node name.
- returns The new node performing an DepthToSpace operation on its input tensor.
+ :return: The new node performing an DepthToSpace operation on its input tensor.
"""
return _get_node_factory_opset1().create(
"DepthToSpace", [node], {"mode": mode, "block_size": block_size},
@@ -618,7 +618,7 @@ def detection_output(
:param aux_class_preds: The 2D input tensor with additional class predictions information.
:param aux_box_preds: The 2D input tensor with additional box predictions information.
:param name: Optional name for the output node.
- returns Node representing DetectionOutput operation.
+ :return: Node representing DetectionOutput operation.
Available attributes are:
@@ -774,7 +774,7 @@ def divide(
:param right_node: The node providing divisor data.
:param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
:param name: Optional name for output node.
- returns The node performing element-wise division.
+ :return: The node performing element-wise division.
"""
return _get_node_factory_opset1().create(
"Divide", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -793,7 +793,7 @@ def elu(data: NodeInput, alpha: NumericType, name: Optional[str] = None) -> Node
:param data: Input tensor. One of: input node, array or scalar.
:param alpha: Scalar multiplier for negative values.
:param name: Optional output node name.
- returns The new node performing an ELU operation on its input data element-wise.
+ :return: The new node performing an ELU operation on its input data element-wise.
"""
return _get_node_factory_opset1().create("Elu", [as_node(data)], {"alpha": alpha})
@@ -812,7 +812,7 @@ def equal(
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
:param name: The optional name for output new node.
- returns The node performing element-wise equality check.
+ :return: The node performing element-wise equality check.
"""
return _get_node_factory_opset1().create(
"Equal", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -825,7 +825,7 @@ def erf(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: The node providing data for operation.
:param name: The optional name for new output node.
- returns The new node performing element-wise Erf operation.
+ :return: The new node performing element-wise Erf operation.
"""
return _get_node_factory_opset1().create("Erf", [node])
@@ -836,7 +836,7 @@ def exp(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: The node providing data for operation.
:param name: The optional name for new output node.
- returns The new node performing natural exponential operation.
+ :return: The new node performing natural exponential operation.
"""
return _get_node_factory_opset1().create("Exp", [node])
@@ -862,7 +862,7 @@ def fake_quantize(
:param levels: The number of quantization levels. Integer value.
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
- returns New node with quantized value.
+ :return: New node with quantized value.
Input floating point values are quantized into a discrete set of floating point values.
@@ -895,7 +895,7 @@ def floor(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: The input node providing data.
:param name: The optional name for new output node.
- returns The node performing element-wise floor operation.
+ :return: The node performing element-wise floor operation.
"""
return _get_node_factory_opset1().create("Floor", [node])
@@ -913,7 +913,7 @@ def floor_mod(
:param right_node: The second input node for FloorMod operation.
:param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
:param name: Optional name for output node.
- returns The node performing element-wise FloorMod operation.
+ :return: The node performing element-wise FloorMod operation.
"""
return _get_node_factory_opset1().create(
"FloorMod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -930,7 +930,7 @@ def gather(
:param indices: Tensor with indexes to gather.
:param axis: The dimension index to gather data from.
:param name: Optional name for output node.
- returns The new node performing a Gather operation on the data input tensor.
+ :return: The new node performing a Gather operation on the data input tensor.
"""
node_inputs = as_nodes(data, indices, axis)
return _get_node_factory_opset1().create("Gather", node_inputs)
@@ -951,7 +951,7 @@ def gather_tree(
:param max_seq_len: The tensor with maximum lengths for each sequence in the batch.
:param end_token: The scalar tensor with value of the end marker in a sequence.
:param name: Optional name for output node.
- returns The new node performing a GatherTree operation.
+ :return: The new node performing a GatherTree operation.
The GatherTree node generates the complete beams from the indices per each step
and the parent beam indices.
@@ -988,7 +988,7 @@ def greater(
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
:param name: The optional new name for output node.
- returns The node performing element-wise check whether left_node is greater than right_node.
+ :return: The node performing element-wise check whether left_node is greater than right_node.
"""
return _get_node_factory_opset1().create(
"Greater", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1009,7 +1009,7 @@ def greater_equal(
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
:param name: The optional new name for output node.
- returns The node performing element-wise check whether left_node is greater than or equal
+ :return: The node performing element-wise check whether left_node is greater than or equal
right_node.
"""
return _get_node_factory_opset1().create(
@@ -1027,7 +1027,7 @@ def grn(data: Node, bias: float, name: Optional[str] = None) -> Node:
:param data: The node with data tensor.
:param bias: The bias added to the variance. Scalar value.
:param name: Optional output node name.
- returns The new node performing a GRN operation on tensor's channels.
+ :return: The new node performing a GRN operation on tensor's channels.
"""
return _get_node_factory_opset1().create("GRN", [data], {"bias": bias})
@@ -1062,7 +1062,7 @@ def group_convolution(
Ceil(num_dims/2) at the end
VALID: No padding
:param name: Optional output node name.
- returns The new node performing a Group Convolution operation on tensor from input node.
+ :return: The new node performing a Group Convolution operation on tensor from input node.
"""
return _get_node_factory_opset1().create(
"GroupConvolution",
@@ -1113,7 +1113,7 @@ def group_convolution_backprop_data(
:param output_padding: The additional amount of paddings added per each spatial axis
in the output tensor.
:param name: Optional output node name.
- returns The new node performing a Group Convolution operation on tensor from input node.
+ :return: The new node performing a Group Convolution operation on tensor from input node.
"""
spatial_dim_count = len(strides)
if dilations is None:
@@ -1150,7 +1150,7 @@ def hard_sigmoid(data: Node, alpha: NodeInput, beta: NodeInput, name: Optional[s
:param alpha: A node producing the alpha parameter.
:param beta: A node producing the beta parameter
:param name: Optional output node name.
- returns The new node performing a Hard Sigmoid element-wise on input tensor.
+ :return: The new node performing a Hard Sigmoid element-wise on input tensor.
Hard Sigmoid uses the following logic:
@@ -1171,7 +1171,7 @@ def interpolate(
:param output_shape: 1D tensor describing output shape for spatial axes.
:param attrs: The dictionary containing key, value pairs for attributes.
:param name: Optional name for the output node.
- returns Node representing interpolation operation.
+ :return: Node representing interpolation operation.
Available attributes are:
@@ -1251,7 +1251,7 @@ def less(
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
:param name: The optional new name for output node.
- returns The node performing element-wise check whether left_node is less than the right_node.
+ :return: The node performing element-wise check whether left_node is less than the right_node.
"""
return _get_node_factory_opset1().create(
"Less", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1272,7 +1272,7 @@ def less_equal(
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
:param name: The optional new name for output node.
- returns The node performing element-wise check whether left_node is less than or equal the
+ :return: The node performing element-wise check whether left_node is less than or equal the
right_node.
"""
return _get_node_factory_opset1().create(
@@ -1286,7 +1286,7 @@ def log(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: The input node providing data for operation.
:param name: The optional new name for output node.
- returns The new node performing log operation element-wise.
+ :return: The new node performing log operation element-wise.
"""
return _get_node_factory_opset1().create("Log", [node])
@@ -1305,7 +1305,7 @@ def logical_and(
:param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
to output shape axes. Range of values: numpy, explicit.
:param name: The optional new name for output node.
- returns The node performing logical and operation on input nodes corresponding elements.
+ :return: The node performing logical and operation on input nodes corresponding elements.
"""
return _get_node_factory_opset1().create(
"LogicalAnd", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1318,7 +1318,7 @@ def logical_not(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: The input node providing data.
:param name: The optional new name for output node.
- returns The node performing element-wise logical NOT operation with given tensor.
+ :return: The node performing element-wise logical NOT operation with given tensor.
"""
return _get_node_factory_opset1().create("LogicalNot", [node])
@@ -1337,7 +1337,7 @@ def logical_or(
:param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
to output shape axes. Range of values: numpy, explicit.
:param name: The optional new name for output node.
- returns The node performing logical or operation on input nodes corresponding elements.
+ :return: The node performing logical or operation on input nodes corresponding elements.
"""
return _get_node_factory_opset1().create(
"LogicalOr", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1358,7 +1358,7 @@ def logical_xor(
:param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
to output shape axes. Range of values: numpy, explicit.
:param name: The optional new name for output node.
- returns The node performing logical or operation on input nodes corresponding elements.
+ :return: The node performing logical or operation on input nodes corresponding elements.
"""
return _get_node_factory_opset1().create(
"LogicalXor", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1383,7 +1383,7 @@ def lrn(
:param bias: An offset (usually positive) to avoid dividing by 0.
:param size: Width of the 1-D normalization window.
:param name: An optional name of the output node.
- returns The new node which performs LRN.
+ :return: The new node which performs LRN.
"""
attributes = {"alpha": alpha, "beta": beta, "bias": bias, "size": size}
return _get_node_factory_opset1().create("LRN", as_nodes(data, axes), attributes)
@@ -1419,7 +1419,7 @@ def lstm_cell(
:param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
:param name: An optional name of the output node.
- returns The new node represents LSTMCell. Node outputs count: 2.
+ :return: The new node represents LSTMCell. Node outputs count: 2.
"""
if activations is None:
activations = ["sigmoid", "tanh", "tanh"]
@@ -1493,7 +1493,7 @@ def lstm_sequence(
:param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
:param name: An optional name of the output node.
- returns The new node represents LSTMSequence. Node outputs count: 3.
+ :return: The new node represents LSTMSequence. Node outputs count: 3.
"""
if activations is None:
activations = ["sigmoid", "tanh", "tanh"]
@@ -1546,7 +1546,7 @@ def matmul(
:param data_b: right-hand side matrix
:param transpose_a: should the first matrix be transposed before operation
:param transpose_b: should the second matrix be transposed
- returns MatMul operation node
+ :return: MatMul operation node
"""
return _get_node_factory_opset1().create(
"MatMul", as_nodes(data_a, data_b), {"transpose_a": transpose_a, "transpose_b": transpose_b}
@@ -1578,7 +1578,7 @@ def max_pool(
[None, 'same_upper', 'same_lower', 'valid']
:param name: The optional name for the created output node.
- returns The new node performing max pooling operation.
+ :return: The new node performing max pooling operation.
"""
if auto_pad is None:
auto_pad = "explicit"
@@ -1635,7 +1635,7 @@ def mod(
:param right_node: The second input node for mod operation.
:param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
:param name: Optional name for output node.
- returns The node performing element-wise Mod operation.
+ :return: The node performing element-wise Mod operation.
"""
return _get_node_factory_opset1().create(
"Mod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1683,7 +1683,7 @@ def non_max_suppression(
:param box_encoding: Format of boxes data encoding. Range of values: corner or cente.
:param sort_result_descending: Flag that specifies whenever it is necessary to sort selected
boxes across batches or not.
- returns The new node which performs NonMaxSuppression
+ :return: The new node which performs NonMaxSuppression
"""
if max_output_boxes_per_class is None:
max_output_boxes_per_class = make_constant_node(0, np.int64)
@@ -1711,7 +1711,7 @@ def normalize_l2(
:param axes: Node indicating axes along which L2 reduction is calculated
:param eps: The epsilon added to L2 norm
:param eps_mode: how eps is combined with L2 value (`add` or `max`)
- returns New node which performs the L2 normalization.
+ :return: New node which performs the L2 normalization.
"""
return _get_node_factory_opset1().create(
"NormalizeL2", as_nodes(data, axes), {"eps": eps, "mode": eps_mode}
@@ -1732,7 +1732,7 @@ def not_equal(
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
:param name: The optional name for output new node.
- returns The node performing element-wise inequality check.
+ :return: The node performing element-wise inequality check.
"""
return _get_node_factory_opset1().create(
"NotEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1759,7 +1759,7 @@ def one_hot(
by indices in input take.
:param name: The optional name for new output node.
- returns New node performing one-hot operation.
+ :return: New node performing one-hot operation.
"""
return _get_node_factory_opset1().create(
"OneHot", as_nodes(indices, depth, on_value, off_value), {"axis": axis}
@@ -1783,7 +1783,7 @@ def pad(
:param pads_end: number of padding elements to be added after the last element.
:param pad_mode: "constant", "edge", "reflect" or "symmetric"
:param arg_pad_value: value used for padding if pad_mode is "constant"
- returns Pad operation node.
+ :return: Pad operation node.
"""
input_nodes = as_nodes(arg, pads_begin, pads_end)
if arg_pad_value:
@@ -1818,7 +1818,7 @@ def power(
:param name: The optional name for the new output node.
:param auto_broadcast: The type of broadcasting specifies rules used for
auto-broadcasting of input tensors.
- returns The new node performing element-wise exponentiation operation on input nodes.
+ :return: The new node performing element-wise exponentiation operation on input nodes.
"""
return _get_node_factory_opset1().create(
"Power", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@@ -1832,7 +1832,7 @@ def prelu(data: NodeInput, slope: NodeInput, name: Optional[str] = None) -> Node
:param data: The node with data tensor.
:param slope: The node with the multipliers for negative values.
:param name: Optional output node name.
- returns The new node performing a PRelu operation on tensor's channels.
+ :return: The new node performing a PRelu operation on tensor's channels.
PRelu uses the following logic:
@@ -1858,7 +1858,7 @@ def prior_box_clustered(
specifies shape of the image for which boxes are generated.
:param attrs: The dictionary containing key, value pairs for attributes.
:param name: Optional name for the output node.
- returns Node representing PriorBoxClustered operation.
+ :return: Node representing PriorBoxClustered operation.
Available attributes are:
@@ -1942,7 +1942,7 @@ def prior_box(
:param image_shape: Shape of image to which prior boxes are scaled.
:param attrs: The dictionary containing key, value pairs for attributes.
:param name: Optional name for the output node.
- returns Node representing prior box operation.
+ :return: Node representing prior box operation.
Available attributes are:
@@ -2062,7 +2062,7 @@ def proposal(
:param image_shape: The 1D input tensor with 3 or 4 elements describing image shape.
:param attrs: The dictionary containing key, value pairs for attributes.
:param name: Optional name for the output node.
- returns Node representing Proposal operation.
+ :return: Node representing Proposal operation.
* base_size The size of the anchor to which scale and ratio attributes are applied.
Range of values: a positive unsigned integer number
@@ -2196,15 +2196,15 @@ def psroi_pooling(
) -> Node:
"""Return a node which produces a PSROIPooling operation.
- :param input: Input feature map {N, C, ...}
- :param coords: Coordinates of bounding boxes
- :param output_dim: Output channel number
- :param group_size: Number of groups to encode position-sensitive scores
- :param spatial_scale: Ratio of input feature map over input image size
- :param spatial_bins_x: Numbers of bins to divide the input feature maps over
- :param spatial_bins_y: Numbers of bins to divide the input feature maps over
- :param mode: Mode of pooling - "avg" or "bilinear"
- returns PSROIPooling node
+ :param input: Input feature map `{N, C, ...}`.
+ :param coords: Coordinates of bounding boxes.
+ :param output_dim: Output channel number.
+ :param group_size: Number of groups to encode position-sensitive scores.
+ :param spatial_scale: Ratio of input feature map over input image size.
+ :param spatial_bins_x: Numbers of bins to divide the input feature maps over.
+ :param spatial_bins_y: Numbers of bins to divide the input feature maps over.
+ :param mode: Mode of pooling - "avg" or "bilinear".
+ :return: PSROIPooling node
"""
mode = mode.lower()
return _get_node_factory_opset1().create(
@@ -2225,11 +2225,11 @@ def psroi_pooling(
def range(start: Node, stop: NodeInput, step: NodeInput, name: Optional[str] = None) -> Node:
"""Return a node which produces the Range operation.
- :param start: The start value of the generated range
- :param stop: The stop value of the generated range
- :param step: The step value for the generated range
+ :param start: The start value of the generated range.
+ :param stop: The stop value of the generated range.
+ :param step: The step value for the generated range.
:param name: Optional name for output node.
- returns Range node
+ :return: Range node
"""
return _get_node_factory_opset1().create("Range", as_nodes(start, stop, step))
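A short sketch of the Range helper whose docstring is touched above, with scalar constants for start, stop and step (assumed `ops` import as before):

    # Sketch only: Range produces a 1-D sequence from scalar inputs.
    seq = ops.range(
        ops.constant(0, dtype=np.int64),   # start
        ops.constant(10, dtype=np.int64),  # stop (exclusive)
        ops.constant(2, dtype=np.int64),   # step -> 0, 2, 4, 6, 8 once evaluated
    )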
@@ -2240,7 +2240,7 @@ def relu(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar.
:param name: The optional output node name.
- returns The new node performing relu operation on its input element-wise.
+ :return: The new node performing relu operation on its input element-wise.
"""
return _get_node_factory_opset1().create("Relu", [node])
@@ -2253,9 +2253,9 @@ def reduce_logical_and(
:param node: The tensor we want to reduce.
:param reduction_axes: The axes to eliminate through AND operation.
- :param keep_dims: If set to True it holds axes that are used for reduction
+ :param keep_dims: If set to True it holds axes that are used for reduction.
:param name: Optional name for output node.
- returns The new node performing reduction operation.
+ :return: The new node performing reduction operation.
"""
return _get_node_factory_opset1().create(
"ReduceLogicalAnd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
@@ -2270,9 +2270,9 @@ def reduce_logical_or(
:param node: The tensor we want to reduce.
:param reduction_axes: The axes to eliminate through OR operation.
- :param keep_dims: If set to True it holds axes that are used for reduction
+ :param keep_dims: If set to True it holds axes that are used for reduction.
:param name: Optional name for output node.
- returns The new node performing reduction operation.
+ :return: The new node performing reduction operation.
"""
return _get_node_factory_opset1().create(
"ReduceLogicalOr", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
@@ -2287,7 +2287,7 @@ def reduce_max(
:param node: The tensor we want to max-reduce.
:param reduction_axes: The axes to eliminate through max operation.
- :param keep_dims: If set to True it holds axes that are used for reduction
+ :param keep_dims: If set to True it holds axes that are used for reduction.
:param name: Optional name for output node.
"""
return _get_node_factory_opset1().create(
@@ -2303,9 +2303,9 @@ def reduce_mean(
:param node: The tensor we want to mean-reduce.
:param reduction_axes: The axes to eliminate through mean operation.
- :param keep_dims: If set to True it holds axes that are used for reduction
+ :param keep_dims: If set to True it holds axes that are used for reduction.
:param name: Optional name for output node.
- returns The new node performing mean-reduction operation.
+ :return: The new node performing mean-reduction operation.
"""
return _get_node_factory_opset1().create(
"ReduceMean", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
@@ -2338,7 +2338,7 @@ def reduce_prod(
:param reduction_axes: The axes to eliminate through product operation.
:param keep_dims: If set to True it holds axes that are used for reduction
:param name: Optional name for output node.
- returns The new node performing product-reduction operation.
+ :return: The new node performing product-reduction operation.
"""
return _get_node_factory_opset1().create(
"ReduceProd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
@@ -2355,7 +2355,7 @@ def reduce_sum(
:param reduction_axes: The axes to eliminate through summation.
:param keep_dims: If set to True it holds axes that are used for reduction
:param name: The optional new name for output node.
- returns The new node performing summation along `reduction_axes` element-wise.
+ :return: The new node performing summation along `reduction_axes` element-wise.
"""
return _get_node_factory_opset1().create(
"ReduceSum", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
@@ -2387,7 +2387,7 @@ def region_yolo(
:param end_axis: Axis to end softmax on
:param anchors: A flattened list of pairs `[width, height]` that describes prior box sizes
:param name: Optional name for output node.
- returns RegionYolo node
+ :return: RegionYolo node
"""
if anchors is None:
anchors = []
@@ -2434,7 +2434,7 @@ def result(data: NodeInput, name: Optional[str] = None) -> Node:
"""Return a node which represents an output of a graph (Model).
:param data: The tensor containing the input data
- returns Result node
+ :return: Result node
"""
return _get_node_factory_opset1().create("Result", [data])
@@ -2453,7 +2453,7 @@ def reverse_sequence(
:param seq_lengths: 1D tensor of integers with sequence lengths in the input tensor.
:param batch_axis: index of the batch dimension.
:param seq_axis: index of the sequence dimension.
- returns ReverseSequence node
+ :return: ReverseSequence node
"""
return _get_node_factory_opset1().create(
"ReverseSequence",
@ -2479,7 +2479,7 @@ def select(
item value is `False`. item value is `False`.
:param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors. :param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors.
:param name: The optional new name for output node. :param name: The optional new name for output node.
returns The new node with values selected according to provided arguments. :return: The new node with values selected according to provided arguments.
""" """
inputs = as_nodes(cond, then_node, else_node) inputs = as_nodes(cond, then_node, else_node)
return _get_node_factory_opset1().create( return _get_node_factory_opset1().create(
@ -2499,7 +2499,7 @@ def selu(
:param alpha: Alpha coefficient of SELU operation :param alpha: Alpha coefficient of SELU operation
:param lambda_value: Lambda coefficient of SELU operation :param lambda_value: Lambda coefficient of SELU operation
:param name: The optional output node name. :param name: The optional output node name.
returns The new node performing relu operation on its input element-wise. :return: The new node performing relu operation on its input element-wise.
""" """
return _get_node_factory_opset1().create("Selu", as_nodes(data, alpha, lambda_value)) return _get_node_factory_opset1().create("Selu", as_nodes(data, alpha, lambda_value))
@ -2509,7 +2509,7 @@ def shape_of(data: NodeInput, name: Optional[str] = None) -> Node:
"""Return a node which produces a tensor containing the shape of its input data. """Return a node which produces a tensor containing the shape of its input data.
:param data: The tensor containing the input data. :param data: The tensor containing the input data.
returns ShapeOf node :return: ShapeOf node
""" """
return _get_node_factory_opset1().create("ShapeOf", [as_node(data)]) return _get_node_factory_opset1().create("ShapeOf", [as_node(data)])
@ -2519,7 +2519,7 @@ def sigmoid(data: NodeInput, name: Optional[str] = None) -> Node:
"""Return a node which applies the sigmoid function element-wise. """Return a node which applies the sigmoid function element-wise.
:param data: The tensor containing the input data :param data: The tensor containing the input data
returns Sigmoid node :return: Sigmoid node
""" """
return _get_node_factory_opset1().create("Sigmoid", [data]) return _get_node_factory_opset1().create("Sigmoid", [data])
@ -2530,7 +2530,7 @@ def sign(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: The optional new name for output node. :param name: The optional new name for output node.
returns The node with mapped elements of the input tensor to -1 (if it is negative), :return: The node with mapped elements of the input tensor to -1 (if it is negative),
0 (if it is zero), or 1 (if it is positive). 0 (if it is zero), or 1 (if it is positive).
""" """
return _get_node_factory_opset1().create("Sign", [node]) return _get_node_factory_opset1().create("Sign", [node])
@ -2542,7 +2542,7 @@ def sin(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns New node with sin operation applied on it. :return: New node with sin operation applied on it.
""" """
return _get_node_factory_opset1().create("Sin", [node]) return _get_node_factory_opset1().create("Sin", [node])
@ -2553,7 +2553,7 @@ def sinh(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns New node with sin operation applied on it. :return: New node with sinh operation applied on it.
""" """
return _get_node_factory_opset1().create("Sinh", [node]) return _get_node_factory_opset1().create("Sinh", [node])
@ -2564,7 +2564,7 @@ def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
:param data: The tensor providing input data. :param data: The tensor providing input data.
:param axis: An axis along which Softmax should be calculated :param axis: An axis along which Softmax should be calculated
returns The new node with softmax operation applied on each element. :return: The new node with softmax operation applied on each element.
""" """
return _get_node_factory_opset1().create("Softmax", [as_node(data)], {"axis": axis}) return _get_node_factory_opset1().create("Softmax", [as_node(data)], {"axis": axis})
@ -2574,7 +2574,7 @@ def space_to_depth(data: Node, mode: str, block_size: int = 1, name: str = None)
"""Perform SpaceToDepth operation on the input tensor. """Perform SpaceToDepth operation on the input tensor.
SpaceToDepth rearranges blocks of spatial data into depth. SpaceToDepth rearranges blocks of spatial data into depth.
The operator returns a copy of the input tensor where values from the height The operator returns a copy of the input tensor where values from the height
and width dimensions are moved to the depth dimension. and width dimensions are moved to the depth dimension.
:param data: The node with data tensor. :param data: The node with data tensor.
@ -2585,7 +2585,7 @@ def space_to_depth(data: Node, mode: str, block_size: int = 1, name: str = None)
:param block_size: The size of the block of values to be moved. Scalar value. :param block_size: The size of the block of values to be moved. Scalar value.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a SpaceToDepth operation on input tensor. :return: The new node performing a SpaceToDepth operation on input tensor.
""" """
return _get_node_factory_opset1().create( return _get_node_factory_opset1().create(
"SpaceToDepth", [data], {"mode": mode, "block_size": block_size}, "SpaceToDepth", [data], {"mode": mode, "block_size": block_size},
@ -2599,7 +2599,7 @@ def split(data: NodeInput, axis: NodeInput, num_splits: int, name: Optional[str]
:param data: The input tensor to be split :param data: The input tensor to be split
:param axis: Axis along which the input data will be split :param axis: Axis along which the input data will be split
:param num_splits: Number of the output tensors that should be produced :param num_splits: Number of the output tensors that should be produced
returns Split node :return: Split node
""" """
return _get_node_factory_opset1().create( return _get_node_factory_opset1().create(
"Split", "Split",
@ -2614,7 +2614,7 @@ def sqrt(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns The new node with sqrt operation applied element-wise. :return: The new node with sqrt operation applied element-wise.
""" """
return _get_node_factory_opset1().create("Sqrt", [node]) return _get_node_factory_opset1().create("Sqrt", [node])
@ -2632,7 +2632,7 @@ def squared_difference(
:param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
to output shape axes. Range of values: numpy, explicit. to output shape axes. Range of values: numpy, explicit.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns The new node performing a squared difference between two tensors. :return: The new node performing a squared difference between two tensors.
""" """
return _get_node_factory_opset1().create( return _get_node_factory_opset1().create(
"SquaredDifference", [x1, x2], {"auto_broadcast": auto_broadcast.upper()} "SquaredDifference", [x1, x2], {"auto_broadcast": auto_broadcast.upper()}
@ -2647,7 +2647,7 @@ def squeeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Nod
:param axes: List of non-negative integers, indicate the dimensions to squeeze. :param axes: List of non-negative integers, indicate the dimensions to squeeze.
One of: input node or array. One of: input node or array.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns The new node performing a squeeze operation on input tensor. :return: The new node performing a squeeze operation on input tensor.
Remove single-dimensional entries from the shape of a tensor. Remove single-dimensional entries from the shape of a tensor.
Takes a parameter `axes` with a list of axes to squeeze. Takes a parameter `axes` with a list of axes to squeeze.
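A short hedged sketch of a squeeze call, assuming the `openvino.runtime.opset1` factory module and an illustrative shape:

.. code-block:: python

    # Illustrative only: opset1 import path and shapes are assumptions.
    import numpy as np
    import openvino.runtime.opset1 as ov_opset1

    data = ov_opset1.parameter([1, 3, 1, 2], name="data")
    node = ov_opset1.squeeze(data, np.array([0, 2], dtype=np.int64))
    # Axes 0 and 2 have size 1 and are removed: [1, 3, 1, 2] -> [3, 2].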
@ -2690,7 +2690,7 @@ def strided_slice(
:param new_axis_mask: A mask indicating dimensions where '1' should be inserted :param new_axis_mask: A mask indicating dimensions where '1' should be inserted
:param shrink_axis_mask: A mask indicating which dimensions should be deleted :param shrink_axis_mask: A mask indicating which dimensions should be deleted
:param ellipsis_mask: Indicates positions where missing dimensions should be inserted :param ellipsis_mask: Indicates positions where missing dimensions should be inserted
returns StridedSlice node :return: StridedSlice node
""" """
if new_axis_mask is None: if new_axis_mask is None:
new_axis_mask = [] new_axis_mask = []
@ -2725,7 +2725,7 @@ def subtract(
:param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes :param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
to output shape axes. Range of values: numpy, explicit. to output shape axes. Range of values: numpy, explicit.
:param name: The optional name for output node. :param name: The optional name for output node.
returns The new output node performing subtraction operation on both tensors element-wise. :return: The new output node performing subtraction operation on both tensors element-wise.
""" """
return _get_node_factory_opset1().create( return _get_node_factory_opset1().create(
"Subtract", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} "Subtract", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
@ -2738,7 +2738,7 @@ def tan(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns New node with tan operation applied on it. :return: New node with tan operation applied on it.
""" """
return _get_node_factory_opset1().create("Tan", [node]) return _get_node_factory_opset1().create("Tan", [node])

View File

@ -54,7 +54,7 @@ def batch_to_space(
:param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`. :param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`.
:param crops_end: Specifies the amount to crop from the end along each axis of `data`. :param crops_end: Specifies the amount to crop from the end along each axis of `data`.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a BatchToSpace operation. :return: The new node performing a BatchToSpace operation.
""" """
return _get_node_factory_opset2().create( return _get_node_factory_opset2().create(
"BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end) "BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end)
@ -73,7 +73,7 @@ def gelu(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: Input tensor. One of: input node, array or scalar. :param node: Input tensor. One of: input node, array or scalar.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a GELU operation on its input data element-wise. :return: The new node performing a GELU operation on its input data element-wise.
""" """
return _get_node_factory_opset2().create("Gelu", [node]) return _get_node_factory_opset2().create("Gelu", [node])
@ -96,9 +96,9 @@ def mvn(
:param across_channels: Denotes if mean values are shared across channels. :param across_channels: Denotes if mean values are shared across channels.
:param normalize_variance: Denotes whether to perform variance normalization. :param normalize_variance: Denotes whether to perform variance normalization.
:param eps: The number added to the variance to avoid division by zero :param eps: The number added to the variance to avoid division by zero
when normalizing the value. Scalar value. when normalizing the value. Scalar value.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a MVN operation on input tensor. :return: The new node performing a MVN operation on input tensor.
""" """
return _get_node_factory_opset2().create( return _get_node_factory_opset2().create(
"MVN", "MVN",
@ -111,10 +111,10 @@ def mvn(
def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node: def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node:
"""Return a node which produces the ReorgYolo operation. """Return a node which produces the ReorgYolo operation.
:param input: Input data :param input: Input data.
:param stride: Stride to reorganize input by :param stride: Stride to reorganize input by.
:param name: Optional name for output node. :param name: Optional name for output node.
returns ReorgYolo node :return: ReorgYolo node.
""" """
return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride}) return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride})
@ -130,12 +130,12 @@ def roi_pooling(
) -> Node: ) -> Node:
"""Return a node which produces an ROIPooling operation. """Return a node which produces an ROIPooling operation.
:param input: Input feature map {N, C, ...} :param input: Input feature map `{N, C, ...}`.
:param coords: Coordinates of bounding boxes :param coords: Coordinates of bounding boxes.
:param output_size: Height/Width of ROI output features (shape) :param output_size: Height/Width of ROI output features (shape).
:param spatial_scale: Ratio of input feature map over input image size (float) :param spatial_scale: Ratio of input feature map over input image size (float).
:param method: Method of pooling - string: "max" or "bilinear" :param method: Method of pooling - string: "max" or "bilinear".
returns ROIPooling node :return: ROIPooling node.
""" """
method = method.lower() method = method.lower()
return _get_node_factory_opset2().create( return _get_node_factory_opset2().create(
@ -164,7 +164,7 @@ def space_to_batch(
:param pads_begin: Specifies the padding for the beginning along each axis of `data`. :param pads_begin: Specifies the padding for the beginning along each axis of `data`.
:param pads_end: Specifies the padding for the ending along each axis of `data`. :param pads_end: Specifies the padding for the ending along each axis of `data`.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a SpaceToBatch operation. :return: The new node performing a SpaceToBatch operation.
""" """
return _get_node_factory_opset2().create( return _get_node_factory_opset2().create(
"SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end) "SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end)

View File

@ -44,7 +44,7 @@ def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -
:param new_value: Node producing a value to be assigned to a variable. :param new_value: Node producing a value to be assigned to a variable.
:param variable_id: Id of a variable to be updated. :param variable_id: Id of a variable to be updated.
:param name: Optional name for output node. :param name: Optional name for output node.
returns Assign node :return: Assign node
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"Assign", "Assign",
@ -70,7 +70,7 @@ def broadcast(
:param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes :param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes
to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL. to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns New node with broadcast shape. :return: New node with broadcast shape.
""" """
inputs = as_nodes(data, target_shape) inputs = as_nodes(data, target_shape)
if broadcast_spec.upper() == "EXPLICIT": if broadcast_spec.upper() == "EXPLICIT":
@ -96,7 +96,7 @@ def bucketize(
:param with_right_bound: indicates whether bucket includes the right or left :param with_right_bound: indicates whether bucket includes the right or left
edge of the interval. Default value is True, which includes the right edge. edge of the interval. Default value is True, which includes the right edge.
:param name: Optional name for output node. :param name: Optional name for output node.
returns Bucketize node :return: Bucketize node
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"Bucketize", "Bucketize",
@ -119,7 +119,7 @@ def cum_sum(
:param axis: zero dimension tensor specifying axis position along which sum will be performed. :param axis: zero dimension tensor specifying axis position along which sum will be performed.
:param exclusive: if set to true, the top element is not included :param exclusive: if set to true, the top element is not included
:param reverse: if set to true, will perform the sums in reverse direction :param reverse: if set to true, will perform the sums in reverse direction
returns New node performing the operation :return: New node performing the operation
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse} "CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse}
@ -143,7 +143,7 @@ def embedding_bag_offsets_sum(
:param per_sample_weights: Tensor with weights for each sample. :param per_sample_weights: Tensor with weights for each sample.
:param default_index: Scalar containing default index in embedding table to fill empty bags. :param default_index: Scalar containing default index in embedding table to fill empty bags.
:param name: Optional name for output node. :param name: Optional name for output node.
returns The new node which performs EmbeddingBagOffsetsSum :return: The new node which performs EmbeddingBagOffsetsSum
""" """
inputs = [emb_table, as_node(indices), as_node(offsets)] inputs = [emb_table, as_node(indices), as_node(offsets)]
if per_sample_weights is not None: if per_sample_weights is not None:
@ -171,7 +171,7 @@ def embedding_bag_packed_sum(
:param indices: Tensor with indices. :param indices: Tensor with indices.
:param per_sample_weights: Weights to be multiplied with embedding table. :param per_sample_weights: Weights to be multiplied with embedding table.
:param name: Optional name for output node. :param name: Optional name for output node.
returns EmbeddingBagPackedSum node :return: EmbeddingBagPackedSum node
""" """
inputs = [as_node(emb_table), as_node(indices)] inputs = [as_node(emb_table), as_node(indices)]
if per_sample_weights is not None: if per_sample_weights is not None:
@ -202,7 +202,7 @@ def embedding_segments_sum(
:param default_index: Scalar containing default index in embedding table to fill empty bags. :param default_index: Scalar containing default index in embedding table to fill empty bags.
:param per_sample_weights: Weights to be multiplied with embedding table. :param per_sample_weights: Weights to be multiplied with embedding table.
:param name: Optional name for output node. :param name: Optional name for output node.
returns EmbeddingSegmentsSum node :return: EmbeddingSegmentsSum node
""" """
inputs = [as_node(emb_table), as_node(indices), as_node(segment_ids)] inputs = [as_node(emb_table), as_node(indices), as_node(segment_ids)]
if per_sample_weights is not None: if per_sample_weights is not None:
@ -235,7 +235,7 @@ def extract_image_patches(
:param rates: Element selection rate for creating a patch. :param rates: Element selection rate for creating a patch.
:param auto_pad: Padding type. :param auto_pad: Padding type.
:param name: Optional name for output node. :param name: Optional name for output node.
returns ExtractImagePatches node :return: ExtractImagePatches node
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"ExtractImagePatches", "ExtractImagePatches",
@ -288,7 +288,7 @@ def gru_cell(
:param linear_before_reset: Flag denotes if the layer behaves according to the modification :param linear_before_reset: Flag denotes if the layer behaves according to the modification
of GRUCell described in the formula in the ONNX documentation. of GRUCell described in the formula in the ONNX documentation.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a GRUCell operation on tensor from input node. :return: The new node performing a GRUCell operation on tensor from input node.
""" """
if activations is None: if activations is None:
activations = ["sigmoid", "tanh"] activations = ["sigmoid", "tanh"]
@ -333,7 +333,7 @@ def non_max_suppression(
:param sort_result_descending: Flag that specifies whether it is necessary to sort selected :param sort_result_descending: Flag that specifies whether it is necessary to sort selected
boxes across batches or not. boxes across batches or not.
:param output_type: Output element type. :param output_type: Output element type.
returns The new node which performs NonMaxSuppression :return: The new node which performs NonMaxSuppression
""" """
if max_output_boxes_per_class is None: if max_output_boxes_per_class is None:
max_output_boxes_per_class = make_constant_node(0, np.int64) max_output_boxes_per_class = make_constant_node(0, np.int64)
@ -359,7 +359,7 @@ def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = No
:param data: Input data. :param data: Input data.
:param output_type: Output tensor type. :param output_type: Output tensor type.
returns The new node which performs NonZero :return: The new node which performs NonZero
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"NonZero", "NonZero",
@ -375,7 +375,7 @@ def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = No
:param init_value: Node producing a value to be returned instead of an unassigned variable. :param init_value: Node producing a value to be returned instead of an unassigned variable.
:param variable_id: Id of a variable to be read. :param variable_id: Id of a variable to be read.
:param name: Optional name for output node. :param name: Optional name for output node.
returns ReadValue node :return: ReadValue node
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"ReadValue", "ReadValue",
@ -422,7 +422,7 @@ def rnn_cell(
:param clip: The value defining clipping range [-clip, clip] on input of :param clip: The value defining clipping range [-clip, clip] on input of
activation functions. activation functions.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a RNNCell operation on tensor from input node. :return: The new node performing a RNNCell operation on tensor from input node.
""" """
if activations is None: if activations is None:
activations = ["tanh"] activations = ["tanh"]
@ -467,7 +467,7 @@ def roi_align(
:param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates. :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates.
:param mode: Method to perform pooling to produce output feature map elements. :param mode: Method to perform pooling to produce output feature map elements.
returns The new node which performs ROIAlign :return: The new node which performs ROIAlign
""" """
inputs = as_nodes(data, rois, batch_indices) inputs = as_nodes(data, rois, batch_indices)
attributes = { attributes = {
@ -494,7 +494,7 @@ def scatter_elements_update(
:param indices: The tensor with indexes which will be updated. :param indices: The tensor with indexes which will be updated.
:param updates: The tensor with update values. :param updates: The tensor with update values.
:param axis: The axis for scatter. :param axis: The axis for scatter.
returns ScatterElementsUpdate node :return: ScatterElementsUpdate node
ScatterElementsUpdate creates a copy of the first input tensor with updated elements ScatterElementsUpdate creates a copy of the first input tensor with updated elements
specified with second and third input tensors. specified with second and third input tensors.
@ -523,7 +523,7 @@ def scatter_update(
:param indices: The tensor with indexes which will be updated. :param indices: The tensor with indexes which will be updated.
:param updates: The tensor with update values. :param updates: The tensor with update values.
:param axis: The axis at which elements will be updated. :param axis: The axis at which elements will be updated.
returns ScatterUpdate node :return: ScatterUpdate node
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"ScatterUpdate", "ScatterUpdate",
@ -537,7 +537,7 @@ def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = No
:param data: The tensor containing the input data. :param data: The tensor containing the input data.
:param output_type: Output element type. :param output_type: Output element type.
returns ShapeOf node :return: ShapeOf node
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"ShapeOf", "ShapeOf",
@ -557,7 +557,7 @@ def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = No
:param group: The channel dimension specified by the axis parameter :param group: The channel dimension specified by the axis parameter
should be split into this number of groups. should be split into this number of groups.
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a permutation on data in the channel dimension :return: The new node performing a permutation on data in the channel dimension
of the input tensor. of the input tensor.
The operation is equivalent to the following transformation of the input tensor The operation is equivalent to the following transformation of the input tensor
@ -617,7 +617,7 @@ def topk(
:param mode: Compute TopK largest ('max') or smallest ('min') :param mode: Compute TopK largest ('max') or smallest ('min')
:param sort: Order of output elements (sort by: 'none', 'index' or 'value') :param sort: Order of output elements (sort by: 'none', 'index' or 'value')
:param index_element_type: Type of output tensor with indices. :param index_element_type: Type of output tensor with indices.
returns The new node which performs TopK (both indices and values) :return: The new node which performs TopK (both indices and values)
""" """
return _get_node_factory_opset3().create( return _get_node_factory_opset3().create(
"TopK", "TopK",

View File

@ -59,7 +59,7 @@ def ctc_loss(
:param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation. :param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation.
:param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment. :param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment.
:param unique: Flag to find unique elements in a target. :param unique: Flag to find unique elements in a target.
returns The new node which performs CTCLoss :return: The new node which performs CTCLoss
""" """
if blank_index is not None: if blank_index is not None:
inputs = as_nodes(logits, logit_length, labels, label_length, blank_index) inputs = as_nodes(logits, logit_length, labels, label_length, blank_index)
@ -99,7 +99,7 @@ def non_max_suppression(
:param sort_result_descending: Flag that specifies whether it is necessary to sort selected :param sort_result_descending: Flag that specifies whether it is necessary to sort selected
boxes across batches or not. boxes across batches or not.
:param output_type: Output element type. :param output_type: Output element type.
returns The new node which performs NonMaxSuppression :return: The new node which performs NonMaxSuppression
""" """
if max_output_boxes_per_class is None: if max_output_boxes_per_class is None:
max_output_boxes_per_class = make_constant_node(0, np.int64) max_output_boxes_per_class = make_constant_node(0, np.int64)
@ -123,7 +123,7 @@ def softplus(data: NodeInput, name: Optional[str] = None) -> Node:
"""Apply SoftPlus operation on each element of input tensor. """Apply SoftPlus operation on each element of input tensor.
:param data: The tensor providing input data. :param data: The tensor providing input data.
returns The new node with SoftPlus operation applied on each element. :return: The new node with SoftPlus operation applied on each element.
""" """
return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {}) return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {})
@ -133,7 +133,7 @@ def mish(data: NodeInput, name: Optional[str] = None,) -> Node:
"""Return a node which performs Mish. """Return a node which performs Mish.
:param data: Tensor with input data floating point type. :param data: Tensor with input data floating point type.
returns The new node which performs Mish :return: The new node which performs Mish
""" """
return _get_node_factory_opset4().create("Mish", as_nodes(data), {}) return _get_node_factory_opset4().create("Mish", as_nodes(data), {})
@ -143,7 +143,7 @@ def hswish(data: NodeInput, name: Optional[str] = None,) -> Node:
"""Return a node which performs HSwish (hard version of Swish). """Return a node which performs HSwish (hard version of Swish).
:param data: Tensor with input data floating point type. :param data: Tensor with input data floating point type.
returns The new node which performs HSwish :return: The new node which performs HSwish
""" """
return _get_node_factory_opset4().create("HSwish", as_nodes(data), {}) return _get_node_factory_opset4().create("HSwish", as_nodes(data), {})
@ -157,7 +157,7 @@ def swish(
"""Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)). """Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)).
:param data: Tensor with input data floating point type. :param data: Tensor with input data floating point type.
returns The new node which performs Swish :return: The new node which performs Swish
""" """
if beta is None: if beta is None:
beta = make_constant_node(1.0, np.float32) beta = make_constant_node(1.0, np.float32)
@ -170,7 +170,7 @@ def acosh(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns New node with arccosh operation applied on it. :return: New node with arccosh operation applied on it.
""" """
return _get_node_factory_opset4().create("Acosh", [node]) return _get_node_factory_opset4().create("Acosh", [node])
@ -181,7 +181,7 @@ def asinh(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns New node with arcsinh operation applied on it. :return: New node with arcsinh operation applied on it.
""" """
return _get_node_factory_opset4().create("Asinh", [node]) return _get_node_factory_opset4().create("Asinh", [node])
@ -192,7 +192,7 @@ def atanh(node: NodeInput, name: Optional[str] = None) -> Node:
:param node: One of: input node, array or scalar. :param node: One of: input node, array or scalar.
:param name: Optional new name for output node. :param name: Optional new name for output node.
returns New node with arctanh operation applied on it. :return: New node with arctanh operation applied on it.
""" """
return _get_node_factory_opset4().create("Atanh", [node]) return _get_node_factory_opset4().create("Atanh", [node])
@ -292,7 +292,7 @@ def proposal(
} }
Optional attributes which are absent from the dictionary will be set with the corresponding defaults. Optional attributes which are absent from the dictionary will be set with the corresponding defaults.
returns Node representing Proposal operation. :return: Node representing Proposal operation.
""" """
requirements = [ requirements = [
("base_size", True, np.unsignedinteger, is_positive_value), ("base_size", True, np.unsignedinteger, is_positive_value),
@ -328,7 +328,7 @@ def reduce_l1(
:param reduction_axes: The axes to eliminate through the L1-reduction operation. :param reduction_axes: The axes to eliminate through the L1-reduction operation.
:param keep_dims: If set to True it holds axes that are used for reduction :param keep_dims: If set to True it holds axes that are used for reduction
:param name: Optional name for output node. :param name: Optional name for output node.
returns The new node performing mean-reduction operation. :return: The new node performing the L1-reduction operation.
""" """
return _get_node_factory_opset4().create( return _get_node_factory_opset4().create(
"ReduceL1", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} "ReduceL1", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
@ -345,7 +345,7 @@ def reduce_l2(
:param reduction_axes: The axes to eliminate through the L2-reduction operation. :param reduction_axes: The axes to eliminate through the L2-reduction operation.
:param keep_dims: If set to True it holds axes that are used for reduction :param keep_dims: If set to True it holds axes that are used for reduction
:param name: Optional name for output node. :param name: Optional name for output node.
returns The new node performing mean-reduction operation. :return: The new node performing the L2-reduction operation.
""" """
return _get_node_factory_opset4().create( return _get_node_factory_opset4().create(
"ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} "ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims}
@ -382,7 +382,7 @@ def lstm_cell(
:param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
:param name: An optional name of the output node. :param name: An optional name of the output node.
returns The new node represents LSTMCell. Node outputs count: 2. :return: The new node represents LSTMCell. Node outputs count: 2.
""" """
if activations is None: if activations is None:
activations = ["sigmoid", "tanh", "tanh"] activations = ["sigmoid", "tanh", "tanh"]

View File

@ -57,7 +57,7 @@ def batch_norm_inference(
:param epsilon: The number to be added to the variance to avoid division :param epsilon: The number to be added to the variance to avoid division
by zero when normalizing a value. by zero when normalizing a value.
:param name: The optional name of the output node. :param name: The optional name of the output node.
@return: The new node which performs BatchNormInference. :return: The new node which performs BatchNormInference.
""" """
inputs = as_nodes(data, gamma, beta, mean, variance) inputs = as_nodes(data, gamma, beta, mean, variance)
return _get_node_factory_opset5().create("BatchNormInference", inputs, {"epsilon": epsilon}) return _get_node_factory_opset5().create("BatchNormInference", inputs, {"epsilon": epsilon})
@ -75,7 +75,7 @@ def gather_nd(
:param data: N-D tensor with data for gathering :param data: N-D tensor with data for gathering
:param indices: K-D tensor of tuples with indices by which data is gathered :param indices: K-D tensor of tuples with indices by which data is gathered
:param batch_dims: Scalar value of batch dimensions :param batch_dims: Scalar value of batch dimensions
@return: The new node which performs GatherND :return: The new node which performs GatherND
""" """
inputs = as_nodes(data, indices) inputs = as_nodes(data, indices)
@ -92,7 +92,7 @@ def log_softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
:param data: The tensor providing input data. :param data: The tensor providing input data.
:param axis: An axis along which LogSoftmax should be calculated :param axis: An axis along which LogSoftmax should be calculated
@return: The new node with LogSoftmax operation applied on each element. :return: The new node with LogSoftmax operation applied on each element.
""" """
return _get_node_factory_opset5().create("LogSoftmax", [as_node(data)], {"axis": axis}) return _get_node_factory_opset5().create("LogSoftmax", [as_node(data)], {"axis": axis})
@ -123,7 +123,7 @@ def non_max_suppression(
:param sort_result_descending: Flag that specifies whether it is necessary to sort selected :param sort_result_descending: Flag that specifies whether it is necessary to sort selected
boxes across batches or not. boxes across batches or not.
:param output_type: Output element type. :param output_type: Output element type.
@return: The new node which performs NonMaxSuppression :return: The new node which performs NonMaxSuppression
""" """
if max_output_boxes_per_class is None: if max_output_boxes_per_class is None:
max_output_boxes_per_class = make_constant_node(0, np.int64) max_output_boxes_per_class = make_constant_node(0, np.int64)
@ -158,7 +158,7 @@ def round(data: NodeInput, mode: str = "half_to_even", name: Optional[str] = Non
integer or rounding in such a way that the result heads away from zero if `mode` attribute is integer or rounding in such a way that the result heads away from zero if `mode` attribute is
`half_away_from_zero`. `half_away_from_zero`.
:param name: An optional name of the output node. :param name: An optional name of the output node.
@return: The new node with Round operation applied on each element. :return: The new node with Round operation applied on each element.
""" """
return _get_node_factory_opset5().create("Round", as_nodes(data), {"mode": mode.upper()}) return _get_node_factory_opset5().create("Round", as_nodes(data), {"mode": mode.upper()})
@ -205,7 +205,7 @@ def lstm_sequence(
:param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
:param name: An optional name of the output node. :param name: An optional name of the output node.
@return: The new node represents LSTMSequence. Node outputs count: 3. :return: The new node represents LSTMSequence. Node outputs count: 3.
""" """
if activations is None: if activations is None:
activations = ["sigmoid", "tanh", "tanh"] activations = ["sigmoid", "tanh", "tanh"]
@ -231,7 +231,7 @@ def hsigmoid(data: NodeInput, name: Optional[str] = None,) -> Node:
"""Return a node which performs HSigmoid. """Return a node which performs HSigmoid.
:param data: Tensor with input data floating point type. :param data: Tensor with input data floating point type.
@return: The new node which performs HSigmoid :return: The new node which performs HSigmoid
""" """
return _get_node_factory_opset5().create("HSigmoid", as_nodes(data), {}) return _get_node_factory_opset5().create("HSigmoid", as_nodes(data), {})
@ -277,7 +277,7 @@ def gru_sequence(
of GRU described in the formula in the ONNX documentation. of GRU described in the formula in the ONNX documentation.
:param name: An optional name of the output node. :param name: An optional name of the output node.
@return: The new node represents GRUSequence. Node outputs count: 2. :return: The new node represents GRUSequence. Node outputs count: 2.
""" """
if activations is None: if activations is None:
activations = ["sigmoid", "tanh"] activations = ["sigmoid", "tanh"]
@ -337,7 +337,7 @@ def rnn_sequence(
:param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. :param clip: Specifies bound values [-C, C] for tensor clipping performed before activations.
:param name: An optional name of the output node. :param name: An optional name of the output node.
@return: The new node represents RNNSequence. Node outputs count: 2. :return: The new node represents RNNSequence. Node outputs count: 2.
""" """
if activations is None: if activations is None:
activations = ["tanh"] activations = ["tanh"]

View File

@ -53,7 +53,7 @@ def ctc_greedy_decoder_seq_len(
:param sequence_length: Input 1D tensor with sequence length. Shape: [batch_size] :param sequence_length: Input 1D tensor with sequence length. Shape: [batch_size]
:param blank_index: Scalar or 1D tensor that specifies the class index to use for the blank class. :param blank_index: Scalar or 1D tensor that specifies the class index to use for the blank class.
Optional parameter. Default value is num_classes-1. Optional parameter. Default value is num_classes-1.
@return: The new node which performs CTCGreedyDecoderSeqLen. :return: The new node which performs CTCGreedyDecoderSeqLen.
""" """
if blank_index is not None: if blank_index is not None:
inputs = as_nodes(data, sequence_length, blank_index) inputs = as_nodes(data, sequence_length, blank_index)
@ -81,7 +81,7 @@ def gather_elements(
:param data: N-D tensor with data for gathering :param data: N-D tensor with data for gathering
:param indices: N-D tensor with indices by which data is gathered :param indices: N-D tensor with indices by which data is gathered
:param axis: axis along which elements are gathered :param axis: axis along which elements are gathered
@return: The new node which performs GatherElements :return: The new node which performs GatherElements
""" """
inputs = as_nodes(data, indices) inputs = as_nodes(data, indices)
@ -110,7 +110,7 @@ def mvn(
when normalizing the value. Scalar value. when normalizing the value. Scalar value.
:param eps_mode: how eps is applied (`inside_sqrt` or `outside_sqrt`) :param eps_mode: how eps is applied (`inside_sqrt` or `outside_sqrt`)
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a MVN operation on input tensor. :return: The new node performing a MVN operation on input tensor.
""" """
inputs = as_nodes(data, axes) inputs = as_nodes(data, axes)
@ -130,7 +130,7 @@ def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -
:param new_value: Node producing a value to be assigned to a variable. :param new_value: Node producing a value to be assigned to a variable.
:param variable_id: Id of a variable to be updated. :param variable_id: Id of a variable to be updated.
:param name: Optional name for output node. :param name: Optional name for output node.
returns Assign node :return: Assign node
""" """
return _get_node_factory_opset6().create( return _get_node_factory_opset6().create(
"Assign", "Assign",
@ -146,7 +146,7 @@ def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = No
:param init_value: Node producing a value to be returned instead of an unassigned variable. :param init_value: Node producing a value to be returned instead of an unassigned variable.
:param variable_id: Id of a variable to be read. :param variable_id: Id of a variable to be read.
:param name: Optional name for output node. :param name: Optional name for output node.
returns ReadValue node :return: ReadValue node
""" """
return _get_node_factory_opset6().create( return _get_node_factory_opset6().create(
"ReadValue", "ReadValue",

View File

@ -46,7 +46,7 @@ def einsum(
:param inputs: The list of input nodes :param inputs: The list of input nodes
:param equation: Einsum equation :param equation: Einsum equation
@return: The new node performing Einsum operation on the inputs :return: The new node performing Einsum operation on the inputs
""" """
attributes = { attributes = {
"equation": equation "equation": equation
@ -66,7 +66,7 @@ def gelu(
:param data: The node with data tensor. :param data: The node with data tensor.
:param approximation_mode: defines which approximation to use ('tanh' or 'erf') :param approximation_mode: defines which approximation to use ('tanh' or 'erf')
:param name: Optional output node name. :param name: Optional output node name.
returns The new node performing a Gelu activation with the input tensor. :return: The new node performing a Gelu activation with the input tensor.
""" """
inputs = as_nodes(data) inputs = as_nodes(data)
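A hedged sketch of selecting the approximation, assuming the `openvino.runtime.opset7` factory module:

.. code-block:: python

    # Illustrative only: opset7 import path and shape are assumptions.
    import openvino.runtime.opset7 as ov_opset7

    data = ov_opset7.parameter([2, 4], name="data")
    node = ov_opset7.gelu(data, approximation_mode="erf")  # "tanh" selects the other variant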
@ -88,7 +88,7 @@ def roll(
:param data: The node with data tensor. :param data: The node with data tensor.
:param shift: The node with the tensor with numbers of places by which elements are shifted. :param shift: The node with the tensor with numbers of places by which elements are shifted.
:param axes: The node with the tensor with axes along which elements are shifted. :param axes: The node with the tensor with axes along which elements are shifted.
returns The new node performing a Roll operation on the input tensor. :return: The new node performing a Roll operation on the input tensor.
""" """
inputs = as_nodes(data, shift, axes) inputs = as_nodes(data, shift, axes)
@ -108,7 +108,7 @@ def gather(
:param indices: N-D tensor with indices by which data is gathered :param indices: N-D tensor with indices by which data is gathered
:param axis: axis along which elements are gathered :param axis: axis along which elements are gathered
:param batch_dims: number of batch dimensions :param batch_dims: number of batch dimensions
@return: The new node which performs Gather :return: The new node which performs Gather
""" """
inputs = as_nodes(data, indices, axis) inputs = as_nodes(data, indices, axis)
attributes = { attributes = {
@ -127,7 +127,7 @@ def dft(
:param data: Tensor with transformed data. :param data: Tensor with transformed data.
:param axes: Tensor with axes to transform. :param axes: Tensor with axes to transform.
:param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'. :param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'.
@return: The new node which performs DFT operation on the input data tensor. :return: The new node which performs DFT operation on the input data tensor.
""" """
if signal_size is None: if signal_size is None:
inputs = as_nodes(data, axes) inputs = as_nodes(data, axes)
@ -148,7 +148,7 @@ def idft(
:param data: Tensor with transformed data. :param data: Tensor with transformed data.
:param axes: Tensor with axes to transform. :param axes: Tensor with axes to transform.
:param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'. :param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'.
@return: The new node which performs IDFT operation on the input data tensor. :return: The new node which performs IDFT operation on the input data tensor.
""" """
if signal_size is None: if signal_size is None:
inputs = as_nodes(data, axes) inputs = as_nodes(data, axes)

View File

@ -62,7 +62,7 @@ def deformable_convolution(
:param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation :param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation
execution. execution.
:param name: The optional new name for output node. :param name: The optional new name for output node.
returns New node performing deformable convolution operation. :return: New node performing deformable convolution operation.
""" """
if mask is None: if mask is None:
inputs = as_nodes(data, offsets, filters) inputs = as_nodes(data, offsets, filters)
@ -94,7 +94,7 @@ def adaptive_avg_pool(
:param data: The list of input nodes :param data: The list of input nodes
:param output_shape: the shape of spatial dimensions after the operation :param output_shape: the shape of spatial dimensions after the operation
@return: The new node performing AdaptiveAvgPool operation on the data :return: The new node performing AdaptiveAvgPool operation on the data
""" """
inputs = as_nodes(data, output_shape) inputs = as_nodes(data, output_shape)
return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs) return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs)
@ -111,7 +111,7 @@ def adaptive_max_pool(
:param data: The list of input nodes :param data: The list of input nodes
:param output_shape: the shape of spatial dimensions after the operation :param output_shape: the shape of spatial dimensions after the operation
:param index_element_type: Type of indices output. :param index_element_type: Type of indices output.
@return: The new node performing AdaptiveMaxPool operation on the data :return: The new node performing AdaptiveMaxPool operation on the data
""" """
inputs = as_nodes(data, output_shape) inputs = as_nodes(data, output_shape)
@ -158,7 +158,7 @@ def multiclass_nms(
:param background_class: Specifies the background class id, -1 meaning to keep all classes :param background_class: Specifies the background class id, -1 meaning to keep all classes
:param nms_eta: Specifies the eta parameter for adaptive NMS, in the closed range [0, 1.0] :param nms_eta: Specifies the eta parameter for adaptive NMS, in the closed range [0, 1.0]
:param normalized: Specifies whether boxes are normalized or not :param normalized: Specifies whether boxes are normalized or not
@return: The new node which performs MuticlassNms :return: The new node which performs MulticlassNms
""" """
inputs = as_nodes(boxes, scores) inputs = as_nodes(boxes, scores)
@ -218,7 +218,7 @@ def matrix_nms(
:param post_threshold: Specifies threshold to filter out boxes with low confidence score :param post_threshold: Specifies threshold to filter out boxes with low confidence score
after decaying after decaying
:param normalized: Specifies whether boxes are normalized or not :param normalized: Specifies whether boxes are normalized or not
@return: The new node which performs MatrixNms :return: The new node which performs MatrixNms
""" """
inputs = as_nodes(boxes, scores) inputs = as_nodes(boxes, scores)
@ -253,7 +253,7 @@ def gather(
indicate reverse indexing from the end indicate reverse indexing from the end
:param axis: axis along which elements are gathered :param axis: axis along which elements are gathered
:param batch_dims: number of batch dimensions :param batch_dims: number of batch dimensions
@return: The new node which performs Gather :return: The new node which performs Gather
""" """
inputs = as_nodes(data, indices, axis) inputs = as_nodes(data, indices, axis)
attributes = { attributes = {
@ -296,7 +296,7 @@ def max_pool(
starting at the provided axis. Defaults to 0. starting at the provided axis. Defaults to 0.
:param name: The optional name for the created output node. :param name: The optional name for the created output node.
returns The new node performing max pooling operation. :return: The new node performing max pooling operation.
""" """
if auto_pad is None: if auto_pad is None:
auto_pad = "explicit" auto_pad = "explicit"
@ -335,7 +335,7 @@ def random_uniform(
'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'. 'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'.
:param global_seed: Specifies global seed value. Required to be a positive integer or 0. :param global_seed: Specifies global seed value. Required to be a positive integer or 0.
:param op_seed: Specifies operational seed value. Required to be a positive integer or 0. :param op_seed: Specifies operational seed value. Required to be a positive integer or 0.
returns The new node which performs generation of random values from uniform distribution. :return: The new node which performs generation of random values from uniform distribution.
""" """
inputs = as_nodes(output_shape, min_val, max_val) inputs = as_nodes(output_shape, min_val, max_val)
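A hedged sketch, assuming the `openvino.runtime.opset8` factory module and a positional output type argument after the range constants:

.. code-block:: python

    # Illustrative only: opset8 import path and the positional output type are assumptions.
    import numpy as np
    import openvino.runtime.opset8 as ov_opset8

    shape = np.array([2, 3], dtype=np.int64)
    low = np.array(0.0, dtype=np.float32)
    high = np.array(1.0, dtype=np.float32)
    node = ov_opset8.random_uniform(shape, low, high, "f32",
                                    global_seed=100, op_seed=42)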
@ -370,7 +370,7 @@ def slice(
:param step: The node providing step values. :param step: The node providing step values.
:param axes: The optional node providing axes to slice, default [0, 1, ..., len(start)-1]. :param axes: The optional node providing axes to slice, default [0, 1, ..., len(start)-1].
:param name: The optional name for the created output node. :param name: The optional name for the created output node.
returns The new node performing Slice operation. :return: The new node performing Slice operation.
""" """
if axes is None: if axes is None:
inputs = as_nodes(data, start, stop, step) inputs = as_nodes(data, start, stop, step)
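A hedged sketch of a Slice node over one axis, assuming the `openvino.runtime.opset8` factory module:

.. code-block:: python

    # Illustrative only: opset8 import path and shape are assumptions.
    import numpy as np
    import openvino.runtime.opset8 as ov_opset8

    data = ov_opset8.parameter([10], name="data")
    start = np.array([1], dtype=np.int64)
    stop = np.array([9], dtype=np.int64)
    step = np.array([2], dtype=np.int64)
    node = ov_opset8.slice(data, start, stop, step)
    # Selects elements 1, 3, 5, 7 along axis 0; `axes` defaults to [0, ..., len(start) - 1].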
@ -392,7 +392,7 @@ def gather_nd(
:param data: N-D tensor with data for gathering :param data: N-D tensor with data for gathering
:param indices: K-D tensor of tuples with indices by which data is gathered :param indices: K-D tensor of tuples with indices by which data is gathered
:param batch_dims: Scalar value of batch dimensions :param batch_dims: Scalar value of batch dimensions
@return: The new node which performs GatherND :return: The new node which performs GatherND
""" """
inputs = as_nodes(data, indices) inputs = as_nodes(data, indices)
@ -413,7 +413,7 @@ def prior_box(
:param image_shape: Shape of image to which prior boxes are scaled. :param image_shape: Shape of image to which prior boxes are scaled.
:param attrs: The dictionary containing key, value pairs for attributes. :param attrs: The dictionary containing key, value pairs for attributes.
:param name: Optional name for the output node. :param name: Optional name for the output node.
returns Node representing prior box operation. :return: Node representing prior box operation.
Available attributes are: Available attributes are:
* min_size The minimum box size (in pixels). * min_size The minimum box size (in pixels).
Range of values: positive floating point numbers Range of values: positive floating point numbers
@ -524,7 +524,7 @@ def i420_to_bgr(
:param arg_u: The node providing U plane data. Required for separate planes. :param arg_u: The node providing U plane data. Required for separate planes.
:param arg_v: The node providing V plane data. Required for separate planes. :param arg_v: The node providing V plane data. Required for separate planes.
:param name: The optional name for the created output node. :param name: The optional name for the created output node.
returns The new node performing I420toBGR operation. :return: The new node performing I420toBGR operation.
""" """
if arg_u is None and arg_v is None: if arg_u is None and arg_v is None:
inputs = as_nodes(arg) inputs = as_nodes(arg)
@ -551,7 +551,7 @@ def i420_to_rgb(
:param arg_u: The node providing U plane data. Required for separate planes. :param arg_u: The node providing U plane data. Required for separate planes.
:param arg_v: The node providing V plane data. Required for separate planes. :param arg_v: The node providing V plane data. Required for separate planes.
:param name: The optional name for the created output node. :param name: The optional name for the created output node.
returns The new node performing I420toRGB operation. :return: The new node performing I420toRGB operation.
""" """
if arg_u is None and arg_v is None: if arg_u is None and arg_v is None:
inputs = as_nodes(arg) inputs = as_nodes(arg)
@ -576,7 +576,7 @@ def nv12_to_bgr(
:param arg: The node providing single or Y plane data. :param arg: The node providing single or Y plane data.
:param arg_uv: The node providing UV plane data. Required for separate planes. :param arg_uv: The node providing UV plane data. Required for separate planes.
:param name: The optional name for the created output node. :param name: The optional name for the created output node.
returns The new node performing NV12toBGR operation. :return: The new node performing NV12toBGR operation.
""" """
if arg_uv is None: if arg_uv is None:
inputs = as_nodes(arg) inputs = as_nodes(arg)
@ -597,7 +597,7 @@ def nv12_to_rgb(
:param arg: The node providing single or Y plane data. :param arg: The node providing single or Y plane data.
:param arg_uv: The node providing UV plane data. Required for separate planes. :param arg_uv: The node providing UV plane data. Required for separate planes.
:param name: The optional name for the created output node. :param name: The optional name for the created output node.
returns The new node performing NV12toRGB operation. :return: The new node performing NV12toRGB operation.
""" """
if arg_uv is None: if arg_uv is None:
inputs = as_nodes(arg) inputs = as_nodes(arg)
@ -626,7 +626,7 @@ def detection_output(
:param aux_class_preds: The 2D input tensor with additional class predictions information. :param aux_class_preds: The 2D input tensor with additional class predictions information.
:param aux_box_preds: The 2D input tensor with additional box predictions information. :param aux_box_preds: The 2D input tensor with additional box predictions information.
:param name: Optional name for the output node. :param name: Optional name for the output node.
returns Node representing DetectionOutput operation. :return: Node representing DetectionOutput operation.
Available attributes are: Available attributes are:
* background_label_id The background label id. * background_label_id The background label id.
Range of values: integer value Range of values: integer value
@ -751,6 +751,6 @@ def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
:param data: The tensor providing input data. :param data: The tensor providing input data.
:param axis: An axis along which Softmax should be calculated. Can be positive or negative. :param axis: An axis along which Softmax should be calculated. Can be positive or negative.
:param name: Optional name for the node. :param name: Optional name for the node.
returns The new node with softmax operation applied on each element. :return: The new node with softmax operation applied on each element.
""" """
return _get_node_factory_opset8().create("Softmax", [as_node(data)], {"axis": axis}) return _get_node_factory_opset8().create("Softmax", [as_node(data)], {"axis": axis})

View File

@ -202,7 +202,7 @@ void regclass_AsyncInferQueue(py::module m) {
py::arg("inputs"), py::arg("inputs"),
py::arg("userdata"), py::arg("userdata"),
R"( R"(
Run asynchronous inference using next available InferRequest. Run asynchronous inference using the next available InferRequest.
This function releases the GIL, so another Python thread can This function releases the GIL, so another Python thread can
work while this function runs in the background. work while this function runs in the background.
@ -262,8 +262,8 @@ void regclass_AsyncInferQueue(py::module m) {
}, },
R"( R"(
Sets unified callback on all InferRequests from queue's pool. Sets unified callback on all InferRequests from queue's pool.
Signature of such function should have two arguments, where The signature of such a function should have two arguments, where
first one is InferRequest object and second one is userdata the first one is the InferRequest object and the second one is the userdata
connected to InferRequest from the AsyncInferQueue's pool. connected to InferRequest from the AsyncInferQueue's pool.
.. code-block:: python .. code-block:: python
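A hedged sketch of such a callback; the model path, device name, and job count below are illustrative assumptions:

.. code-block:: python

    # Illustrative only: model path, device name, and job count are assumptions.
    from openvino.runtime import AsyncInferQueue, Core

    core = Core()
    compiled = core.compile_model("model.xml", "CPU")
    infer_queue = AsyncInferQueue(compiled, 4)

    def callback(request, userdata):
        # First argument: the finished InferRequest; second: the userdata
        # passed to start_async for that job.
        print(userdata, request.results)

    infer_queue.set_callback(callback)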

View File

@ -53,11 +53,11 @@ void regclass_CompiledModel(py::module m) {
py::arg("inputs"), py::arg("inputs"),
R"( R"(
Infers specified input(s) in synchronous mode. Infers specified input(s) in synchronous mode.
Blocks all methods of CompiledModel while request is running. Blocks all methods of CompiledModel while the request is running.
The method creates a new temporary InferRequest and runs inference on it. The method creates a new temporary InferRequest and runs inference on it.
It is advised to use dedicated InferRequest class for performance, It is advised to use a dedicated InferRequest class for performance,
optimizing workflows and creating advanced pipelines. optimizing workflows, and creating advanced pipelines.
:param inputs: Data to set on input tensors. :param inputs: Data to set on input tensors.
:type inputs: Dict[Union[int, str, openvino.runtime.ConstOutput], openvino.runtime.Tensor] :type inputs: Dict[Union[int, str, openvino.runtime.ConstOutput], openvino.runtime.Tensor]
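As a rough illustration of the synchronous call described here (the model path, device, and input data are assumptions made for the example):

.. code-block:: python

    import numpy as np
    from openvino.runtime import Core

    core = Core()
    compiled = core.compile_model(core.read_model("model.xml"), "CPU")  # hypothetical paths/device

    data = np.random.rand(1, 3, 224, 224).astype(np.float32)
    # Keys may be an int index, a tensor name (str), or a ConstOutput port.
    results = compiled.infer_new_request({0: data})
    # The CompiledModel is also callable and forwards to infer_new_request:
    results = compiled({0: data})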
@ -108,10 +108,10 @@ void regclass_CompiledModel(py::module m) {
R"( R"(
Exports the compiled model to bytes/output stream. Exports the compiled model to bytes/output stream.
Advanced version of `export_model`. It utilizes, streams from standard Advanced version of `export_model`. It utilizes streams from the standard
Python library `io`. Python library `io`.
Function performs flushing of the stream, writes to it and then rewinds Function performs flushing of the stream, writes to it, and then rewinds
the stream to the beginning (using seek(0)). the stream to the beginning (using seek(0)).
:param model_stream: A stream object to which the model will be serialized. :param model_stream: A stream object to which the model will be serialized.
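A small sketch of the stream-based export described above, assuming `compiled` is an already compiled model and the output file name is hypothetical:

.. code-block:: python

    import io

    user_stream = io.BytesIO()
    compiled.export_model(user_stream)  # writes, flushes, and rewinds the stream with seek(0)
    # The stream can now be read back or persisted to disk:
    with open("exported_model.blob", "wb") as f:  # hypothetical file name
        f.write(user_stream.read())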
@ -168,12 +168,12 @@ void regclass_CompiledModel(py::module m) {
R"( R"(
Gets runtime model information from a device. Gets runtime model information from a device.
This object (returned model) represents the internal device specific model This object (returned model) represents the internal device-specific model
which is optimized for particular accelerator. It contains device specific nodes, which is optimized for the particular accelerator. It contains device-specific nodes,
runtime information and can be used only to understand how the source model runtime information, and can be used only to understand how the source model
is optimized and which kernels, element types and layouts are selected. is optimized and which kernels, element types, and layouts are selected.
:return: Model containing Executable Graph information. :return: Model, containing Executable Graph information.
:rtype: openvino.runtime.Model :rtype: openvino.runtime.Model
)"); )");
@ -201,7 +201,7 @@ void regclass_CompiledModel(py::module m) {
py::arg("index"), py::arg("index"),
R"( R"(
Gets input of a compiled model identified by an index. Gets input of a compiled model identified by an index.
If an input with given index is not found, this method throws an exception. If the input with given index is not found, this method throws an exception.
:param index: An input index. :param index: An input index.
:type index: int :type index: int
@ -214,9 +214,9 @@ void regclass_CompiledModel(py::module m) {
py::arg("tensor_name"), py::arg("tensor_name"),
R"( R"(
Gets input of a compiled model identified by a tensor_name. Gets input of a compiled model identified by a tensor_name.
If an input with given tensor name is not found, this method throws an exception. If the input with given tensor name is not found, this method throws an exception.
:param tensor_name: An input tensor's name. :param tensor_name: An input tensor name.
:type tensor_name: str :type tensor_name: str
:return: A compiled model input. :return: A compiled model input.
:rtype: openvino.runtime.ConstOutput :rtype: openvino.runtime.ConstOutput
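A brief sketch of both input lookups described above; the tensor name is an assumption for the example and `compiled` is a compiled model:

.. code-block:: python

    first_input = compiled.input(0)        # by index; throws if the index is out of range
    named_input = compiled.input("data")   # by tensor name (hypothetical name); throws if not found
    print(first_input.get_any_name(), named_input.get_element_type())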
@ -235,7 +235,7 @@ void regclass_CompiledModel(py::module m) {
(ov::Output<const ov::Node>(ov::CompiledModel::*)() const) & ov::CompiledModel::output, (ov::Output<const ov::Node>(ov::CompiledModel::*)() const) & ov::CompiledModel::output,
R"( R"(
Gets a single output of a compiled model. Gets a single output of a compiled model.
If a model has more than one output, this method throws an exception. If the model has more than one output, this method throws an exception.
:return: A compiled model output. :return: A compiled model output.
:rtype: openvino.runtime.ConstOutput :rtype: openvino.runtime.ConstOutput
@ -246,7 +246,7 @@ void regclass_CompiledModel(py::module m) {
py::arg("index"), py::arg("index"),
R"( R"(
Gets output of a compiled model identified by an index. Gets output of a compiled model identified by an index.
If an output with given index is not found, this method throws an exception. If the output with given index is not found, this method throws an exception.
:param index: An output index. :param index: An output index.
:type index: int :type index: int
@ -259,9 +259,9 @@ void regclass_CompiledModel(py::module m) {
py::arg("tensor_name"), py::arg("tensor_name"),
R"( R"(
Gets output of a compiled model identified by a tensor_name. Gets output of a compiled model identified by a tensor_name.
If an output with given tensor name is not found, this method throws an exception. If the output with given tensor name is not found, this method throws an exception.
:param tensor_name: An output tensor's name. :param tensor_name: An output tensor name.
:type tensor_name: str :type tensor_name: str
:return: A compiled model output. :return: A compiled model output.
:rtype: openvino.runtime.ConstOutput :rtype: openvino.runtime.ConstOutput
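The corresponding output lookups, sketched under the same assumptions:

.. code-block:: python

    single_output = compiled.output()        # only valid for single-output models
    indexed_output = compiled.output(0)      # by index
    named_output = compiled.output("prob")   # by tensor name (hypothetical name)
    print(single_output.get_any_name())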

View File

@ -27,7 +27,7 @@ void regclass_Core(py::module m) {
py::class_<ov::Core, std::shared_ptr<ov::Core>> cls(m, "Core"); py::class_<ov::Core, std::shared_ptr<ov::Core>> cls(m, "Core");
cls.doc() = cls.doc() =
"openvino.runtime.Core class represents OpenVINO runtime Core entity. User applications can create several " "openvino.runtime.Core class represents OpenVINO runtime Core entity. User applications can create several "
"Core class instances, but in this case the underlying plugins are created multiple times and not shared " "Core class instances, but in this case, the underlying plugins are created multiple times and not shared "
"between several Core instances. The recommended way is to have a single Core instance per application."; "between several Core instances. The recommended way is to have a single Core instance per application.";
cls.def(py::init<const std::string&>(), py::arg("xml_config_file") = ""); cls.def(py::init<const std::string&>(), py::arg("xml_config_file") = "");
@ -82,12 +82,12 @@ void regclass_Core(py::module m) {
py::arg("config") = py::dict(), py::arg("config") = py::dict(),
R"( R"(
Creates a compiled model from a source model object. Creates a compiled model from a source model object.
Users can create as many compiled models as they need and use them simultaneously Users can create as many compiled models as they need, and use them simultaneously
(up to the limitation of the hardware resources). (up to the limitation of the hardware resources).
:param model: Model acquired from read_model function. :param model: Model acquired from read_model function.
:type model: openvino.runtime.Model :type model: openvino.runtime.Model
:param device_name: Name of the device to load the model to. :param device_name: Name of the device which will load the model.
:type device_name: str :type device_name: str
:param properties: Optional dict of pairs: (property name, property value) relevant only for this load operation. :param properties: Optional dict of pairs: (property name, property value) relevant only for this load operation.
:type properties: dict :type properties: dict
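A minimal sketch of compiling a model as described here; the model path, device name, and property are illustrative assumptions:

.. code-block:: python

    from openvino.runtime import Core

    core = Core()
    model = core.read_model("model.xml")  # hypothetical IR path
    # Explicit device plus an optional per-compilation property:
    compiled_cpu = core.compile_model(model, "CPU", {"PERFORMANCE_HINT": "LATENCY"})
    # Without a device name the AUTO plugin picks the device, as described below:
    compiled_auto = core.compile_model(model)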
@ -106,7 +106,7 @@ void regclass_Core(py::module m) {
py::arg("config") = py::dict(), py::arg("config") = py::dict(),
R"( R"(
Creates and loads a compiled model from a source model to the default OpenVINO device Creates and loads a compiled model from a source model to the default OpenVINO device
selected by AUTO plugin. Users can create as many compiled models as they need and use selected by AUTO plugin. Users can create as many compiled models as they need, and use
them simultaneously (up to the limitation of the hardware resources). them simultaneously (up to the limitation of the hardware resources).
:param model: Model acquired from read_model function. :param model: Model acquired from read_model function.
@ -216,8 +216,8 @@ void regclass_Core(py::module m) {
:param model: A path to a model in IR / ONNX / PDPD format. :param model: A path to a model in IR / ONNX / PDPD format.
:type model: str :type model: str
:param weights: A path to a data file For IR format (*.bin): if path is empty, :param weights: A path to a data file. For IR format (*.bin): if the path is empty,
will try to read bin file with the same name as xml and if bin it tries to read a bin file with the same name as xml and if the bin
file with the same name was not found, will load IR without weights. file with the same name was not found, loads IR without weights.
For ONNX format (*.onnx): weights parameter is not used. For ONNX format (*.onnx): weights parameter is not used.
For PDPD format (*.pdmodel) weights parameter is not used. For PDPD format (*.pdmodel) weights parameter is not used.
:type weights: str :type weights: str
@ -255,9 +255,8 @@ void regclass_Core(py::module m) {
:param model: A string with model in IR / ONNX / PDPD format. :param model: A string with model in IR / ONNX / PDPD format.
:type model: str :type model: str
:param weights: A path to a data file For IR format (*.bin): if path is empty, :param weights: A path to a data file. For IR format (*.bin): if the path is empty,
will try to read bin file with the same name as xml and if bin it tries to read a bin file with the same name as xml and if the bin
file with the same name was not found, will load IR without weights. file with the same name was not found, loads IR without weights. For ONNX format (*.onnx): weights parameter is not used.
For ONNX format (*.onnx): weights parameter is not used.
For PDPD format (*.pdmodel) weights parameter is not used. For PDPD format (*.pdmodel) weights parameter is not used.
:type weights: str :type weights: str
:return: A model. :return: A model.
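A sketch of the read_model variants described above; the file names are assumptions:

.. code-block:: python

    from openvino.runtime import Core

    core = Core()
    # IR: the weights path may be omitted, a .bin with the same name is looked up.
    model_ir = core.read_model("model.xml")
    model_ir_explicit = core.read_model("model.xml", "model.bin")
    # ONNX / PDPD: the weights argument is not used.
    model_onnx = core.read_model("model.onnx")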
@ -280,10 +279,10 @@ void regclass_Core(py::module m) {
R"( R"(
Imports a compiled model from a previously exported one. Imports a compiled model from a previously exported one.
:param model_stream: Input stream containing a model previously exported using export_model method. :param model_stream: Input stream containing a model previously exported using the export_model method.
:type model_stream: bytes :type model_stream: bytes
:param device_name: Name of device to import compiled model for. :param device_name: Name of device to which compiled model is imported.
Note, if device_name device was not used to compile the original mode, an exception is thrown. Note: if device_name is not used to compile the original model, an exception is thrown.
:type device_name: str :type device_name: str
:param properties: Optional map of pairs: (property name, property value) relevant only for this load operation. :param properties: Optional map of pairs: (property name, property value) relevant only for this load operation.
:type properties: dict, optional :type properties: dict, optional
@ -332,10 +331,10 @@ void regclass_Core(py::module m) {
Python library `io`. Python library `io`.
:param model_stream: Input stream containing a model previously exported using export_model method. :param model_stream: Input stream containing a model previously exported using the export_model method.
:type model_stream: io.BytesIO :type model_stream: io.BytesIO
:param device_name: Name of device to import compiled model for. :param device_name: Name of device to which compiled model is imported.
Note, if device_name device was not used to compile the original mode, an exception is thrown. Note: if device_name is not used to compile the original model, an exception is thrown.
:type device_name: str :type device_name: str
:param properties: Optional map of pairs: (property name, property value) relevant only for this load operation. :param properties: Optional map of pairs: (property name, property value) relevant only for this load operation.
:type properties: dict, optional :type properties: dict, optional
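Putting export and import together, a rough sketch; the model path and device name are assumptions, and the original model must have been compiled for the same device:

.. code-block:: python

    import io
    from openvino.runtime import Core

    core = Core()
    compiled = core.compile_model(core.read_model("model.xml"), "CPU")

    stream = io.BytesIO()
    compiled.export_model(stream)
    # Re-create the compiled model later without recompiling the source model:
    restored = core.import_model(stream, "CPU")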

View File

@ -51,8 +51,8 @@ void regclass_InferRequest(py::module m) {
py::arg("tensors"), py::arg("tensors"),
R"( R"(
Sets batch of tensors for input data to infer by tensor name. Sets batch of tensors for input data to infer by tensor name.
Model input shall have batch dimension and number of tensors shall Model input needs to have a batch dimension and the number of tensors needs to be
match with batch size. Current version supports set tensors to model inputs only. matched with the batch size. The current version supports setting tensors to model inputs only.
In case if `tensor_name` is associated with output (or any other non-input node), If `tensor_name` is associated with an output (or any other non-input node),
an exception will be thrown. an exception will be thrown.
@ -60,7 +60,7 @@ void regclass_InferRequest(py::module m) {
:type tensor_name: str :type tensor_name: str
:param tensors: Input tensors for batched infer request. The type of each tensor :param tensors: Input tensors for batched infer request. The type of each tensor
must match the model input element type and shape (except batch dimension). must match the model input element type and shape (except batch dimension).
Total size of tensors shall match with input's size. The total size of tensors needs to match the input's size.
:type tensors: List[openvino.runtime.Tensor] :type tensors: List[openvino.runtime.Tensor]
)"); )");
@ -73,8 +73,8 @@ void regclass_InferRequest(py::module m) {
py::arg("tensors"), py::arg("tensors"),
R"( R"(
Sets batch of tensors for input data to infer by tensor name. Sets batch of tensors for input data to infer by port.
Model input shall have batch dimension and number of tensors shall Model input needs to have a batch dimension and the number of tensors needs to be
match with batch size. Current version supports set tensors to model inputs only. matched with the batch size. The current version supports setting tensors to model inputs only.
In case if `port` is associated with output (or any other non-input node), If `port` is associated with an output (or any other non-input node),
an exception will be thrown. an exception will be thrown.
@ -83,7 +83,7 @@ void regclass_InferRequest(py::module m) {
:type port: openvino.runtime.ConstOutput :type port: openvino.runtime.ConstOutput
:param tensors: Input tensors for batched infer request. The type of each tensor :param tensors: Input tensors for batched infer request. The type of each tensor
must match the model input element type and shape (except batch dimension). must match the model input element type and shape (except batch dimension).
Total size of tensors shall match with input's size. The total size of tensors needs to match the input's size.
:type tensors: List[openvino.runtime.Tensor] :type tensors: List[openvino.runtime.Tensor]
:rtype: None :rtype: None
)"); )");
@ -130,12 +130,12 @@ void regclass_InferRequest(py::module m) {
py::arg("tensors"), py::arg("tensors"),
R"( R"(
Sets batch of tensors for single input data. Sets batch of tensors for single input data.
Model input shall have batch dimension and number of `tensors` Model input needs to have a batch dimension and the number of `tensors`
shall match with batch size. needs to match the batch size.
:param tensors: Input tensors for batched infer request. The type of each tensor :param tensors: Input tensors for batched infer request. The type of each tensor
must match the model input element type and shape (except batch dimension). must match the model input element type and shape (except batch dimension).
Total size of tensors shall match with input's size. The total size of tensors needs to match the input's size.
:type tensors: List[openvino.runtime.Tensor] :type tensors: List[openvino.runtime.Tensor]
)"); )");
@ -148,14 +148,14 @@ void regclass_InferRequest(py::module m) {
py::arg("tensors"), py::arg("tensors"),
R"( R"(
Sets batch of tensors for single input data to infer by index. Sets batch of tensors for single input data to infer by index.
Model input shall have batch dimension and number of `tensors` Model input needs to have a batch dimension and the number of `tensors`
shall match with batch size. needs to match the batch size.
:param idx: Index of input tensor. :param idx: Index of input tensor.
:type idx: int :type idx: int
:param tensors: Input tensors for batched infer request. The type of each tensor :param tensors: Input tensors for batched infer request. The type of each tensor
must match the model input element type and shape (except batch dimension). must match the model input element type and shape (except batch dimension).
Total size of tensors shall match with input's size. The total size of tensors needs to match the input's size.
)"); )");
cls.def( cls.def(
@ -513,8 +513,8 @@ void regclass_InferRequest(py::module m) {
return self._request.get_profiling_info(); return self._request.get_profiling_info();
}, },
R"( R"(
Queries performance measures per layer to get feedback of what Queries performance measures per layer to get feedback on what
is the most time consuming operation, not all plugins provide is the most time-consuming operation. Not all plugins provide
meaningful data. meaningful data.
:return: List of profiling information for operations in model. :return: List of profiling information for operations in model.
@ -616,7 +616,7 @@ void regclass_InferRequest(py::module m) {
return self._request.get_profiling_info(); return self._request.get_profiling_info();
}, },
R"( R"(
Performance measures per layer to get feedback of what is the most time consuming operation. Performance is measured per layer to get feedback on the most time-consuming operation.
Not all plugins provide meaningful data! Not all plugins provide meaningful data!
:return: Inference time. :return: Inference time.
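Illustrative use of the profiling query described here, assuming `request` has already run inference; the field names follow the C++ ov::ProfilingInfo structure exposed by the binding:

.. code-block:: python

    # Each entry describes one executed operation; plugins that do not
    # collect profiling data may return little or nothing meaningful.
    for info in request.get_profiling_info():
        print(info.node_name, info.node_type, info.real_time)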

View File

@ -146,7 +146,7 @@ void regmodule_offline_transformations(py::module m) {
py::arg("weights_path"), py::arg("weights_path"),
py::arg("version") = "UNSPECIFIED", py::arg("version") = "UNSPECIFIED",
R"( R"(
Serialize given model into IR. The generated .xml and .bin files will be save Serialize given model into IR. The generated .xml and .bin files will be saved
into provided paths. into provided paths.
:param model: model which will be converted to IR representation :param model: Model which will be converted to IR representation.
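A rough sketch of calling this serialization pass; the source model and output file names are assumptions, and the arguments are passed positionally:

.. code-block:: python

    from openvino.offline_transformations import serialize
    from openvino.runtime import Core

    model = Core().read_model("model.onnx")  # hypothetical source model
    serialize(model, "converted.xml", "converted.bin")  # writes the IR to the given paths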

View File

@ -26,10 +26,10 @@ void regclass_Tensor(py::module m) {
:param array: Array to create tensor from. :param array: Array to create tensor from.
:type array: numpy.array :type array: numpy.array
:param shared_memory: If `True` this Tensor memory is being shared with a host, :param shared_memory: If `True`, this Tensor memory is being shared with a host,
that means the responsibility of keeping host memory is that means the responsibility of keeping host memory is
on the side of a user. Any action performed on the host on the side of a user. Any action performed on the host
memory will be reflected on this Tensor's memory! memory is reflected on this Tensor's memory!
If `False`, data is being copied to this Tensor. If `False`, data is being copied to this Tensor.
Requires data to be C_CONTIGUOUS if `True`. Requires data to be C_CONTIGUOUS if `True`.
:type shared_memory: bool :type shared_memory: bool
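A short sketch of the two sharing modes described for this constructor; the array content and shape are arbitrary:

.. code-block:: python

    import numpy as np
    from openvino.runtime import Tensor

    arr = np.zeros((1, 3, 2, 2), dtype=np.float32)  # must be C_CONTIGUOUS for sharing
    shared = Tensor(arr, shared_memory=True)  # writes to `arr` are reflected in the tensor
    copied = Tensor(arr)                       # default: data is copied into the tensor
    arr[0, 0, 0, 0] = 42.0
    print(shared.data[0, 0, 0, 0], copied.data[0, 0, 0, 0])  # 42.0 vs 0.0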
@ -43,8 +43,8 @@ void regclass_Tensor(py::module m) {
R"( R"(
Another Tensor's special constructor. Another Tensor's special constructor.
It take an array or slice of it and shape that will be It takes an array or slice of it, and shape that will be
selected starting from the first element of given array/slice. selected, starting from the first element of the given array/slice.
Please use it only in advanced cases if necessary! Please use it only in advanced cases if necessary!
:param array: Underlaying methods will retrieve pointer on first element :param array: Underlying methods will retrieve a pointer to the first element

View File

@ -631,7 +631,7 @@ void regclass_graph_Model(py::module m) {
Return -1 if parameter not matched. Returns -1 if the parameter is not matched.
:param parameter: Parameter which index is to be found. :param parameter: Parameter, which index is to be found.
:type parameter: op.Parameter :type parameter: op.Parameter
:return: Index for parameter :return: Index for parameter
:rtype: int :rtype: int
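For example, assuming `model` is an openvino.runtime.Model:

.. code-block:: python

    param = model.get_parameters()[0]
    idx = model.get_parameter_index(param)  # 0 here; -1 if the parameter is not in the model
    print(idx)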

View File

@ -101,7 +101,7 @@ void regclass_graph_Output(py::module m, std::string typestring)
output.def("get_target_inputs", output.def("get_target_inputs",
&ov::Output<VT>::get_target_inputs, &ov::Output<VT>::get_target_inputs,
R"( R"(
A set containing handles for all inputs targeted by the output A set containing handles for all inputs, targeted by the output,
referenced by this output handle. referenced by this output handle.
:return: Set of Inputs. :return: Set of Inputs.
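A quick sketch of walking consumers through this handle, assuming `model` is an openvino.runtime.Model whose graph is being inspected:

.. code-block:: python

    for op in model.get_ordered_ops():
        for out in op.outputs():
            for consumer in out.get_target_inputs():
                print(op.get_friendly_name(), "->", consumer.get_node().get_friendly_name())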

View File

@ -27,7 +27,7 @@ void regmodule_graph_util(py::module m) {
:param index: Output node. :param index: Output node.
:type index: openvino.runtime.Output :type index: openvino.runtime.Output
:return: If it succeeded to calculate both bounds and :return: If it succeeded in calculating both bounds and
they are the same returns Constant operation they are the same, returns Constant operation
from the resulting bound, otherwise Null. from the resulting bound, otherwise Null.
:rtype: openvino.runtime.op.Constant or openvino.runtime.Node :rtype: openvino.runtime.op.Constant or openvino.runtime.Node
)"); )");