fix comments ngraph api - master (#3519)

* fix comments ngraph api
* remove whitespace
* fixes

Co-authored-by: Nikolay Tyukaev <ntyukaev_lo@jenkins.inn.intel.com>

parent a6ea479688
commit 0ecc360664
@@ -37,10 +37,7 @@ limitations under the License.
     <tab type="user" title="DL Streamer API Reference" url="https://openvinotoolkit.github.io/dlstreamer_gst/"/>
     <tab type="user" title="nGraph С++ API Reference" url="../ngraph_cpp_api/annotated.html"/>
     <!-- nGraph Python API Reference -->
-    <tab type="files" visible="yes" title="nGraph Python API Reference">
-      <tab type="filelist" visible="yes" title="nGraph Python API Reference" intro=""/>
-      <tab type="globals" visible="yes" title="" intro=""/>
-    </tab>
+    <tab type="filelist" visible="yes" title="nGraph Python API Reference" intro=""/>
   </tab>
   <!-- Chinese docs -->
   <tab type="user" title="中文文件" url="https://docs.openvinotoolkit.org/cn/index.html"/>
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
-"""! ngraph module namespace, exposing factory functions for all ops and other classes."""
+"""ngraph module namespace, exposing factory functions for all ops and other classes."""
 # noqa: F401

 from pkg_resources import get_distribution, DistributionNotFound
@@ -13,16 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
-"""! ngraph exceptions hierarchy. All exceptions are descendants of NgraphError."""
+"""ngraph exceptions hierarchy. All exceptions are descendants of NgraphError."""


 class NgraphError(Exception):
-    """! Base class for Ngraph exceptions."""
+    """Base class for Ngraph exceptions."""


 class UserInputError(NgraphError):
-    """! User provided unexpected input."""
+    """User provided unexpected input."""


 class NgraphTypeError(NgraphError, TypeError):
-    """! Type mismatch error."""
+    """Type mismatch error."""
@@ -13,14 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
-"""! nGraph helper functions."""
+"""nGraph helper functions."""

 from ngraph.impl import Function
 from openvino.inference_engine import IENetwork


 def function_from_cnn(cnn_network: IENetwork) -> Function:
-    """! Get nGraph function from Inference Engine CNN network."""
+    """Get nGraph function from Inference Engine CNN network."""
     capsule = cnn_network._get_function_capsule()
     ng_function = Function.from_capsule(capsule)
     return ng_function
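For readers coming to this helper from the diff above, a minimal usage sketch follows (not part of the commit; the IR file names and the top-level `ng.function_from_cnn` re-export are assumptions):

```python
import ngraph as ng
from openvino.inference_engine import IECore

ie = IECore()
# Hypothetical IR file names -- any readable IR model would do here.
cnn_network = ie.read_network(model="model.xml", weights="model.bin")
ng_function = ng.function_from_cnn(cnn_network)
print(len(ng_function.get_ordered_ops()))  # number of nGraph nodes in the converted graph
```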
@@ -24,7 +24,7 @@ import numpy as np

 from _pyngraph.op import Constant

-""" Retrieve Constant inner data.
+"""Retrieve Constant inner data.

    Internally uses PyBind11 Numpy's buffer protocol.

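The bare string above documents the numpy accessor attached to `Constant`. A hedged sketch of the round trip it enables (the `ng.constant` factory and the `get_data()` name are taken from the surrounding API, not from this hunk):

```python
import numpy as np
import ngraph as ng

values = np.arange(6, dtype=np.float32).reshape(2, 3)
const_node = ng.constant(values)   # Constant node wrapping the array
recovered = const_node.get_data()  # copied back out through the buffer protocol
assert np.array_equal(recovered, values)
```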
File diff suppressed because it is too large
@@ -14,7 +14,7 @@
 # limitations under the License.
 # ******************************************************************************

-"""! Factory functions for all ngraph ops."""
+"""Factory functions for all ngraph ops."""
 from typing import Callable, Iterable, List, Optional, Set, Union

 import numpy as np
@@ -66,7 +66,7 @@ def batch_to_space(
     crops_end: NodeInput,
     name: Optional[str] = None,
 ) -> Node:
-    """! Perform BatchToSpace operation on the input tensor.
+    """Perform BatchToSpace operation on the input tensor.

     BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions.

@@ -84,14 +84,13 @@

 @unary_op
 def gelu(node: NodeInput, name: Optional[str] = None) -> Node:
-    r"""! Perform Gaussian Error Linear Unit operation element-wise on data from input node.
+    r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node.

     Computes GELU function:

     \f[ f(x) = 0.5\cdot x\cdot(1 + erf( \dfrac{x}{\sqrt{2}}) \f]

-    For more information refer to:
-    `Gaussian Error Linear Unit (GELU) <https://arxiv.org/pdf/1606.08415.pdf>`_
+    For more information refer to [Gaussian Error Linear Unit (GELU)](https://arxiv.org/pdf/1606.08415.pdf>)

     @param node: Input tensor. One of: input node, array or scalar.
     @param name: Optional output node name.
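For reference, the GELU formula quoted in this docstring can be reproduced directly in numpy/scipy; a purely illustrative sketch, independent of the nGraph op:

```python
import numpy as np
from scipy.special import erf

def gelu_reference(x: np.ndarray) -> np.ndarray:
    # f(x) = 0.5 * x * (1 + erf(x / sqrt(2))), as in the docstring formula
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

print(gelu_reference(np.array([-1.0, 0.0, 1.0])))
```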
@@ -108,7 +107,7 @@ def mvn(
     eps: float = 1e-9,
     name: str = None,
 ) -> Node:
-    r"""! Perform Mean Variance Normalization operation on data from input node.
+    r"""Perform Mean Variance Normalization operation on data from input node.

     Computes MVN on the input tensor `data` (called `X`) using formula:

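The MVN formula referenced here normalizes the tensor to zero mean and unit variance; a hedged numpy sketch of the whole-tensor variant (the real op can restrict normalization to chosen axes and channels):

```python
import numpy as np

def mvn_reference(x: np.ndarray, eps: float = 1e-9) -> np.ndarray:
    # (X - mean(X)) / sqrt(var(X) + eps), computed over the whole tensor here
    return (x - x.mean()) / np.sqrt(x.var() + eps)
```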
@@ -131,7 +130,7 @@

 @nameable_op
 def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node:
-    """! Return a node which produces the ReorgYolo operation.
+    """Return a node which produces the ReorgYolo operation.

     @param input: Input data
     @param stride: Stride to reorganize input by
@@ -150,7 +149,7 @@ def roi_pooling(
     method: str,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which produces an ROIPooling operation.
+    """Return a node which produces an ROIPooling operation.

     @param input: Input feature map {N, C, ...}
     @param coords: Coordinates of bounding boxes
@@ -175,7 +174,7 @@ def space_to_batch(
     pads_end: NodeInput,
     name: Optional[str] = None,
 ) -> Node:
-    """! Perform SpaceToBatch operation on the input tensor.
+    """Perform SpaceToBatch operation on the input tensor.

     SpaceToBatch permutes data tensor blocks of spatial data into batch dimension.
     The operator returns a copy of the input tensor where values from spatial blocks dimensions
@@ -14,7 +14,7 @@
 # limitations under the License.
 # ******************************************************************************

-"""! Factory functions for all ngraph ops."""
+"""Factory functions for all ngraph ops."""
 from typing import Callable, Iterable, List, Optional, Set, Union

 import numpy as np
@@ -60,7 +60,7 @@ _get_node_factory_opset3 = partial(_get_node_factory, "opset3")

 @nameable_op
 def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
-    """! Return a node which produces the Assign operation.
+    """Return a node which produces the Assign operation.

     @param new_value: Node producing a value to be assigned to a variable.
     @param variable_id: Id of a variable to be updated.
@@ -82,7 +82,7 @@ def broadcast(
     broadcast_spec: str = "NUMPY",
     name: Optional[str] = None,
 ) -> Node:
-    """! Create a node which broadcasts the input node's values along specified axes to a desired shape.
+    """Create a node which broadcasts the input node's values along specified axes to a desired shape.

     @param data: The node with input tensor data.
     @param target_shape: The node with a new shape we want to broadcast tensor to.
@@ -109,7 +109,7 @@ def bucketize(
     with_right_bound: bool = True,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which produces the Bucketize operation.
+    """Return a node which produces the Bucketize operation.

     @param data: Input data to bucketize
     @param buckets: 1-D of sorted unique boundaries for buckets
@@ -134,7 +134,7 @@ def cum_sum(
     reverse: bool = False,
     name: Optional[str] = None,
 ) -> Node:
-    """! Construct a cumulative summation operation.
+    """Construct a cumulative summation operation.

     @param arg: The tensor to be summed.
     @param axis: zero dimension tensor specifying axis position along which sum will be performed.
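The semantics of `cum_sum` mirror numpy's cumulative sum along a single axis; an illustrative comparison (the `exclusive` and `reverse` flags are only described in comments):

```python
import numpy as np

data = np.array([[1, 2, 3], [4, 5, 6]])
print(np.cumsum(data, axis=1))  # [[1 3 6] [4 9 15]] -- the default inclusive scan
# exclusive=True would shift the scan so each element excludes its own value;
# reverse=True would accumulate from the end of the axis instead.
```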
@@ -156,7 +156,7 @@ def embedding_bag_offsets_sum(
     per_sample_weights: Optional[NodeInput] = None,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performs sums of bags of embeddings without the intermediate embeddings.
+    """Return a node which performs sums of bags of embeddings without the intermediate embeddings.

     @param emb_table: Tensor containing the embedding lookup table.
     @param indices: Tensor with indices.
@@ -183,7 +183,7 @@ def embedding_bag_packed_sum(
     per_sample_weights: Optional[NodeInput] = None,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return an EmbeddingBagPackedSum node.
+    """Return an EmbeddingBagPackedSum node.

     EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given
     input tensor with a row (from the weights matrix) at that index
@@ -211,7 +211,7 @@ def embedding_segments_sum(
     per_sample_weights: Optional[NodeInput] = None,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return an EmbeddingSegmentsSum node.
+    """Return an EmbeddingSegmentsSum node.

     EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given
     input tensor with a row (from the weights matrix) at that index
@@ -248,7 +248,7 @@ def extract_image_patches(
     auto_pad: str,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which produces the ExtractImagePatches operation.
+    """Return a node which produces the ExtractImagePatches operation.

     @param image: 4-D Input data to extract image patches.
     @param sizes: Patch size in the format of [size_rows, size_cols].
@@ -280,7 +280,7 @@ def gru_cell(
     linear_before_reset: bool = False,
     name: Optional[str] = None,
 ) -> Node:
-    """! Perform GRUCell operation on the tensor from input node.
+    """Perform GRUCell operation on the tensor from input node.

     GRUCell represents a single GRU Cell that computes the output
     using the formula described in the paper: https://arxiv.org/abs/1406.1078
@@ -342,7 +342,7 @@ def non_max_suppression(
     output_type: str = "i64",
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performs NonMaxSuppression.
+    """Return a node which performs NonMaxSuppression.

     @param boxes: Tensor with box coordinates.
     @param scores: Tensor with box scores.
@@ -375,7 +375,7 @@

 @nameable_op
 def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = None,) -> Node:
-    """! Return the indices of the elements that are non-zero.
+    """Return the indices of the elements that are non-zero.

     @param data: Input data.
     @param output_type: Output tensor type.
@@ -391,7 +391,7 @@ def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = No

 @nameable_op
 def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
-    """! Return a node which produces the Assign operation.
+    """Return a node which produces the Assign operation.

     @param init_value: Node producing a value to be returned instead of an unassigned variable.
     @param variable_id: Id of a variable to be read.
@@ -419,7 +419,7 @@ def rnn_cell(
     clip: float = 0.0,
     name: Optional[str] = None,
 ) -> Node:
-    """! Perform RNNCell operation on tensor from input node.
+    """Perform RNNCell operation on tensor from input node.

     It follows notation and equations defined as in ONNX standard:
     https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN
@@ -475,7 +475,7 @@ def roi_align(
     mode: str,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performs ROIAlign.
+    """Return a node which performs ROIAlign.

     @param data: Input data.
     @param rois: RoIs (Regions of Interest) to pool over.
@@ -509,23 +509,23 @@ def scatter_elements_update(
     axis: NodeInput,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which produces a ScatterElementsUpdate operation.
+    """Return a node which produces a ScatterElementsUpdate operation.

+    @param data: The input tensor to be updated.
+    @param indices: The tensor with indexes which will be updated.
+    @param updates: The tensor with update values.
+    @param axis: The axis for scatter.
+    @return ScatterElementsUpdate node
+
     ScatterElementsUpdate creates a copy of the first input tensor with updated elements
     specified with second and third input tensors.


     For each entry in `updates`, the target index in `data` is obtained by combining
     the corresponding entry in `indices` with the index of the entry itself: the
     index-value for dimension equal to `axis` is obtained from the value of the
     corresponding entry in `indices` and the index-value for dimension not equal
     to `axis` is obtained from the index of the entry itself.
-
-    @param data: The input tensor to be updated.
-    @param indices: The tensor with indexes which will be updated.
-    @param updates: The tensor with update values.
-    @param axis: The axis for scatter.
-    @return ScatterElementsUpdate node
     """
     return _get_node_factory_opset3().create(
         "ScatterElementsUpdate", as_nodes(data, indices, updates, axis)
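The indexing rule spelled out in the docstring above can be mirrored with a small numpy loop; an illustrative reference for the 2-D case only (not the library implementation):

```python
import numpy as np

def scatter_elements_update_ref(data, indices, updates, axis=0):
    # For axis == 0: out[indices[i, j], j] = updates[i, j]; for axis == 1 the roles swap.
    out = data.copy()
    for i in range(indices.shape[0]):
        for j in range(indices.shape[1]):
            target = [i, j]
            target[axis] = indices[i, j]
            out[tuple(target)] = updates[i, j]
    return out

data = np.zeros((3, 3), dtype=np.float32)
indices = np.array([[1, 0, 2], [0, 2, 1]])
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)
print(scatter_elements_update_ref(data, indices, updates, axis=0))
```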
@@ -536,7 +536,7 @@ def scatter_elements_update(
 def scatter_update(
     data: Node, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None
 ) -> Node:
-    """! Return a node which produces a ScatterUpdate operation.
+    """Return a node which produces a ScatterUpdate operation.

     ScatterUpdate sets new values to slices from data addressed by indices.

@@ -554,7 +554,7 @@ def scatter_update(

 @nameable_op
 def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node:
-    """! Return a node which produces a tensor containing the shape of its input data.
+    """Return a node which produces a tensor containing the shape of its input data.

     @param data: The tensor containing the input data.
     @param output_type: Output element type.
@@ -569,7 +569,17 @@ def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = No

 @nameable_op
 def shuffle_channels(data: Node, axis: int, groups: int, name: Optional[str] = None) -> Node:
-    """! Perform permutation on data in the channel dimension of the input tensor.
+    """Perform permutation on data in the channel dimension of the input tensor.

+    @param data: The node with input tensor.
+    @param axis: Channel dimension index in the data tensor.
+                 A negative value means that the index should be calculated
+                 from the back of the input data shape.
+    @param group: The channel dimension specified by the axis parameter
+                  should be split into this number of groups.
+    @param name: Optional output node name.
+    @return The new node performing a permutation on data in the channel dimension
+            of the input tensor.
+
     The operation is the equivalent with the following transformation of the input tensor
     `data` of shape [N, C, H, W]:
@@ -582,7 +592,7 @@ def shuffle_channels(data: Node, axis: int, groups: int, name: Optional[str] = N

     For example:

-    ~~~~~~~~~~~~~{.py}
+    @code{.py}
     Inputs: tensor of shape [1, 6, 2, 2]

     data = [[[[ 0., 1.], [ 2., 3.]],
@@ -603,17 +613,7 @@ def shuffle_channels(data: Node, axis: int, groups: int, name: Optional[str] = N
             [[ 4., 5.], [ 6., 7.]],
             [[12., 13.], [14., 15.]],
             [[20., 21.], [22., 23.]]]]
-    ~~~~~~~~~~~~~
-
-    @param data: The node with input tensor.
-    @param axis: Channel dimension index in the data tensor.
-                 A negative value means that the index should be calculated
-                 from the back of the input data shape.
-    @param group: The channel dimension specified by the axis parameter
-                  should be split into this number of groups.
-    @param name: Optional output node name.
-    @return The new node performing a permutation on data in the channel dimension
-            of the input tensor.
+    @endcode
     """
     return _get_node_factory_opset3().create(
         "ShuffleChannels", [as_node(data)], {"axis": axis, "groups": groups}
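The [N, C, H, W] transformation described in this docstring is the usual reshape/transpose channel shuffle; a numpy sketch that reproduces the worked example above (illustrative only):

```python
import numpy as np

def shuffle_channels_ref(data: np.ndarray, groups: int) -> np.ndarray:
    # Split C into (groups, C // groups), swap the two group axes, then merge back.
    n, c, h, w = data.shape
    return (data.reshape(n, groups, c // groups, h, w)
                .transpose(0, 2, 1, 3, 4)
                .reshape(n, c, h, w))

data = np.arange(24, dtype=np.float32).reshape(1, 6, 2, 2)
print(shuffle_channels_ref(data, groups=3)[0, :, 0, 0])  # [ 0.  8. 16.  4. 12. 20.]
```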
@@ -630,7 +630,7 @@ def topk(
     index_element_type: str = "i32",
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performs TopK.
+    """Return a node which performs TopK.

     @param data: Input data.
     @param k: K.
@@ -14,7 +14,7 @@
 # limitations under the License.
 # ******************************************************************************

-"""! Factory functions for all ngraph ops."""
+"""Factory functions for all ngraph ops."""
 from typing import Callable, Iterable, List, Optional, Set, Union

 import numpy as np
@@ -70,7 +70,7 @@ def ctc_loss(
     unique: bool = False,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performs CTCLoss.
+    """Return a node which performs CTCLoss.

     @param logits: 3-D tensor of logits.
     @param logit_length: 1-D tensor of lengths for each object from a batch.
@@ -108,7 +108,7 @@ def non_max_suppression(
     output_type: str = "i64",
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performs NonMaxSuppression.
+    """Return a node which performs NonMaxSuppression.

     @param boxes: Tensor with box coordinates.
     @param scores: Tensor with box scores.
@@ -141,7 +141,7 @@

 @nameable_op
 def softplus(data: NodeInput, name: Optional[str] = None) -> Node:
-    """! Apply SoftPlus operation on each element of input tensor.
+    """Apply SoftPlus operation on each element of input tensor.

     @param data: The tensor providing input data.
     @return The new node with SoftPlus operation applied on each element.
@@ -151,7 +151,7 @@ def softplus(data: NodeInput, name: Optional[str] = None) -> Node:

 @nameable_op
 def mish(data: NodeInput, name: Optional[str] = None,) -> Node:
-    """! Return a node which performs Mish.
+    """Return a node which performs Mish.

     @param data: Tensor with input data floating point type.
     @return The new node which performs Mish
@@ -161,7 +161,7 @@ def mish(data: NodeInput, name: Optional[str] = None,) -> Node:

 @nameable_op
 def hswish(data: NodeInput, name: Optional[str] = None,) -> Node:
-    """! Return a node which performs HSwish (hard version of Swish).
+    """Return a node which performs HSwish (hard version of Swish).

     @param data: Tensor with input data floating point type.
     @return The new node which performs HSwish
@@ -175,7 +175,7 @@ def swish(
     beta: Optional[NodeInput] = None,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)).
+    """Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)).

     @param data: Tensor with input data floating point type.
     @return The new node which performs Swish
@@ -187,7 +187,7 @@

 @nameable_op
 def acosh(node: NodeInput, name: Optional[str] = None) -> Node:
-    """! Apply hyperbolic inverse cosine function on the input node element-wise.
+    """Apply hyperbolic inverse cosine function on the input node element-wise.

     @param node: One of: input node, array or scalar.
     @param name: Optional new name for output node.
@@ -198,7 +198,7 @@ def acosh(node: NodeInput, name: Optional[str] = None) -> Node:

 @nameable_op
 def asinh(node: NodeInput, name: Optional[str] = None) -> Node:
-    """! Apply hyperbolic inverse sinus function on the input node element-wise.
+    """Apply hyperbolic inverse sinus function on the input node element-wise.

     @param node: One of: input node, array or scalar.
     @param name: Optional new name for output node.
@@ -209,7 +209,7 @@ def asinh(node: NodeInput, name: Optional[str] = None) -> Node:

 @nameable_op
 def atanh(node: NodeInput, name: Optional[str] = None) -> Node:
-    """! Apply hyperbolic inverse tangent function on the input node element-wise.
+    """Apply hyperbolic inverse tangent function on the input node element-wise.

     @param node: One of: input node, array or scalar.
     @param name: Optional new name for output node.
@@ -226,7 +226,7 @@ def proposal(
     attrs: dict,
     name: Optional[str] = None,
 ) -> Node:
-    """! Filter bounding boxes and outputs only those with the highest prediction confidence.
+    """Filter bounding boxes and outputs only those with the highest prediction confidence.

     @param class_probs: 4D input floating point tensor with class prediction scores.
     @param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes
@@ -295,8 +295,9 @@ def proposal(
                                 Object Detection API models
                                 Default value: "" (empty string)
                                 Required: no
+
     Example of attribute dictionary:
-    ~~~~~~~~~~~~~~~~~~~~~~~~{.py}
+    @code{.py}
     # just required ones
     attrs = {
         'base_size': 85,
@@ -308,7 +309,7 @@ def proposal(
         'ratio': [0.1, 1.5, 2.0, 2.5],
         'scale': [2, 3, 3, 4],
     }
-    ~~~~~~~~~~~~~~~~~~~~~~~~
+    @endcode
     Optional attributes which are absent from dictionary will be set with corresponding default.
     @return Node representing Proposal operation.
     """
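A hedged sketch of how such an attribute dictionary is passed to the factory function. The third input name (`image_shape`), the input shapes, and the values for attributes not shown in the excerpt are all assumptions made for illustration:

```python
import numpy as np
import ngraph as ng

attrs = {
    "base_size": 85,
    "pre_nms_topn": 10,    # illustrative value
    "post_nms_topn": 20,   # illustrative value
    "nms_thresh": 0.5,     # illustrative value
    "feat_stride": 16,     # illustrative value
    "min_size": 32,        # illustrative value
    "ratio": [0.1, 1.5, 2.0, 2.5],
    "scale": [2, 3, 3, 4],
}
class_probs = ng.parameter([1, 12, 34, 62], np.float32, "class_probs")  # assumed shape
bbox_deltas = ng.parameter([1, 24, 34, 62], np.float32, "bbox_deltas")  # assumed shape
image_shape = ng.parameter([3], np.float32, "image_shape")
node = ng.proposal(class_probs, bbox_deltas, image_shape, attrs)
```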
@@ -340,7 +341,7 @@
 def reduce_l1(
     node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None
 ) -> Node:
-    """! L1-reduction operation on input tensor, eliminating the specified reduction axes.
+    """L1-reduction operation on input tensor, eliminating the specified reduction axes.

     @param node: The tensor we want to mean-reduce.
     @param reduction_axes: The axes to eliminate through mean operation.
@@ -357,7 +358,7 @@ def reduce_l1(
 def reduce_l2(
     node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None
 ) -> Node:
-    """! L2-reduction operation on input tensor, eliminating the specified reduction axes.
+    """L2-reduction operation on input tensor, eliminating the specified reduction axes.

     @param node: The tensor we want to mean-reduce.
     @param reduction_axes: The axes to eliminate through mean operation.
@@ -385,7 +386,7 @@ def lstm_cell(
     clip: float = 0.0,
     name: Optional[str] = None,
 ) -> Node:
-    """! Return a node which performs LSTMCell operation.
+    """Return a node which performs LSTMCell operation.

     @param X: The input tensor with shape: [batch_size, input_size].
     @param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size].
@@ -27,7 +27,7 @@ from ngraph.utils.types import (


 def _get_node_factory(opset_version: Optional[str] = None) -> NodeFactory:
-    """! Return NodeFactory configured to create operators from specified opset version."""
+    """Return NodeFactory configured to create operators from specified opset version."""
     if opset_version:
         return NodeFactory(opset_version)
     else:
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
-"""! Generic utilities. Factor related functions out to separate files."""
+"""Generic utilities. Factor related functions out to separate files."""
@@ -26,7 +26,7 @@ log = logging.getLogger(__name__)
 def get_broadcast_axes(
     output_shape: TensorShape, input_shape: TensorShape, axis: int = None
 ) -> AxisSet:
-    """! Generate a list of broadcast axes for ngraph++ broadcast.
+    """Generate a list of broadcast axes for ngraph++ broadcast.

     Informally, a broadcast "adds" axes to the input tensor,
     replicating elements from the input tensor as needed to fill the new dimensions.
@@ -27,7 +27,7 @@ def _set_node_friendly_name(node: Node, **kwargs: Any) -> Node:


 def nameable_op(node_factory_function: Callable) -> Callable:
-    """! Set the name to the ngraph operator returned by the wrapped function."""
+    """Set the name to the ngraph operator returned by the wrapped function."""

     @wraps(node_factory_function)
     def wrapper(*args: Any, **kwargs: Any) -> Node:
@@ -39,7 +39,7 @@ def nameable_op(node_factory_function: Callable) -> Callable:


 def unary_op(node_factory_function: Callable) -> Callable:
-    """! Convert the first input value to a Constant Node if a numeric value is detected."""
+    """Convert the first input value to a Constant Node if a numeric value is detected."""

     @wraps(node_factory_function)
     def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node:
@@ -52,7 +52,7 @@ def unary_op(node_factory_function: Callable) -> Callable:


 def binary_op(node_factory_function: Callable) -> Callable:
-    """! Convert the first two input values to Constant Nodes if numeric values are detected."""
+    """Convert the first two input values to Constant Nodes if numeric values are detected."""

     @wraps(node_factory_function)
     def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:
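For context, these decorators follow the standard wrapping pattern; a simplified, self-contained sketch of what `nameable_op` does (not the library code itself, which delegates to `_set_node_friendly_name`):

```python
from functools import wraps
from typing import Any, Callable

def nameable_op_sketch(node_factory_function: Callable) -> Callable:
    """Simplified stand-in: build the node, then apply an optional `name` kwarg."""
    @wraps(node_factory_function)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        node = node_factory_function(*args, **kwargs)
        name = kwargs.get("name")
        if name is not None:
            node.friendly_name = name  # the real helper sets the node's friendly name
        return node
    return wrapper
```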
@@ -14,7 +14,7 @@
 # limitations under the License.
 # ******************************************************************************

-"""! Helper functions for validating user input."""
+"""Helper functions for validating user input."""

 import logging
 from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
@@ -27,7 +27,7 @@ log = logging.getLogger(__name__)


 def assert_list_of_ints(value_list: Iterable[int], message: str) -> None:
-    """! Verify that the provided value is an iterable of integers."""
+    """Verify that the provided value is an iterable of integers."""
     try:
         for value in value_list:
             if not isinstance(value, int):
@@ -39,7 +39,7 @@ def assert_list_of_ints(value_list: Iterable[int], message: str) -> None:

 def _check_value(op_name, attr_key, value, val_type, cond=None):
     # type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool
-    """! Check whether provided value satisfies specified criteria.
+    """Check whether provided value satisfies specified criteria.

     @param op_name: The operator name which attributes are checked.
     @param attr_key: The attribute name.
@@ -67,7 +67,7 @@ def _check_value(op_name, attr_key, value, val_type, cond=None):

 def check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False):
     # type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool
-    """! Check whether specified attribute satisfies given criteria.
+    """Check whether specified attribute satisfies given criteria.

     @param op_name: The operator name which attributes are checked.
     @param attr_dict: Dictionary containing key-value attributes to check.
@@ -110,7 +110,7 @@ def check_valid_attributes(
     requirements,  # type: List[Tuple[str, bool, Type, Optional[Callable]]]
 ):
     # type: (...) -> bool
-    """! Perform attributes validation according to specified type, value criteria.
+    """Perform attributes validation according to specified type, value criteria.

     @param op_name: The operator name which attributes are checked.
     @param attributes: The dictionary with user provided attributes to check.
@@ -130,7 +130,7 @@ def check_valid_attributes(


 def is_positive_value(x):  # type: (Any) -> bool
-    """! Determine whether the specified x is positive value.
+    """Determine whether the specified x is positive value.

     @param x: The value to check.

@@ -140,7 +140,7 @@ def is_positive_value(x): # type: (Any) -> bool


 def is_non_negative_value(x):  # type: (Any) -> bool
-    """! Determine whether the specified x is non-negative value.
+    """Determine whether the specified x is non-negative value.

     @param x: The value to check.

@@ -9,10 +9,10 @@ DEFAULT_OPSET = "opset6"


 class NodeFactory(object):
-    """! Factory front-end to create node objects."""
+    """Factory front-end to create node objects."""

     def __init__(self, opset_version: str = DEFAULT_OPSET) -> None:
-        """! Create the NodeFactory object.
+        """Create the NodeFactory object.

         @param opset_version: The opset version the factory will use to produce ops from.
         """
@@ -24,7 +24,7 @@ class NodeFactory(object):
         arguments: List[Union[Node, Output]],
         attributes: Optional[Dict[str, Any]] = None,
     ) -> Node:
-        """! Create node object from provided description.
+        """Create node object from provided description.

         The user does not have to provide all node's attributes, but only required ones.

@@ -84,7 +84,7 @@ class NodeFactory(object):

     @staticmethod
     def _normalize_attr_name(attr_name: str, prefix: str) -> str:
-        """! Normalize attribute name.
+        """Normalize attribute name.

         @param attr_name: The attribute name.
         @param prefix: The prefix to attach to attribute name.
@@ -98,7 +98,7 @@ class NodeFactory(object):

     @classmethod
     def _normalize_attr_name_getter(cls, attr_name: str) -> str:
-        """! Normalize atr name to be suitable for getter function name.
+        """Normalize atr name to be suitable for getter function name.

         @param attr_name: The attribute name to normalize

@@ -108,7 +108,7 @@ class NodeFactory(object):

     @classmethod
     def _normalize_attr_name_setter(cls, attr_name: str) -> str:
-        """! Normalize attribute name to be suitable for setter function name.
+        """Normalize attribute name to be suitable for setter function name.

         @param attr_name: The attribute name to normalize

@@ -118,7 +118,7 @@ class NodeFactory(object):

     @staticmethod
     def _get_node_attr_value(node: Node, attr_name: str) -> Any:
-        """! Get provided node attribute value.
+        """Get provided node attribute value.

         @param node: The node we retrieve attribute value from.
         @param attr_name: The attribute name.
@@ -132,7 +132,7 @@ class NodeFactory(object):

     @staticmethod
     def _set_node_attr_value(node: Node, attr_name: str, value: Any) -> None:
-        """! Set the node attribute value.
+        """Set the node attribute value.

         @param node: The node we change attribute value for.
         @param attr_name: The attribute name.
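A brief usage sketch of the factory documented above; the op type, input node, and empty attribute dictionary are illustrative choices, not taken from the diff:

```python
import numpy as np
import ngraph as ng
from ngraph.utils.node_factory import NodeFactory

data = ng.parameter([2, 2], np.float32, "data")
factory = NodeFactory("opset3")
# create(op_type_name, arguments, attributes) mirrors the signature shown above.
relu = factory.create("Relu", [data], {})
print(relu.get_type_name())  # "Relu"
```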
@@ -20,7 +20,7 @@ from ngraph.impl import Node


 def get_reduction_axes(node: Node, reduction_axes: Optional[Iterable[int]]) -> Iterable[int]:
-    """! Get reduction axes if it is None and convert it to set if its type is different.
+    """Get reduction axes if it is None and convert it to set if its type is different.

     If reduction_axes is None we default to reduce all axes.

@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
-"""! Helper classes for aggregating TensorIterator input/output desciptor attributes."""
+"""Helper classes for aggregating TensorIterator input/output desciptor attributes."""

 from typing import List

@@ -22,14 +22,14 @@ from ngraph.impl.op import Parameter


 class GraphBody(object):
-    """! Class containing graph parameters and results."""
+    """Class containing graph parameters and results."""

     def __init__(self, parameters: List[Parameter], results: List[Node],) -> None:
         self.parameters = parameters
         self.results = results

     def serialize(self) -> dict:
-        """! Serialize GraphBody as a dictionary."""
+        """Serialize GraphBody as a dictionary."""
         return {
             "parameters": self.parameters,
             "results": self.results,
@@ -37,14 +37,14 @@ class GraphBody(object):


 class TensorIteratorInputDesc(object):
-    """! Represents a generic input descriptor for TensorIterator operator."""
+    """Represents a generic input descriptor for TensorIterator operator."""

     def __init__(self, input_idx: int, body_parameter_idx: int,) -> None:
         self.input_idx = input_idx
         self.body_parameter_idx = body_parameter_idx

     def serialize(self) -> dict:
-        """! Serialize TensorIteratorInputDesc as a dictionary."""
+        """Serialize TensorIteratorInputDesc as a dictionary."""
         return {
             "input_idx": self.input_idx,
             "body_parameter_idx": self.body_parameter_idx,
@@ -52,7 +52,7 @@ class TensorIteratorInputDesc(object):


 class TensorIteratorSliceInputDesc(TensorIteratorInputDesc):
-    """! Represents a TensorIterator graph body input formed from slices of TensorIterator input."""
+    """Represents a TensorIterator graph body input formed from slices of TensorIterator input."""

     def __init__(
         self,
@@ -72,7 +72,7 @@ class TensorIteratorSliceInputDesc(TensorIteratorInputDesc):
         self.axis = axis

     def serialize(self) -> dict:
-        """! Serialize TensorIteratorSliceInputDesc as a dictionary."""
+        """Serialize TensorIteratorSliceInputDesc as a dictionary."""
         output = super().serialize()
         output["start"] = self.start
         output["stride"] = self.stride
@@ -83,7 +83,7 @@ class TensorIteratorSliceInputDesc(TensorIteratorInputDesc):


 class TensorIteratorMergedInputDesc(TensorIteratorInputDesc):
-    """! Represents a TensorIterator graph body input with initial value in the first iteration.
+    """Represents a TensorIterator graph body input with initial value in the first iteration.

     Later on, this input value is computed inside graph body.
     """
@@ -93,28 +93,28 @@ class TensorIteratorMergedInputDesc(TensorIteratorInputDesc):
         self.body_value_idx = body_value_idx

     def serialize(self) -> dict:
-        """! Serialize TensorIteratorMergedInputDesc as a dictionary."""
+        """Serialize TensorIteratorMergedInputDesc as a dictionary."""
         output = super().serialize()
         output["body_value_idx"] = self.body_value_idx
         return output


 class TensorIteratorInvariantInputDesc(TensorIteratorInputDesc):
-    """! Represents a TensorIterator graph body input that has invariant value during iteration."""
+    """Represents a TensorIterator graph body input that has invariant value during iteration."""

     def __init__(self, input_idx: int, body_parameter_idx: int,) -> None:
         super().__init__(input_idx, body_parameter_idx)


 class TensorIteratorOutputDesc(object):
-    """! Represents a generic output descriptor for TensorIterator operator."""
+    """Represents a generic output descriptor for TensorIterator operator."""

     def __init__(self, body_value_idx: int, output_idx: int,) -> None:
         self.body_value_idx = body_value_idx
         self.output_idx = output_idx

     def serialize(self) -> dict:
-        """! Serialize TensorIteratorOutputDesc as a dictionary."""
+        """Serialize TensorIteratorOutputDesc as a dictionary."""
         return {
             "body_value_idx": self.body_value_idx,
             "output_idx": self.output_idx,
@@ -122,21 +122,21 @@ class TensorIteratorOutputDesc(object):


 class TensorIteratorBodyOutputDesc(TensorIteratorOutputDesc):
-    """! Represents an output from a specific iteration."""
+    """Represents an output from a specific iteration."""

     def __init__(self, body_value_idx: int, output_idx: int, iteration: int,) -> None:
         super().__init__(body_value_idx, output_idx)
         self.iteration = iteration

     def serialize(self) -> dict:
-        """! Serialize TensorIteratorBodyOutputDesc as a dictionary."""
+        """Serialize TensorIteratorBodyOutputDesc as a dictionary."""
         output = super().serialize()
         output["iteration"] = self.iteration
         return output


 class TensorIteratorConcatOutputDesc(TensorIteratorOutputDesc):
-    """! Represents an output produced by concatenation of output from each iteration."""
+    """Represents an output produced by concatenation of output from each iteration."""

     def __init__(
         self,
@@ -156,7 +156,7 @@ class TensorIteratorConcatOutputDesc(TensorIteratorOutputDesc):
         self.axis = axis

     def serialize(self) -> dict:
-        """! Serialize TensorIteratorConcatOutputDesc as a dictionary."""
+        """Serialize TensorIteratorConcatOutputDesc as a dictionary."""
         output = super().serialize()
         output["start"] = self.start
         output["stride"] = self.stride
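These descriptor classes are plain data holders, and serialize() simply flattens them to dictionaries. A hedged sketch using the constructors shown above (the module path is assumed from the file layout):

```python
import numpy as np
import ngraph as ng
from ngraph.utils.tensor_iterator_types import GraphBody, TensorIteratorBodyOutputDesc

x = ng.parameter([2, 2], np.float32, "x")
body = GraphBody(parameters=[x], results=[ng.relu(x)])
print(body.serialize())      # {"parameters": [...], "results": [...]}

out_desc = TensorIteratorBodyOutputDesc(body_value_idx=0, output_idx=0, iteration=-1)
print(out_desc.serialize())  # {"body_value_idx": 0, "output_idx": 0, "iteration": -1}
```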
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
-"""! Functions related to converting between Python and numpy types and ngraph types."""
+"""Functions related to converting between Python and numpy types and ngraph types."""

 import logging
 from typing import List, Union
@@ -66,7 +66,7 @@ ngraph_to_numpy_types_str_map = [


 def get_element_type(data_type: NumericType) -> NgraphType:
-    """! Return an ngraph element type for a Python type or numpy.dtype."""
+    """Return an ngraph element type for a Python type or numpy.dtype."""
     if data_type is int:
         log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
         return NgraphType.i32
@@ -85,7 +85,7 @@ def get_element_type(data_type: NumericType) -> NgraphType:


 def get_element_type_str(data_type: NumericType) -> str:
-    """! Return an ngraph element type string representation for a Python type or numpy dtype."""
+    """Return an ngraph element type string representation for a Python type or numpy dtype."""
     if data_type is int:
         log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
         return "i32"
@@ -105,7 +105,7 @@ def get_element_type_str(data_type: NumericType) -> str:


 def get_dtype(ngraph_type: NgraphType) -> np.dtype:
-    """! Return a numpy.dtype for an ngraph element type."""
+    """Return a numpy.dtype for an ngraph element type."""
     np_type = next(
         (np_type for (ng_type, np_type) in ngraph_to_numpy_types_map if ng_type == ngraph_type),
         None,
@@ -118,14 +118,14 @@ def get_dtype(ngraph_type: NgraphType) -> np.dtype:


 def get_ndarray(data: NumericData) -> np.ndarray:
-    """! Wrap data into a numpy ndarray."""
+    """Wrap data into a numpy ndarray."""
     if type(data) == np.ndarray:
         return data
     return np.array(data)


 def get_shape(data: NumericData) -> TensorShape:
-    """! Return a shape of NumericData."""
+    """Return a shape of NumericData."""
     if type(data) == np.ndarray:
         return data.shape  # type: ignore
     elif type(data) == list:
@@ -134,7 +134,7 @@ def get_shape(data: NumericData) -> TensorShape:


 def make_constant_node(value: NumericData, dtype: NumericType = None) -> Constant:
-    """! Return an ngraph Constant node with the specified value."""
+    """Return an ngraph Constant node with the specified value."""
     ndarray = get_ndarray(value)
     if dtype:
         element_type = get_element_type(dtype)
@@ -145,7 +145,7 @@ def make_constant_node(value: NumericData, dtype: NumericType = None) -> Constan


 def as_node(input_value: NodeInput) -> Node:
-    """! Return input values as nodes. Scalars will be converted to Constant nodes."""
+    """Return input values as nodes. Scalars will be converted to Constant nodes."""
     if issubclass(type(input_value), Node):
         return input_value
     if issubclass(type(input_value), Output):
@@ -154,5 +154,5 @@ def as_node(input_value: NodeInput) -> Node:


 def as_nodes(*input_values: NodeInput) -> List[Node]:
-    """! Return input values as nodes. Scalars will be converted to Constant nodes."""
+    """Return input values as nodes. Scalars will be converted to Constant nodes."""
     return [as_node(input_value) for input_value in input_values]
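A short sketch tying the type helpers in this file together (illustrative, not part of the commit; `get_data()` is the Constant accessor patched in elsewhere in the package):

```python
import numpy as np
from ngraph.utils.types import as_nodes, get_element_type, make_constant_node

print(get_element_type(np.float32))        # the ngraph f32 element type
const = make_constant_node([[1, 2], [3, 4]], dtype=np.int32)
print(const.get_data())                    # the wrapped values as a numpy array

# as_node / as_nodes leave Nodes untouched and wrap scalars or arrays in Constants.
nodes = as_nodes(const, 5.0)
print([n.get_type_name() for n in nodes])  # ['Constant', 'Constant']
```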