Expose OpSets as part of nGraph Python API (#1261)

parent b3f55dd0be
commit 821a3dae32
@@ -217,6 +217,10 @@ sources = [

packages = [
    "ngraph",
    "ngraph.opset1",
    "ngraph.opset2",
    "ngraph.opset3",
    "ngraph.opset4",
    "ngraph.utils",
    "ngraph.impl",
    "ngraph.impl.op",
@@ -23,135 +23,155 @@ try:
except DistributionNotFound:
    __version__ = "0.0.0.dev0"

from ngraph.impl import Node

from ngraph.ops import absolute
from ngraph.ops import absolute as abs
from ngraph.ops import acos
from ngraph.ops import add
from ngraph.ops import asin
from ngraph.ops import assign
from ngraph.ops import atan
from ngraph.ops import avg_pool
from ngraph.ops import batch_norm_inference
from ngraph.ops import batch_to_space
from ngraph.ops import binary_convolution
from ngraph.ops import broadcast
from ngraph.ops import bucketize
from ngraph.ops import ceiling
from ngraph.ops import ceiling as ceil
from ngraph.ops import clamp
from ngraph.ops import concat
from ngraph.ops import constant
from ngraph.ops import convert
from ngraph.ops import convert_like
from ngraph.ops import convolution
from ngraph.ops import convolution_backprop_data
from ngraph.ops import cos
from ngraph.ops import cosh
from ngraph.ops import ctc_greedy_decoder
from ngraph.ops import cum_sum
from ngraph.ops import cum_sum as cumsum
from ngraph.ops import deformable_convolution
from ngraph.ops import deformable_psroi_pooling
from ngraph.ops import depth_to_space
from ngraph.ops import detection_output
from ngraph.ops import divide
from ngraph.ops import elu
from ngraph.ops import embedding_bag_offsets_sum
from ngraph.ops import embedding_bag_packed_sum
from ngraph.ops import embedding_segments_sum
from ngraph.ops import extract_image_patches
from ngraph.ops import equal
from ngraph.ops import erf
from ngraph.ops import exp
from ngraph.ops import fake_quantize
from ngraph.ops import floor
from ngraph.ops import floor_mod
from ngraph.ops import gather
from ngraph.ops import gather_tree
from ngraph.ops import gelu
from ngraph.ops import get_output_element
from ngraph.ops import greater
from ngraph.ops import greater_equal
from ngraph.ops import grn
from ngraph.ops import group_convolution
from ngraph.ops import group_convolution_backprop_data
from ngraph.ops import gru_cell
from ngraph.ops import hard_sigmoid
from ngraph.ops import interpolate
from ngraph.ops import less
from ngraph.ops import less_equal
from ngraph.ops import log
from ngraph.ops import logical_and
from ngraph.ops import logical_not
from ngraph.ops import logical_or
from ngraph.ops import logical_xor
from ngraph.ops import lrn
from ngraph.ops import lstm_cell
from ngraph.ops import lstm_sequence
from ngraph.ops import matmul
from ngraph.ops import max_pool
from ngraph.ops import maximum
from ngraph.ops import minimum
from ngraph.ops import mod
from ngraph.ops import multiply
from ngraph.ops import mvn
from ngraph.ops import negative
from ngraph.ops import non_max_suppression
from ngraph.ops import non_zero
from ngraph.ops import normalize_l2
from ngraph.ops import not_equal
from ngraph.ops import one_hot
from ngraph.ops import pad
from ngraph.ops import parameter
from ngraph.ops import power
from ngraph.ops import prelu
from ngraph.ops import prior_box
from ngraph.ops import prior_box_clustered
from ngraph.ops import psroi_pooling
from ngraph.ops import proposal
from ngraph.ops import read_value
from ngraph.ops import reduce_logical_and
from ngraph.ops import reduce_logical_or
from ngraph.ops import reduce_max
from ngraph.ops import reduce_mean
from ngraph.ops import reduce_min
from ngraph.ops import reduce_prod
from ngraph.ops import reduce_sum
from ngraph.ops import region_yolo
from ngraph.ops import reorg_yolo
from ngraph.ops import relu
from ngraph.ops import reshape
from ngraph.ops import result
from ngraph.ops import reverse
from ngraph.ops import reverse_sequence
from ngraph.ops import rnn_cell
from ngraph.ops import roi_align
from ngraph.ops import roi_pooling
from ngraph.ops import scatter_elements_update
from ngraph.ops import scatter_update
from ngraph.ops import select
from ngraph.ops import selu
from ngraph.ops import shape_of
from ngraph.ops import shuffle_channels
from ngraph.ops import sigmoid
from ngraph.ops import sign
from ngraph.ops import sin
from ngraph.ops import sinh
from ngraph.ops import softmax
from ngraph.ops import space_to_batch
from ngraph.ops import space_to_depth
from ngraph.ops import split
from ngraph.ops import sqrt
from ngraph.ops import squared_difference
from ngraph.ops import squeeze
from ngraph.ops import strided_slice
from ngraph.ops import subtract
from ngraph.ops import tan
from ngraph.ops import tanh
from ngraph.ops import tensor_iterator
from ngraph.ops import tile
from ngraph.ops import topk
from ngraph.ops import transpose
from ngraph.ops import unsqueeze
from ngraph.ops import variadic_split
from ngraph.opset4 import absolute
from ngraph.opset4 import absolute as abs
from ngraph.opset4 import acos
from ngraph.opset4 import add
from ngraph.opset4 import asin
from ngraph.opset4 import assign
from ngraph.opset4 import atan
from ngraph.opset4 import avg_pool
from ngraph.opset4 import batch_norm_inference
from ngraph.opset4 import batch_to_space
from ngraph.opset4 import binary_convolution
from ngraph.opset4 import broadcast
from ngraph.opset4 import bucketize
from ngraph.opset4 import ceiling
from ngraph.opset4 import ceiling as ceil
from ngraph.opset4 import clamp
from ngraph.opset4 import concat
from ngraph.opset4 import constant
from ngraph.opset4 import convert
from ngraph.opset4 import convert_like
from ngraph.opset4 import convolution
from ngraph.opset4 import convolution_backprop_data
from ngraph.opset4 import cos
from ngraph.opset4 import cosh
from ngraph.opset4 import ctc_greedy_decoder
from ngraph.opset4 import cum_sum
from ngraph.opset4 import cum_sum as cumsum
from ngraph.opset4 import deformable_convolution
from ngraph.opset4 import deformable_psroi_pooling
from ngraph.opset4 import depth_to_space
from ngraph.opset4 import detection_output
from ngraph.opset4 import divide
from ngraph.opset4 import elu
from ngraph.opset4 import embedding_bag_offsets_sum
from ngraph.opset4 import embedding_bag_packed_sum
from ngraph.opset4 import embedding_segments_sum
from ngraph.opset4 import extract_image_patches
from ngraph.opset4 import equal
from ngraph.opset4 import erf
from ngraph.opset4 import exp
from ngraph.opset4 import fake_quantize
from ngraph.opset4 import floor
from ngraph.opset4 import floor_mod
from ngraph.opset4 import gather
from ngraph.opset4 import gather_tree
from ngraph.opset4 import gelu
from ngraph.opset4 import greater
from ngraph.opset4 import greater_equal
from ngraph.opset4 import grn
from ngraph.opset4 import group_convolution
from ngraph.opset4 import group_convolution_backprop_data
from ngraph.opset4 import gru_cell
from ngraph.opset4 import hard_sigmoid
from ngraph.opset4 import interpolate
from ngraph.opset4 import less
from ngraph.opset4 import less_equal
from ngraph.opset4 import log
from ngraph.opset4 import logical_and
from ngraph.opset4 import logical_not
from ngraph.opset4 import logical_or
from ngraph.opset4 import logical_xor
from ngraph.opset4 import lrn
from ngraph.opset4 import lstm_cell
from ngraph.opset4 import lstm_sequence
from ngraph.opset4 import matmul
from ngraph.opset4 import max_pool
from ngraph.opset4 import maximum
from ngraph.opset4 import minimum
from ngraph.opset4 import mod
from ngraph.opset4 import multiply
from ngraph.opset4 import mvn
from ngraph.opset4 import negative
from ngraph.opset4 import non_max_suppression
from ngraph.opset4 import non_zero
from ngraph.opset4 import normalize_l2
from ngraph.opset4 import not_equal
from ngraph.opset4 import one_hot
from ngraph.opset4 import pad
from ngraph.opset4 import parameter
from ngraph.opset4 import power
from ngraph.opset4 import prelu
from ngraph.opset4 import prior_box
from ngraph.opset4 import prior_box_clustered
from ngraph.opset4 import psroi_pooling
from ngraph.opset4 import proposal
from ngraph.opset4 import range
from ngraph.opset4 import read_value
from ngraph.opset4 import reduce_logical_and
from ngraph.opset4 import reduce_logical_or
from ngraph.opset4 import reduce_max
from ngraph.opset4 import reduce_mean
from ngraph.opset4 import reduce_min
from ngraph.opset4 import reduce_prod
from ngraph.opset4 import reduce_sum
from ngraph.opset4 import region_yolo
from ngraph.opset4 import reorg_yolo
from ngraph.opset4 import relu
from ngraph.opset4 import reshape
from ngraph.opset4 import result
from ngraph.opset4 import reverse
from ngraph.opset4 import reverse_sequence
from ngraph.opset4 import rnn_cell
from ngraph.opset4 import roi_align
from ngraph.opset4 import roi_pooling
from ngraph.opset4 import scatter_elements_update
from ngraph.opset4 import scatter_update
from ngraph.opset4 import select
from ngraph.opset4 import selu
from ngraph.opset4 import shape_of
from ngraph.opset4 import shuffle_channels
from ngraph.opset4 import sigmoid
from ngraph.opset4 import sign
from ngraph.opset4 import sin
from ngraph.opset4 import sinh
from ngraph.opset4 import softmax
from ngraph.opset4 import space_to_batch
from ngraph.opset4 import space_to_depth
from ngraph.opset4 import split
from ngraph.opset4 import sqrt
from ngraph.opset4 import squared_difference
from ngraph.opset4 import squeeze
from ngraph.opset4 import strided_slice
from ngraph.opset4 import subtract
from ngraph.opset4 import tan
from ngraph.opset4 import tanh
from ngraph.opset4 import tensor_iterator
from ngraph.opset4 import tile
from ngraph.opset4 import topk
from ngraph.opset4 import transpose
from ngraph.opset4 import unsqueeze
from ngraph.opset4 import variadic_split


# Extend Node class to support binary operators
Node.__add__ = add
Node.__sub__ = subtract
Node.__mul__ = multiply
Node.__div__ = divide
Node.__truediv__ = divide
Node.__radd__ = lambda left, right: add(right, left)
Node.__rsub__ = lambda left, right: subtract(right, left)
Node.__rmul__ = lambda left, right: multiply(right, left)
Node.__rdiv__ = lambda left, right: divide(right, left)
Node.__rtruediv__ = lambda left, right: divide(right, left)
Node.__eq__ = equal
Node.__ne__ = not_equal
Node.__lt__ = less
Node.__le__ = less_equal
Node.__gt__ = greater
Node.__ge__ = greater_equal
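With the re-exports above, user code keeps importing from the top-level ngraph package while the symbols now resolve to ngraph.opset4. A minimal usage sketch (illustrative only, not part of this diff; it assumes the ngraph wheel built from this tree is installed):

import numpy as np
import ngraph as ng

# Graph inputs: a 2x2 float32 parameter and a constant of the same shape.
a = ng.parameter([2, 2], dtype=np.float32, name="A")
b = ng.constant(np.ones((2, 2), dtype=np.float32))

# The operator overloads installed on Node above dispatch to the factory
# functions, so `a + b` builds an Add node and `/` builds a Divide node.
model = (a + b) / ng.exp(a)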
ngraph/python/src/ngraph/opset1/__init__.py (new file, +125)
@@ -0,0 +1,125 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

from ngraph.opset1.ops import absolute
from ngraph.opset1.ops import absolute as abs
from ngraph.opset1.ops import acos
from ngraph.opset1.ops import add
from ngraph.opset1.ops import asin
from ngraph.opset1.ops import atan
from ngraph.opset1.ops import avg_pool
from ngraph.opset1.ops import batch_norm_inference
from ngraph.opset1.ops import binary_convolution
from ngraph.opset1.ops import broadcast
from ngraph.opset1.ops import ceiling
from ngraph.opset1.ops import ceiling as ceil
from ngraph.opset1.ops import clamp
from ngraph.opset1.ops import concat
from ngraph.opset1.ops import constant
from ngraph.opset1.ops import convert
from ngraph.opset1.ops import convert_like
from ngraph.opset1.ops import convolution
from ngraph.opset1.ops import convolution_backprop_data
from ngraph.opset1.ops import cos
from ngraph.opset1.ops import cosh
from ngraph.opset1.ops import ctc_greedy_decoder
from ngraph.opset1.ops import deformable_convolution
from ngraph.opset1.ops import deformable_psroi_pooling
from ngraph.opset1.ops import depth_to_space
from ngraph.opset1.ops import detection_output
from ngraph.opset1.ops import divide
from ngraph.opset1.ops import elu
from ngraph.opset1.ops import equal
from ngraph.opset1.ops import erf
from ngraph.opset1.ops import exp
from ngraph.opset1.ops import fake_quantize
from ngraph.opset1.ops import floor
from ngraph.opset1.ops import floor_mod
from ngraph.opset1.ops import gather
from ngraph.opset1.ops import gather_tree
from ngraph.opset1.ops import greater
from ngraph.opset1.ops import greater_equal
from ngraph.opset1.ops import grn
from ngraph.opset1.ops import group_convolution
from ngraph.opset1.ops import group_convolution_backprop_data
from ngraph.opset1.ops import hard_sigmoid
from ngraph.opset1.ops import interpolate
from ngraph.opset1.ops import less
from ngraph.opset1.ops import less_equal
from ngraph.opset1.ops import log
from ngraph.opset1.ops import logical_and
from ngraph.opset1.ops import logical_not
from ngraph.opset1.ops import logical_or
from ngraph.opset1.ops import logical_xor
from ngraph.opset1.ops import lrn
from ngraph.opset1.ops import lstm_cell
from ngraph.opset1.ops import lstm_sequence
from ngraph.opset1.ops import matmul
from ngraph.opset1.ops import max_pool
from ngraph.opset1.ops import maximum
from ngraph.opset1.ops import minimum
from ngraph.opset1.ops import mod
from ngraph.opset1.ops import multiply
from ngraph.opset1.ops import negative
from ngraph.opset1.ops import non_max_suppression
from ngraph.opset1.ops import normalize_l2
from ngraph.opset1.ops import not_equal
from ngraph.opset1.ops import one_hot
from ngraph.opset1.ops import pad
from ngraph.opset1.ops import parameter
from ngraph.opset1.ops import power
from ngraph.opset1.ops import prelu
from ngraph.opset1.ops import prior_box
from ngraph.opset1.ops import prior_box_clustered
from ngraph.opset1.ops import psroi_pooling
from ngraph.opset1.ops import proposal
from ngraph.opset1.ops import range
from ngraph.opset1.ops import reduce_logical_and
from ngraph.opset1.ops import reduce_logical_or
from ngraph.opset1.ops import reduce_max
from ngraph.opset1.ops import reduce_mean
from ngraph.opset1.ops import reduce_min
from ngraph.opset1.ops import reduce_prod
from ngraph.opset1.ops import reduce_sum
from ngraph.opset1.ops import region_yolo
from ngraph.opset1.ops import relu
from ngraph.opset1.ops import reshape
from ngraph.opset1.ops import result
from ngraph.opset1.ops import reverse_sequence
from ngraph.opset1.ops import select
from ngraph.opset1.ops import selu
from ngraph.opset1.ops import shape_of
from ngraph.opset1.ops import sigmoid
from ngraph.opset1.ops import sign
from ngraph.opset1.ops import sin
from ngraph.opset1.ops import sinh
from ngraph.opset1.ops import softmax
from ngraph.opset1.ops import space_to_depth
from ngraph.opset1.ops import split
from ngraph.opset1.ops import sqrt
from ngraph.opset1.ops import squared_difference
from ngraph.opset1.ops import squeeze
from ngraph.opset1.ops import strided_slice
from ngraph.opset1.ops import subtract
from ngraph.opset1.ops import tan
from ngraph.opset1.ops import tanh
from ngraph.opset1.ops import tensor_iterator
from ngraph.opset1.ops import tile
from ngraph.opset1.ops import topk
from ngraph.opset1.ops import transpose
from ngraph.opset1.ops import unsqueeze
from ngraph.opset1.ops import variadic_split
File diff suppressed because it is too large
ngraph/python/src/ngraph/opset2/__init__.py (new file, +131)
@@ -0,0 +1,131 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

from ngraph.opset1.ops import absolute
from ngraph.opset1.ops import absolute as abs
from ngraph.opset1.ops import acos
from ngraph.opset1.ops import add
from ngraph.opset1.ops import asin
from ngraph.opset1.ops import atan
from ngraph.opset1.ops import avg_pool
from ngraph.opset1.ops import batch_norm_inference
from ngraph.opset2.ops import batch_to_space
from ngraph.opset1.ops import binary_convolution
from ngraph.opset1.ops import broadcast
from ngraph.opset1.ops import ceiling
from ngraph.opset1.ops import ceiling as ceil
from ngraph.opset1.ops import clamp
from ngraph.opset1.ops import concat
from ngraph.opset1.ops import constant
from ngraph.opset1.ops import convert
from ngraph.opset1.ops import convert_like
from ngraph.opset1.ops import convolution
from ngraph.opset1.ops import convolution_backprop_data
from ngraph.opset1.ops import cos
from ngraph.opset1.ops import cosh
from ngraph.opset1.ops import ctc_greedy_decoder
from ngraph.opset1.ops import deformable_convolution
from ngraph.opset1.ops import deformable_psroi_pooling
from ngraph.opset1.ops import depth_to_space
from ngraph.opset1.ops import detection_output
from ngraph.opset1.ops import divide
from ngraph.opset1.ops import elu
from ngraph.opset1.ops import equal
from ngraph.opset1.ops import erf
from ngraph.opset1.ops import exp
from ngraph.opset1.ops import fake_quantize
from ngraph.opset1.ops import floor
from ngraph.opset1.ops import floor_mod
from ngraph.opset1.ops import gather
from ngraph.opset1.ops import gather_tree
from ngraph.opset2.ops import gelu
from ngraph.opset1.ops import greater
from ngraph.opset1.ops import greater_equal
from ngraph.opset1.ops import grn
from ngraph.opset1.ops import group_convolution
from ngraph.opset1.ops import group_convolution_backprop_data
from ngraph.opset1.ops import hard_sigmoid
from ngraph.opset1.ops import interpolate
from ngraph.opset1.ops import less
from ngraph.opset1.ops import less_equal
from ngraph.opset1.ops import log
from ngraph.opset1.ops import logical_and
from ngraph.opset1.ops import logical_not
from ngraph.opset1.ops import logical_or
from ngraph.opset1.ops import logical_xor
from ngraph.opset1.ops import lrn
from ngraph.opset1.ops import lstm_cell
from ngraph.opset1.ops import lstm_sequence
from ngraph.opset1.ops import matmul
from ngraph.opset1.ops import max_pool
from ngraph.opset1.ops import maximum
from ngraph.opset1.ops import minimum
from ngraph.opset1.ops import mod
from ngraph.opset1.ops import multiply
from ngraph.opset2.ops import mvn
from ngraph.opset1.ops import negative
from ngraph.opset1.ops import non_max_suppression
from ngraph.opset1.ops import normalize_l2
from ngraph.opset1.ops import not_equal
from ngraph.opset1.ops import one_hot
from ngraph.opset1.ops import pad
from ngraph.opset1.ops import parameter
from ngraph.opset1.ops import power
from ngraph.opset1.ops import prelu
from ngraph.opset1.ops import prior_box
from ngraph.opset1.ops import prior_box_clustered
from ngraph.opset1.ops import psroi_pooling
from ngraph.opset1.ops import proposal
from ngraph.opset1.ops import range
from ngraph.opset1.ops import reduce_logical_and
from ngraph.opset1.ops import reduce_logical_or
from ngraph.opset1.ops import reduce_max
from ngraph.opset1.ops import reduce_mean
from ngraph.opset1.ops import reduce_min
from ngraph.opset1.ops import reduce_prod
from ngraph.opset1.ops import reduce_sum
from ngraph.opset1.ops import region_yolo
from ngraph.opset2.ops import reorg_yolo
from ngraph.opset1.ops import relu
from ngraph.opset1.ops import reshape
from ngraph.opset1.ops import result
from ngraph.opset1.ops import reverse_sequence
from ngraph.opset2.ops import roi_pooling
from ngraph.opset1.ops import select
from ngraph.opset1.ops import selu
from ngraph.opset1.ops import shape_of
from ngraph.opset1.ops import sigmoid
from ngraph.opset1.ops import sign
from ngraph.opset1.ops import sin
from ngraph.opset1.ops import sinh
from ngraph.opset1.ops import softmax
from ngraph.opset2.ops import space_to_batch
from ngraph.opset1.ops import space_to_depth
from ngraph.opset1.ops import split
from ngraph.opset1.ops import sqrt
from ngraph.opset1.ops import squared_difference
from ngraph.opset1.ops import squeeze
from ngraph.opset1.ops import strided_slice
from ngraph.opset1.ops import subtract
from ngraph.opset1.ops import tan
from ngraph.opset1.ops import tanh
from ngraph.opset1.ops import tensor_iterator
from ngraph.opset1.ops import tile
from ngraph.opset1.ops import topk
from ngraph.opset1.ops import transpose
from ngraph.opset1.ops import unsqueeze
from ngraph.opset1.ops import variadic_split
ngraph/python/src/ngraph/opset2/ops.py (new file, +193)
@@ -0,0 +1,193 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

"""Factory functions for all ngraph ops."""
from typing import Callable, Iterable, List, Optional, Set, Union

import numpy as np
from functools import partial

from ngraph.impl import Node, Shape
from ngraph.impl.op import Constant, GetOutputElement, Parameter
from ngraph.opset_utils import _get_node_factory
from ngraph.utils.decorators import binary_op, nameable_op, unary_op
from ngraph.utils.input_validation import (
    assert_list_of_ints,
    check_valid_attributes,
    is_non_negative_value,
    is_positive_value,
)
from ngraph.utils.node_factory import NodeFactory
from ngraph.utils.tensor_iterator_types import (
    GraphBody,
    TensorIteratorSliceInputDesc,
    TensorIteratorMergedInputDesc,
    TensorIteratorInvariantInputDesc,
    TensorIteratorBodyOutputDesc,
    TensorIteratorConcatOutputDesc,
)
from ngraph.utils.types import (
    NodeInput,
    NumericData,
    NumericType,
    ScalarData,
    TensorShape,
    as_node,
    as_nodes,
    get_dtype,
    get_element_type,
    get_element_type_str,
    make_constant_node,
)

_get_node_factory_opset2 = partial(_get_node_factory, "opset2")

# -------------------------------------------- ops ------------------------------------------------


@nameable_op
def batch_to_space(
    data: NodeInput,
    block_shape: NodeInput,
    crops_begin: NodeInput,
    crops_end: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Perform BatchToSpace operation on the input tensor.

    BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions.

    :param data: Node producing the data tensor.
    :param block_shape: The sizes of the block of values to be moved.
    :param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`.
    :param crops_end: Specifies the amount to crop from the end along each axis of `data`.
    :param name: Optional output node name.
    :return: The new node performing a BatchToSpace operation.
    """
    return _get_node_factory_opset2().create(
        "BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end)
    )


@unary_op
def gelu(node: NodeInput, name: Optional[str] = None) -> Node:
    r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node.

    Computes GELU function:

    .. math:: f(x) = 0.5\cdot x\cdot(1 + erf(\dfrac{x}{\sqrt{2}}))

    For more information refer to:
    `Gaussian Error Linear Unit (GELU) <https://arxiv.org/pdf/1606.08415.pdf>`_

    :param node: Input tensor. One of: input node, array or scalar.
    :param name: Optional output node name.
    :return: The new node performing a GELU operation on its input data element-wise.
    """
    return _get_node_factory_opset2().create("Gelu", [node])


@nameable_op
def mvn(
    data: Node,
    across_channels: bool = False,
    normalize_variance: bool = False,
    eps: float = 1e-9,
    name: Optional[str] = None,
) -> Node:
    r"""Perform Mean Variance Normalization operation on data from input node.

    Computes MVN on the input tensor :code:`data` (called `X`) using the formula:

    .. math:: Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}}

    :param data: The node with data tensor.
    :param across_channels: Denotes if mean values are shared across channels.
    :param normalize_variance: Denotes whether to perform variance normalization.
    :param eps: The number added to the variance to avoid division by zero
                when normalizing the value. Scalar value.
    :param name: Optional output node name.
    :return: The new node performing a MVN operation on input tensor.
    """
    return _get_node_factory_opset2().create(
        "MVN",
        [data],
        {"across_channels": across_channels, "normalize_variance": normalize_variance, "eps": eps},
    )


@nameable_op
def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node:
    """Return a node which produces the ReorgYolo operation.

    :param input: Input data.
    :param stride: Stride to reorganize input by.
    :param name: Optional name for output node.
    :return: ReorgYolo node
    """
    return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride})


@nameable_op
def roi_pooling(
    input: NodeInput,
    coords: NodeInput,
    output_size: TensorShape,
    spatial_scale: NumericData,
    method: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces an ROIPooling operation.

    :param input: Input feature map {N, C, ...}.
    :param coords: Coordinates of bounding boxes.
    :param output_size: Height/Width of ROI output features (shape).
    :param spatial_scale: Ratio of input feature map over input image size (float).
    :param method: Method of pooling - string: "max" or "bilinear".
    :return: ROIPooling node
    """
    method = method.lower()
    return _get_node_factory_opset2().create(
        "ROIPooling",
        as_nodes(input, coords),
        {"output_size": Shape(output_size), "spatial_scale": spatial_scale, "method": method},
    )


@nameable_op
def space_to_batch(
    data: NodeInput,
    block_shape: NodeInput,
    pads_begin: NodeInput,
    pads_end: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Perform SpaceToBatch operation on the input tensor.

    SpaceToBatch permutes data tensor blocks of spatial data into batch dimension.
    The operator returns a copy of the input tensor where values from spatial blocks dimensions
    are moved in the batch dimension.

    :param data: Node producing the data tensor.
    :param block_shape: The sizes of the block of values to be moved.
    :param pads_begin: Specifies the padding for the beginning along each axis of `data`.
    :param pads_end: Specifies the padding for the ending along each axis of `data`.
    :param name: Optional output node name.
    :return: The new node performing a SpaceToBatch operation.
    """
    return _get_node_factory_opset2().create(
        "SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end)
    )
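A short usage sketch for the opset2 factories defined above (illustrative only; it assumes the package layout introduced by this commit):

import numpy as np
import ngraph.opset2 as ops2

# A typical NCHW input; parameter() is re-exported from opset1.
data = ops2.parameter([1, 3, 224, 224], dtype=np.float32, name="data")
# Gelu takes a single input node; MVN is configured through its attributes.
activated = ops2.gelu(data)
normalized = ops2.mvn(activated, across_channels=True, normalize_variance=True, eps=1e-9)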
ngraph/python/src/ngraph/opset3/__init__.py (new file, +148)
@@ -0,0 +1,148 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

from ngraph.opset1.ops import absolute
from ngraph.opset1.ops import absolute as abs
from ngraph.opset1.ops import acos
from ngraph.opset1.ops import add
from ngraph.opset1.ops import asin
from ngraph.opset3.ops import assign
from ngraph.opset1.ops import atan
from ngraph.opset1.ops import avg_pool
from ngraph.opset1.ops import batch_norm_inference
from ngraph.opset2.ops import batch_to_space
from ngraph.opset1.ops import binary_convolution
from ngraph.opset3.ops import broadcast
from ngraph.opset3.ops import bucketize
from ngraph.opset1.ops import ceiling
from ngraph.opset1.ops import ceiling as ceil
from ngraph.opset1.ops import clamp
from ngraph.opset1.ops import concat
from ngraph.opset1.ops import constant
from ngraph.opset1.ops import convert
from ngraph.opset1.ops import convert_like
from ngraph.opset1.ops import convolution
from ngraph.opset1.ops import convolution_backprop_data
from ngraph.opset1.ops import cos
from ngraph.opset1.ops import cosh
from ngraph.opset1.ops import ctc_greedy_decoder
from ngraph.opset3.ops import cum_sum
from ngraph.opset3.ops import cum_sum as cumsum
from ngraph.opset1.ops import deformable_convolution
from ngraph.opset1.ops import deformable_psroi_pooling
from ngraph.opset1.ops import depth_to_space
from ngraph.opset1.ops import detection_output
from ngraph.opset1.ops import divide
from ngraph.opset1.ops import elu
from ngraph.opset3.ops import embedding_bag_offsets_sum
from ngraph.opset3.ops import embedding_bag_packed_sum
from ngraph.opset3.ops import embedding_segments_sum
from ngraph.opset3.ops import extract_image_patches
from ngraph.opset1.ops import equal
from ngraph.opset1.ops import erf
from ngraph.opset1.ops import exp
from ngraph.opset1.ops import fake_quantize
from ngraph.opset1.ops import floor
from ngraph.opset1.ops import floor_mod
from ngraph.opset1.ops import gather
from ngraph.opset1.ops import gather_tree
from ngraph.opset2.ops import gelu
from ngraph.opset1.ops import greater
from ngraph.opset1.ops import greater_equal
from ngraph.opset1.ops import grn
from ngraph.opset1.ops import group_convolution
from ngraph.opset1.ops import group_convolution_backprop_data
from ngraph.opset3.ops import gru_cell
from ngraph.opset1.ops import hard_sigmoid
from ngraph.opset1.ops import interpolate
from ngraph.opset1.ops import less
from ngraph.opset1.ops import less_equal
from ngraph.opset1.ops import log
from ngraph.opset1.ops import logical_and
from ngraph.opset1.ops import logical_not
from ngraph.opset1.ops import logical_or
from ngraph.opset1.ops import logical_xor
from ngraph.opset1.ops import lrn
from ngraph.opset1.ops import lstm_cell
from ngraph.opset1.ops import lstm_sequence
from ngraph.opset1.ops import matmul
from ngraph.opset1.ops import max_pool
from ngraph.opset1.ops import maximum
from ngraph.opset1.ops import minimum
from ngraph.opset1.ops import mod
from ngraph.opset1.ops import multiply
from ngraph.opset2.ops import mvn
from ngraph.opset1.ops import negative
from ngraph.opset3.ops import non_max_suppression
from ngraph.opset3.ops import non_zero
from ngraph.opset1.ops import normalize_l2
from ngraph.opset1.ops import not_equal
from ngraph.opset1.ops import one_hot
from ngraph.opset1.ops import pad
from ngraph.opset1.ops import parameter
from ngraph.opset1.ops import power
from ngraph.opset1.ops import prelu
from ngraph.opset1.ops import prior_box
from ngraph.opset1.ops import prior_box_clustered
from ngraph.opset1.ops import psroi_pooling
from ngraph.opset1.ops import proposal
from ngraph.opset1.ops import range
from ngraph.opset3.ops import read_value
from ngraph.opset1.ops import reduce_logical_and
from ngraph.opset1.ops import reduce_logical_or
from ngraph.opset1.ops import reduce_max
from ngraph.opset1.ops import reduce_mean
from ngraph.opset1.ops import reduce_min
from ngraph.opset1.ops import reduce_prod
from ngraph.opset1.ops import reduce_sum
from ngraph.opset1.ops import region_yolo
from ngraph.opset2.ops import reorg_yolo
from ngraph.opset1.ops import relu
from ngraph.opset1.ops import reshape
from ngraph.opset1.ops import result
from ngraph.opset3.ops import reverse
from ngraph.opset1.ops import reverse_sequence
from ngraph.opset3.ops import rnn_cell
from ngraph.opset3.ops import roi_align
from ngraph.opset2.ops import roi_pooling
from ngraph.opset3.ops import scatter_elements_update
from ngraph.opset3.ops import scatter_update
from ngraph.opset1.ops import select
from ngraph.opset1.ops import selu
from ngraph.opset3.ops import shape_of
from ngraph.opset3.ops import shuffle_channels
from ngraph.opset1.ops import sigmoid
from ngraph.opset1.ops import sign
from ngraph.opset1.ops import sin
from ngraph.opset1.ops import sinh
from ngraph.opset1.ops import softmax
from ngraph.opset2.ops import space_to_batch
from ngraph.opset1.ops import space_to_depth
from ngraph.opset1.ops import split
from ngraph.opset1.ops import sqrt
from ngraph.opset1.ops import squared_difference
from ngraph.opset1.ops import squeeze
from ngraph.opset1.ops import strided_slice
from ngraph.opset1.ops import subtract
from ngraph.opset1.ops import tan
from ngraph.opset1.ops import tanh
from ngraph.opset1.ops import tensor_iterator
from ngraph.opset1.ops import tile
from ngraph.opset3.ops import topk
from ngraph.opset1.ops import transpose
from ngraph.opset1.ops import unsqueeze
from ngraph.opset1.ops import variadic_split
ngraph/python/src/ngraph/opset3/ops.py (new file, +663)
@@ -0,0 +1,663 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

"""Factory functions for all ngraph ops."""
from typing import Callable, Iterable, List, Optional, Set, Union

import numpy as np
from functools import partial

from ngraph.impl import Node, Shape
from ngraph.impl.op import Constant, GetOutputElement, Parameter
from ngraph.opset_utils import _get_node_factory
from ngraph.utils.decorators import binary_op, nameable_op, unary_op
from ngraph.utils.input_validation import (
    assert_list_of_ints,
    check_valid_attributes,
    is_non_negative_value,
    is_positive_value,
)
from ngraph.utils.node_factory import NodeFactory
from ngraph.utils.tensor_iterator_types import (
    GraphBody,
    TensorIteratorSliceInputDesc,
    TensorIteratorMergedInputDesc,
    TensorIteratorInvariantInputDesc,
    TensorIteratorBodyOutputDesc,
    TensorIteratorConcatOutputDesc,
)
from ngraph.utils.types import (
    NodeInput,
    NumericData,
    NumericType,
    ScalarData,
    TensorShape,
    as_node,
    as_nodes,
    get_dtype,
    get_element_type,
    get_element_type_str,
    make_constant_node,
)

_get_node_factory_opset3 = partial(_get_node_factory, "opset3")

# -------------------------------------------- ops ------------------------------------------------


@nameable_op
def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
    """Return a node which produces the Assign operation.

    :param new_value: Node producing a value to be assigned to a variable.
    :param variable_id: Id of a variable to be updated.
    :param name: Optional name for output node.
    :return: Assign node
    """
    return _get_node_factory_opset3().create(
        "Assign",
        [as_node(new_value)],
        {"variable_id": variable_id}
    )


@nameable_op
def broadcast(
    data: NodeInput,
    target_shape: NodeInput,
    axes_mapping: Optional[NodeInput] = None,
    broadcast_spec: str = "NUMPY",
    name: Optional[str] = None,
) -> Node:
    """Create a node which broadcasts the input node's values along specified axes to a desired shape.

    :param data: The node with input tensor data.
    :param target_shape: The node with a new shape we want to broadcast tensor to.
    :param axes_mapping: The node with axis positions (0-based) in the result
                         that are being broadcast.
    :param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes
                           to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL.
    :param name: Optional new name for output node.
    :return: New node with broadcast shape.
    """
    inputs = as_nodes(data, target_shape)
    if broadcast_spec.upper() == "EXPLICIT":
        inputs.append(as_node(axes_mapping))
    return _get_node_factory_opset3().create(
        "Broadcast", inputs, {"broadcast_spec": broadcast_spec.upper()}
    )


@nameable_op
def bucketize(
    data: Node,
    buckets: NodeInput,
    output_type: str = "i64",
    with_right_bound: bool = True,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces the Bucketize operation.

    :param data: Input data to bucketize.
    :param buckets: 1-D tensor of sorted unique boundaries for buckets.
    :param output_type: Output tensor type, "i64" or "i32", defaults to i64.
    :param with_right_bound: Indicates whether the bucket includes the right or the left
                             edge of the interval. Default: True (includes the right edge).
    :param name: Optional name for output node.
    :return: Bucketize node
    """
    return _get_node_factory_opset3().create(
        "Bucketize",
        [data, as_node(buckets)],
        {"output_type": output_type, "with_right_bound": with_right_bound},
    )


@nameable_op
def cum_sum(
    arg: NodeInput,
    axis: NodeInput,
    exclusive: bool = False,
    reverse: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Construct a cumulative summation operation.

    :param arg: The tensor to be summed.
    :param axis: Zero-dimension tensor specifying the axis position along which the sum will be performed.
    :param exclusive: If set to true, the top element is not included.
    :param reverse: If set to true, the sums are performed in the reverse direction.
    :return: New node performing the operation
    """
    return _get_node_factory_opset3().create(
        "CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse}
    )


@nameable_op
def embedding_bag_offsets_sum(
    emb_table: Node,
    indices: NodeInput,
    offsets: NodeInput,
    default_index: Optional[NodeInput] = None,
    per_sample_weights: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs sums of bags of embeddings without the intermediate embeddings.

    :param emb_table: Tensor containing the embedding lookup table.
    :param indices: Tensor with indices.
    :param offsets: Tensor containing the starting index positions of each bag in indices.
    :param per_sample_weights: Tensor with weights for each sample.
    :param default_index: Scalar containing default index in embedding table to fill empty bags.
    :param name: Optional name for output node.
    :return: The new node which performs EmbeddingBagOffsetsSum
    """
    inputs = [emb_table, as_node(indices), as_node(offsets)]
    if per_sample_weights is not None:
        inputs.append(default_index)
        inputs.append(per_sample_weights)
    elif default_index is not None:
        inputs.append(default_index)

    return _get_node_factory_opset3().create("EmbeddingBagOffsetsSum", inputs, {})


@nameable_op
def embedding_bag_packed_sum(
    emb_table: NodeInput,
    indices: NodeInput,
    per_sample_weights: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return an EmbeddingBagPackedSum node.

    EmbeddingBagPackedSum constructs an output tensor by replacing every index in a given
    input tensor with a row (from the weights matrix) at that index.

    :param emb_table: Tensor containing the embedding lookup table.
    :param indices: Tensor with indices.
    :param per_sample_weights: Weights to be multiplied with embedding table.
    :param name: Optional name for output node.
    :return: EmbeddingBagPackedSum node
    """
    inputs = [as_node(emb_table), as_node(indices)]
    if per_sample_weights is not None:
        inputs.append(as_node(per_sample_weights))

    return _get_node_factory_opset3().create("EmbeddingBagPackedSum", inputs, {})


@nameable_op
def embedding_segments_sum(
    emb_table: Node,
    indices: NodeInput,
    segment_ids: NodeInput,
    num_segments: Optional[NodeInput] = None,
    default_index: Optional[NodeInput] = None,
    per_sample_weights: Optional[NodeInput] = None,
    name: Optional[str] = None,
) -> Node:
    """Return an EmbeddingSegmentsSum node.

    EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given
    input tensor with a row (from the weights matrix) at that index.

    :param emb_table: Tensor containing the embedding lookup table.
    :param indices: Tensor with indices.
    :param segment_ids: Tensor with indices into the output Tensor.
    :param num_segments: Tensor with number of segments.
    :param default_index: Scalar containing default index in embedding table to fill empty bags.
    :param per_sample_weights: Weights to be multiplied with embedding table.
    :param name: Optional name for output node.
    :return: EmbeddingSegmentsSum node
    """
    inputs = [as_node(emb_table), as_node(indices), as_node(segment_ids)]
    if per_sample_weights is not None:
        inputs.append(as_node(num_segments))
        inputs.append(as_node(default_index))
        inputs.append(as_node(per_sample_weights))
    elif default_index is not None:
        inputs.append(as_node(num_segments))
        inputs.append(as_node(default_index))
    elif num_segments is not None:
        inputs.append(as_node(num_segments))

    return _get_node_factory_opset3().create("EmbeddingSegmentsSum", inputs, {})


@nameable_op
def extract_image_patches(
    image: NodeInput,
    sizes: TensorShape,
    strides: List[int],
    rates: TensorShape,
    auto_pad: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces the ExtractImagePatches operation.

    :param image: 4-D input data to extract image patches.
    :param sizes: Patch size in the format of [size_rows, size_cols].
    :param strides: Patch movement stride in the format of [stride_rows, stride_cols].
    :param rates: Element selection rate for creating a patch.
    :param auto_pad: Padding type.
    :param name: Optional name for output node.
    :return: ExtractImagePatches node
    """
    return _get_node_factory_opset3().create(
        "ExtractImagePatches",
        [as_node(image)],
        {"sizes": sizes, "strides": strides, "rates": rates, "auto_pad": auto_pad},
    )


@nameable_op
def gru_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: List[str] = None,
    activations_alpha: List[float] = None,
    activations_beta: List[float] = None,
    clip: float = 0.0,
    linear_before_reset: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Perform GRUCell operation on the tensor from input node.

    GRUCell represents a single GRU Cell that computes the output
    using the formula described in the paper: https://arxiv.org/abs/1406.1078

    Note this class represents only a single *cell* and not a whole *layer*.

    :param X: The input tensor with shape: [batch_size, input_size].
    :param initial_hidden_state: The hidden state tensor at current time step with shape:
                                 [batch_size, hidden_size].
    :param W: The weights for matrix multiplication, gate order: zrh.
              Shape: [3*hidden_size, input_size].
    :param R: The recurrence weights for matrix multiplication.
              Shape: [3*hidden_size, hidden_size].
    :param B: The sum of biases (weight and recurrence).
              For linear_before_reset set True the shape is [4*hidden_size].
              Otherwise the shape is [3*hidden_size].
    :param hidden_size: The number of hidden units for recurrent cell.
                        Specifies hidden state size.
    :param activations: The vector of activation functions used inside recurrent cell.
    :param activations_alpha: The vector of alpha parameters for activation functions in
                              order respective to activation list.
    :param activations_beta: The vector of beta parameters for activation functions in order
                             respective to activation list.
    :param clip: The value defining clipping range [-clip, clip] on input of
                 activation functions.
    :param linear_before_reset: Flag denotes if the layer behaves according to the modification
                                of GRUCell described in the formula in the ONNX documentation.
    :param name: Optional output node name.
    :returns: The new node performing a GRUCell operation on tensor from input node.
    """
    if activations is None:
        activations = ["relu", "sigmoid", "tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []

    input_nodes = as_nodes(X, initial_hidden_state, W, R, B)
    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "linear_before_reset": linear_before_reset,
        "clip": clip,
    }
    return _get_node_factory_opset3().create("GRUCell", input_nodes, attributes)


@nameable_op
def non_max_suppression(
    boxes: NodeInput,
    scores: NodeInput,
    max_output_boxes_per_class: Optional[NodeInput] = None,
    iou_threshold: Optional[NodeInput] = None,
    score_threshold: Optional[NodeInput] = None,
    box_encoding: str = "corner",
    sort_result_descending: bool = True,
    output_type: str = "i64",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs NonMaxSuppression.

    :param boxes: Tensor with box coordinates.
    :param scores: Tensor with box scores.
    :param max_output_boxes_per_class: Tensor specifying maximum number of boxes
                                       to be selected per class.
    :param iou_threshold: Tensor specifying intersection over union threshold.
    :param score_threshold: Tensor specifying minimum score to consider box for the processing.
    :param box_encoding: Format of boxes data encoding.
    :param sort_result_descending: Flag that specifies whether it is necessary to sort selected
                                   boxes across batches or not.
    :param output_type: Output element type.
    :return: The new node which performs NonMaxSuppression
    """
    if max_output_boxes_per_class is None:
        max_output_boxes_per_class = make_constant_node(0, np.int64)
    if iou_threshold is None:
        iou_threshold = make_constant_node(0, np.float32)
    if score_threshold is None:
        score_threshold = make_constant_node(0, np.float32)

    inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
    attributes = {
        "box_encoding": box_encoding,
        "sort_result_descending": sort_result_descending,
        "output_type": output_type,
    }

    return _get_node_factory_opset3().create("NonMaxSuppression", inputs, attributes)
|
||||
|
||||
|
||||
@nameable_op
|
||||
def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = None,) -> Node:
|
||||
"""Return the indices of the elements that are non-zero.
|
||||
|
||||
:param data: Input data.
|
||||
:param output_type: Output tensor type.
|
||||
|
||||
:return: The new node which performs NonZero
|
||||
"""
|
||||
return _get_node_factory_opset3().create(
|
||||
"NonZero",
|
||||
[as_node(data)],
|
||||
{"output_type": output_type}
|
||||
)
|
||||
|
||||
|
||||
@nameable_op
|
||||
def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node:
|
||||
"""Return a node which produces the Assign operation.
|
||||
|
||||
:param init_value: Node producing a value to be returned instead of an unassigned variable.
|
||||
:param variable_id: Id of a variable to be read.
|
||||
:param name: Optional name for output node.
|
||||
:return: ReadValue node
|
||||
"""
|
||||
return _get_node_factory_opset3().create(
|
||||
"ReadValue",
|
||||
[as_node(init_value)],
|
||||
{"variable_id": variable_id}
|
||||
)
|
||||
|
||||
|
||||
@nameable_op
def reverse(data: NodeInput, axis: NodeInput, mode: str, name: Optional[str] = None) -> Node:
    """Perform axis-reverse operation.

    :param data: The input node on which operation will be carried out.
    :param axis: The list of indices of axes to be reversed.
    :param mode: The mode specifies how the second input tensor should be interpreted:
                 as a set of indices or a mask. Range of values: index, mask.
    :param name: The optional name of the output node.
    :return: The new node with reversed axes.
    """
    return _get_node_factory("opset1").create(
        "Reverse", as_nodes(data, axis), {"mode": mode.lower()}
    )


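# Usage sketch (illustrative, not from the commit): flip a 2-D tensor along its second axis
# using index mode.
#
# import numpy as np
# import ngraph as ng
# data = ng.parameter([4, 3], dtype=np.float32)
# flipped = ng.reverse(data, ng.constant(np.array([1], dtype=np.int64)), mode="index")

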
@nameable_op
def rnn_cell(
    X: NodeInput,
    initial_hidden_state: NodeInput,
    W: NodeInput,
    R: NodeInput,
    B: NodeInput,
    hidden_size: int,
    activations: List[str],
    activations_alpha: List[float],
    activations_beta: List[float],
    clip: float = 0.0,
    name: Optional[str] = None,
) -> Node:
    """Perform RNNCell operation on tensor from input node.

    It follows notation and equations defined as in ONNX standard:
    https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN

    Note this operation represents only a single *cell* and not a whole RNN *layer*.

    :param X: The input tensor with shape: [batch_size, input_size].
    :param initial_hidden_state: The hidden state tensor at current time step with shape:
                                 [batch_size, hidden_size].
    :param W: The weight tensor with shape: [hidden_size, input_size].
    :param R: The recurrence weight tensor with shape: [hidden_size, hidden_size].
    :param B: The bias tensor for input gate with shape: [2*hidden_size].
    :param hidden_size: The number of hidden units for recurrent cell.
                        Specifies hidden state size.
    :param activations: The vector of activation functions used inside recurrent cell.
    :param activations_alpha: The vector of alpha parameters for activation functions in
                              order respective to activation list.
    :param activations_beta: The vector of beta parameters for activation functions in order
                             respective to activation list.
    :param clip: The value defining clipping range [-clip, clip] on input of
                 activation functions.
    :param name: Optional output node name.
    :returns: The new node performing a RNNCell operation on tensor from input node.
    """
    if activations is None:
        activations = ["sigmoid", "tanh"]
    if activations_alpha is None:
        activations_alpha = []
    if activations_beta is None:
        activations_beta = []

    input_nodes = as_nodes(X, initial_hidden_state, W, R, B)
    attributes = {
        "hidden_size": hidden_size,
        "activations": activations,
        "activations_alpha": activations_alpha,
        "activations_beta": activations_beta,
        "clip": clip,
    }
    return _get_node_factory_opset3().create("RNNCell", input_nodes, attributes)


@nameable_op
def roi_align(
    data: NodeInput,
    rois: NodeInput,
    batch_indices: NodeInput,
    pooled_h: int,
    pooled_w: int,
    sampling_ratio: int,
    spatial_scale: float,
    mode: str,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs ROIAlign.

    :param data: Input data.
    :param rois: RoIs (Regions of Interest) to pool over.
    :param batch_indices: Tensor with each element denoting the index of
                          the corresponding image in the batch.
    :param pooled_h: Height of the ROI output feature map.
    :param pooled_w: Width of the ROI output feature map.
    :param sampling_ratio: Number of bins over height and width to use to calculate
                           each output feature map element.
    :param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates.
    :param mode: Method to perform pooling to produce output feature map elements.

    :return: The new node which performs ROIAlign
    """
    inputs = as_nodes(data, rois, batch_indices)
    attributes = {
        "pooled_h": pooled_h,
        "pooled_w": pooled_w,
        "sampling_ratio": sampling_ratio,
        "spatial_scale": spatial_scale,
        "mode": mode,
    }
    return _get_node_factory_opset3().create("ROIAlign", inputs, attributes)


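# Usage sketch (illustrative, not from the commit; shapes and the "avg" mode value are
# assumptions):
#
# import numpy as np
# import ngraph as ng
# feature_map = ng.parameter([1, 3, 32, 32], dtype=np.float32)
# rois = ng.parameter([5, 4], dtype=np.float32)
# batch_indices = ng.parameter([5], dtype=np.int32)
# pooled = ng.roi_align(feature_map, rois, batch_indices,
#                       pooled_h=2, pooled_w=2, sampling_ratio=2, spatial_scale=1.0, mode="avg")

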
@nameable_op
def scatter_elements_update(
    data: NodeInput,
    indices: NodeInput,
    updates: NodeInput,
    axis: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces a ScatterElementsUpdate operation.

    ScatterElementsUpdate creates a copy of the first input tensor with updated elements
    specified with second and third input tensors.

    For each entry in `updates`, the target index in `data` is obtained by combining
    the corresponding entry in `indices` with the index of the entry itself: the
    index-value for dimension equal to `axis` is obtained from the value of the
    corresponding entry in `indices` and the index-value for dimension not equal
    to `axis` is obtained from the index of the entry itself.

    :param data: The input tensor to be updated.
    :param indices: The tensor with indices that specify where updates are applied.
    :param updates: The tensor with update values.
    :param axis: The axis for scatter.
    :return: ScatterElementsUpdate node
    """
    return _get_node_factory_opset3().create(
        "ScatterElementsUpdate", as_nodes(data, indices, updates, axis)
    )


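# Usage sketch (illustrative, not from the commit): per-element scatter along axis 0, where
# indices and updates share the same shape.
#
# import numpy as np
# import ngraph as ng
# data = ng.parameter([3, 3], dtype=np.float32)
# indices = ng.parameter([2, 3], dtype=np.int32)
# updates = ng.parameter([2, 3], dtype=np.float32)
# scattered = ng.scatter_elements_update(data, indices, updates, ng.constant(0, dtype=np.int64))

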
@nameable_op
def scatter_update(
    data: Node, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None
) -> Node:
    """Return a node which produces a ScatterUpdate operation.

    ScatterUpdate sets new values to slices from data addressed by indices.

    :param data: The input tensor to be updated.
    :param indices: The tensor with indices that specify which slices of data are updated.
    :param updates: The tensor with update values.
    :param axis: The axis at which elements will be updated.
    :return: ScatterUpdate node
    """
    return _get_node_factory_opset3().create(
        "ScatterUpdate",
        as_nodes(data, indices, updates, axis)
    )


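# Usage sketch (illustrative, not from the commit): overwrite two whole rows (axis 0) of a
# 3x3 tensor.
#
# import numpy as np
# import ngraph as ng
# data = ng.parameter([3, 3], dtype=np.float32)
# row_ids = ng.constant(np.array([0, 2], dtype=np.int64))
# updates = ng.parameter([2, 3], dtype=np.float32)
# updated = ng.scatter_update(data, row_ids, updates, ng.constant(0, dtype=np.int64))

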
@nameable_op
def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node:
    """Return a node which produces a tensor containing the shape of its input data.

    :param data: The tensor containing the input data.
    :param output_type: Output element type.
    :return: ShapeOf node
    """
    return _get_node_factory_opset3().create(
        "ShapeOf",
        [as_node(data)],
        {"output_type": output_type}
    )


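# Usage sketch (illustrative, not from the commit):
#
# import numpy as np
# import ngraph as ng
# data = ng.parameter([2, 3, 4], dtype=np.float32)
# shape = ng.shape_of(data, output_type="i32")  # 1-D tensor holding [2, 3, 4]

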
@nameable_op
def shuffle_channels(data: Node, axis: int, groups: int, name: Optional[str] = None) -> Node:
    """Perform permutation on data in the channel dimension of the input tensor.

    The operation is equivalent to the following transformation of the input tensor
    :code:`data` of shape [N, C, H, W]:

    :code:`data_reshaped` = reshape(:code:`data`, [N, group, C / group, H * W])

    :code:`data_transposed` = transpose(:code:`data_reshaped`, [0, 2, 1, 3])

    :code:`output` = reshape(:code:`data_transposed`, [N, C, H, W])

    For example:

    .. code-block:: python

        Inputs: tensor of shape [1, 6, 2, 2]

        data = [[[[ 0.,  1.], [ 2.,  3.]],
                 [[ 4.,  5.], [ 6.,  7.]],
                 [[ 8.,  9.], [10., 11.]],
                 [[12., 13.], [14., 15.]],
                 [[16., 17.], [18., 19.]],
                 [[20., 21.], [22., 23.]]]]

        axis = 1
        groups = 3

        Output: tensor of shape [1, 6, 2, 2]

        output = [[[[ 0.,  1.], [ 2.,  3.]],
                   [[ 8.,  9.], [10., 11.]],
                   [[16., 17.], [18., 19.]],
                   [[ 4.,  5.], [ 6.,  7.]],
                   [[12., 13.], [14., 15.]],
                   [[20., 21.], [22., 23.]]]]

    :param data: The node with input tensor.
    :param axis: Channel dimension index in the data tensor.
                 A negative value means that the index should be calculated
                 from the back of the input data shape.
    :param groups: The channel dimension specified by the axis parameter
                   should be split into this number of groups.
    :param name: Optional output node name.
    :return: The new node performing a permutation on data in the channel dimension
             of the input tensor.
    """
    return _get_node_factory_opset3().create(
        "ShuffleChannels", [as_node(data)], {"axis": axis, "groups": groups}
    )


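# Usage sketch (illustrative, not from the commit), matching the docstring example above:
#
# import numpy as np
# import ngraph as ng
# data = ng.parameter([1, 6, 2, 2], dtype=np.float32)
# shuffled = ng.shuffle_channels(data, axis=1, groups=3)

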
@nameable_op
def topk(
    data: NodeInput,
    k: NodeInput,
    axis: int,
    mode: str,
    sort: str,
    index_element_type: str = "i32",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs TopK.

    :param data: Input data.
    :param k: K.
    :param axis: TopK Axis.
    :param mode: Compute TopK largest ('max') or smallest ('min')
    :param sort: Order of output elements (sort by: 'none', 'index' or 'value')
    :param index_element_type: Type of output tensor with indices.
    :return: The new node which performs TopK (both indices and values)
    """
    return _get_node_factory_opset3().create(
        "TopK",
        as_nodes(data, k),
        {"axis": axis, "mode": mode, "sort": sort, "index_element_type": index_element_type},
    )

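# Usage sketch (illustrative, not from the commit): top-3 values per row and their indices.
#
# import numpy as np
# import ngraph as ng
# scores = ng.parameter([2, 10], dtype=np.float32)
# top3 = ng.topk(scores, ng.constant(3, dtype=np.int64), axis=1, mode="max", sort="value")
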
148  ngraph/python/src/ngraph/opset4/__init__.py  Normal file
@ -0,0 +1,148 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************


from ngraph.opset1.ops import absolute
from ngraph.opset1.ops import absolute as abs
from ngraph.opset1.ops import acos
from ngraph.opset1.ops import add
from ngraph.opset1.ops import asin
from ngraph.opset3.ops import assign
from ngraph.opset1.ops import atan
from ngraph.opset1.ops import avg_pool
from ngraph.opset1.ops import batch_norm_inference
from ngraph.opset2.ops import batch_to_space
from ngraph.opset1.ops import binary_convolution
from ngraph.opset3.ops import broadcast
from ngraph.opset3.ops import bucketize
from ngraph.opset1.ops import ceiling
from ngraph.opset1.ops import ceiling as ceil
from ngraph.opset1.ops import clamp
from ngraph.opset1.ops import concat
from ngraph.opset1.ops import constant
from ngraph.opset1.ops import convert
from ngraph.opset1.ops import convert_like
from ngraph.opset1.ops import convolution
from ngraph.opset1.ops import convolution_backprop_data
from ngraph.opset1.ops import cos
from ngraph.opset1.ops import cosh
from ngraph.opset1.ops import ctc_greedy_decoder
from ngraph.opset3.ops import cum_sum
from ngraph.opset3.ops import cum_sum as cumsum
from ngraph.opset1.ops import deformable_convolution
from ngraph.opset1.ops import deformable_psroi_pooling
from ngraph.opset1.ops import depth_to_space
from ngraph.opset1.ops import detection_output
from ngraph.opset1.ops import divide
from ngraph.opset1.ops import elu
from ngraph.opset3.ops import embedding_bag_offsets_sum
from ngraph.opset3.ops import embedding_bag_packed_sum
from ngraph.opset3.ops import embedding_segments_sum
from ngraph.opset3.ops import extract_image_patches
from ngraph.opset1.ops import equal
from ngraph.opset1.ops import erf
from ngraph.opset1.ops import exp
from ngraph.opset1.ops import fake_quantize
from ngraph.opset1.ops import floor
from ngraph.opset1.ops import floor_mod
from ngraph.opset1.ops import gather
from ngraph.opset1.ops import gather_tree
from ngraph.opset2.ops import gelu
from ngraph.opset1.ops import greater
from ngraph.opset1.ops import greater_equal
from ngraph.opset1.ops import grn
from ngraph.opset1.ops import group_convolution
from ngraph.opset1.ops import group_convolution_backprop_data
from ngraph.opset3.ops import gru_cell
from ngraph.opset1.ops import hard_sigmoid
from ngraph.opset1.ops import interpolate
from ngraph.opset1.ops import less
from ngraph.opset1.ops import less_equal
from ngraph.opset1.ops import log
from ngraph.opset1.ops import logical_and
from ngraph.opset1.ops import logical_not
from ngraph.opset1.ops import logical_or
from ngraph.opset1.ops import logical_xor
from ngraph.opset1.ops import lrn
from ngraph.opset1.ops import lstm_cell
from ngraph.opset1.ops import lstm_sequence
from ngraph.opset1.ops import matmul
from ngraph.opset1.ops import max_pool
from ngraph.opset1.ops import maximum
from ngraph.opset1.ops import minimum
from ngraph.opset1.ops import mod
from ngraph.opset1.ops import multiply
from ngraph.opset2.ops import mvn
from ngraph.opset1.ops import negative
from ngraph.opset4.ops import non_max_suppression
from ngraph.opset3.ops import non_zero
from ngraph.opset1.ops import normalize_l2
from ngraph.opset1.ops import not_equal
from ngraph.opset1.ops import one_hot
from ngraph.opset1.ops import pad
from ngraph.opset1.ops import parameter
from ngraph.opset1.ops import power
from ngraph.opset1.ops import prelu
from ngraph.opset1.ops import prior_box
from ngraph.opset1.ops import prior_box_clustered
from ngraph.opset1.ops import psroi_pooling
from ngraph.opset1.ops import proposal
from ngraph.opset1.ops import range
from ngraph.opset3.ops import read_value
from ngraph.opset1.ops import reduce_logical_and
from ngraph.opset1.ops import reduce_logical_or
from ngraph.opset1.ops import reduce_max
from ngraph.opset1.ops import reduce_mean
from ngraph.opset1.ops import reduce_min
from ngraph.opset1.ops import reduce_prod
from ngraph.opset1.ops import reduce_sum
from ngraph.opset1.ops import region_yolo
from ngraph.opset2.ops import reorg_yolo
from ngraph.opset1.ops import relu
from ngraph.opset1.ops import reshape
from ngraph.opset1.ops import result
from ngraph.opset3.ops import reverse
from ngraph.opset1.ops import reverse_sequence
from ngraph.opset3.ops import rnn_cell
from ngraph.opset3.ops import roi_align
from ngraph.opset2.ops import roi_pooling
from ngraph.opset3.ops import scatter_elements_update
from ngraph.opset3.ops import scatter_update
from ngraph.opset1.ops import select
from ngraph.opset1.ops import selu
from ngraph.opset3.ops import shape_of
from ngraph.opset3.ops import shuffle_channels
from ngraph.opset1.ops import sigmoid
from ngraph.opset1.ops import sign
from ngraph.opset1.ops import sin
from ngraph.opset1.ops import sinh
from ngraph.opset1.ops import softmax
from ngraph.opset2.ops import space_to_batch
from ngraph.opset1.ops import space_to_depth
from ngraph.opset1.ops import split
from ngraph.opset1.ops import sqrt
from ngraph.opset1.ops import squared_difference
from ngraph.opset1.ops import squeeze
from ngraph.opset1.ops import strided_slice
from ngraph.opset1.ops import subtract
from ngraph.opset1.ops import tan
from ngraph.opset1.ops import tanh
from ngraph.opset1.ops import tensor_iterator
from ngraph.opset1.ops import tile
from ngraph.opset3.ops import topk
from ngraph.opset1.ops import transpose
from ngraph.opset1.ops import unsqueeze
from ngraph.opset1.ops import variadic_split

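# Usage sketch (illustrative, not from the commit): with these modules a graph can be built
# against a pinned opset namespace instead of the top-level ngraph API.
#
# import numpy as np
# from ngraph import opset4 as ops
# x = ops.parameter([1, 3, 224, 224], dtype=np.float32)
# y = ops.relu(x)
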
101  ngraph/python/src/ngraph/opset4/ops.py  Normal file
@ -0,0 +1,101 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

"""Factory functions for all ngraph ops."""
from typing import Callable, Iterable, List, Optional, Set, Union

import numpy as np
from functools import partial

from ngraph.impl import Node, Shape
from ngraph.impl.op import Constant, GetOutputElement, Parameter
from ngraph.opset_utils import _get_node_factory
from ngraph.utils.decorators import binary_op, nameable_op, unary_op
from ngraph.utils.input_validation import (
    assert_list_of_ints,
    check_valid_attributes,
    is_non_negative_value,
    is_positive_value,
)
from ngraph.utils.node_factory import NodeFactory
from ngraph.utils.tensor_iterator_types import (
    GraphBody,
    TensorIteratorSliceInputDesc,
    TensorIteratorMergedInputDesc,
    TensorIteratorInvariantInputDesc,
    TensorIteratorBodyOutputDesc,
    TensorIteratorConcatOutputDesc,
)
from ngraph.utils.types import (
    NodeInput,
    NumericData,
    NumericType,
    ScalarData,
    TensorShape,
    as_node,
    as_nodes,
    get_dtype,
    get_element_type,
    get_element_type_str,
    make_constant_node,
)

_get_node_factory_opset4 = partial(_get_node_factory, "opset4")

# -------------------------------------------- ops ------------------------------------------------


@nameable_op
def non_max_suppression(
    boxes: NodeInput,
    scores: NodeInput,
    max_output_boxes_per_class: Optional[NodeInput] = None,
    iou_threshold: Optional[NodeInput] = None,
    score_threshold: Optional[NodeInput] = None,
    box_encoding: str = "corner",
    sort_result_descending: bool = True,
    output_type: str = "i64",
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs NonMaxSuppression.

    :param boxes: Tensor with box coordinates.
    :param scores: Tensor with box scores.
    :param max_output_boxes_per_class: Tensor specifying the maximum number of boxes
                                       to be selected per class.
    :param iou_threshold: Tensor specifying the intersection-over-union threshold.
    :param score_threshold: Tensor specifying the minimum score to consider a box for processing.
    :param box_encoding: Format of boxes data encoding.
    :param sort_result_descending: Flag that specifies whether selected boxes should be sorted
                                   across batches or not.
    :param output_type: Output element type.
    :return: The new node which performs NonMaxSuppression
    """
    if max_output_boxes_per_class is None:
        max_output_boxes_per_class = make_constant_node(0, np.int64)
    if iou_threshold is None:
        iou_threshold = make_constant_node(0, np.float32)
    if score_threshold is None:
        score_threshold = make_constant_node(0, np.float32)

    inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
    attributes = {
        "box_encoding": box_encoding,
        "sort_result_descending": sort_result_descending,
        "output_type": output_type,
    }

    return _get_node_factory_opset4().create("NonMaxSuppression", inputs, attributes)

35  ngraph/python/src/ngraph/opset_utils.py  Normal file
@ -0,0 +1,35 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

from typing import Optional
import numpy as np

from ngraph.impl import Node
from ngraph.impl.op import GetOutputElement
from ngraph.utils.decorators import nameable_op
from ngraph.utils.node_factory import NodeFactory
from ngraph.utils.types import (
    as_node,
    NodeInput,
)


def _get_node_factory(opset_version: Optional[str] = None) -> NodeFactory:
    """Return NodeFactory configured to create operators from specified opset version."""
    if opset_version:
        return NodeFactory(opset_version)
    else:
        return NodeFactory()

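# Usage sketch (illustrative, not from the commit): each opsetN/ops.py binds its opset name
# once with functools.partial and creates nodes through the resulting factory, e.g.
# factory.create("OpName", inputs, attributes). The alias below is hypothetical.
#
# from functools import partial
# _get_node_factory_opset3_alias = partial(_get_node_factory, "opset3")
# factory = _get_node_factory_opset3_alias()  # NodeFactory bound to "opset3"
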
@ -268,8 +268,7 @@ def test_backend_config():

 def test_result():
     node = [[11, 10], [1, 8], [3, 4]]

-    result = run_op_node([node], ng.ops.result)
+    result = run_op_node([node], ng.result)
     assert np.allclose(result, node)


@ -813,7 +813,7 @@ def test_tensor_iterator():
     zero = ng.constant(0, dtype=np.int32)
     one = ng.constant(1, dtype=np.int32)
     initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32)
-    iter_cnt = ng.ops.range(zero, np.int32(16), np.int32(1))
+    iter_cnt = ng.range(zero, np.int32(16), np.int32(1))
     ti_inputs = [iter_cnt, data, initial_cma, one]

     graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist])

@ -96,7 +96,7 @@ def test_lrn_factory():
         ],
         dtype=np.float32,
     )
-    result = run_op_node([x, axis], ng.ops.lrn, alpha, beta, bias, nsize)
+    result = run_op_node([x, axis], ng.lrn, alpha, beta, bias, nsize)

     assert np.allclose(result, excepted)

@ -110,5 +110,6 @@ def test_batch_norm_inference():
     epsilon = 9.99e-06
     excepted = [[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]]

-    result = run_op_node([data, gamma, beta, mean, variance], ng.ops.batch_norm_inference, epsilon)
+    result = run_op_node([data, gamma, beta, mean, variance], ng.batch_norm_inference, epsilon)

     assert np.allclose(result, excepted)

@ -119,7 +119,7 @@ def test_softmax():
     axis = 0
     input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)

-    result = run_op_node([input_tensor], ng.ops.softmax, axis)
+    result = run_op_node([input_tensor], ng.softmax, axis)

     expected = [[0.00426978, 0.01160646, 0.03154963], [0.08576079, 0.23312202, 0.6336913]]


@ -49,5 +49,5 @@ def test_range():
     stop = 35
     step = 5

-    result = run_op_node([start, stop, step], ng.ops.range)
+    result = run_op_node([start, stop, step], ng.range)
     assert np.allclose(result, [5, 10, 15, 20, 25, 30])