Remove opset8 from the compatibility ngraph Python API (#8452)

parent 331372e7ff
commit ba8f9d613e
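After this change the compatibility `ngraph` namespace re-exports its operator factories from `ngraph.opset7` instead of `ngraph.opset8`, so the opset8-only helpers (adaptive pooling, MulticlassNms, MatrixNms, RandomUniform, and the opset8 Gather/MaxPool/DeformableConvolution variants) are no longer reachable through it. A minimal sketch of user code that keeps working after the change (assumes the compatibility `ngraph` package is installed; the shape is illustrative):

    import numpy as np
    import ngraph as ng

    # ng.relu is now re-exported from ngraph.opset7 rather than ngraph.opset8
    param = ng.parameter([1, 3, 224, 224], name="data", dtype=np.float32)
    node = ng.relu(param)
    assert node.get_type_name() == "Relu"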
@@ -33,7 +33,6 @@ packages = [
     "ngraph.opset5",
     "ngraph.opset6",
     "ngraph.opset7",
-    "ngraph.opset8",
     "ngraph.utils",
     "ngraph.impl",
     "ngraph.impl.op",
@ -27,164 +27,159 @@ from ngraph.frontend import Place
|
||||
from ngraph.helpers import function_from_cnn
|
||||
from ngraph.helpers import function_to_cnn
|
||||
from ngraph.helpers import partial_shape_from_data
|
||||
from ngraph.opset8 import absolute
|
||||
from ngraph.opset8 import absolute as abs
|
||||
from ngraph.opset8 import acos
|
||||
from ngraph.opset8 import acosh
|
||||
from ngraph.opset8 import adaptive_avg_pool
|
||||
from ngraph.opset8 import adaptive_max_pool
|
||||
from ngraph.opset8 import add
|
||||
from ngraph.opset8 import asin
|
||||
from ngraph.opset8 import asinh
|
||||
from ngraph.opset8 import assign
|
||||
from ngraph.opset8 import atan
|
||||
from ngraph.opset8 import atanh
|
||||
from ngraph.opset8 import avg_pool
|
||||
from ngraph.opset8 import batch_norm_inference
|
||||
from ngraph.opset8 import batch_to_space
|
||||
from ngraph.opset8 import binary_convolution
|
||||
from ngraph.opset8 import broadcast
|
||||
from ngraph.opset8 import bucketize
|
||||
from ngraph.opset8 import ceiling
|
||||
from ngraph.opset8 import ceiling as ceil
|
||||
from ngraph.opset8 import clamp
|
||||
from ngraph.opset8 import concat
|
||||
from ngraph.opset8 import constant
|
||||
from ngraph.opset8 import convert
|
||||
from ngraph.opset8 import convert_like
|
||||
from ngraph.opset8 import convolution
|
||||
from ngraph.opset8 import convolution_backprop_data
|
||||
from ngraph.opset8 import cos
|
||||
from ngraph.opset8 import cosh
|
||||
from ngraph.opset8 import ctc_greedy_decoder
|
||||
from ngraph.opset8 import ctc_greedy_decoder_seq_len
|
||||
from ngraph.opset8 import ctc_loss
|
||||
from ngraph.opset8 import cum_sum
|
||||
from ngraph.opset8 import cum_sum as cumsum
|
||||
from ngraph.opset8 import deformable_convolution
|
||||
from ngraph.opset8 import deformable_psroi_pooling
|
||||
from ngraph.opset8 import depth_to_space
|
||||
from ngraph.opset8 import detection_output
|
||||
from ngraph.opset8 import dft
|
||||
from ngraph.opset8 import divide
|
||||
from ngraph.opset8 import einsum
|
||||
from ngraph.opset8 import elu
|
||||
from ngraph.opset8 import embedding_bag_offsets_sum
|
||||
from ngraph.opset8 import embedding_bag_packed_sum
|
||||
from ngraph.opset8 import embedding_segments_sum
|
||||
from ngraph.opset8 import extract_image_patches
|
||||
from ngraph.opset8 import equal
|
||||
from ngraph.opset8 import erf
|
||||
from ngraph.opset8 import exp
|
||||
from ngraph.opset8 import fake_quantize
|
||||
from ngraph.opset8 import floor
|
||||
from ngraph.opset8 import floor_mod
|
||||
from ngraph.opset8 import gather
|
||||
from ngraph.opset8 import gather_elements
|
||||
from ngraph.opset8 import gather_nd
|
||||
from ngraph.opset8 import gather_tree
|
||||
from ngraph.opset8 import gelu
|
||||
from ngraph.opset8 import greater
|
||||
from ngraph.opset8 import greater_equal
|
||||
from ngraph.opset8 import grn
|
||||
from ngraph.opset8 import group_convolution
|
||||
from ngraph.opset8 import group_convolution_backprop_data
|
||||
from ngraph.opset8 import gru_cell
|
||||
from ngraph.opset8 import gru_sequence
|
||||
from ngraph.opset8 import hard_sigmoid
|
||||
from ngraph.opset8 import hsigmoid
|
||||
from ngraph.opset8 import hswish
|
||||
from ngraph.opset8 import idft
|
||||
from ngraph.opset8 import interpolate
|
||||
from ngraph.opset8 import less
|
||||
from ngraph.opset8 import less_equal
|
||||
from ngraph.opset8 import log
|
||||
from ngraph.opset8 import logical_and
|
||||
from ngraph.opset8 import logical_not
|
||||
from ngraph.opset8 import logical_or
|
||||
from ngraph.opset8 import logical_xor
|
||||
from ngraph.opset8 import log_softmax
|
||||
from ngraph.opset8 import loop
|
||||
from ngraph.opset8 import lrn
|
||||
from ngraph.opset8 import lstm_cell
|
||||
from ngraph.opset8 import lstm_sequence
|
||||
from ngraph.opset8 import matmul
|
||||
from ngraph.opset8 import matrix_nms
|
||||
from ngraph.opset8 import max_pool
|
||||
from ngraph.opset8 import maximum
|
||||
from ngraph.opset8 import minimum
|
||||
from ngraph.opset8 import mish
|
||||
from ngraph.opset8 import mod
|
||||
from ngraph.opset8 import multiclass_nms
|
||||
from ngraph.opset8 import multiply
|
||||
from ngraph.opset8 import mvn
|
||||
from ngraph.opset8 import negative
|
||||
from ngraph.opset8 import non_max_suppression
|
||||
from ngraph.opset8 import non_zero
|
||||
from ngraph.opset8 import normalize_l2
|
||||
from ngraph.opset8 import not_equal
|
||||
from ngraph.opset8 import one_hot
|
||||
from ngraph.opset8 import pad
|
||||
from ngraph.opset8 import parameter
|
||||
from ngraph.opset8 import power
|
||||
from ngraph.opset8 import prelu
|
||||
from ngraph.opset8 import prior_box
|
||||
from ngraph.opset8 import prior_box_clustered
|
||||
from ngraph.opset8 import psroi_pooling
|
||||
from ngraph.opset8 import proposal
|
||||
from ngraph.opset8 import random_uniform
|
||||
from ngraph.opset8 import range
|
||||
from ngraph.opset8 import read_value
|
||||
from ngraph.opset8 import reduce_l1
|
||||
from ngraph.opset8 import reduce_l2
|
||||
from ngraph.opset8 import reduce_logical_and
|
||||
from ngraph.opset8 import reduce_logical_or
|
||||
from ngraph.opset8 import reduce_max
|
||||
from ngraph.opset8 import reduce_mean
|
||||
from ngraph.opset8 import reduce_min
|
||||
from ngraph.opset8 import reduce_prod
|
||||
from ngraph.opset8 import reduce_sum
|
||||
from ngraph.opset8 import region_yolo
|
||||
from ngraph.opset8 import reorg_yolo
|
||||
from ngraph.opset8 import relu
|
||||
from ngraph.opset8 import reshape
|
||||
from ngraph.opset8 import result
|
||||
from ngraph.opset8 import reverse_sequence
|
||||
from ngraph.opset8 import rnn_cell
|
||||
from ngraph.opset8 import rnn_sequence
|
||||
from ngraph.opset8 import roi_align
|
||||
from ngraph.opset8 import roi_pooling
|
||||
from ngraph.opset8 import roll
|
||||
from ngraph.opset8 import round
|
||||
from ngraph.opset8 import scatter_elements_update
|
||||
from ngraph.opset8 import scatter_update
|
||||
from ngraph.opset8 import select
|
||||
from ngraph.opset8 import selu
|
||||
from ngraph.opset8 import shape_of
|
||||
from ngraph.opset8 import shuffle_channels
|
||||
from ngraph.opset8 import sigmoid
|
||||
from ngraph.opset8 import sign
|
||||
from ngraph.opset8 import sin
|
||||
from ngraph.opset8 import sinh
|
||||
from ngraph.opset8 import softmax
|
||||
from ngraph.opset8 import softplus
|
||||
from ngraph.opset8 import space_to_batch
|
||||
from ngraph.opset8 import space_to_depth
|
||||
from ngraph.opset8 import split
|
||||
from ngraph.opset8 import sqrt
|
||||
from ngraph.opset8 import squared_difference
|
||||
from ngraph.opset8 import squeeze
|
||||
from ngraph.opset8 import strided_slice
|
||||
from ngraph.opset8 import subtract
|
||||
from ngraph.opset8 import swish
|
||||
from ngraph.opset8 import tan
|
||||
from ngraph.opset8 import tanh
|
||||
from ngraph.opset8 import tensor_iterator
|
||||
from ngraph.opset8 import tile
|
||||
from ngraph.opset8 import topk
|
||||
from ngraph.opset8 import transpose
|
||||
from ngraph.opset8 import unsqueeze
|
||||
from ngraph.opset8 import variadic_split
|
||||
from ngraph.opset7 import absolute
|
||||
from ngraph.opset7 import absolute as abs
|
||||
from ngraph.opset7 import acos
|
||||
from ngraph.opset7 import acosh
|
||||
from ngraph.opset7 import add
|
||||
from ngraph.opset7 import asin
|
||||
from ngraph.opset7 import asinh
|
||||
from ngraph.opset7 import assign
|
||||
from ngraph.opset7 import atan
|
||||
from ngraph.opset7 import atanh
|
||||
from ngraph.opset7 import avg_pool
|
||||
from ngraph.opset7 import batch_norm_inference
|
||||
from ngraph.opset7 import batch_to_space
|
||||
from ngraph.opset7 import binary_convolution
|
||||
from ngraph.opset7 import broadcast
|
||||
from ngraph.opset7 import bucketize
|
||||
from ngraph.opset7 import ceiling
|
||||
from ngraph.opset7 import ceiling as ceil
|
||||
from ngraph.opset7 import clamp
|
||||
from ngraph.opset7 import concat
|
||||
from ngraph.opset7 import constant
|
||||
from ngraph.opset7 import convert
|
||||
from ngraph.opset7 import convert_like
|
||||
from ngraph.opset7 import convolution
|
||||
from ngraph.opset7 import convolution_backprop_data
|
||||
from ngraph.opset7 import cos
|
||||
from ngraph.opset7 import cosh
|
||||
from ngraph.opset7 import ctc_greedy_decoder
|
||||
from ngraph.opset7 import ctc_greedy_decoder_seq_len
|
||||
from ngraph.opset7 import ctc_loss
|
||||
from ngraph.opset7 import cum_sum
|
||||
from ngraph.opset7 import cum_sum as cumsum
|
||||
from ngraph.opset7 import deformable_convolution
|
||||
from ngraph.opset7 import deformable_psroi_pooling
|
||||
from ngraph.opset7 import depth_to_space
|
||||
from ngraph.opset7 import detection_output
|
||||
from ngraph.opset7 import dft
|
||||
from ngraph.opset7 import divide
|
||||
from ngraph.opset7 import einsum
|
||||
from ngraph.opset7 import elu
|
||||
from ngraph.opset7 import embedding_bag_offsets_sum
|
||||
from ngraph.opset7 import embedding_bag_packed_sum
|
||||
from ngraph.opset7 import embedding_segments_sum
|
||||
from ngraph.opset7 import extract_image_patches
|
||||
from ngraph.opset7 import equal
|
||||
from ngraph.opset7 import erf
|
||||
from ngraph.opset7 import exp
|
||||
from ngraph.opset7 import fake_quantize
|
||||
from ngraph.opset7 import floor
|
||||
from ngraph.opset7 import floor_mod
|
||||
from ngraph.opset7 import gather
|
||||
from ngraph.opset7 import gather_elements
|
||||
from ngraph.opset7 import gather_nd
|
||||
from ngraph.opset7 import gather_tree
|
||||
from ngraph.opset7 import gelu
|
||||
from ngraph.opset7 import greater
|
||||
from ngraph.opset7 import greater_equal
|
||||
from ngraph.opset7 import grn
|
||||
from ngraph.opset7 import group_convolution
|
||||
from ngraph.opset7 import group_convolution_backprop_data
|
||||
from ngraph.opset7 import gru_cell
|
||||
from ngraph.opset7 import gru_sequence
|
||||
from ngraph.opset7 import hard_sigmoid
|
||||
from ngraph.opset7 import hsigmoid
|
||||
from ngraph.opset7 import hswish
|
||||
from ngraph.opset7 import idft
|
||||
from ngraph.opset7 import interpolate
|
||||
from ngraph.opset7 import less
|
||||
from ngraph.opset7 import less_equal
|
||||
from ngraph.opset7 import log
|
||||
from ngraph.opset7 import logical_and
|
||||
from ngraph.opset7 import logical_not
|
||||
from ngraph.opset7 import logical_or
|
||||
from ngraph.opset7 import logical_xor
|
||||
from ngraph.opset7 import log_softmax
|
||||
from ngraph.opset7 import loop
|
||||
from ngraph.opset7 import lrn
|
||||
from ngraph.opset7 import lstm_cell
|
||||
from ngraph.opset7 import lstm_sequence
|
||||
from ngraph.opset7 import matmul
|
||||
from ngraph.opset7 import max_pool
|
||||
from ngraph.opset7 import maximum
|
||||
from ngraph.opset7 import minimum
|
||||
from ngraph.opset7 import mish
|
||||
from ngraph.opset7 import mod
|
||||
from ngraph.opset7 import multiply
|
||||
from ngraph.opset7 import mvn
|
||||
from ngraph.opset7 import negative
|
||||
from ngraph.opset7 import non_max_suppression
|
||||
from ngraph.opset7 import non_zero
|
||||
from ngraph.opset7 import normalize_l2
|
||||
from ngraph.opset7 import not_equal
|
||||
from ngraph.opset7 import one_hot
|
||||
from ngraph.opset7 import pad
|
||||
from ngraph.opset7 import parameter
|
||||
from ngraph.opset7 import power
|
||||
from ngraph.opset7 import prelu
|
||||
from ngraph.opset7 import prior_box
|
||||
from ngraph.opset7 import prior_box_clustered
|
||||
from ngraph.opset7 import psroi_pooling
|
||||
from ngraph.opset7 import proposal
|
||||
from ngraph.opset7 import range
|
||||
from ngraph.opset7 import read_value
|
||||
from ngraph.opset7 import reduce_l1
|
||||
from ngraph.opset7 import reduce_l2
|
||||
from ngraph.opset7 import reduce_logical_and
|
||||
from ngraph.opset7 import reduce_logical_or
|
||||
from ngraph.opset7 import reduce_max
|
||||
from ngraph.opset7 import reduce_mean
|
||||
from ngraph.opset7 import reduce_min
|
||||
from ngraph.opset7 import reduce_prod
|
||||
from ngraph.opset7 import reduce_sum
|
||||
from ngraph.opset7 import region_yolo
|
||||
from ngraph.opset7 import reorg_yolo
|
||||
from ngraph.opset7 import relu
|
||||
from ngraph.opset7 import reshape
|
||||
from ngraph.opset7 import result
|
||||
from ngraph.opset7 import reverse_sequence
|
||||
from ngraph.opset7 import rnn_cell
|
||||
from ngraph.opset7 import rnn_sequence
|
||||
from ngraph.opset7 import roi_align
|
||||
from ngraph.opset7 import roi_pooling
|
||||
from ngraph.opset7 import roll
|
||||
from ngraph.opset7 import round
|
||||
from ngraph.opset7 import scatter_elements_update
|
||||
from ngraph.opset7 import scatter_update
|
||||
from ngraph.opset7 import select
|
||||
from ngraph.opset7 import selu
|
||||
from ngraph.opset7 import shape_of
|
||||
from ngraph.opset7 import shuffle_channels
|
||||
from ngraph.opset7 import sigmoid
|
||||
from ngraph.opset7 import sign
|
||||
from ngraph.opset7 import sin
|
||||
from ngraph.opset7 import sinh
|
||||
from ngraph.opset7 import softmax
|
||||
from ngraph.opset7 import softplus
|
||||
from ngraph.opset7 import space_to_batch
|
||||
from ngraph.opset7 import space_to_depth
|
||||
from ngraph.opset7 import split
|
||||
from ngraph.opset7 import sqrt
|
||||
from ngraph.opset7 import squared_difference
|
||||
from ngraph.opset7 import squeeze
|
||||
from ngraph.opset7 import strided_slice
|
||||
from ngraph.opset7 import subtract
|
||||
from ngraph.opset7 import swish
|
||||
from ngraph.opset7 import tan
|
||||
from ngraph.opset7 import tanh
|
||||
from ngraph.opset7 import tensor_iterator
|
||||
from ngraph.opset7 import tile
|
||||
from ngraph.opset7 import topk
|
||||
from ngraph.opset7 import transpose
|
||||
from ngraph.opset7 import unsqueeze
|
||||
from ngraph.opset7 import variadic_split
|
||||
|
||||
|
||||
# Extend Node class to support binary operators
|
||||
|
@ -1,161 +0,0 @@
|
||||
# Copyright (C) 2018-2021 Intel Corporation
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from ngraph.opset1.ops import absolute
|
||||
from ngraph.opset1.ops import absolute as abs
|
||||
from ngraph.opset1.ops import acos
|
||||
from ngraph.opset4.ops import acosh
|
||||
from ngraph.opset8.ops import adaptive_avg_pool
|
||||
from ngraph.opset8.ops import adaptive_max_pool
|
||||
from ngraph.opset1.ops import add
|
||||
from ngraph.opset1.ops import asin
|
||||
from ngraph.opset4.ops import asinh
|
||||
from ngraph.opset3.ops import assign
|
||||
from ngraph.opset1.ops import atan
|
||||
from ngraph.opset4.ops import atanh
|
||||
from ngraph.opset1.ops import avg_pool
|
||||
from ngraph.opset5.ops import batch_norm_inference
|
||||
from ngraph.opset2.ops import batch_to_space
|
||||
from ngraph.opset1.ops import binary_convolution
|
||||
from ngraph.opset3.ops import broadcast
|
||||
from ngraph.opset3.ops import bucketize
|
||||
from ngraph.opset1.ops import ceiling
|
||||
from ngraph.opset1.ops import ceiling as ceil
|
||||
from ngraph.opset1.ops import clamp
|
||||
from ngraph.opset1.ops import concat
|
||||
from ngraph.opset1.ops import constant
|
||||
from ngraph.opset1.ops import convert
|
||||
from ngraph.opset1.ops import convert_like
|
||||
from ngraph.opset1.ops import convolution
|
||||
from ngraph.opset1.ops import convolution_backprop_data
|
||||
from ngraph.opset1.ops import cos
|
||||
from ngraph.opset1.ops import cosh
|
||||
from ngraph.opset1.ops import ctc_greedy_decoder
|
||||
from ngraph.opset6.ops import ctc_greedy_decoder_seq_len
|
||||
from ngraph.opset4.ops import ctc_loss
|
||||
from ngraph.opset3.ops import cum_sum
|
||||
from ngraph.opset3.ops import cum_sum as cumsum
|
||||
from ngraph.opset8.ops import deformable_convolution
|
||||
from ngraph.opset1.ops import deformable_psroi_pooling
|
||||
from ngraph.opset1.ops import depth_to_space
|
||||
from ngraph.opset1.ops import detection_output
|
||||
from ngraph.opset7.ops import dft
|
||||
from ngraph.opset1.ops import divide
|
||||
from ngraph.opset7.ops import einsum
|
||||
from ngraph.opset1.ops import elu
|
||||
from ngraph.opset3.ops import embedding_bag_offsets_sum
|
||||
from ngraph.opset3.ops import embedding_bag_packed_sum
|
||||
from ngraph.opset3.ops import embedding_segments_sum
|
||||
from ngraph.opset3.ops import extract_image_patches
|
||||
from ngraph.opset1.ops import equal
|
||||
from ngraph.opset1.ops import erf
|
||||
from ngraph.opset1.ops import exp
|
||||
from ngraph.opset1.ops import fake_quantize
|
||||
from ngraph.opset1.ops import floor
|
||||
from ngraph.opset1.ops import floor_mod
|
||||
from ngraph.opset8.ops import gather
|
||||
from ngraph.opset6.ops import gather_elements
|
||||
from ngraph.opset5.ops import gather_nd
|
||||
from ngraph.opset1.ops import gather_tree
|
||||
from ngraph.opset7.ops import gelu
|
||||
from ngraph.opset1.ops import greater
|
||||
from ngraph.opset1.ops import greater_equal
|
||||
from ngraph.opset1.ops import grn
|
||||
from ngraph.opset1.ops import group_convolution
|
||||
from ngraph.opset1.ops import group_convolution_backprop_data
|
||||
from ngraph.opset3.ops import gru_cell
|
||||
from ngraph.opset5.ops import gru_sequence
|
||||
from ngraph.opset1.ops import hard_sigmoid
|
||||
from ngraph.opset5.ops import hsigmoid
|
||||
from ngraph.opset4.ops import hswish
|
||||
from ngraph.opset7.ops import idft
|
||||
from ngraph.opset1.ops import interpolate
|
||||
from ngraph.opset1.ops import less
|
||||
from ngraph.opset1.ops import less_equal
|
||||
from ngraph.opset1.ops import log
|
||||
from ngraph.opset1.ops import logical_and
|
||||
from ngraph.opset1.ops import logical_not
|
||||
from ngraph.opset1.ops import logical_or
|
||||
from ngraph.opset1.ops import logical_xor
|
||||
from ngraph.opset5.ops import log_softmax
|
||||
from ngraph.opset5.ops import loop
|
||||
from ngraph.opset1.ops import lrn
|
||||
from ngraph.opset4.ops import lstm_cell
|
||||
from ngraph.opset1.ops import lstm_sequence
|
||||
from ngraph.opset1.ops import matmul
|
||||
from ngraph.opset8.ops import matrix_nms
|
||||
from ngraph.opset8.ops import max_pool
|
||||
from ngraph.opset1.ops import maximum
|
||||
from ngraph.opset1.ops import minimum
|
||||
from ngraph.opset4.ops import mish
|
||||
from ngraph.opset1.ops import mod
|
||||
from ngraph.opset8.ops import multiclass_nms
|
||||
from ngraph.opset1.ops import multiply
|
||||
from ngraph.opset6.ops import mvn
|
||||
from ngraph.opset1.ops import negative
|
||||
from ngraph.opset5.ops import non_max_suppression
|
||||
from ngraph.opset3.ops import non_zero
|
||||
from ngraph.opset1.ops import normalize_l2
|
||||
from ngraph.opset1.ops import not_equal
|
||||
from ngraph.opset1.ops import one_hot
|
||||
from ngraph.opset1.ops import pad
|
||||
from ngraph.opset1.ops import parameter
|
||||
from ngraph.opset1.ops import power
|
||||
from ngraph.opset1.ops import prelu
|
||||
from ngraph.opset1.ops import prior_box
|
||||
from ngraph.opset1.ops import prior_box_clustered
|
||||
from ngraph.opset1.ops import psroi_pooling
|
||||
from ngraph.opset4.ops import proposal
|
||||
from ngraph.opset8.ops import random_uniform
|
||||
from ngraph.opset1.ops import range
|
||||
from ngraph.opset3.ops import read_value
|
||||
from ngraph.opset4.ops import reduce_l1
|
||||
from ngraph.opset4.ops import reduce_l2
|
||||
from ngraph.opset1.ops import reduce_logical_and
|
||||
from ngraph.opset1.ops import reduce_logical_or
|
||||
from ngraph.opset1.ops import reduce_max
|
||||
from ngraph.opset1.ops import reduce_mean
|
||||
from ngraph.opset1.ops import reduce_min
|
||||
from ngraph.opset1.ops import reduce_prod
|
||||
from ngraph.opset1.ops import reduce_sum
|
||||
from ngraph.opset1.ops import region_yolo
|
||||
from ngraph.opset2.ops import reorg_yolo
|
||||
from ngraph.opset1.ops import relu
|
||||
from ngraph.opset1.ops import reshape
|
||||
from ngraph.opset1.ops import result
|
||||
from ngraph.opset1.ops import reverse_sequence
|
||||
from ngraph.opset3.ops import rnn_cell
|
||||
from ngraph.opset5.ops import rnn_sequence
|
||||
from ngraph.opset3.ops import roi_align
|
||||
from ngraph.opset2.ops import roi_pooling
|
||||
from ngraph.opset7.ops import roll
|
||||
from ngraph.opset5.ops import round
|
||||
from ngraph.opset3.ops import scatter_elements_update
|
||||
from ngraph.opset3.ops import scatter_update
|
||||
from ngraph.opset1.ops import select
|
||||
from ngraph.opset1.ops import selu
|
||||
from ngraph.opset3.ops import shape_of
|
||||
from ngraph.opset3.ops import shuffle_channels
|
||||
from ngraph.opset1.ops import sigmoid
|
||||
from ngraph.opset1.ops import sign
|
||||
from ngraph.opset1.ops import sin
|
||||
from ngraph.opset1.ops import sinh
|
||||
from ngraph.opset1.ops import softmax
|
||||
from ngraph.opset4.ops import softplus
|
||||
from ngraph.opset2.ops import space_to_batch
|
||||
from ngraph.opset1.ops import space_to_depth
|
||||
from ngraph.opset1.ops import split
|
||||
from ngraph.opset1.ops import sqrt
|
||||
from ngraph.opset1.ops import squared_difference
|
||||
from ngraph.opset1.ops import squeeze
|
||||
from ngraph.opset1.ops import strided_slice
|
||||
from ngraph.opset1.ops import subtract
|
||||
from ngraph.opset4.ops import swish
|
||||
from ngraph.opset1.ops import tan
|
||||
from ngraph.opset1.ops import tanh
|
||||
from ngraph.opset1.ops import tensor_iterator
|
||||
from ngraph.opset1.ops import tile
|
||||
from ngraph.opset3.ops import topk
|
||||
from ngraph.opset1.ops import transpose
|
||||
from ngraph.opset1.ops import unsqueeze
|
||||
from ngraph.opset1.ops import variadic_split
|
@@ -1,369 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

"""Factory functions for all ngraph ops."""
from functools import partial
from typing import Callable, Iterable, List, Optional, Set, Union

import numpy as np
from ngraph.impl import Node, Shape
from ngraph.impl.op import Constant, Parameter
from ngraph.opset_utils import _get_node_factory
from ngraph.utils.decorators import binary_op, nameable_op, unary_op
from ngraph.utils.input_validation import (
    assert_list_of_ints,
    check_valid_attributes,
    is_non_negative_value,
    is_positive_value,
)
from ngraph.utils.node_factory import NodeFactory
from ngraph.utils.tensor_iterator_types import (
    GraphBody,
    TensorIteratorSliceInputDesc,
    TensorIteratorMergedInputDesc,
    TensorIteratorInvariantInputDesc,
    TensorIteratorBodyOutputDesc,
    TensorIteratorConcatOutputDesc,
)
from ngraph.utils.types import (
    NodeInput,
    NumericData,
    NumericType,
    ScalarData,
    TensorShape,
    as_node,
    as_nodes,
    get_dtype,
    get_element_type,
    get_element_type_str,
    make_constant_node,
)

_get_node_factory_opset8 = partial(_get_node_factory, "opset8")


# -------------------------------------------- ops ------------------------------------------------

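Every helper in the file removed below reduces to one call on the shared node factory. A hedged sketch of that pattern (the "Relu" type name and the parameter shape are illustrative, not part of this module):

    import numpy as np
    import ngraph as ng
    from ngraph.opset_utils import _get_node_factory
    from ngraph.utils.types import as_node

    data = ng.parameter([2, 2], name="data", dtype=np.float32)
    # _get_node_factory_opset8() above is just partial(_get_node_factory, "opset8")()
    node = _get_node_factory("opset8").create("Relu", [as_node(data)])
    assert node.get_type_name() == "Relu"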
@nameable_op
def deformable_convolution(
    data: NodeInput,
    offsets: NodeInput,
    filters: NodeInput,
    strides: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    dilations: List[int],
    mask: Optional[NodeInput] = None,
    auto_pad: str = "EXPLICIT",
    group: int = 1,
    deformable_group: int = 1,
    bilinear_interpolation_pad: bool = False,
    name: Optional[str] = None,
) -> Node:
    """Return a node which performs deformable convolution operation.

    @param data: The node providing data batch tensor.
    @param offsets: The node providing offset tensor.
    @param filters: The node providing filters tensor.
    @param strides: The distance (in pixels) to slide the filter on the feature map over the axes.
    @param pads_begin: The number of pixels to add to the beginning along each axis.
    @param pads_end: The number of pixels to add to the end along each axis.
    @param dilations: The distance in width and height between elements (weights) in the filter.
    @param mask: The node providing modulation scalar (mask) tensor.
    @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid.
    @param group: The number of groups which both output and input should be split into.
    @param deformable_group: The number of groups which deformable values and output should be split
                             into along the channel axis.
    @param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation
                                       execution.
    @param name: The optional new name for output node.
    @return New node performing deformable convolution operation.
    """
    if mask is None:
        inputs = as_nodes(data, offsets, filters)
    else:
        inputs = as_nodes(data, offsets, filters, mask)

    return _get_node_factory_opset8().create(
        "DeformableConvolution",
        inputs,
        {
            "strides": strides,
            "pads_begin": pads_begin,
            "pads_end": pads_end,
            "dilations": dilations,
            "auto_pad": auto_pad,
            "group": group,
            "deformable_group": deformable_group,
            "bilinear_interpolation_pad": bilinear_interpolation_pad,
        },
    )

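A hedged usage sketch of this opset8 entry point, reusing the tensor shapes from the compatibility test further down in this diff (names and shapes are illustrative; the same call without the mask argument also resolves against the opset7 helper that stays exported):

    import numpy as np
    import ngraph as ng

    data = ng.parameter([1, 1, 9, 9], name="data", dtype=np.float32)
    offsets = ng.parameter([1, 18, 7, 7], name="offsets", dtype=np.float32)
    filters = ng.parameter([1, 1, 3, 3], name="filters", dtype=np.float32)

    node = ng.deformable_convolution(
        data, offsets, filters,
        strides=[1, 1], pads_begin=[0, 0], pads_end=[0, 0], dilations=[1, 1],
    )
    assert list(node.get_output_shape(0)) == [1, 1, 7, 7]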
@nameable_op
def adaptive_avg_pool(
    data: NodeInput,
    output_shape: NodeInput
) -> Node:
    """Return a node which performs AdaptiveAvgPool operation.

    @param data: The node providing input data.
    @param output_shape: The shape of spatial dimensions after the operation.
    @return: The new node performing AdaptiveAvgPool operation on the data.
    """
    inputs = as_nodes(data, output_shape)
    return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs)


@nameable_op
def adaptive_max_pool(
    data: NodeInput,
    output_shape: NodeInput,
    index_element_type: str = "i64"
) -> Node:
    """Return a node which performs AdaptiveMaxPool operation.

    @param data: The node providing input data.
    @param output_shape: The shape of spatial dimensions after the operation.
    @param index_element_type: The type of the indices output.
    @return: The new node performing AdaptiveMaxPool operation on the data.
    """
    inputs = as_nodes(data, output_shape)

    attributes = {
        "index_element_type": index_element_type,
    }

    return _get_node_factory_opset8().create("AdaptiveMaxPool", inputs, attributes)

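A hedged sketch of both adaptive pooling helpers, mirroring the compatibility tests removed later in this diff (shapes are illustrative; this commit removes both helpers from the compatibility API altogether, so the sketch only applies to the pre-change package):

    import numpy as np
    import ngraph as ng

    data = ng.parameter([2, 24, 34, 62], name="input", dtype=np.float32)
    output_shape = ng.constant(np.array([16, 16], dtype=np.int32))

    avg_node = ng.adaptive_avg_pool(data, output_shape)
    max_node = ng.adaptive_max_pool(data, output_shape, "i32")

    assert list(avg_node.get_output_shape(0)) == [2, 24, 16, 16]
    assert max_node.get_output_size() == 2  # pooled values and their indices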
@nameable_op
def multiclass_nms(
    boxes: NodeInput,
    scores: NodeInput,
    sort_result_type: str = "none",
    sort_result_across_batch: bool = False,
    output_type: str = "i64",
    iou_threshold: float = 0.0,
    score_threshold: float = 0.0,
    nms_top_k: int = -1,
    keep_top_k: int = -1,
    background_class: int = -1,
    nms_eta: float = 1.0,
    normalized: bool = True
) -> Node:
    """Return a node which performs MulticlassNms.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param sort_result_type: Specifies the order of output elements, possible values:
                             'class': sort selected boxes by class id (ascending)
                             'score': sort selected boxes by score (descending)
                             'none': do not guarantee the order.
    @param sort_result_across_batch: Specifies whether selected boxes are sorted across batches.
    @param output_type: Specifies the output tensor type, possible values: 'i64', 'i32'.
    @param iou_threshold: Specifies the intersection-over-union threshold.
    @param score_threshold: Specifies the minimum score to consider a box for processing.
    @param nms_top_k: Specifies the maximum number of boxes to be selected per class,
                      -1 meaning to keep all boxes.
    @param keep_top_k: Specifies the maximum number of boxes to be selected per batch element,
                       -1 meaning to keep all boxes.
    @param background_class: Specifies the background class id, -1 meaning to keep all classes.
    @param nms_eta: Specifies the eta parameter for adaptive NMS, in the closed range [0, 1.0].
    @param normalized: Specifies whether boxes are normalized or not.
    @return: The new node which performs MulticlassNms.
    """
    inputs = as_nodes(boxes, scores)

    attributes = {
        "sort_result_type": sort_result_type,
        "sort_result_across_batch": sort_result_across_batch,
        "output_type": output_type,
        "iou_threshold": iou_threshold,
        "score_threshold": score_threshold,
        "nms_top_k": nms_top_k,
        "keep_top_k": keep_top_k,
        "background_class": background_class,
        "nms_eta": nms_eta,
        "normalized": normalized,
    }

    return _get_node_factory_opset8().create("MulticlassNms", inputs, attributes)

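A hedged usage sketch based on the compatibility test removed elsewhere in this change (box and score values are illustrative; after this commit the helper is no longer exposed by the compatibility API):

    import numpy as np
    import ngraph as ng

    boxes = ng.constant(np.zeros((1, 6, 4), dtype=np.float32))
    scores = ng.constant(np.random.rand(1, 2, 6).astype(np.float32))

    nms_node = ng.multiclass_nms(
        boxes, scores,
        sort_result_type="score", output_type="i32",
        nms_top_k=3, iou_threshold=0.5, score_threshold=0.0,
    )
    assert nms_node.get_type_name() == "MulticlassNms"
    assert nms_node.get_output_size() == 3  # selected outputs, indices, per-image counts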
@nameable_op
def matrix_nms(
    boxes: NodeInput,
    scores: NodeInput,
    sort_result_type: str = "none",
    sort_result_across_batch: bool = False,
    output_type: str = "i64",
    score_threshold: float = 0.0,
    nms_top_k: int = -1,
    keep_top_k: int = -1,
    background_class: int = -1,
    decay_function: str = "linear",
    gaussian_sigma: float = 2.0,
    post_threshold: float = 0.0,
    normalized: bool = True
) -> Node:
    """Return a node which performs MatrixNms.

    @param boxes: Tensor with box coordinates.
    @param scores: Tensor with box scores.
    @param sort_result_type: Specifies the order of output elements, possible values:
                             'class': sort selected boxes by class id (ascending)
                             'score': sort selected boxes by score (descending)
                             'none': do not guarantee the order.
    @param sort_result_across_batch: Specifies whether selected boxes are sorted across batches.
    @param output_type: Specifies the output tensor type, possible values: 'i64', 'i32'.
    @param score_threshold: Specifies the minimum score to consider a box for processing.
    @param nms_top_k: Specifies the maximum number of boxes to be selected per class,
                      -1 meaning to keep all boxes.
    @param keep_top_k: Specifies the maximum number of boxes to be selected per batch element,
                       -1 meaning to keep all boxes.
    @param background_class: Specifies the background class id, -1 meaning to keep all classes.
    @param decay_function: Specifies the decay function used to decay scores, possible values:
                           'gaussian', 'linear'.
    @param gaussian_sigma: Specifies the gaussian_sigma parameter for the gaussian decay_function.
    @param post_threshold: Specifies the threshold to filter out boxes with low confidence score
                           after decaying.
    @param normalized: Specifies whether boxes are normalized or not.
    @return: The new node which performs MatrixNms.
    """
    inputs = as_nodes(boxes, scores)

    attributes = {
        "sort_result_type": sort_result_type,
        "sort_result_across_batch": sort_result_across_batch,
        "output_type": output_type,
        "score_threshold": score_threshold,
        "nms_top_k": nms_top_k,
        "keep_top_k": keep_top_k,
        "background_class": background_class,
        "decay_function": decay_function,
        "gaussian_sigma": gaussian_sigma,
        "post_threshold": post_threshold,
        "normalized": normalized,
    }

    return _get_node_factory_opset8().create("MatrixNms", inputs, attributes)

@nameable_op
def gather(
    data: NodeInput,
    indices: NodeInput,
    axis: NodeInput,
    batch_dims: Optional[int] = 0,
) -> Node:
    """Return a node which performs Gather with support of negative indices.

    @param data: N-D tensor with data for gathering.
    @param indices: N-D tensor with indices by which data is gathered. Negative indices
                    indicate reverse indexing from the end.
    @param axis: axis along which elements are gathered.
    @param batch_dims: number of batch dimensions.
    @return: The new node which performs Gather.
    """
    inputs = as_nodes(data, indices, axis)
    attributes = {
        "batch_dims": batch_dims
    }
    return _get_node_factory_opset8().create("Gather", inputs, attributes)

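A hedged sketch of the negative-index behaviour this opset8 helper adds (data values are illustrative):

    import numpy as np
    import ngraph as ng

    data = ng.constant(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
    indices = ng.constant(np.array([0, -1], dtype=np.int32))  # -1 addresses the last element
    axis = ng.constant(np.array(1, dtype=np.int32))

    node = ng.gather(data, indices, axis, batch_dims=0)
    assert list(node.get_output_shape(0)) == [2, 2]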
@nameable_op
def max_pool(
    data: NodeInput,
    strides: List[int],
    dilations: List[int],
    pads_begin: List[int],
    pads_end: List[int],
    kernel_shape: TensorShape,
    rounding_type: str = "floor",
    auto_pad: Optional[str] = None,
    index_element_type: Optional[str] = "i64",
    axis: Optional[int] = 0,
    name: Optional[str] = None,
) -> Node:
    """Perform max pooling operation and return both values and indices of the selected elements.

    @param data: The node providing input data.
    @param strides: The distance (in pixels) to slide the filter on the feature map
                    over the axes.
    @param dilations: The dilation of filter elements (distance between elements).
    @param pads_begin: The number of pixels to add at the beginning along each axis.
    @param pads_end: The number of pixels to add at the end along each axis.
    @param kernel_shape: The pooling operation kernel shape.
    @param rounding_type: Determines used rounding schema when computing output shape.
                          Acceptable values are: ['floor', 'ceil']. Defaults to 'floor'.
    @param auto_pad: Determines how the padding is calculated. Acceptable values:
                     [None, 'same_upper', 'same_lower', 'valid']. Defaults to None.
    @param index_element_type: The data type used for the indices output of this operator.
                               Defaults to i64.
    @param axis: The first dimension in the data shape used to determine the maximum
                 returned index value. The value is the product of all dimensions
                 starting at the provided axis. Defaults to 0.
    @param name: The optional name for the created output node.

    @return The new node performing max pooling operation.
    """
    if auto_pad is None:
        auto_pad = "explicit"
    return _get_node_factory_opset8().create(
        "MaxPool",
        [as_node(data)],
        {
            "strides": strides,
            "dilations": dilations,
            "pads_begin": pads_begin,
            "pads_end": pads_end,
            "kernel": kernel_shape,
            "rounding_type": rounding_type.upper(),
            "auto_pad": auto_pad.upper(),
            "index_element_type": index_element_type,
            "axis": axis,
        },
    )

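A hedged sketch of the two-output opset8 MaxPool (pooled values plus indices). After this commit ng.max_pool resolves to the opset7 helper, which takes neither the dilations nor the index arguments and produces a single output, which is why the pooling tests later in this diff drop those arguments:

    import numpy as np
    import ngraph as ng

    data = ng.parameter([1, 1, 4, 4], name="A", dtype=np.float32)
    node = ng.max_pool(
        data,
        strides=[1, 1], dilations=[1, 1],
        pads_begin=[0, 0], pads_end=[0, 0],
        kernel_shape=[2, 2],
        rounding_type="floor", auto_pad=None, index_element_type="i32",
    )
    assert node.get_output_size() == 2  # pooled values and selected indices
    assert list(node.get_output_shape(0)) == [1, 1, 3, 3]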
@nameable_op
def random_uniform(
    output_shape: NodeInput,
    min_val: NodeInput,
    max_val: NodeInput,
    output_type: str,
    global_seed: int = 0,
    op_seed: int = 0
) -> Node:
    """Return a node which generates a sequence of random values from a uniform distribution.

    @param output_shape: Tensor with the shape of the output tensor.
    @param min_val: Tensor with the lower bound on the range of random values to generate.
    @param max_val: Tensor with the upper bound on the range of random values to generate.
    @param output_type: Specifies the output tensor type, possible values:
                        'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'.
    @param global_seed: Specifies the global seed value. Required to be a positive integer or 0.
    @param op_seed: Specifies the operational seed value. Required to be a positive integer or 0.
    @return The new node which performs generation of random values from a uniform distribution.
    """
    inputs = as_nodes(output_shape, min_val, max_val)

    if global_seed < 0:
        raise RuntimeError("global_seed should be positive or 0. Got: {}".format(global_seed))

    if op_seed < 0:
        raise RuntimeError("op_seed should be positive or 0. Got: {}".format(op_seed))

    attributes = {
        "output_type": output_type,
        "global_seed": global_seed,
        "op_seed": op_seed,
    }
    return _get_node_factory_opset8().create("RandomUniform", inputs, attributes)

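A hedged sketch of the RandomUniform helper (shape, bounds and seeds are illustrative; like the other helpers in this file it disappears from the compatibility API with this commit):

    import numpy as np
    import ngraph as ng

    shape = ng.constant(np.array([2, 3], dtype=np.int32))
    low = ng.constant(np.float32(0.0))
    high = ng.constant(np.float32(1.0))

    node = ng.random_uniform(shape, low, high, "f32", global_seed=2, op_seed=5)
    assert node.get_type_name() == "RandomUniform"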
@@ -12,7 +12,7 @@ from ngraph.impl import Node, Output

 from ngraph.exceptions import UserInputError

-DEFAULT_OPSET = "opset8"
+DEFAULT_OPSET = "opset7"


 class NodeFactory(object):
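For direct NodeFactory users the visible effect is only the default opset name. A hedged sketch, assuming the constructor falls back to DEFAULT_OPSET when no opset is given, which is what this module suggests:

    from ngraph.utils.node_factory import NodeFactory

    factory = NodeFactory()           # now equivalent to NodeFactory("opset7")
    explicit = NodeFactory("opset7")  # naming the opset explicitly keeps working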
@@ -82,7 +82,6 @@ private:
     {"opset5", OpsetFunction(ngraph::get_opset5)},
     {"opset6", OpsetFunction(ngraph::get_opset6)},
     {"opset7", OpsetFunction(ngraph::get_opset7)},
-    {"opset8", OpsetFunction(ngraph::get_opset8)},
 };

 auto it = s_opsets.find(opset_ver);
@@ -1,63 +0,0 @@
import ngraph as ng
import numpy as np
from tests_compatibility.runtime import get_runtime


def test_adaptive_avg_pool():
    runtime = get_runtime()
    input = np.reshape([0.0, 4, 1, 3, -2, -5, -2,
                        -2, 1, -3, 1, -3, -4, 0,
                        -2, 1, -1, -2, 3, -1, -3,

                        -1, -2, 3, 4, -3, -4, 1,
                        2, 0, -4, -5, -2, -2, -3,
                        2, 3, 1, -5, 2, -4, -2], (2, 3, 7))
    input_tensor = ng.constant(input)
    output_shape = ng.constant(np.array([3], dtype=np.int32))

    adaptive_pool_node = ng.adaptive_avg_pool(input_tensor, output_shape)
    computation = runtime.computation(adaptive_pool_node)
    adaptive_pool_results = computation()
    expected_results = np.reshape([1.66666663, 0.66666669, -3.,
                                   -1.33333337, -1.66666663, -2.33333325,
                                   -0.66666669, 0., -0.33333334,

                                   0., 1.33333337, -2.,
                                   -0.66666669, -3.66666675, -2.33333325,
                                   2., -0.66666669, -1.33333337], (2, 3, 3))

    assert np.allclose(adaptive_pool_results, expected_results)


def test_adaptive_max_pool():
    runtime = get_runtime()
    input = np.reshape([0, 4, 1, 3, -2, -5, -2,
                        -2, 1, -3, 1, -3, -4, 0,
                        -2, 1, -1, -2, 3, -1, -3,

                        -1, -2, 3, 4, -3, -4, 1,
                        2, 0, -4, -5, -2, -2, -3,
                        2, 3, 1, -5, 2, -4, -2], (2, 3, 7))
    input_tensor = ng.constant(input)
    output_shape = ng.constant(np.array([3], dtype=np.int32))

    adaptive_pool_node = ng.adaptive_max_pool(input_tensor, output_shape)
    computation = runtime.computation(adaptive_pool_node)
    adaptive_pool_results = computation()
    expected_results = np.reshape([4, 3, -2,
                                   1, 1, 0,
                                   1, 3, 3,

                                   3, 4, 1,
                                   2, -2, -2,
                                   3, 2, 2], (2, 3, 3))

    expected_indices = np.reshape([1, 3, 4,
                                   1, 3, 6,
                                   1, 4, 4,

                                   2, 3, 6,
                                   0, 4, 4,
                                   1, 4, 4], (2, 3, 3))

    assert np.allclose(adaptive_pool_results, [expected_results, expected_indices])
@@ -3,7 +3,7 @@

 import numpy as np
 import pytest
-from _pyngraph import PartialShape, Dimension
+from _pyngraph import PartialShape

 import ngraph as ng
 import ngraph.opset1 as ng_opset1
@@ -23,33 +23,6 @@ integral_np_types = [
 ]


-@pytest.mark.parametrize("dtype", [np.float32, np.float64])
-def test_adaptive_avg_pool(dtype):
-    data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype)
-    output_shape = ng.constant(np.array([16, 16], dtype=np.int32))
-
-    node = ng.adaptive_avg_pool(data, output_shape)
-
-    assert node.get_type_name() == "AdaptiveAvgPool"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [2, 24, 16, 16]
-
-
-@pytest.mark.parametrize("dtype", [np.float32, np.float64])
-@pytest.mark.parametrize("ind_type", ["i32", "i64"])
-def test_adaptive_max_pool(dtype, ind_type):
-    data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype)
-    output_shape = ng.constant(np.array([16, 16], dtype=np.int32))
-
-    node = ng.adaptive_max_pool(data, output_shape, ind_type)
-
-    assert node.get_type_name() == "AdaptiveMaxPool"
-    assert node.get_output_size() == 2
-    assert list(node.get_output_shape(0)) == [2, 24, 16, 16]
-    assert list(node.get_output_shape(1)) == [2, 24, 16, 16]
-    assert node.get_output_element_type(1) == Type.i32 if ind_type == "i32" else Type.i64
-
-
 @pytest.mark.parametrize("dtype", [np.float32, np.float64])
 def test_binary_convolution(dtype):
     strides = np.array([1, 1])
@ -67,7 +40,14 @@ def test_binary_convolution(dtype):
|
||||
parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype)
|
||||
|
||||
node = ng.binary_convolution(
|
||||
parameter_input0, parameter_input1, strides, pads_begin, pads_end, dilations, mode, pad_value,
|
||||
parameter_input0,
|
||||
parameter_input1,
|
||||
strides,
|
||||
pads_begin,
|
||||
pads_end,
|
||||
dilations,
|
||||
mode,
|
||||
pad_value,
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "BinaryConvolution"
|
||||
@ -91,26 +71,30 @@ def test_ctc_greedy_decoder(dtype):
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
|
||||
|
||||
@pytest.mark.parametrize("fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index",
|
||||
[
|
||||
(np.float32, np.int32, "i32", "i32", True, True),
|
||||
(np.float32, np.int32, "i64", "i32", True, True),
|
||||
(np.float32, np.int32, "i32", "i64", True, True),
|
||||
(np.float32, np.int32, "i64", "i64", True, True),
|
||||
(np.float64, np.int64, "i32", "i32", False, True),
|
||||
(np.float64, np.int64, "i64", "i32", False, True),
|
||||
(np.float64, np.int64, "i32", "i64", False, True),
|
||||
(np.float64, np.int64, "i64", "i64", False, True),
|
||||
(np.float32, np.int32, "i32", "i32", True, False),
|
||||
(np.float32, np.int32, "i64", "i32", True, False),
|
||||
(np.float32, np.int32, "i32", "i64", True, False),
|
||||
(np.float32, np.int32, "i64", "i64", True, False),
|
||||
(np.float64, np.int64, "i32", "i32", False, False),
|
||||
(np.float64, np.int64, "i64", "i32", False, False),
|
||||
(np.float64, np.int64, "i32", "i64", False, False),
|
||||
(np.float64, np.int64, "i64", "i64", False, False)
|
||||
],)
|
||||
def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index):
|
||||
@pytest.mark.parametrize(
|
||||
"fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index",
|
||||
[
|
||||
(np.float32, np.int32, "i32", "i32", True, True),
|
||||
(np.float32, np.int32, "i64", "i32", True, True),
|
||||
(np.float32, np.int32, "i32", "i64", True, True),
|
||||
(np.float32, np.int32, "i64", "i64", True, True),
|
||||
(np.float64, np.int64, "i32", "i32", False, True),
|
||||
(np.float64, np.int64, "i64", "i32", False, True),
|
||||
(np.float64, np.int64, "i32", "i64", False, True),
|
||||
(np.float64, np.int64, "i64", "i64", False, True),
|
||||
(np.float32, np.int32, "i32", "i32", True, False),
|
||||
(np.float32, np.int32, "i64", "i32", True, False),
|
||||
(np.float32, np.int32, "i32", "i64", True, False),
|
||||
(np.float32, np.int32, "i64", "i64", True, False),
|
||||
(np.float64, np.int64, "i32", "i32", False, False),
|
||||
(np.float64, np.int64, "i64", "i32", False, False),
|
||||
(np.float64, np.int64, "i32", "i64", False, False),
|
||||
(np.float64, np.int64, "i64", "i64", False, False),
|
||||
],
|
||||
)
|
||||
def test_ctc_greedy_decoder_seq_len(
|
||||
fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index
|
||||
):
|
||||
input0_shape = [8, 20, 128]
|
||||
input1_shape = [8]
|
||||
input2_shape = [1]
|
||||
@ -123,7 +107,12 @@ def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_r
|
||||
parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=int_dtype)
|
||||
|
||||
node = ng.ctc_greedy_decoder_seq_len(
|
||||
parameter_input0, parameter_input1, parameter_input2, merge_repeated, int_ci, int_sl
|
||||
parameter_input0,
|
||||
parameter_input1,
|
||||
parameter_input2,
|
||||
merge_repeated,
|
||||
int_ci,
|
||||
int_sl,
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "CTCGreedyDecoderSeqLen"
|
||||
@ -148,7 +137,13 @@ def test_deformable_convolution_opset1(dtype):
|
||||
parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype)
|
||||
|
||||
node = ng_opset1.deformable_convolution(
|
||||
parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations,
|
||||
parameter_input0,
|
||||
parameter_input1,
|
||||
parameter_input2,
|
||||
strides,
|
||||
pads_begin,
|
||||
pads_end,
|
||||
dilations,
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "DeformableConvolution"
|
||||
@ -173,35 +168,13 @@ def test_deformable_convolution(dtype):
|
||||
parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype)
|
||||
|
||||
node = ng.deformable_convolution(
|
||||
parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations,
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "DeformableConvolution"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
|
||||
|
||||
@pytest.mark.parametrize("dtype", np_types)
|
||||
def test_deformable_convolution_mask(dtype):
|
||||
strides = np.array([1, 1])
|
||||
pads_begin = np.array([0, 0])
|
||||
pads_end = np.array([0, 0])
|
||||
dilations = np.array([1, 1])
|
||||
|
||||
input0_shape = [1, 1, 9, 9]
|
||||
input1_shape = [1, 18, 7, 7]
|
||||
input2_shape = [1, 1, 3, 3]
|
||||
input3_shape = [1, 9, 7, 7]
|
||||
expected_shape = [1, 1, 7, 7]
|
||||
|
||||
parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype)
|
||||
parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype)
|
||||
parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype)
|
||||
parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype)
|
||||
|
||||
node = ng.deformable_convolution(
|
||||
parameter_input0, parameter_input1, parameter_input2, strides,
|
||||
pads_begin, pads_end, dilations, parameter_input3
|
||||
parameter_input0,
|
||||
parameter_input1,
|
||||
parameter_input2,
|
||||
strides,
|
||||
pads_begin,
|
||||
pads_end,
|
||||
dilations,
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "DeformableConvolution"
|
||||
@ -277,7 +250,9 @@ def test_gather_tree(dtype):
|
||||
parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype)
|
||||
parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype)
|
||||
|
||||
node = ng.gather_tree(parameter_input0, parameter_input1, parameter_input2, parameter_input3)
|
||||
node = ng.gather_tree(
|
||||
parameter_input0, parameter_input1, parameter_input2, parameter_input3
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "GatherTree"
|
||||
assert node.get_output_size() == 1
|
||||
@ -307,7 +282,13 @@ def test_lstm_cell_operator(dtype):
|
||||
expected_shape = [1, 128]
|
||||
|
||||
node_default = ng.lstm_cell(
|
||||
parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size,
|
||||
parameter_X,
|
||||
parameter_H_t,
|
||||
parameter_C_t,
|
||||
parameter_W,
|
||||
parameter_R,
|
||||
parameter_B,
|
||||
hidden_size,
|
||||
)
|
||||
|
||||
assert node_default.get_type_name() == "LSTMCell"
|
||||
@ -363,7 +344,13 @@ def test_lstm_cell_operator_opset1(dtype):
|
||||
expected_shape = [1, 128]
|
||||
|
||||
node_default = ng_opset1.lstm_cell(
|
||||
parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size,
|
||||
parameter_X,
|
||||
parameter_H_t,
|
||||
parameter_C_t,
|
||||
parameter_W,
|
||||
parameter_R,
|
||||
parameter_B,
|
||||
hidden_size,
|
||||
)
|
||||
|
||||
assert node_default.get_type_name() == "LSTMCell"
|
||||
@ -612,7 +599,9 @@ def test_gru_cell_operator():
|
||||
|
||||
expected_shape = [1, 128]
|
||||
|
||||
node_default = ng.gru_cell(parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size)
|
||||
node_default = ng.gru_cell(
|
||||
parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size
|
||||
)
|
||||
|
||||
assert node_default.get_type_name() == "GRUCell"
|
||||
assert node_default.get_output_size() == 1
|
||||
@ -820,8 +809,10 @@ def test_loop():
|
||||
ti_inputs = [iter_cnt, data, initial_cma, one]
|
||||
body_const_condition = ng.constant(True, dtype=np.bool)
|
||||
|
||||
graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one],
|
||||
[curr_cma, cma_hist, body_const_condition])
|
||||
graph_body = GraphBody(
|
||||
[body_timestep, body_data_in, body_prev_cma, body_const_one],
|
||||
[curr_cma, cma_hist, body_const_condition],
|
||||
)
|
||||
ti_slice_input_desc = [
|
||||
# timestep
|
||||
# input_idx, body_param_idx, start, stride, part_size, end, axis
|
||||
@ -926,7 +917,9 @@ def test_region_yolo():
|
||||
end_axis = 3
|
||||
do_softmax = False
|
||||
|
||||
node = ng.region_yolo(data, num_coords, num_classes, num_regions, do_softmax, mask, axis, end_axis)
|
||||
node = ng.region_yolo(
|
||||
data, num_coords, num_classes, num_regions, do_softmax, mask, axis, end_axis
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "RegionYolo"
|
||||
assert node.get_output_size() == 1
|
||||
@ -996,7 +989,9 @@ def test_embedding_segments_sum_with_some_opt_inputs():
|
||||
def test_embedding_bag_packed_sum():
|
||||
emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
|
||||
indices = ng.parameter([3, 3], name="indices", dtype=np.int64)
|
||||
per_sample_weights = ng.parameter([3, 3], name="per_sample_weights", dtype=np.float32)
|
||||
per_sample_weights = ng.parameter(
|
||||
[3, 3], name="per_sample_weights", dtype=np.float32
|
||||
)
|
||||
|
||||
# only 1 out of 3 optional inputs
|
||||
node = ng.embedding_bag_packed_sum(emb_table, indices, per_sample_weights)
|
||||
@ -1048,7 +1043,7 @@ def test_prior_box(int_dtype, fp_dtype):
|
||||
"offset": fp_dtype(0),
|
||||
"min_size": np.array([2, 3], dtype=fp_dtype),
|
||||
"aspect_ratio": np.array([1.5, 2.0, 2.5], dtype=fp_dtype),
|
||||
"scale_all_sizes": False
|
||||
"scale_all_sizes": False,
|
||||
}
|
||||
|
||||
layer_shape = ng.constant(np.array([32, 32], dtype=int_dtype), int_dtype)
|
||||
@ -1120,7 +1115,9 @@ def test_detection_output(int_dtype, fp_dtype):
|
||||
aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds")
|
||||
aux_box_preds = ng.parameter([4, 8], fp_dtype, "aux_box_preds")
|
||||
|
||||
node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds)
|
||||
node = ng.detection_output(
|
||||
box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "DetectionOutput"
|
||||
assert node.get_output_size() == 1
|
||||
@ -1158,7 +1155,10 @@ def test_proposal(int_dtype, fp_dtype):
|
||||
|
||||
assert node.get_type_name() == "Proposal"
|
||||
assert node.get_output_size() == 2
|
||||
assert list(node.get_output_shape(0)) == [batch_size * attributes["post_nms_topn"], 5]
|
||||
assert list(node.get_output_shape(0)) == [
|
||||
batch_size * attributes["post_nms_topn"],
|
||||
5,
|
||||
]
|
||||
|
||||
|
||||
def test_tensor_iterator():
|
||||
@ -1193,7 +1193,10 @@ def test_tensor_iterator():
|
||||
iter_cnt = ng.range(zero, np.int32(16), np.int32(1))
|
||||
ti_inputs = [iter_cnt, data, initial_cma, one]
|
||||
|
||||
graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist])
|
||||
graph_body = GraphBody(
|
||||
[body_timestep, body_data_in, body_prev_cma, body_const_one],
|
||||
[curr_cma, cma_hist],
|
||||
)
|
||||
ti_slice_input_desc = [
|
||||
# timestep
|
||||
# input_idx, body_param_idx, start, stride, part_size, end, axis
|
||||
@ -1551,7 +1554,7 @@ def test_gru_sequence_operator_bidirectional(dtype):
|
||||
activation_alpha,
|
||||
activation_beta,
|
||||
clip,
|
||||
linear_before_reset
|
||||
linear_before_reset,
|
||||
)
|
||||
|
||||
assert node_param.get_type_name() == "GRUSequence"
|
||||
@ -1617,7 +1620,7 @@ def test_gru_sequence_operator_reverse(dtype):
|
||||
activation_alpha,
|
||||
activation_beta,
|
||||
clip,
|
||||
linear_before_reset
|
||||
linear_before_reset,
|
||||
)
|
||||
|
||||
assert node_param.get_type_name() == "GRUSequence"
|
||||
@ -1683,7 +1686,7 @@ def test_gru_sequence_operator_forward(dtype):
|
||||
activation_alpha,
|
||||
activation_beta,
|
||||
clip,
|
||||
linear_before_reset
|
||||
linear_before_reset,
|
||||
)
|
||||
|
||||
assert node.get_type_name() == "GRUSequence"
|
||||
@ -1873,53 +1876,3 @@ def test_rnn_sequence_operator_forward(dtype):
|
||||
|
||||
assert node.get_type_name() == "RNNSequence"
|
||||
assert node.get_output_size() == 2
|
||||
|
||||
|
||||
def test_multiclass_nms():
|
||||
boxes_data = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1,
|
||||
0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
|
||||
0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32")
|
||||
boxes_data = boxes_data.reshape([1, 6, 4])
|
||||
box = ng.constant(boxes_data, dtype=np.float)
|
||||
scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3,
|
||||
0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32")
|
||||
scores_data = scores_data.reshape([1, 2, 6])
|
||||
score = ng.constant(scores_data, dtype=np.float)
|
||||
|
||||
nms_node = ng.multiclass_nms(box, score, output_type="i32", nms_top_k=3,
|
||||
iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid",
|
||||
nms_eta=1.0)
|
||||
|
||||
assert nms_node.get_type_name() == "MulticlassNms"
|
||||
assert nms_node.get_output_size() == 3
|
||||
assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)])
|
||||
assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)])
|
||||
assert list(nms_node.outputs()[2].get_shape()) == [1, ]
|
||||
assert nms_node.get_output_element_type(0) == Type.f32
|
||||
assert nms_node.get_output_element_type(1) == Type.i32
|
||||
assert nms_node.get_output_element_type(2) == Type.i32
|
||||
|
||||
|
||||
def test_matrix_nms():
|
||||
boxes_data = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1,
|
||||
0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
|
||||
0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32")
|
||||
boxes_data = boxes_data.reshape([1, 6, 4])
|
||||
box = ng.constant(boxes_data, dtype=np.float)
|
||||
scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3,
|
||||
0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32")
|
||||
scores_data = scores_data.reshape([1, 2, 6])
|
||||
score = ng.constant(scores_data, dtype=np.float)
|
||||
|
||||
nms_node = ng.matrix_nms(box, score, output_type="i32", nms_top_k=3,
|
||||
score_threshold=0.0, sort_result_type="score", background_class=0,
|
||||
decay_function="linear", gaussian_sigma=2.0, post_threshold=0.0)
|
||||
|
||||
assert nms_node.get_type_name() == "MatrixNms"
|
||||
assert nms_node.get_output_size() == 3
|
||||
assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)])
|
||||
assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)])
|
||||
assert list(nms_node.outputs()[2].get_shape()) == [1, ]
|
||||
assert nms_node.get_output_element_type(0) == Type.f32
|
||||
assert nms_node.get_output_element_type(1) == Type.i32
|
||||
assert nms_node.get_output_element_type(2) == Type.i32
|
||||
|
@ -26,14 +26,18 @@ def test_avg_pool_2d(_ndarray_1x1x4x4):
    exclude_pad = True
    expected = [[[[13.5, 15.5], [21.5, 23.5]]]]

    avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
    avg_pool_node = ng.avg_pool(
        param, strides, pads_begin, pads_end, kernel_shape, exclude_pad
    )
    computation = runtime.computation(avg_pool_node, param)
    result = computation(input_data)
    assert np.allclose(result, expected)

    expected = [[[[13.5, 14.5, 15.5], [17.5, 18.5, 19.5], [21.5, 22.5, 23.5]]]]
    strides = [1, 1]
    avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
    avg_pool_node = ng.avg_pool(
        param, strides, pads_begin, pads_end, kernel_shape, exclude_pad
    )
    computation = runtime.computation(avg_pool_node, param)
    result = computation(input_data)
    assert np.allclose(result, expected)
@ -44,14 +48,18 @@ def test_avg_pool_2d(_ndarray_1x1x4x4):
    exclude_pad = True

    expected = [[[[11.0, 12.5, 14.0], [17.0, 18.5, 20.0], [23.0, 24.5, 26.0]]]]
    avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
    avg_pool_node = ng.avg_pool(
        param, strides, pads_begin, pads_end, kernel_shape, exclude_pad
    )
    computation = runtime.computation(avg_pool_node, param)
    result = computation(input_data)
    assert np.allclose(result, expected)

    exclude_pad = False
    expected = [[[[2.75, 6.25, 3.5], [8.5, 18.5, 10.0], [5.75, 12.25, 6.5]]]]
    avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
    avg_pool_node = ng.avg_pool(
        param, strides, pads_begin, pads_end, kernel_shape, exclude_pad
    )
    computation = runtime.computation(avg_pool_node, param)
    result = computation(input_data)
    assert np.allclose(result, expected)
@ -69,7 +77,9 @@ def test_avg_pooling_3d(_ndarray_1x1x4x4):
    pads_end = [0] * spatial_dim_count
    exclude_pad = True

    avgpool = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
    avgpool = ng.avg_pool(
        param, strides, pads_begin, pads_end, kernel_shape, exclude_pad
    )
    comp = rt.computation(avgpool, param)
    result = comp(data)
    result_ref = [[[[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]]]]
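Editor's note: the avg_pool hunks above only re-wrap existing calls across several lines; the argument order (data, strides, pads_begin, pads_end, kernel_shape, exclude_pad) is unchanged. A self-contained sketch in the new formatting, with made-up input data instead of the test fixtures and assuming the usual compatibility runtime helper:

    # Hypothetical sketch only -- the fixture names from the real tests
    # (param, runtime, input_data) are replaced with local ones.
    import ngraph as ng
    import numpy as np
    from tests_compatibility.runtime import get_runtime

    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    param = ng.parameter(data.shape, name="A", dtype=np.float32)
    avg_pool_node = ng.avg_pool(
        param, [2, 2], [0, 0], [0, 0], [2, 2], True  # strides, pads_begin, pads_end, kernel, exclude_pad
    )
    result = get_runtime().computation(avg_pool_node, param)(data)
    # Hand-computed expectation for this input: [[[[3.0, 5.0], [11.0, 13.0]]]]
    print(result)
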
@ -85,35 +95,20 @@ def test_max_pool_basic():
    # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    strides = [1, 1]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    kernel_shape = [2, 2]
    rounding_type = "floor"
    auto_pad = None
    index_et = "i32"

    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
    maxpool_node = ng.max_pool(
        data_node,
        strides,
        dilations,
        pads_begin,
        pads_end,
        kernel_shape,
        rounding_type,
        auto_pad,
        index_et,
    )
    maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
    comp = rt.computation(maxpool_node, data_node)

    result = comp(data)

    expected = np.array(
        [[[[5.5, 6.5, 7.5], [9.5, 10.5, 11.5], [13.5, 14.5, 15.5]]]], dtype=np.float32
    )
    expected_idx = np.array([[[[5, 6, 7], [9, 10, 11], [13, 14, 15]]]], dtype=np.int32)
    assert np.allclose(result[0], expected)
    assert np.allclose(result[1], expected_idx)
    assert np.allclose(result, expected)

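Editor's note: this hunk is the heart of the revert. The removed nine-argument call is the opset8-style MaxPool, which also returns the indices of the maxima as a second output; the retained five-argument call is the opset1-style MaxPool with a single output, so the expected_idx assertions disappear with it. A hedged side-by-side sketch, reusing the variable names from the test above (the opset8 form is only callable while ngraph still re-exports it):

    # Opset8-flavoured call removed by this commit: extra dilations / rounding_type /
    # auto_pad / index element type arguments, and a second "indices" output.
    maxpool_v8 = ng.max_pool(data_node, strides, dilations, pads_begin, pads_end,
                             kernel_shape, rounding_type, auto_pad, index_et)
    values, indices = rt.computation(maxpool_v8, data_node)(data)

    # Opset1-flavoured call kept by this commit: pooled values only.
    maxpool_v1 = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
    values_only = rt.computation(maxpool_v1, data_node)(data)
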
def test_max_pool_strides():
@ -125,33 +120,17 @@ def test_max_pool_strides():
    # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    strides = [2, 1]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    kernel_shape = [2, 2]
    rounding_type = "floor"
    auto_pad = None
    index_et = "i32"

    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
    maxpool_node = ng.max_pool(
        data_node,
        strides,
        dilations,
        pads_begin,
        pads_end,
        kernel_shape,
        rounding_type,
        auto_pad,
        index_et,
    )
    maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
    comp = rt.computation(maxpool_node, data_node)
    result = comp(data)

    expected = np.array([[[[5.5, 6.5, 7.5], [13.5, 14.5, 15.5]]]], dtype=np.float32)
    expected_idx = np.array([[[[5, 6, 7], [13, 14, 15]]]], dtype=np.int32)
    assert np.allclose(result[0], expected)
    assert np.allclose(result[1], expected_idx)
    assert np.allclose(result, expected)

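Editor's note: the 2x3 expected array above follows from the usual pooling output-size formula, out = floor((in + pad_begin + pad_end - kernel) / stride) + 1. A tiny sanity check with a hypothetical helper:

    # Hypothetical helper -- pooling output size with "floor" rounding.
    def pool_out_size(in_size, kernel, stride, pad_begin=0, pad_end=0):
        return (in_size + pad_begin + pad_end - kernel) // stride + 1

    # 4x4 input, 2x2 kernel, strides [2, 1], no padding -> 2 rows x 3 columns,
    # matching the [[5.5, 6.5, 7.5], [13.5, 14.5, 15.5]] expectation above.
    assert pool_out_size(4, 2, 2) == 2
    assert pool_out_size(4, 2, 1) == 3
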
def test_max_pool_kernel_shape1x1():
@ -163,31 +142,16 @@ def test_max_pool_kernel_shape1x1():
    # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    strides = [1, 1]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    kernel_shape = [1, 1]
    rounding_type = "floor"
    auto_pad = None
    index_et = "i32"

    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
    maxpool_node = ng.max_pool(
        data_node,
        strides,
        dilations,
        pads_begin,
        pads_end,
        kernel_shape,
        rounding_type,
        auto_pad,
        index_et,
    )
    maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
    comp = rt.computation(maxpool_node, data_node)
    result = comp(data)

    assert np.allclose(result[0], data)
    assert np.allclose(result[1], np.arange(0, 16, dtype=np.int32).reshape((1, 1, 4, 4)))
    assert np.allclose(result, data)

def test_max_pool_kernel_shape3x3():
@ -199,31 +163,17 @@ def test_max_pool_kernel_shape3x3():
    # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    strides = [1, 1]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    kernel_shape = [3, 3]
    rounding_type = "floor"
    auto_pad = None
    index_et = "i32"

    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
    maxpool_node = ng.max_pool(
        data_node,
        strides,
        dilations,
        pads_begin,
        pads_end,
        kernel_shape,
        rounding_type,
        auto_pad,
        index_et,
    )
    maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
    comp = rt.computation(maxpool_node, data_node)
    result = comp(data)

    expected = np.array([[[[10.5, 11.5], [14.5, 15.5]]]], dtype=np.float32)
    assert np.allclose(result[0], expected)
    assert np.allclose(result, expected)

def test_max_pool_non_zero_pads():
@ -235,7 +185,6 @@ def test_max_pool_non_zero_pads():
    # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    strides = [1, 1]
    dilations = [1, 1]
    pads_begin = [1, 1]
    pads_end = [1, 1]
    # 0 0 , 0 , 0 , 0, 0
@ -245,22 +194,9 @@ def test_max_pool_non_zero_pads():
    # 0 [12.5, 13.5, 14.5, 15.5], 0
    # 0 0 , 0 , 0 , 0, 0
    kernel_shape = [2, 2]
    rounding_type = "floor"
    auto_pad = None
    index_et = "i32"

    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
    maxpool_node = ng.max_pool(
        data_node,
        strides,
        dilations,
        pads_begin,
        pads_end,
        kernel_shape,
        rounding_type,
        auto_pad,
        index_et,
    )
    maxpool_node = ng.max_pool(data_node, strides, pads_begin, pads_end, kernel_shape)
    comp = rt.computation(maxpool_node, data_node)
    result = comp(data)

@ -278,22 +214,7 @@ def test_max_pool_non_zero_pads():
        ],
        dtype=np.float32,
    )
    expected_idx = np.array(
        [
            [
                [
                    [0, 1, 2, 3, 3],
                    [4, 5, 6, 7, 7],
                    [8, 9, 10, 11, 11],
                    [12, 13, 14, 15, 15],
                    [12, 13, 14, 15, 15],
                ]
            ]
        ],
        dtype=np.int32,
    )
    assert np.allclose(result[0], expected)
    assert np.allclose(result[1], expected_idx)
    assert np.allclose(result, expected)

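Editor's note: in the padded test above, the border outputs repeat the nearest data values, which is consistent with MaxPool ignoring padded positions. A small NumPy reference (an illustrative assumption, not code from the repository) reproduces the 5x5 output shape and the first expected index row [0, 1, 2, 3, 3]:

    import numpy as np

    def maxpool2d_ref(x, kernel, strides, pads_begin, pads_end):
        # Pad with -inf so padded cells can never win the max (assumed MaxPool semantics).
        padded = np.pad(x, ((pads_begin[0], pads_end[0]), (pads_begin[1], pads_end[1])),
                        constant_values=-np.inf)
        out_h = (padded.shape[0] - kernel[0]) // strides[0] + 1
        out_w = (padded.shape[1] - kernel[1]) // strides[1] + 1
        out = np.empty((out_h, out_w), dtype=x.dtype)
        for i in range(out_h):
            for j in range(out_w):
                hs, ws = i * strides[0], j * strides[1]
                out[i, j] = padded[hs:hs + kernel[0], ws:ws + kernel[1]].max()
        return out

    plane = np.arange(0.5, 16, dtype=np.float32).reshape(4, 4)
    ref = maxpool2d_ref(plane, kernel=[2, 2], strides=[1, 1], pads_begin=[1, 1], pads_end=[1, 1])
    assert ref.shape == (5, 5)
    assert np.allclose(ref[0], [0.5, 1.5, 2.5, 3.5, 3.5])  # maxima taken at indices 0, 1, 2, 3, 3
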
def test_max_pool_same_upper_auto_pads():
@ -305,7 +226,6 @@ def test_max_pool_same_upper_auto_pads():
    # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    strides = [1, 1]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    # [ 0.5, 1.5, 2.5, 3.5], 0,
@ -315,20 +235,10 @@ def test_max_pool_same_upper_auto_pads():
    # 0 , 0 , 0 , 0, 0
    kernel_shape = [2, 2]
    auto_pad = "same_upper"
    rounding_type = "floor"
    index_et = "i32"

    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
    maxpool_node = ng.max_pool(
        data_node,
        strides,
        dilations,
        pads_begin,
        pads_end,
        kernel_shape,
        rounding_type,
        auto_pad,
        index_et,
        data_node, strides, pads_begin, pads_end, kernel_shape, auto_pad=auto_pad
    )
    comp = rt.computation(maxpool_node, data_node)
    result = comp(data)
@ -346,21 +256,7 @@ def test_max_pool_same_upper_auto_pads():
        ],
        dtype=np.float32,
    )
    expected_idx = np.array(
        [
            [
                [
                    [5, 6, 7, 7],
                    [9, 10, 11, 11],
                    [13, 14, 15, 15],
                    [13, 14, 15, 15],
                ]
            ]
        ],
        dtype=np.int32,
    )
    assert np.allclose(result[0], expected)
    assert np.allclose(result[1], expected_idx)
    assert np.allclose(result, expected)

def test_max_pool_same_lower_auto_pads():
@ -372,7 +268,6 @@ def test_max_pool_same_lower_auto_pads():
    # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
    data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
    strides = [1, 1]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    # 0 0 , 0 , 0 , 0,
@ -382,20 +277,10 @@ def test_max_pool_same_lower_auto_pads():
    # 0 [12.5, 13.5, 14.5, 15.5],
    kernel_shape = [2, 2]
    auto_pad = "same_lower"
    rounding_type = "floor"
    index_et = "i32"

    data_node = ng.parameter(data.shape, name="A", dtype=np.float32)
    maxpool_node = ng.max_pool(
        data_node,
        strides,
        dilations,
        pads_begin,
        pads_end,
        kernel_shape,
        rounding_type,
        auto_pad,
        index_et,
        data_node, strides, pads_begin, pads_end, kernel_shape, auto_pad=auto_pad
    )
    comp = rt.computation(maxpool_node, data_node)
    result = comp(data)
@ -413,18 +298,4 @@ def test_max_pool_same_lower_auto_pads():
        ],
        dtype=np.float32,
    )
    expected_idx = np.array(
        [
            [
                [
                    [0, 1, 2, 3],
                    [4, 5, 6, 7],
                    [8, 9, 10, 11],
                    [12, 13, 14, 15],
                ]
            ]
        ],
        dtype=np.int32,
    )
    assert np.allclose(result[0], expected)
    assert np.allclose(result[1], expected_idx)
    assert np.allclose(result, expected)

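Editor's note: the two auto_pad tests above differ only in where the implicit padding lands. With a 2x2 kernel and stride 1 the total padding per axis is kernel - 1 = 1; "same_upper" puts it after the data (so the first maximum sits at flat index 5), while "same_lower" puts it before the data (first maximum at index 0). A hedged sketch of that split:

    # Hypothetical sketch of SAME padding arithmetic for stride 1: total padding is
    # kernel - 1, and the larger half goes at the end for "same_upper" and at the
    # beginning for "same_lower".
    def same_pads(kernel, mode):
        total = kernel - 1
        small, large = total // 2, total - total // 2
        return (small, large) if mode == "same_upper" else (large, small)

    assert same_pads(2, "same_upper") == (0, 1)  # pad after the data -> expected_idx starts at 5
    assert same_pads(2, "same_lower") == (1, 0)  # pad before the data -> expected_idx starts at 0
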
@ -1,27 +0,0 @@
import ngraph as ng
import numpy as np
from tests_compatibility.runtime import get_runtime


def test_random_uniform():
    runtime = get_runtime()
    input_tensor = ng.constant(np.array([2, 4, 3], dtype=np.int32))
    min_val = ng.constant(np.array([-2.7], dtype=np.float32))
    max_val = ng.constant(np.array([3.5], dtype=np.float32))

    random_uniform_node = ng.random_uniform(input_tensor, min_val, max_val,
                                            output_type="f32", global_seed=7461,
                                            op_seed=1546)
    computation = runtime.computation(random_uniform_node)
    random_uniform_results = computation()
    expected_results = np.array([[[2.8450181, -2.3457108, 2.2134445],
                                  [-1.0436587, 0.79548645, 1.3023183],
                                  [0.34447956, -2.0267959, 1.3989122],
                                  [0.9607613, 1.5363653, 3.117298]],

                                 [[1.570041, 2.2782724, 2.3193843],
                                  [3.3393657, 0.63299894, 0.41231918],
                                  [3.1739233, 0.03919673, -0.2136085],
                                  [-1.4519991, -2.277353, 2.630727]]], dtype=np.float32)

    assert np.allclose(random_uniform_results, expected_results)

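Editor's note: the deleted file above pinned exact values for a fixed (global_seed, op_seed) pair. A looser variant would assert only reproducibility and range; the sketch below is hedged -- it assumes random_uniform stays importable and deterministic for fixed seeds on the compatibility runtime, which is exactly what this commit stops providing for the ngraph package:

    # Hypothetical sketch only -- not code from the repository.
    import ngraph as ng
    import numpy as np
    from tests_compatibility.runtime import get_runtime

    shape = ng.constant(np.array([2, 4, 3], dtype=np.int32))
    low = ng.constant(np.array([-2.7], dtype=np.float32))
    high = ng.constant(np.array([3.5], dtype=np.float32))
    node = ng.random_uniform(shape, low, high, output_type="f32",
                             global_seed=7461, op_seed=1546)

    runtime = get_runtime()
    first = np.asarray(runtime.computation(node)())
    second = np.asarray(runtime.computation(node)())
    assert np.allclose(first, second)                   # fixed seeds -> same draw (assumed)
    assert first.min() >= -2.7 and first.max() <= 3.5   # values stay inside the requested range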