[Tools] Avoid use of NumPy deprecated types

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
Author: Kazantsev, Roman 2022-12-19 14:26:39 +04:00
parent ee256e801c
commit 21ffc167d1
35 changed files with 106 additions and 106 deletions
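Background for the change: np.bool, np.float and the other scalar aliases of Python builtins were deprecated in NumPy 1.20 and removed in NumPy 1.24, so code that still references them stops working on newer NumPy releases. Below is a minimal sketch (illustrative only, not part of the patch) of the substitutions applied throughout these diffs; note that the old np.float aliased the builtin 64-bit float, while most call sites here move to the narrower np.float32.

import numpy as np

# Illustrative summary of the substitutions in this commit.
replacements = {
    'np.bool': bool,         # the alias pointed at the Python builtin, not np.bool_
    'np.float': np.float32,  # the alias pointed at builtin float (64-bit); float32 is chosen here
}

# The replacements are accepted anywhere a dtype is expected:
mask = np.zeros((2, 3), dtype=bool)             # formerly dtype=np.bool
scale = np.array([1.0, 2.0], dtype=np.float32)  # formerly dtype=np.float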

View File

@@ -260,7 +260,7 @@ def get_image_info_tensors(image_sizes, layer):
def fill_tensors_with_random(layer):
dtype = get_dtype(layer.element_type)
rand_min, rand_max = (0, 1) if dtype == np.bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
rand_min, rand_max = (0, 1) if dtype == bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
# np.random.uniform excludes high: add 1 to have it generated
if np.dtype(dtype).kind in ['i', 'u', 'b']:
rand_max += 1
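A minimal, self-contained sketch of the random-fill logic above; the real helper derives dtype and shape from an OpenVINO layer object, so the simplified signature below is an assumption for illustration.

import numpy as np

def fill_random(shape, dtype):
    # boolean inputs use the [0, 1] range, everything else the uint8 range
    rand_min, rand_max = (0, 1) if dtype == bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
    # np.random.uniform excludes `high`, so widen the range by one for integer/boolean kinds
    if np.dtype(dtype).kind in ['i', 'u', 'b']:
        rand_max += 1
    return np.random.uniform(rand_min, rand_max, shape).astype(dtype)

fill_random((2, 4), bool)      # the builtin bool works wherever np.bool used to
fill_random((2, 4), np.uint8)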

View File

@@ -528,7 +528,7 @@ def parse_scale_or_mean(parameter_string, input_info):
if matches:
for match in matches:
input_name, value = match
f_value = np.array(value.split(",")).astype(np.float)
f_value = np.array(value.split(",")).astype(np.float32)
if input_name != '':
return_value[input_name] = f_value
else:

View File

@@ -10,7 +10,7 @@ from openvino.tools.mo.utils.model_analysis import AnalyzeAction
class TrainingPhaseAnalysis(AnalyzeAction):
def analyze(self, graph: Graph):
nodes = graph.get_op_nodes(op='Parameter', data_type=np.bool)
nodes = graph.get_op_nodes(op='Parameter', data_type=bool)
names = ""
params = ""
if not nodes:

View File

@@ -46,10 +46,10 @@ class FreezePlaceholderValue(FrontReplacementSubgraph):
data_type = SUPPORTED_DATA_TYPES[graph.graph['cmd_params'].data_type][0]
string_value = graph.graph['freeze_placeholder'][name]
try:
if data_type != np.bool:
if data_type != bool:
value = mo_array(string_value, dtype=data_type)
# TODO: investigate why boolean type is allowed only for TensorFlow
elif data_type == np.bool and graph.graph['fw'] == 'tf':
elif data_type == bool and graph.graph['fw'] == 'tf':
from openvino.tools.mo.front.tf.common import tf_data_type_cast
if isinstance(string_value, list):
casted_list = list()

View File

@@ -22,7 +22,7 @@ class LSTMNonlinearityFrontExtractor(FrontExtractorOp):
ifo_x_weights, ifo_x_weights_shape = read_binary_matrix(pb)
try:
use_dropout = collect_until_token_and_read(pb, b'<UseDropout>', np.bool)
use_dropout = collect_until_token_and_read(pb, b'<UseDropout>', bool)
except Error:
# the layer has no UseDropout attribute, so set it to False
use_dropout = False

View File

@@ -258,7 +258,7 @@ def read_token_value(file_desc: io.BufferedReader, token: bytes = b'', value_typ
getters = {
np.uint32: read_binary_integer32_token,
np.uint64: read_binary_integer64_token,
np.bool: read_binary_bool_token
bool: read_binary_bool_token
}
current_token = collect_until_whitespace(file_desc)
if token != b'' and token != current_token:
@@ -314,7 +314,7 @@ def collect_until_token_and_read(file_desc: io.BufferedReader, token, value_type
getters = {
np.uint32: read_binary_integer32_token,
np.uint64: read_binary_integer64_token,
np.bool: read_binary_bool_token,
bool: read_binary_bool_token,
np.string_: read_string
}
collect_until_token(file_desc, token)
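The getters dictionaries above dispatch on the value_type object that callers pass, and the builtin bool is a different dictionary key than np.bool_, so the keys must match the call sites (for example the <UseDropout> read updated earlier in this commit). A small sketch of the pattern, with a stand-in reader instead of the real Kaldi binary parsing:

import io
import numpy as np

def read_binary_bool_token(file_desc):
    return file_desc.read(1) != b'\x00'  # stand-in reader, not the real implementation

getters = {
    np.uint32: lambda fd: int.from_bytes(fd.read(4), 'little'),
    bool: read_binary_bool_token,
}

def read_value(file_desc, value_type=bool):
    # the lookup raises KeyError unless the caller passes exactly a registered key
    return getters[value_type](file_desc)

read_value(io.BytesIO(b'\x01'), bool)  # True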

View File

@@ -177,7 +177,7 @@ class RestrictedAttentionComponentReplacer(FrontReplacementPattern):
split_2_node.out_port(0).connect(einsum_1_node.in_port(1))
mul_node = create_op_with_const_inputs(graph, Mul, {1: mo_array(key_scale, dtype=np.float)},
mul_node = create_op_with_const_inputs(graph, Mul, {1: mo_array(key_scale, dtype=np.float32)},
{'name': self.in_name + '/Mul'})
reshape_helper_1_node.out_port(0).connect(mul_node.in_port(0))

View File

@@ -39,7 +39,7 @@ class ONNXLoopNormalize(FrontReplacementSubgraph):
# connect "execution condition" input if it is not connected with default value True
if not loop_node.is_in_port_connected(1):
loop_node.add_input_port(1, skip_if_exist=True)
Const(loop_node.graph, {'name': loop_name + '/execution_cond', 'value': mo_array(True, dtype=np.bool)}).\
Const(loop_node.graph, {'name': loop_name + '/execution_cond', 'value': mo_array(True, dtype=bool)}).\
create_node().out_port(0).connect(loop_node.in_port(1))
# scan output needs Unsqueeze over axis 0

View File

@@ -56,7 +56,7 @@ def get_onnx_opset_version(node: Node):
def get_onnx_datatype_as_numpy(value):
datatype_to_numpy = {
1: np.float32,
9: np.bool,
9: bool,
11: np.double,
10: np.float16,
5: np.int16,

View File

@@ -16,7 +16,7 @@ class ExperimentalDetectronGroupNorm(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = {
'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float32),
'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)),
}
GroupNorm.update_node_stat(node, attrs)
@@ -30,7 +30,7 @@ class GroupNormExtractor(FrontExtractorOp):
@classmethod
def extract(cls, node):
attrs = {
'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float32),
'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)),
}
GroupNorm.update_node_stat(node, attrs)

View File

@@ -243,7 +243,7 @@ class MapFNOutputConcatenation(FrontReplacementSubgraph):
if 'purpose' in record and record['purpose'] == 'execution_condition':
exec_cond_layer_id = record['internal_layer_id']
exec_cond_node = Loop.get_body_node_by_internal_id(loop_node, exec_cond_layer_id)
const_true = Const(body_graph, {'value': mo_array(True, dtype=np.bool)}).create_node()
const_true = Const(body_graph, {'value': mo_array(True, dtype=bool)}).create_node()
exec_cond_node.in_port(0).get_connection().set_source(const_true.out_port(0))
# remove back edge

View File

@@ -34,7 +34,7 @@ class WhileNormalize(FrontReplacementSubgraph):
# connect execution condition port
exec_cond_node = Const(graph, {'name': loop_name + '/ExecutionConditionValue',
'value': mo_array(True, dtype=np.bool)}).create_node()
'value': mo_array(True, dtype=bool)}).create_node()
loop_node.in_port(1).get_connection().set_source(exec_cond_node.out_port(0))
loop_node.body.clean_up()

View File

@@ -7,7 +7,7 @@ from tensorflow.core.framework import types_pb2 as tf_types # pylint: disable=n
# Suppress false positive pylint warning about function with too many arguments
# pylint: disable=E1121
# mapping between TF data type and numpy data type and function to extract data from TF tensor
_tf_np_mapping = [('DT_BOOL', np.bool, lambda pb: pb.bool_val, lambda x: bool_cast(x)),
_tf_np_mapping = [('DT_BOOL', bool, lambda pb: pb.bool_val, lambda x: bool_cast(x)),
('DT_INT8', np.int8, lambda pb: pb.int_val, lambda x: np.int8(x)),
('DT_INT16', np.int16, lambda pb: pb.int_val, lambda x: np.int16(x)),
('DT_INT32', np.int32, lambda pb: pb.int_val, lambda x: np.int32(x)),
@@ -32,4 +32,4 @@ def bool_cast(x):
if isinstance(x, str):
return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast'
else:
return np.bool(x)
return bool(x)
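bool_cast accepts both string and numeric inputs, and the builtin bool(x) fallback returns the same values the removed np.bool(x) alias did. A few illustrative checks (they assume bool_cast from the file above is in scope):

assert bool_cast('False') is False
assert bool_cast('1') is True
assert bool_cast(0) is False                     # identical to the old np.bool(0)
assert bool_cast('maybe') == 'unknown_boolean_cast'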

View File

@@ -40,7 +40,7 @@ SUPPORTED_DATA_TYPES = {
'int8': (np.int8, 'I8', 'i8'),
'int32': (np.int32, 'I32', 'i32'),
'int64': (np.int64, 'I64', 'i64'),
'bool': (np.bool, 'BOOL', 'boolean'),
'bool': (bool, 'BOOL', 'boolean'),
'uint8': (np.uint8, 'U8', 'u8'),
'uint32': (np.uint32, 'U32', 'u32'),
'uint64': (np.uint64, 'U64', 'u64'),

View File

@@ -43,7 +43,7 @@ def reduce_helper(func: callable, x: np.array, axis: tuple, keepdims: bool):
if is_fully_defined(x):
return result
else:
return np.ma.masked_array(result, mask=np.ones(result.shape, dtype=np.bool))
return np.ma.masked_array(result, mask=np.ones(result.shape, dtype=bool))
def reduce_infer(node: Node):
@@ -73,7 +73,7 @@ def reduce_infer(node: Node):
value = reduce_helper(reduce_map[node.op], in_value.copy(), axis=tuple(axis), keepdims=node.keep_dims)
node.out_port(0).data.set_value(value)
else:
used_dims = np.zeros(len(in_shape), dtype=np.bool)
used_dims = np.zeros(len(in_shape), dtype=bool)
output_shape = in_shape.copy()
for dim in axis:
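For a partially defined input, reduce_helper above returns a fully masked array of the reduced shape so that every element propagates as unknown; plain bool is the standard dtype for NumPy masks. A minimal standalone illustration, independent of the Model Optimizer graph classes:

import numpy as np

result = np.sum(np.ones((2, 3), dtype=np.float32), axis=1, keepdims=False)
masked = np.ma.masked_array(result, mask=np.ones(result.shape, dtype=bool))
print(masked)             # [-- --]: every element is masked
print(masked.mask.all())  # True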

View File

@@ -231,7 +231,7 @@ class LogicalNot(Activation):
@staticmethod
def type_infer(node: Node):
node.out_port(0).set_data_type(np.bool)
node.out_port(0).set_data_type(bool)
class Log(Activation):

View File

@@ -115,7 +115,7 @@ class LogicalElementwise(Elementwise):
@staticmethod
def type_infer(node):
override_data_type_of_constant(node)
node.out_port(0).set_data_type(np.bool)
node.out_port(0).set_data_type(bool)
class Greater(LogicalElementwise):

View File

@@ -90,7 +90,7 @@ class Select(Op):
# one of the branches is None (which is not selected)
# if we use np.where for such cases then dtype of output_value will be object (non numeric type)
# and subsequent numpy operation on such tensors will fail
output_value = resulting_tensors[not np.bool(condition_value.item(0))]
output_value = resulting_tensors[not bool(condition_value.item(0))]
if output_value is None:
return
if broadcast_rule == 'numpy':
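The comment above is why Select indexes the (then, else) pair with a plain Python bool instead of calling np.where when one branch has no value: passing None to np.where yields an object-dtype array that later numeric operations cannot handle. A quick standalone demonstration of both points:

import numpy as np

cond = np.array([True, False])
then_value = np.ones(2, dtype=np.float32)
print(np.where(cond, then_value, None).dtype)  # object, not a numeric dtype

# indexing the branch pair with a builtin bool, as the code above does:
resulting_tensors = [then_value, None]
condition_value = np.array(True)
output_value = resulting_tensors[not bool(condition_value.item(0))]  # picks then_value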

View File

@@ -152,6 +152,6 @@ class Unique(Op):
# write result to output nodes
j = 0
for out_node_ind in node.out_nodes():
node.out_node(out_node_ind).value = mo_array(unique_output[j], dtype=np.float)
node.out_node(out_node_ind).value = mo_array(unique_output[j], dtype=np.float32)
node.out_node(out_node_ind).shape = int64_array(node.out_node(out_node_ind).value.shape)
j += 1

View File

@@ -346,7 +346,7 @@ class IREngine(object):
'U1': (1, np.uint8),
'U4': (1, np.uint8),
'I4': (1, np.uint8),
'BOOL': (1, np.bool),
'BOOL': (1, bool),
'BIN': (1, np.uint8),
'U64': (8, np.uint64)
}
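Each entry in this table appears to pair an element size in bytes with the NumPy dtype used to reinterpret the raw buffer. The builtin bool is a valid dtype specifier and resolves to the same one-byte np.bool_ that the removed alias pointed at, so the (1, bool) pair stays consistent:

import numpy as np

assert np.dtype(bool) == np.dtype(np.bool_)
assert np.dtype(bool).itemsize == 1  # matches the size recorded for 'BOOL'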

View File

@@ -9,7 +9,7 @@ from openvino.tools.mo.graph.graph import Node
from openvino.tools.mo.pipeline.common import convert_const_node_value_type
from openvino.tools.mo.utils.error import Error
np_map_cast = {np.bool: lambda x: bool_cast(x),
np_map_cast = {bool: lambda x: bool_cast(x),
np.int8: lambda x: np.int8(x),
np.int16: lambda x: np.int16(x),
np.int32: lambda x: np.int32(x),
@@ -28,7 +28,7 @@ def bool_cast(x):
if isinstance(x, str):
return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast'
else:
return np.bool(x)
return bool(x)
def override_data_type_of_constant(node: Node, lhs_idx: int = 0, rhs_idx: int = 1):

View File

@@ -35,7 +35,7 @@ class RestrictedAttentionComponentReplacerTest(unittest.TestCase):
**regular_op('reshape_helper_1', {'type': 'Reshape'}),
**const('reshape_helper_1_shape', int64_array([10, 1])),
**regular_op('mul', {'type': 'Multiply'}),
**const('mul_scale', mo_array(0.5, dtype=np.float)),
**const('mul_scale', mo_array(0.5, dtype=np.float32)),
**regular_op('add', {'type': 'Add'}),
**regular_op('softmax', {'type': 'SoftMax'}),
**regular_op('reshape_helper_3', {'type': 'Reshape'}),

View File

@@ -46,8 +46,8 @@ class TestPriorBoxClusteredExt(unittest.TestCase):
self.assertRaises(AttributeError, PriorBoxClusteredFrontExtractor.extract, None)
def test_priorbox_clustered_ext_ideal_numbers(self):
node = self._create_priorbox_clustered_node(width= np.array([2, 3], dtype=np.float),
height=np.array([4, 5], dtype=np.float),
node = self._create_priorbox_clustered_node(width= np.array([2, 3], dtype=np.float32),
height=np.array([4, 5], dtype=np.float32),
variance=np.array([0.2, 0.3, 0.2, 0.3]),
img_size=300, step=5.0, offset=0.6, flip=True)
@@ -58,8 +58,8 @@ class TestPriorBoxClusteredExt(unittest.TestCase):
'type': 'PriorBoxClustered',
'clip': 0,
'flip': 1,
'width': np.array([2, 3], dtype=np.float),
'height': np.array([4, 5], dtype=np.float),
'width': np.array([2, 3], dtype=np.float32),
'height': np.array([4, 5], dtype=np.float32),
'variance': [0.2, 0.3, 0.2, 0.3],
'img_size': 300,
'img_h': 0,

View File

@@ -47,7 +47,7 @@ class TestPriorBoxExt(unittest.TestCase):
self.assertRaises(AttributeError, PriorBoxFrontExtractor.extract, None)
def test_priorbox_ext_ideal_numbers(self):
node = self._create_priorbox_node(aspect_ratio=np.array([2, 3], dtype=np.float),
node = self._create_priorbox_node(aspect_ratio=np.array([2, 3], dtype=np.float32),
variance=np.array([0.2, 0.3, 0.2, 0.3]),
img_size=300, step=5.0, offset=0.6, flip=True)
@@ -58,7 +58,7 @@ class TestPriorBoxExt(unittest.TestCase):
'type': 'PriorBox',
'clip': 0,
'flip': 1,
'aspect_ratio': np.array([2, 3], dtype=np.float),
'aspect_ratio': np.array([2, 3], dtype=np.float32),
'variance': [0.2, 0.3, 0.2, 0.3],
'img_size': 300,
'img_h': 0,

View File

@@ -25,8 +25,8 @@ from unit_tests.utils.graph import regular_op_with_empty_data, connect, result,
@generator
class TestIf(unittest.TestCase):
@generate(*[
(np.array([True], dtype=np.bool), shape_array([3]), shape_array([3])),
(np.array([False], dtype=np.bool), shape_array([3]), shape_array([2])),
(np.array([True], dtype=bool), shape_array([3]), shape_array([3])),
(np.array([False], dtype=bool), shape_array([3]), shape_array([2])),
(shape_array(dynamic_dimension_value), shape_array([3]), shape_array([dynamic_dimension_value])),
])
def test_simple_shape_inf(self, cond, output_port_0_shape, output_port_1_shape):

View File

@@ -107,7 +107,7 @@ class TestPadOps(unittest.TestCase):
nodes_with_edges_only=True,
)
out_shape = (1, 1, 5, 8)
mask = np.zeros(out_shape, dtype=np.bool)
mask = np.zeros(out_shape, dtype=bool)
mask[0][0][1][2] = True
ref_value = np.ma.masked_array(np.zeros(out_shape, dtype=np.int64), mask=mask, dtype=np.int64)
ref_value[0][0][1][3] = 3

View File

@@ -60,16 +60,16 @@ class TestSelect(unittest.TestCase):
def test_1(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([5, 6], dtype=bool),
then_value=np.ones([5, 6], dtype=np.float),
else_value=np.zeros([5, 6], dtype=np.float),
out_value=np.ones([5, 6], dtype=np.float))
then_value=np.ones([5, 6], dtype=np.float32),
else_value=np.zeros([5, 6], dtype=np.float32),
out_value=np.ones([5, 6], dtype=np.float32))
self.assertTrue(flag, msg)
def test_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool),
then_value=np.ones([15, 3, 5], dtype=np.float),
else_value=np.zeros([15, 1, 5], dtype=np.float),
out_value=np.ones([15, 3, 5], dtype=np.float))
then_value=np.ones([15, 3, 5], dtype=np.float32),
else_value=np.zeros([15, 1, 5], dtype=np.float32),
out_value=np.ones([15, 3, 5], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_no_condition(self):
@@ -95,44 +95,44 @@ class TestSelect(unittest.TestCase):
def test_select_infer_condition_true_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.array([True], dtype=bool),
then_value=np.ones([15, 3, 5], dtype=np.float),
else_value=np.zeros([15, 1, 5], dtype=np.float),
out_value=np.ones([15, 3, 5], dtype=np.float))
then_value=np.ones([15, 3, 5], dtype=np.float32),
else_value=np.zeros([15, 1, 5], dtype=np.float32),
out_value=np.ones([15, 3, 5], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_condition_true_then_and_else_are_scalars(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.array([True], dtype=bool),
then_value=np.array(3, dtype=np.float),
else_value=np.array(1, dtype=np.float),
out_value=np.array([3], dtype=np.float))
then_value=np.array(3, dtype=np.float32),
else_value=np.array(1, dtype=np.float32),
out_value=np.array([3], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_condition_true_then_and_else_are_scalars_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.array(True, dtype=bool),
then_value=np.array(3, dtype=np.float),
else_value=np.array(1, dtype=np.float),
out_value=np.array(3, dtype=np.float))
then_value=np.array(3, dtype=np.float32),
else_value=np.array(1, dtype=np.float32),
out_value=np.array(3, dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_condition_false_then_and_else_are_scalars(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.array([False], dtype=bool),
then_value=np.array(3, dtype=np.float),
else_value=np.array(1, dtype=np.float),
out_value=np.array([1], dtype=np.float))
then_value=np.array(3, dtype=np.float32),
else_value=np.array(1, dtype=np.float32),
out_value=np.array([1], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_condition_false_then_and_else_are_scalars_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.array(False, dtype=bool),
then_value=np.array(3, dtype=np.float),
else_value=np.array(1, dtype=np.float),
out_value=np.array(1, dtype=np.float))
then_value=np.array(3, dtype=np.float32),
else_value=np.array(1, dtype=np.float32),
out_value=np.array(1, dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_condition_false_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.array([False], dtype=bool),
then_value=np.ones([15, 3, 5], dtype=np.float),
else_value=np.zeros([15, 1, 5], dtype=np.float),
out_value=np.zeros([15, 3, 5], dtype=np.float))
then_value=np.ones([15, 3, 5], dtype=np.float32),
else_value=np.zeros([15, 1, 5], dtype=np.float32),
out_value=np.zeros([15, 3, 5], dtype=np.float32))
self.assertTrue(flag, msg)
# if one of the branches is None then np.where shouldn't be used to avoid object dtype in output
@@ -142,57 +142,57 @@ class TestSelect(unittest.TestCase):
def test_select_infer_None_then_branch_1(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.zeros([15, 3, 5], dtype=bool),
then_value=None, then_shape=[15, 3, 5],
else_value=np.ones([15, 1, 5], dtype=np.float),
out_value=np.ones([15, 3, 5], dtype=np.float))
else_value=np.ones([15, 1, 5], dtype=np.float32),
out_value=np.ones([15, 3, 5], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_None_then_branch_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool),
then_value=None, then_shape=[15, 3, 5],
else_value=np.ones([15, 1, 5], dtype=np.float),
else_value=np.ones([15, 1, 5], dtype=np.float32),
out_value=None)
self.assertTrue(flag, msg)
def test_select_infer_None_else_branch_1(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool),
then_value=np.ones([15, 1, 5], dtype=np.float),
then_value=np.ones([15, 1, 5], dtype=np.float32),
else_value=None, else_shape=[15, 3, 5],
out_value=np.ones([15, 3, 5], dtype=np.float))
out_value=np.ones([15, 3, 5], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_infer_None_else_branch_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.zeros([15, 3, 5], dtype=bool),
then_value=np.ones([15, 1, 5], dtype=np.float),
then_value=np.ones([15, 1, 5], dtype=np.float32),
else_value=None, else_shape=[15, 3, 5],
out_value=None)
self.assertTrue(flag, msg)
def test_select_broadcast_1(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 5], dtype=bool),
then_value=np.ones([], dtype=np.float),
else_value=np.zeros([2, 3, 4, 5], dtype=np.float),
out_value=np.ones([2, 3, 4, 5], dtype=np.float))
then_value=np.ones([], dtype=np.float32),
else_value=np.zeros([2, 3, 4, 5], dtype=np.float32),
out_value=np.ones([2, 3, 4, 5], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_broadcast_2(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 1], dtype=bool),
then_value= np.ones([1, 3, 1, 5], dtype=np.float),
else_value=np.zeros([2, 1, 1, 5], dtype=np.float),
out_value=np.ones([2, 3, 4, 5], dtype=np.float))
then_value= np.ones([1, 3, 1, 5], dtype=np.float32),
else_value=np.zeros([2, 1, 1, 5], dtype=np.float32),
out_value=np.ones([2, 3, 4, 5], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_broadcast_3(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 1, 1], dtype=bool),
then_value= np.ones([2, 3, 4, 5], dtype=np.float),
else_value=np.zeros([2, 1, 1, 5], dtype=np.float),
out_value=np.ones([2, 3, 4, 5], dtype=np.float))
then_value= np.ones([2, 3, 4, 5], dtype=np.float32),
else_value=np.zeros([2, 1, 1, 5], dtype=np.float32),
out_value=np.ones([2, 3, 4, 5], dtype=np.float32))
self.assertTrue(flag, msg)
def test_select_broadcast_4(self):
flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 5], dtype=bool),
then_value= np.ones([5], dtype=np.float),
else_value=np.zeros([2, 3, 4, 5], dtype=np.float),
out_value=np.ones([2, 3, 4, 5], dtype=np.float))
then_value= np.ones([5], dtype=np.float32),
else_value=np.zeros([2, 3, 4, 5], dtype=np.float32),
out_value=np.ones([2, 3, 4, 5], dtype=np.float32))
self.assertTrue(flag, msg)
# when output shape is broadcasted from condition, then, and else shapes

View File

@@ -40,9 +40,9 @@ edges2 = [('input_data', 'sparse_segment_mean_node', {'in': 0}),
('input_segment_ids', 'sparse_segment_mean_node', {'in': 2}),
('sparse_segment_mean_node', 'output_segments', {'out': 0})]
inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float)},
'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=np.float)},
'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1, 2, 2], dtype=np.float)}}
inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float32)},
'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=np.float32)},
'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1, 2, 2], dtype=np.float32)}}
class TestSparseSegmentMean(unittest.TestCase):
def test_partial_infer(self):
@@ -76,7 +76,7 @@ class TestSparseSegmentMean(unittest.TestCase):
# prepare reference results
ref_output_segments_shape = int64_array([3, 4])
ref_output_segments_value = np.array([[3, 4, 5, 6], [-1, -2, -3, -4], [2, 2, 2, 2]], dtype=np.float)
ref_output_segments_value = np.array([[3, 4, 5, 6], [-1, -2, -3, -4], [2, 2, 2, 2]], dtype=np.float32)
# get resulted shapes
res_output_segments_shape = graph.node['output_segments']['shape']

View File

@@ -40,9 +40,9 @@ edges2 = [('input_data', 'sparse_segment_sqrtn_node', {'in': 0}),
('input_segment_ids', 'sparse_segment_sqrtn_node', {'in': 2}),
('sparse_segment_sqrtn_node', 'output_segments', {'out': 0})]
inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float)},
'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=np.float)},
'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 0, 0, 2], dtype=np.float)}}
inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float32)},
'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=np.float32)},
'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 0, 0, 2], dtype=np.float32)}}
class TestSparseSegmentSqrtN(unittest.TestCase):
def test_partial_infer(self):
@@ -76,7 +76,7 @@ class TestSparseSegmentSqrtN(unittest.TestCase):
# prepare reference results
ref_output_segments_shape = int64_array([3, 4])
ref_output_segments_value = np.array([[2, 2, 2, 2], [0, 0, 0, 0], [5, 6, 7, 8]], dtype=np.float)
ref_output_segments_value = np.array([[2, 2, 2, 2], [0, 0, 0, 0], [5, 6, 7, 8]], dtype=np.float32)
# get resulted shapes
res_output_segments_shape = graph.node['output_segments']['shape']

View File

@@ -40,9 +40,9 @@ edges2 = [('input_data', 'sparse_segment_sum_node', {'in': 0}),
('input_segment_ids', 'sparse_segment_sum_node', {'in': 2}),
('sparse_segment_sum_node', 'output_segments', {'out': 0})]
inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float)},
'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 1, 2], dtype=np.float)},
'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1], dtype=np.float)}}
inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float32)},
'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 1, 2], dtype=np.float32)},
'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1], dtype=np.float32)}}
class TestSparseSegmentSum(unittest.TestCase):
def test_partial_infer(self):
@@ -76,7 +76,7 @@ class TestSparseSegmentSum(unittest.TestCase):
# prepare reference results
ref_output_segments_shape = int64_array([2, 4])
ref_output_segments_value = np.array([[0, 0, 0, 0], [5, 6, 7, 8]], dtype=np.float)
ref_output_segments_value = np.array([[0, 0, 0, 0], [5, 6, 7, 8]], dtype=np.float32)
# get resulted shapes
res_output_segments_shape = graph.node['output_segments']['shape']

View File

@@ -162,7 +162,7 @@ class TestUnique(unittest.TestCase):
('unique_node', 'output_indices', {'out': 1}),
('unique_node', 'output_counts', {'out': 2})]
inputs_ = {'input': {'shape': int64_array([10]),
'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=np.float)},
'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=np.float32)},
'unique_node': {
'sorted': 'false',
'return_inverse': 'true',
@@ -175,11 +175,11 @@ class TestUnique(unittest.TestCase):
# prepare reference results
ref_output_uniques_shape = int64_array([5])
ref_output_uniques_value = np.array([8.0, 1.0, 2.0, 5.0, 0.0], dtype=np.float)
ref_output_uniques_value = np.array([8.0, 1.0, 2.0, 5.0, 0.0], dtype=np.float32)
ref_output_indices_shape = int64_array([10])
ref_output_indices_value = np.array([0.0, 1.0, 2.0, 1.0, 0.0, 3.0, 1.0, 3.0, 4.0, 4.0], dtype=np.float)
ref_output_indices_value = np.array([0.0, 1.0, 2.0, 1.0, 0.0, 3.0, 1.0, 3.0, 4.0, 4.0], dtype=np.float32)
ref_output_counts_shape = int64_array([5])
ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=np.float)
ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=np.float32)
# get resulted shapes
res_output_uniques_shape = graph.node['output_uniques']['shape']
@@ -217,7 +217,7 @@ class TestUnique(unittest.TestCase):
('unique_node', 'output_indices', {'out': 1}),
('unique_node', 'output_counts', {'out': 2})]
inputs_ = {'input': {'shape': int64_array([10]),
'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=np.float)},
'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=np.float32)},
'unique_node': {
'sorted': 'true',
'return_inverse': 'true',
@@ -230,11 +230,11 @@ class TestUnique(unittest.TestCase):
# prepare reference results
ref_output_uniques_shape = int64_array([5])
ref_output_uniques_value = np.array([0.0, 1.0, 2.0, 5.0, 8.0], dtype=np.float)
ref_output_uniques_value = np.array([0.0, 1.0, 2.0, 5.0, 8.0], dtype=np.float32)
ref_output_indices_shape = int64_array([10])
ref_output_indices_value = np.array([4.0, 1.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 0.0, 0.0], dtype=np.float)
ref_output_indices_value = np.array([4.0, 1.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 0.0, 0.0], dtype=np.float32)
ref_output_counts_shape = int64_array([5])
ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=np.float)
ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=np.float32)
# get resulted shapes
res_output_uniques_shape = graph.node['output_uniques']['shape']

View File

@@ -36,7 +36,7 @@ class TestConvertingConvertArgumentsToString(UnitTestWithMockedTelemetry):
inp8 = InputCutInfo("data2", [4, 5, 6], np.int64, [5, 4, 3, 2, 1])
self.assertTrue(input_to_str(inp8) == "data2[4 5 6]{i64}->[5 4 3 2 1]")
inp9 = InputCutInfo("data", [1], np.bool, True)
inp9 = InputCutInfo("data", [1], bool, True)
self.assertTrue(input_to_str(inp9) == "data[1]{boolean}->True")
inp = [inp6, inp7, inp8]
@@ -114,7 +114,7 @@ class TestConvertingConvertArgumentsToString(UnitTestWithMockedTelemetry):
self.assertRaises(Exception, input_to_str, **{"input": ("name", [np.int, 2, 3])})
self.assertRaises(Exception, input_to_str, **{"input": ("name", "name1", [2, 3])})
self.assertRaises(Exception, input_to_str, **{"input": ("name", [2, 3], Shape([1, 2]))})
self.assertRaises(Exception, input_to_str, **{"input": ("name", np.int, Type(np.float))})
self.assertRaises(Exception, input_to_str, **{"input": ("name", np.int, Type(np.float32))})
self.assertRaises(Exception, input_to_str, **{"input": Exception})
self.assertRaises(Exception, input_to_str, **{"input": ("name", Exception)})
self.assertRaises(Exception, input_to_str, **{"input": ("name", Dimension(1))})

View File

@@ -229,7 +229,7 @@ class MTCNNEngine(IEEngine):
def postprocess(output):
# extract_predictions
total_boxes = np.zeros((0, 9), np.float)
total_boxes = np.zeros((0, 9), np.float32)
for idx, outputs in enumerate(output):
scales = input_meta['scales'][idx]
mapping = outputs[[i for i, _ in outputs.items()

View File

@@ -59,8 +59,8 @@ class MAP(Metric):
npig = np.count_nonzero(gt_ignored == 0)
tps = np.logical_and(dtm, np.logical_not(dt_ignored))
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dt_ignored))
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32)
if npig == 0:
return np.nan, np.nan
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
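tp_sum and fp_sum above are cumulative true/false-positive counts, accumulated along the detection axis with one row per threshold by the look of the enumerate loop; they are cast to floating point only so the later precision/recall division is not integer math, and float32 (replacing np.float, which aliased the 64-bit builtin float) is ample for counts. A toy single-row version with made-up match data:

import numpy as np

dtm = np.array([[1, 0, 1, 1]], dtype=bool)  # detection-to-ground-truth matches
dt_ignored = np.zeros_like(dtm)
tps = np.logical_and(dtm, np.logical_not(dt_ignored))
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dt_ignored))
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32)  # [[1. 1. 2. 3.]]
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32)  # [[0. 1. 1. 1.]]
precision = tp_sum / (tp_sum + fp_sum)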

View File

@@ -20,7 +20,7 @@ GOLD_VALUES_AGGREGATION_FUNCTIONS_2_2_1 = {
'mean_no_outliers': 1.5,
'median_no_outliers': 1.5,
'hl_estimator': 1.5,
'batch_mean': np.array([[[1], [2]]], dtype=np.float)
'batch_mean': np.array([[[1], [2]]], dtype=np.float32)
}
GOLD_VALUES_AGGREGATION_FUNCTIONS_2_2_2 = {
@@ -31,7 +31,7 @@ GOLD_VALUES_AGGREGATION_FUNCTIONS_2_2_2 = {
'mean_no_outliers': [3., 4.],
'median_no_outliers': [3., 4.],
'hl_estimator': [3., 4.],
'batch_mean': np.array([[[2, 3], [4, 5]]], dtype=np.float)
'batch_mean': np.array([[[2, 3], [4, 5]]], dtype=np.float32)
}
GOLD_VALUES_AGGREGATION_FUNCTIONS = [