[Tools] Support NumPy 1.24 (#14728)
* [Tools] Avoid use of NumPy deprecated types
Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
* Revert "[Tools] Avoid use of NumPy deprecated types"
This reverts commit 21ffc167d1.
* Move to 1.24 Numpy
Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
parent d300abf743 · commit 0b2f3347f6
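Background for reviewers: NumPy 1.20 deprecated the scalar-type aliases np.float, np.int, np.bool (and np.object, np.str), and NumPy 1.24 removed them, so any remaining use now raises AttributeError. The aliases pointed at the Python builtins, not at np.float64/np.int64, which is why the mechanical builtin substitution applied throughout this commit is behavior-preserving. A minimal sketch of the pattern (illustrative, not part of the diff):

    import numpy as np

    # np.float / np.int / np.bool raise AttributeError on NumPy >= 1.24.
    # They were aliases for the builtins, so the replacements resolve to
    # exactly the same dtypes as before:
    x = np.zeros([2, 2], dtype=float)         # formerly dtype=np.float -> float64
    b = np.array([True, False], dtype=bool)   # formerly dtype=np.bool  -> bool_
    n = np.arange(3, dtype=int)               # formerly dtype=np.int   -> platform default int

    assert x.dtype == np.float64 and b.dtype == np.bool_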
@@ -2203,7 +2203,7 @@ def test_interpolate_opset10(dtype, expected_shape, shape_calculation_mode):
 
 def test_is_finite_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ov.parameter(input_shape, np.float, name="InputData")
+    input_node = ov.parameter(input_shape, float, name="InputData")
     node = ov_opset10.is_finite(input_node)
 
     assert node.get_type_name() == "IsFinite"
@@ -2214,7 +2214,7 @@ def test_is_finite_opset10():
 
 def test_is_inf_opset10_default():
     input_shape = [2, 2, 2, 2]
-    input_node = ov.parameter(input_shape, dtype=np.float, name="InputData")
+    input_node = ov.parameter(input_shape, dtype=float, name="InputData")
     node = ov_opset10.is_inf(input_node)
 
     assert node.get_type_name() == "IsInf"
@@ -2228,7 +2228,7 @@ def test_is_inf_opset10_default():
 
 def test_is_inf_opset10_custom_attribute():
     input_shape = [2, 2, 2]
-    input_node = ov.parameter(input_shape, dtype=np.float, name="InputData")
+    input_node = ov.parameter(input_shape, dtype=float, name="InputData")
     attributes = {
         "detect_positive": False,
     }
@@ -2245,7 +2245,7 @@ def test_is_inf_opset10_custom_attribute():
 
 def test_is_inf_opset10_custom_all_attributes():
     input_shape = [2, 2, 2]
-    input_node = ov.parameter(input_shape, dtype=np.float, name="InputData")
+    input_node = ov.parameter(input_shape, dtype=float, name="InputData")
     attributes = {
         "detect_negative": False,
         "detect_positive": True,
@@ -2263,7 +2263,7 @@ def test_is_inf_opset10_custom_all_attributes():
 
 def test_is_nan_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ov.parameter(input_shape, np.float, name="InputData")
+    input_node = ov.parameter(input_shape, float, name="InputData")
     node = ov_opset10.is_nan(input_node)
 
     assert node.get_type_name() == "IsNaN"
@@ -2274,7 +2274,7 @@ def test_is_nan_opset10():
 
 def test_unique_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ov.parameter(input_shape, np.float, name="input_data")
+    input_node = ov.parameter(input_shape, float, name="input_data")
     axis = ov.constant([1], np.int32, [1])
 
     node = ov_opset10.unique(input_node, axis, False, "i32")
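The ov.parameter calls above only swap the removed alias for the builtin; the element type they resolve to is unchanged. A quick check of the dtype resolution (plain NumPy, illustrative):

    import numpy as np

    assert np.dtype(float) == np.float64   # what np.float also resolved to
    assert np.dtype(bool) == np.bool_      # what np.bool also resolved to
    # np.dtype(int) is platform-dependent (usually int64 on Linux/macOS,
    # int32 on Windows) -- exactly as np.int was.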
@@ -175,7 +175,7 @@ def test_convert_to_uint(destination_type, expected_type):
 
 def test_constant_get_data_bool():
     input_data = np.array([True, False, False, True])
-    node = ng.constant(input_data, dtype=np.bool)
+    node = ng.constant(input_data, dtype=bool)
     retrieved_data = node.get_data()
     assert np.allclose(input_data, retrieved_data)
 
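Note that the dtype argument here is belt-and-braces: np.array already infers a boolean dtype from the literals, and the builtin bool names the same NumPy dtype the removed alias did. A small sketch:

    import numpy as np

    input_data = np.array([True, False, False, True])
    assert input_data.dtype == np.bool_          # inferred without any dtype argument
    assert input_data.dtype == np.dtype(bool)    # builtin bool names the same dtype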
@@ -259,7 +259,7 @@ def test_deformable_psroi_pooling(dtype):
         ([2, 3, 5, 6], [7, 4], [7], 2, 2, 1, 1.0, "avg", "asymmetric", [7, 3, 2, 2]),
         ([10, 3, 5, 5], [7, 4], [7], 3, 4, 1, 1.0, "avg", "half_pixel_for_nn", [7, 3, 3, 4]),
         ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, 1.0, "avg", "half_pixel", [3, 3, 3, 4]),
-        ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, np.float(1), "avg", "half_pixel", [3, 3, 3, 4]),
+        ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, float(1), "avg", "half_pixel", [3, 3, 3, 4]),
     ],
 )
 def test_roi_align(data_shape, rois, batch_indices, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode, aligned_mode, expected_shape):
@@ -832,7 +832,7 @@ def test_loop():
         TensorIteratorConcatOutputDesc,
     )
 
-    condition = ng.constant(True, dtype=np.bool)
+    condition = ng.constant(True, dtype=bool)
     trip_count = ng.constant(16, dtype=np.int32)
     # Body parameters
     body_timestep = ng.parameter([], np.int32, "timestep")
@@ -855,7 +855,7 @@ def test_loop():
     initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32)
     iter_cnt = ng.range(zero, np.int32(16), np.int32(1))
     ti_inputs = [iter_cnt, data, initial_cma, one]
-    body_const_condition = ng.constant(True, dtype=np.bool)
+    body_const_condition = ng.constant(True, dtype=bool)
 
     graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one],
                            [curr_cma, cma_hist, body_const_condition])
@@ -1882,11 +1882,11 @@ def test_multiclass_nms():
                            0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
                            0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32")
     boxes_data = boxes_data.reshape([1, 6, 4])
-    box = ng.constant(boxes_data, dtype=np.float)
+    box = ng.constant(boxes_data, dtype=float)
     scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3,
                             0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32")
     scores_data = scores_data.reshape([1, 2, 6])
-    score = ng.constant(scores_data, dtype=np.float)
+    score = ng.constant(scores_data, dtype=float)
 
     nms_node = ng.multiclass_nms(box, score, None, output_type="i32", nms_top_k=3,
                                  iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid",
@@ -1907,13 +1907,13 @@ def test_multiclass_nms():
                            [9.66, 3.36, 18.57, 13.26]],
                           [[6.50, 7.00, 13.33, 17.63],
                            [0.73, 5.34, 19.97, 19.97]]]).astype("float32")
-    box = ng.constant(boxes_data, dtype=np.float)
+    box = ng.constant(boxes_data, dtype=float)
     scores_data = np.array([[0.34, 0.66],
                             [0.45, 0.61],
                             [0.39, 0.59]]).astype("float32")
-    score = ng.constant(scores_data, dtype=np.float)
+    score = ng.constant(scores_data, dtype=float)
     rois_num_data = np.array([3]).astype("int32")
-    roisnum = ng.constant(rois_num_data, dtype=np.int)
+    roisnum = ng.constant(rois_num_data, dtype=int)
     nms_node = ng.multiclass_nms(box, score, roisnum, output_type="i32", nms_top_k=3,
                                  iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid",
                                  nms_eta=1.0)
@@ -1933,11 +1933,11 @@ def test_matrix_nms():
                            0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
                            0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32")
     boxes_data = boxes_data.reshape([1, 6, 4])
-    box = ng.constant(boxes_data, dtype=np.float)
+    box = ng.constant(boxes_data, dtype=float)
     scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3,
                             0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32")
     scores_data = scores_data.reshape([1, 2, 6])
-    score = ng.constant(scores_data, dtype=np.float)
+    score = ng.constant(scores_data, dtype=float)
 
     nms_node = ng.matrix_nms(box, score, output_type="i32", nms_top_k=3,
                              score_threshold=0.0, sort_result_type="score", background_class=0,
@@ -2268,7 +2268,7 @@ def test_interpolate_opset10(dtype, expected_shape, shape_calculation_mode):
 
 def test_is_finite_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ng.parameter(input_shape, np.float, name="InputData")
+    input_node = ng.parameter(input_shape, float, name="InputData")
     node = ng_opset10.is_finite(input_node)
 
     assert node.get_type_name() == "IsFinite"
@@ -2278,7 +2278,7 @@ def test_is_finite_opset10():
 
 def test_is_inf_opset10_default():
     input_shape = [2, 2, 2, 2]
-    input_node = ng.parameter(input_shape, dtype=np.float, name="InputData")
+    input_node = ng.parameter(input_shape, dtype=float, name="InputData")
     node = ng_opset10.is_inf(input_node)
 
     assert node.get_type_name() == "IsInf"
@@ -2292,7 +2292,7 @@ def test_is_inf_opset10_default():
 
 def test_is_inf_opset10_custom_attribute():
     input_shape = [2, 2, 2]
-    input_node = ng.parameter(input_shape, dtype=np.float, name="InputData")
+    input_node = ng.parameter(input_shape, dtype=float, name="InputData")
     attributes = {
         "detect_positive": False,
     }
@@ -2309,7 +2309,7 @@ def test_is_inf_opset10_custom_attribute():
 
 def test_is_inf_opset10_custom_all_attributes():
     input_shape = [2, 2, 2]
-    input_node = ng.parameter(input_shape, dtype=np.float, name="InputData")
+    input_node = ng.parameter(input_shape, dtype=float, name="InputData")
     attributes = {
         "detect_negative": False,
         "detect_positive": True,
@@ -2327,7 +2327,7 @@ def test_is_inf_opset10_custom_all_attributes():
 
 def test_is_nan_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ng.parameter(input_shape, np.float, name="InputData")
+    input_node = ng.parameter(input_shape, float, name="InputData")
     node = ng_opset10.is_nan(input_node)
 
     assert node.get_type_name() == "IsNaN"
@@ -2338,7 +2338,7 @@ def test_is_nan_opset10():
 
 def test_unique_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ng.parameter(input_shape, np.float, name="input_data")
+    input_node = ng.parameter(input_shape, float, name="input_data")
     axis = ng.constant([1], np.int32, [1])
 
     node = ng_opset10.unique(input_node, axis, False, "i32")
@@ -11,7 +11,7 @@ from ngraph.utils.tensor_iterator_types import (
 
 
 def create_simple_if_with_two_outputs(condition_val):
-    condition = ng.constant(condition_val, dtype=np.bool)
+    condition = ng.constant(condition_val, dtype=bool)
 
     # then_body
     X_t = ng.parameter([], np.float32, "X")
@@ -51,7 +51,7 @@ def create_simple_if_with_two_outputs(condition_val):
 
 
 def create_diff_if_with_two_outputs(condition_val):
-    condition = ng.constant(condition_val, dtype=np.bool)
+    condition = ng.constant(condition_val, dtype=bool)
 
     # then_body
     X_t = ng.parameter([2], np.float32, "X")
@@ -83,7 +83,7 @@ def create_diff_if_with_two_outputs(condition_val):
 
 
 def simple_if(condition_val):
-    condition = ng.constant(condition_val, dtype=np.bool)
+    condition = ng.constant(condition_val, dtype=bool)
     # then_body
     X_t = ng.parameter([2], np.float32, "X")
     Y_t = ng.parameter([2], np.float32, "Y")
@@ -112,17 +112,17 @@ def simple_if(condition_val):
 
 
 def simple_if_without_parameters(condition_val):
-    condition = ng.constant(condition_val, dtype=np.bool)
+    condition = ng.constant(condition_val, dtype=bool)
 
     # then_body
-    then_constant = ng.constant(0.7, dtype=np.float)
+    then_constant = ng.constant(0.7, dtype=float)
     then_body_res_1 = ng.result(then_constant)
     then_body = GraphBody([], [then_body_res_1])
     then_body_inputs = []
     then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]
 
     # else_body
-    else_const = ng.constant(9.0, dtype=np.float)
+    else_const = ng.constant(9.0, dtype=float)
     else_body_res_1 = ng.result(else_const)
     else_body = GraphBody([], [else_body_res_1])
     else_body_inputs = []
@@ -532,7 +532,7 @@ def test_select():
     runtime = get_runtime()
     computation = runtime.computation(function, *parameter_list)
     result = computation(
-        np.array([[True, False]], dtype=np.bool),
+        np.array([[True, False]], dtype=bool),
         np.array([[5, 6]], dtype=np.float32),
         np.array([[7, 8]], dtype=np.float32),
     )[0]
@@ -76,8 +76,8 @@ def test_binary_op(ng_api_helper, expected_type):
 )
 def test_binary_logical_op(ng_api_helper):
     shape = [2, 2]
-    parameter_a = ng.parameter(shape, name="A", dtype=np.bool)
-    parameter_b = ng.parameter(shape, name="B", dtype=np.bool)
+    parameter_a = ng.parameter(shape, name="A", dtype=bool)
+    parameter_b = ng.parameter(shape, name="B", dtype=bool)
 
     model = ng_api_helper(parameter_a, parameter_b)
 
@@ -91,10 +91,10 @@ def test_binary_logical_op(ng_api_helper):
     [ng.logical_and, ng.logical_or, ng.logical_xor],
 )
 def test_binary_logical_op_with_scalar(ng_api_helper):
-    value_b = np.array([[False, True], [False, True]], dtype=np.bool)
+    value_b = np.array([[False, True], [False, True]], dtype=bool)
 
     shape = [2, 2]
-    parameter_a = ng.parameter(shape, name="A", dtype=np.bool)
+    parameter_a = ng.parameter(shape, name="A", dtype=bool)
 
     model = ng_api_helper(parameter_a, value_b)
     assert model.get_output_size() == 1
@@ -52,7 +52,7 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
 def test_reduction_logical_ops(ng_api_helper, numpy_function, reduction_axes):
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
-    input_data = np.random.randn(*shape).astype(np.bool)
+    input_data = np.random.randn(*shape).astype(bool)
 
     expected = numpy_function(input_data, axis=tuple(reduction_axes))
     result = run_op_node([input_data], ng_api_helper, reduction_axes)
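astype(bool) keeps the old semantics: any nonzero value maps to True and only 0.0 maps to False, so data drawn from randn is, for all practical purposes, all True; the test exercises dtype plumbing rather than value diversity. A sketch with the same seed:

    import numpy as np

    np.random.seed(133391)
    input_data = np.random.randn(2, 4, 3, 2).astype(bool)  # nonzero -> True
    assert input_data.all()  # randn essentially never returns exactly 0.0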
@@ -11,9 +11,9 @@ from tests_compatibility.test_onnx.utils import run_node
 @pytest.mark.parametrize(
     "onnx_op, numpy_func, data_type",
     [
-        pytest.param("And", np.logical_and, np.bool),
-        pytest.param("Or", np.logical_or, np.bool),
-        pytest.param("Xor", np.logical_xor, np.bool),
+        pytest.param("And", np.logical_and, bool),
+        pytest.param("Or", np.logical_or, bool),
+        pytest.param("Xor", np.logical_xor, bool),
         pytest.param("Equal", np.equal, np.int32),
         pytest.param("Greater", np.greater, np.int32),
         pytest.param("Less", np.less, np.int32),
@@ -59,7 +59,7 @@ if __name__ == "__main__":
         'clip': True,
         'steps': np.array([1.25, 1.25]).astype('float32').tolist(),
         'offset': 0.5,
-        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=np.float).flatten(),
+        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=float).flatten(),
         'min_max_aspect_ratios_order': False
     }
 
@@ -72,7 +72,7 @@ if __name__ == "__main__":
         'clip': True,
         'steps': np.array([1.25, 1.25]).astype('float32').tolist(),
         'offset': 0.5,
-        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=np.float).flatten(),
+        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=float).flatten(),
         'min_max_aspect_ratios_order': False
     }
 
@@ -85,7 +85,7 @@ if __name__ == "__main__":
         'clip': False,
         'steps': np.array([1.25, 1.25]).astype('float32').tolist(),
         'offset': 0.5,
-        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=np.float).flatten(),
+        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=float).flatten(),
         'min_max_aspect_ratios_order': False
     }
 
@@ -98,7 +98,7 @@ if __name__ == "__main__":
         'clip': True,
         'steps': np.array([1.25, 1.25]).astype('float32').tolist(),
         'offset': 0.5,
-        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=np.float).flatten(),
+        'variance': np.array([0.1, 0.1, 0.2, 0.2], dtype=float).flatten(),
         'min_max_aspect_ratios_order': True
     }
 
@@ -90,7 +90,7 @@ class TestAbs(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -30,12 +30,12 @@ class TestOperations(OnnxRuntimeLayerTest):
 
         min_val = 1 if op == 'Div' else -127
         if shape2:
-            const = np.random.randint(min_val, 127, shape2).astype(np.float)
+            const = np.random.randint(min_val, 127, shape2).astype(float)
         else:
-            const = np.random.randint(min_val, 127, 1).astype(np.float)
+            const = np.random.randint(min_val, 127, 1).astype(float)
             # TODO: add check when MO remove redundant layer (as Add/Sub if const = 0 or Mul/Div if const = 1)
             if const in [0, 1]:
-                const = np.array([2], dtype=np.float)
+                const = np.array([2], dtype=float)
 
         node_const_def = helper.make_node(
             'Constant',
@@ -103,12 +103,12 @@ class TestOperations(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        const1 = np.random.randint(-127, 127, shape1).astype(np.float)
+        const1 = np.random.randint(-127, 127, shape1).astype(float)
         min_val = 1 if op == 'Div' else -127
         if shape2:
-            const2 = np.random.randint(min_val, 127, shape2).astype(np.float)
+            const2 = np.random.randint(min_val, 127, shape2).astype(float)
         else:
-            const2 = np.random.randint(min_val, 127, 1).astype(np.float)
+            const2 = np.random.randint(min_val, 127, 1).astype(float)
 
         node_const1_def = helper.make_node(
             'Constant',
@@ -12,7 +12,7 @@ from unit_tests.utils.graph import build_graph
 class TestAnd(OnnxRuntimeLayerTest):
     def _prepare_input(self, inputs_dict):
         for input in inputs_dict.keys():
-            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(np.bool)
+            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(bool)
         return inputs_dict
 
     def create_net(self, shape1, shape2, ir_version):
@@ -90,7 +90,7 @@ class TestAnd(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.BOOL, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.BOOL, shape1)
 
-        const = np.random.randint(0, 2, shape2).astype(np.bool)
+        const = np.random.randint(0, 2, shape2).astype(bool)
 
         node_const_def = helper.make_node(
             'Constant',
@@ -167,8 +167,8 @@ class TestAnd(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.BOOL, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.BOOL, output_shape)
 
-        const1 = np.random.randint(0, 2, shape1).astype(np.bool)
-        const2 = np.random.randint(0, 2, shape2).astype(np.bool)
+        const1 = np.random.randint(0, 2, shape1).astype(bool)
+        const2 = np.random.randint(0, 2, shape2).astype(bool)
 
         node_const1_def = helper.make_node(
             'Constant',
@@ -92,7 +92,7 @@ class TestCeil(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.randn(*shape).astype(np.float)
+        constant = np.random.randn(*shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -33,7 +33,7 @@ class TestConcat(OnnxRuntimeLayerTest):
         concat_output_shape[concat_axis] *= 2
 
         const_number = np.prod(input_shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
 
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, output_shape)
 
@@ -36,8 +36,8 @@ class TestConv(OnnxRuntimeLayerTest):
         _pads = np.array(pads).reshape([2, -1])
         kernel_extent = np.array(dilations) * (np.array(weights_shape[2:]) - 1) + 1
         spatial_val_wo_stride = shape[2:] + np.add(_pads[0, :], _pads[1, :]) - kernel_extent
-        output_shape[2:] = (spatial_val_wo_stride.astype(np.float) / strides + 1).astype(np.int64)
-        output_shape = output_shape.astype(np.int).tolist()
+        output_shape[2:] = (spatial_val_wo_stride.astype(float) / strides + 1).astype(np.int64)
+        output_shape = output_shape.astype(int).tolist()
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
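The two changed lines sit inside the convolution output-shape arithmetic; a worked example with illustrative values (not taken from the test data) shows the float/int casts behave as before:

    import numpy as np

    shape, weights_shape = [1, 3, 10, 10], [6, 3, 3, 3]
    strides, dilations, pads = [1, 1], [1, 1], [1, 1, 1, 1]

    _pads = np.array(pads).reshape([2, -1])
    kernel_extent = np.array(dilations) * (np.array(weights_shape[2:]) - 1) + 1
    spatial_val_wo_stride = shape[2:] + np.add(_pads[0, :], _pads[1, :]) - kernel_extent
    out_spatial = (spatial_val_wo_stride.astype(float) / strides + 1).astype(np.int64)
    assert out_spatial.tolist() == [10, 10]  # 10 + 2 - 3 = 9; 9/1 + 1 = 10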
@@ -28,7 +28,7 @@ class TestConvTranspose(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        weights = np.random.randn(*kernel_shape).astype(np.float)
+        weights = np.random.randn(*kernel_shape).astype(float)
 
         node_weights_def = onnx.helper.make_node(
             'Constant',
@@ -142,7 +142,7 @@ class TestCumSum(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.randn(*shape).astype(np.float)
+        constant = np.random.randn(*shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -139,55 +139,55 @@ class TestDequantizeLinear(OnnxRuntimeLayerTest):
         return onnx_net, ref_net
 
     test_data = [
-        dict(shape=[8], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[8], y_scale=np.array(2, dtype=float),
             y_zero_point=np.array(128, dtype=np.uint8)),
-        dict(shape=[8], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[8], y_scale=np.array(2, dtype=float),
             y_zero_point=np.array(1, dtype=np.int8)),
-        dict(shape=[2, 4], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4], y_scale=np.array(2, dtype=float),
            y_zero_point=np.array(128, dtype=np.uint8)),
-        dict(shape=[2, 4], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4], y_scale=np.array(2, dtype=float),
            y_zero_point=np.array(1, dtype=np.int8)),
-        dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=float),
            y_zero_point=np.array(128, dtype=np.uint8)),
-        dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=float),
            y_zero_point=np.array(1, dtype=np.int8)),
-        dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=float),
           y_zero_point=np.array(128, dtype=np.uint8)),
-        dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=float),
           y_zero_point=np.array(1, dtype=np.int8)),
-        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=float),
          y_zero_point=np.array(128, dtype=np.uint8)),
-        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=np.float),
+        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=float),
         y_zero_point=np.array(1, dtype=np.int8)),
    ]
    test_data_def_zerop = [
-        dict(shape=[8], y_scale=np.array(2, dtype=np.float)),
-        dict(shape=[2, 4], y_scale=np.array(2, dtype=np.float)),
-        dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=np.float)),
-        dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=np.float)),
-        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=np.float)),
+        dict(shape=[8], y_scale=np.array(2, dtype=float)),
+        dict(shape=[2, 4], y_scale=np.array(2, dtype=float)),
+        dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=float)),
+        dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=float)),
+        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=float)),
    ]
 
    test_data_axis = [
-        dict(shape=[2, 4], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float), axis=1),
-        dict(shape=[2, 4], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float), axis=1),
+        dict(shape=[2, 4], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
            y_zero_point=np.array([128, 128, 128, 128], dtype=np.uint8), axis=1),
-        dict(shape=[2, 4], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
            y_zero_point=np.array([1, 1, 1, 1], dtype=np.int8), axis=1),
-        dict(shape=[2, 4, 6], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float), axis=1),
-        dict(shape=[2, 4, 6], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4, 6], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float), axis=1),
+        dict(shape=[2, 4, 6], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
           y_zero_point=np.array([128, 128, 128, 128], dtype=np.uint8), axis=1),
-        dict(shape=[2, 4, 6], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4, 6], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
          y_zero_point=np.array([1, 1, 1, 1], dtype=np.int8), axis=1),
-        dict(shape=[2, 4, 6, 8], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float), axis=1),
-        dict(shape=[2, 4, 6, 8], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4, 6, 8], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float), axis=1),
+        dict(shape=[2, 4, 6, 8], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
          y_zero_point=np.array([128, 128, 128, 128], dtype=np.uint8), axis=1),
-        dict(shape=[2, 4, 6, 8], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4, 6, 8], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
         y_zero_point=np.array([1, 1, 1, 1], dtype=np.int8), axis=1),
-        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float), axis=1),
-        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float), axis=1),
+        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
        y_zero_point=np.array([128, 128, 128, 128], dtype=np.uint8), axis=1),
-        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array([2, 2.5, 3, 2.3], dtype=np.float),
+        dict(shape=[2, 4, 6, 8, 10], y_scale=np.array([2, 2.5, 3, 2.3], dtype=float),
        y_zero_point=np.array([1, 1, 1, 1], dtype=np.int8), axis=1),
    ]
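np.array(2, dtype=float) builds the same 0-d float64 scalar array that dtype=np.float produced, so the y_scale test data is unchanged in value and type:

    import numpy as np

    y_scale = np.array(2, dtype=float)
    assert y_scale.dtype == np.float64 and y_scale.shape == ()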
@@ -77,7 +77,7 @@ class TestDropout(OnnxRuntimeLayerTest):
         from onnx import helper
         from onnx import TensorProto
 
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
 
         concat_axis = 0
         output_shape = shape.copy()
@@ -94,7 +94,7 @@ class TestElu(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
         const_number = np.prod(shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, shape)
 
         node_const_def = onnx.helper.make_node(
@@ -84,7 +84,7 @@ class TestFlatten(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)
 
         const_number = np.prod(input_shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -92,7 +92,7 @@ class TestFloor(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.randn(*shape).astype(np.float)
+        constant = np.random.randn(*shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -127,7 +127,7 @@ class TestGather(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)
 
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -37,15 +37,15 @@ class TestGemm(OnnxRuntimeLayerTest):
         extended_shape2 = np.concatenate([np.ones(max_len - len(shapeB)), shapeB], axis=0)
         output_shape = np.concatenate(
             [np.maximum(*[extended_shape1[0:-2], extended_shape2[0:-2]]), [shapeA[-2], shapeB[-1]]],
-            axis=0).astype(np.int).tolist()
+            axis=0).astype(int).tolist()
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shapeA)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
         _shapeB = shapeB.copy()
         if trans_b:
             _shapeB.reverse()
-        const1 = np.random.ranf(_shapeB).astype(np.float)
-        const2 = np.random.ranf(shapeC).astype(np.float)
+        const1 = np.random.ranf(_shapeB).astype(float)
+        const2 = np.random.ranf(shapeC).astype(float)
 
         nodes = list()
         node_const1_def = onnx.helper.make_node(
@@ -157,12 +157,12 @@ class TestGemm(OnnxRuntimeLayerTest):
         extended_shape2 = np.concatenate([np.ones(max_len - len(shapeB)), shapeB], axis=0)
         output_shape = np.concatenate(
             [np.maximum(*[extended_shape1[0:-2], extended_shape2[0:-2]]), [shapeA[-2], shapeB[-1]]],
-            axis=0).astype(np.int).tolist()
+            axis=0).astype(int).tolist()
         input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, _shapeA)
         input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, _shapeB)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        const = np.random.ranf(shapeC).astype(np.float)
+        const = np.random.ranf(shapeC).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
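The astype(np.int) -> astype(int) change lives in the broadcast shape computation for the Gemm/MatMul output; with illustrative shapes:

    import numpy as np

    shapeA, shapeB = [3, 2, 4], [4, 5]
    max_len = max(len(shapeA), len(shapeB))
    extended_shape1 = np.concatenate([np.ones(max_len - len(shapeA)), shapeA], axis=0)
    extended_shape2 = np.concatenate([np.ones(max_len - len(shapeB)), shapeB], axis=0)
    output_shape = np.concatenate(
        [np.maximum(*[extended_shape1[0:-2], extended_shape2[0:-2]]), [shapeA[-2], shapeB[-1]]],
        axis=0).astype(int).tolist()
    assert output_shape == [3, 2, 5]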
@@ -118,7 +118,7 @@ class TestHardSigmoid(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
         const_number = np.prod(shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, shape)
 
         node_const_def = onnx.helper.make_node(
@@ -89,7 +89,7 @@ class TestIdentity(OnnxRuntimeLayerTest):
         from onnx import helper
         from onnx import TensorProto
 
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
 
         concat_axis = 0
         output_shape = shape.copy()
@@ -27,7 +27,7 @@ class TestImageScaler(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
 
-        bias = np.random.randint(-10, 10, shape[1]).astype(np.float)
+        bias = np.random.randint(-10, 10, shape[1]).astype(float)
 
         node_def = onnx.helper.make_node(
             'ImageScaler',
@@ -79,8 +79,8 @@ class TestImageScaler(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
-        bias = np.random.randint(-10, 10, shape[1]).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
+        bias = np.random.randint(-10, 10, shape[1]).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -25,8 +25,8 @@ class TestInstanceNormalization(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
 
-        scale_const = np.random.randn(shape[1]).astype(np.float)
-        bias_const = np.random.randn(shape[1]).astype(np.float)
+        scale_const = np.random.randn(shape[1]).astype(float)
+        bias_const = np.random.randn(shape[1]).astype(float)
 
         node_scale_def = helper.make_node(
             'Constant',
@@ -98,7 +98,7 @@ class TestLeakyRelu(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
         const_number = np.prod(shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, shape)
 
         node_const_def = onnx.helper.make_node(
@@ -94,7 +94,7 @@ class TestLog(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.rand(*shape).astype(np.float) * 255 + 0.5
+        constant = np.random.rand(*shape).astype(float) * 255 + 0.5
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -42,7 +42,7 @@ class TestLoop(OnnxRuntimeLayerTest):
         assert len(input_nodes) == len(input_names)
         assert len(output_nodes) == len(output_names)
         other_inputs_count = len(input_nodes) - 2
-        one_value = np.ones(input_shape, dtype=np.float)
+        one_value = np.ones(input_shape, dtype=float)
 
         one = TestLoop.create_const('one_' + graph_name, TensorProto.FLOAT, one_value)
         one_int = TestLoop.create_const('one_int_' + graph_name, TensorProto.INT64, np.ones([1]))
@@ -108,7 +108,7 @@ class TestLoop(OnnxRuntimeLayerTest):
         cond_out_1 = helper.make_tensor_value_info('cond_out_1', TensorProto.BOOL, [1])
 
         m_1_value = np.array([10], dtype=np.int64)
-        cond_value = np.array([True], np.bool)
+        cond_value = np.array([True], bool)
 
         M_1 = self.create_const('M_1', TensorProto.INT64, m_1_value)
         cond = self.create_const('cond', TensorProto.BOOL, cond_value)
@@ -179,8 +179,8 @@ class TestLoop(OnnxRuntimeLayerTest):
 
         m_1_value = np.array([10], dtype=np.int64)
         m_2_value = np.array([5], dtype=np.int64)
-        cond_value = np.array([True], np.bool)
-        one_value = np.ones(input_shape, dtype=np.float)
+        cond_value = np.array([True], bool)
+        one_value = np.ones(input_shape, dtype=float)
 
         M_1 = self.create_const('M_1', TensorProto.INT64, m_1_value)
         M_2 = self.create_const('M_2', TensorProto.INT64, m_2_value)
@@ -33,7 +33,7 @@ class TestMatMul(OnnxRuntimeLayerTest):
         extended_shape2 = np.concatenate([np.ones(max_len - len(shape2)), shape2], axis=0)
         output_shape = np.concatenate(
             [np.maximum(*[extended_shape1[0:-2], extended_shape2[0:-2]]), [shape1[-2], shape2[-1]]],
-            axis=0).astype(np.int).tolist()
+            axis=0).astype(int).tolist()
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
@@ -107,7 +107,7 @@ class TestMatMul(OnnxRuntimeLayerTest):
         extended_shape2 = np.concatenate([np.ones(max_len - len(shape2)), shape2], axis=0)
         output_shape = np.concatenate(
             [np.maximum(*[extended_shape1[0:-2], extended_shape2[0:-2]]), [shape1[-2], shape2[-1]]],
-            axis=0).astype(np.int).tolist()
+            axis=0).astype(int).tolist()
         input1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, shape1)
         input2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, shape2)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
@@ -12,7 +12,7 @@ from unit_tests.utils.graph import build_graph
 class TestNot(OnnxRuntimeLayerTest):
     def _prepare_input(self, inputs_dict):
         for input in inputs_dict.keys():
-            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(np.bool)
+            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(bool)
         return inputs_dict
 
     def create_net(self, shape, ir_version):
@@ -96,7 +96,7 @@ class TestNot(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.BOOL, shape)
         output = helper.make_tensor_value_info('output', TensorProto.BOOL, output_shape)
 
-        constant = np.random.randint(0, 2, shape).astype(np.bool)
+        constant = np.random.randint(0, 2, shape).astype(bool)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -12,7 +12,7 @@ from unit_tests.utils.graph import build_graph
 class TestOr(OnnxRuntimeLayerTest):
     def _prepare_input(self, inputs_dict):
         for input in inputs_dict.keys():
-            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(np.bool)
+            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(bool)
         return inputs_dict
 
     def create_net(self, shape1, shape2, ir_version):
@@ -90,7 +90,7 @@ class TestOr(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.BOOL, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.BOOL, shape1)
 
-        const = np.random.randint(0, 2, shape2).astype(np.bool)
+        const = np.random.randint(0, 2, shape2).astype(bool)
 
         node_const_def = helper.make_node(
             'Constant',
@@ -167,8 +167,8 @@ class TestOr(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.BOOL, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.BOOL, output_shape)
 
-        const1 = np.random.randint(0, 2, shape1).astype(np.bool)
-        const2 = np.random.randint(0, 2, shape2).astype(np.bool)
+        const1 = np.random.randint(0, 2, shape1).astype(bool)
+        const2 = np.random.randint(0, 2, shape2).astype(bool)
 
         node_const1_def = helper.make_node(
             'Constant',
@@ -10,7 +10,7 @@ from unit_tests.utils.graph import build_graph
 
 
 def float_array(x):
-    return np.array(x, dtype=np.float)
+    return np.array(x, dtype=float)
 
 
 class TestPooling(OnnxRuntimeLayerTest):
@@ -36,7 +36,7 @@ class TestPooling(OnnxRuntimeLayerTest):
         if auto_pad is not None:
             node_args['auto_pad'] = auto_pad
             if auto_pad == 'VALID':
-                pads = np.zeros(len(shape[2:]) * 2, dtype=np.int)
+                pads = np.zeros(len(shape[2:]) * 2, dtype=int)
         else:
             auto_pad = 'NOTSET'
         if count_include_pad is not None:
@@ -60,7 +60,7 @@ class TestPooling(OnnxRuntimeLayerTest):
             node_args['ceil_mode'] = 1
 
         if auto_pad in ['SAME_UPPER', 'SAME_LOWER']:
-            out_spacial_shape = np.ceil(np.array(shape[2:], dtype=np.float) / strides)
+            out_spacial_shape = np.ceil(np.array(shape[2:], dtype=float) / strides)
         else:
             rounding = np.ceil if ceil else np.floor
             out_spacial_shape = rounding(
@@ -69,14 +69,14 @@ class TestPooling(OnnxRuntimeLayerTest):
 
         out_shape = np.array(shape)
         out_shape[2:] = out_spacial_shape
-        out_shape = out_shape.astype(np.int).tolist()
+        out_shape = out_shape.astype(int).tolist()
         concat_axis = 0
         out_concat_shape = out_shape.copy()
         out_concat_shape[concat_axis] *= 2
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, out_concat_shape)
 
-        constant = np.random.randint(-127, 127, out_shape).astype(np.float)
+        constant = np.random.randint(-127, 127, out_shape).astype(float)
 
         node_def = onnx.helper.make_node(
             op,
@@ -183,7 +183,7 @@ class TestPooling(OnnxRuntimeLayerTest):
 
         out_shape = np.ones(len(shape))
         out_shape[:2] = np.array(shape)[:2]
-        out_shape = out_shape.astype(np.int).tolist()
+        out_shape = out_shape.astype(int).tolist()
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, out_shape)
 
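The pooling shape math above mixes float division with integer casts; a worked example with illustrative values:

    import numpy as np

    shape, strides = [1, 3, 10, 10], [2, 2]
    out_spacial_shape = np.ceil(np.array(shape[2:], dtype=float) / strides)  # SAME_* branch
    out_shape = np.array(shape)
    out_shape[2:] = out_spacial_shape          # 5.0 assigned into an integer array
    out_shape = out_shape.astype(int).tolist()
    assert out_shape == [1, 3, 5, 5]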
@@ -88,7 +88,7 @@ class TestReciprocal(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        const = np.random.randint(1, 256, shape).astype(np.float)
+        const = np.random.randint(1, 256, shape).astype(float)
 
         node_const_def = helper.make_node(
             'Constant',
@@ -122,7 +122,7 @@ class TestReduceL1L2(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, output_shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)
 
-        constant = np.random.randn(*shape).astype(np.float)
+        constant = np.random.randn(*shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -93,7 +93,7 @@ class TestRelu(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
         const_number = np.prod(shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, shape)
 
         node_const_def = onnx.helper.make_node(
@@ -116,7 +116,7 @@ class TestReshape(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)
 
         const_number = np.prod(input_shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -36,8 +36,8 @@ class TestResize(OnnxRuntimeLayerTest):
 
         onnx_scales = scales
         if scales is None:
-            onnx_scales = np.array(output_shape).astype(np.float) / np.array(input_shape).astype(
-                np.float)
+            onnx_scales = np.array(output_shape).astype(float) / np.array(input_shape).astype(
+                float)
         scales_node = onnx.helper.make_node(
             'Constant',
             inputs=[],
@@ -114,14 +114,14 @@ class TestResize(OnnxRuntimeLayerTest):
         if sizes is not None and scales is not None:
             shape_calculation_mode = 'sizes'
             sizes_value = int64_array(sizes)
-            scales_value = np.array(scales).astype(np.float)
+            scales_value = np.array(scales).astype(float)
         elif sizes is not None and scales is None:
             shape_calculation_mode = 'sizes'
             sizes_value = int64_array(sizes)
             scales_value = sizes_value / input_shape_as_array
         else:
             shape_calculation_mode = 'scales'
-            scales_value = np.array(scales).astype(np.float)
+            scales_value = np.array(scales).astype(float)
             sizes_value = np.floor(input_shape_as_array * scales_value + 1e-5).astype(np.int64)
 
         if precision == 'FP16':
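When only sizes are given, the scales fall out of the shape ratio; the cast is now the builtin float but the arithmetic is identical. Illustrative values:

    import numpy as np

    input_shape, output_shape = [1, 3, 10, 10], [1, 3, 20, 20]
    onnx_scales = np.array(output_shape).astype(float) / np.array(input_shape).astype(float)
    assert onnx_scales.tolist() == [1.0, 1.0, 2.0, 2.0]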
@@ -76,7 +76,7 @@ class TestScale(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -99,7 +99,7 @@ class TestSigmoid(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
         const_number = np.prod(shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, shape)
 
         node_const_def = onnx.helper.make_node(
@@ -92,7 +92,7 @@ class TestSign(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.randn(*shape).astype(np.float)
+        constant = np.random.randn(*shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -152,7 +152,7 @@ class TestSlice(OnnxRuntimeLayerTest):
         from onnx import TensorProto
 
         # calculate output shape
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
 
         slice_idx = [None] * len(shape)
         for i, axis in enumerate(axes):
@@ -93,7 +93,7 @@ class TestSoftsign(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
         const_number = np.prod(shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, shape)
 
         node_const_def = onnx.helper.make_node(
@@ -191,7 +191,7 @@ class TestSplitConcat(OnnxRuntimeLayerTest):
         concat_output_shape[concat_axis] *= 2
 
         const_number = np.prod(input_shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
 
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
         outputs, split = [], []
@@ -100,7 +100,7 @@ class TestSqrt(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.rand(*shape).astype(np.float) * 255
+        constant = np.random.rand(*shape).astype(float) * 255
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -79,7 +79,7 @@ class TestSqueeze(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)
 
         const_number = np.prod(input_shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, input_shape)
 
         node_const_def = onnx.helper.make_node(
@@ -37,7 +37,7 @@ class TestSum(OnnxRuntimeLayerTest):
         nodes = list()
         consts = list()
         for i, shape in enumerate(const_shapes):
-            const = np.random.randint(-127, 127, shape).astype(np.float)
+            const = np.random.randint(-127, 127, shape).astype(float)
             const_name = 'const{}'.format(i + 1)
             nodes.append(helper.make_node(
                 'Constant',
@@ -111,7 +111,7 @@ class TestSum(OnnxRuntimeLayerTest):
         input_names = list()
         consts = list()
         for i, shape in enumerate(const_shapes):
-            const = np.random.randint(-127, 127, shape).astype(np.float)
+            const = np.random.randint(-127, 127, shape).astype(float)
             const_name = 'const{}'.format(i + 1)
             nodes.append(helper.make_node(
                 'Constant',
@@ -34,7 +34,7 @@ class TestTopK(OnnxRuntimeLayerTest):
         indices = helper.make_tensor_value_info('cindices', TensorProto.INT64, output_shape)
 
         const1 = np.ones(output_shape).astype(np.int64)
-        const2 = np.ones(output_shape).astype(np.float)
+        const2 = np.ones(output_shape).astype(float)
 
         nodes = list()
         inputs = ['input']
@@ -80,7 +80,7 @@ class TestTranspose(OnnxRuntimeLayerTest):
         from onnx import helper
         from onnx import TensorProto
 
-        constant = np.random.randint(-127, 127, shape).astype(np.float)
+        constant = np.random.randint(-127, 127, shape).astype(float)
         constant_transposed = np.transpose(constant, perm)
 
         concat_axis = 0
@@ -85,7 +85,7 @@ class TestTrigonomery(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
 
-        constant = np.random.rand(*shape).astype(np.float)
+        constant = np.random.rand(*shape).astype(float)
 
         node_const_def = onnx.helper.make_node(
             'Constant',
@@ -79,7 +79,7 @@ class TestUnsqueeze(OnnxRuntimeLayerTest):
         output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)
 
         const_number = np.prod(input_shape)
-        constant = np.random.randint(-127, 127, const_number).astype(np.float)
+        constant = np.random.randint(-127, 127, const_number).astype(float)
         constant = np.reshape(constant, input_shape)
 
         node_const_def = onnx.helper.make_node(
@@ -12,7 +12,7 @@ from unit_tests.utils.graph import build_graph
 class TestWhere(OnnxRuntimeLayerTest):
     def _prepare_input(self, inputs_dict):
         for input in inputs_dict.keys():
-            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(np.bool)
+            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(bool)
         return inputs_dict
 
     def create_net(self, condition_shape, shape_than, else_shape, ir_version):
@@ -12,7 +12,7 @@ from unit_tests.utils.graph import build_graph
 class TestXor(OnnxRuntimeLayerTest):
     def _prepare_input(self, inputs_dict):
         for input in inputs_dict.keys():
-            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(np.bool)
+            inputs_dict[input] = np.random.randint(0, 2, inputs_dict[input]).astype(bool)
         return inputs_dict
 
     def create_net(self, shape1, shape2, ir_version):
@@ -90,7 +90,7 @@ class TestXor(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.BOOL, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.BOOL, shape1)
 
-        const = np.random.randint(0, 2, shape2).astype(np.bool)
+        const = np.random.randint(0, 2, shape2).astype(bool)
 
         node_const_def = helper.make_node(
             'Constant',
@@ -167,8 +167,8 @@ class TestXor(OnnxRuntimeLayerTest):
         input = helper.make_tensor_value_info('input', TensorProto.BOOL, shape1)
         output = helper.make_tensor_value_info('output', TensorProto.BOOL, output_shape)
 
-        const1 = np.random.randint(0, 2, shape1).astype(np.bool)
-        const2 = np.random.randint(0, 2, shape2).astype(np.bool)
+        const1 = np.random.randint(0, 2, shape1).astype(bool)
+        const2 = np.random.randint(0, 2, shape2).astype(bool)
 
         node_const1_def = helper.make_node(
             'Constant',
@@ -23,7 +23,7 @@ def generate_input(op_type, size):
     upper = 16
 
     if op_type in logical_type:
-        return np.random.randint(0, 1, size).astype(np.bool)
+        return np.random.randint(0, 1, size).astype(bool)
     elif op_type in narrow_borders:
         return np.random.uniform(lower, upper, size).astype(np.float32)
     else:
@@ -39,7 +39,7 @@ class TestUnaryOps(CommonTFLayerTest):
 
         for input in inputs_dict.keys():
             if self.current_op_type in logical_type:
-                inputs_dict[input] = np.random.randint(0, 1, inputs_dict[input]).astype(np.bool)
+                inputs_dict[input] = np.random.randint(0, 1, inputs_dict[input]).astype(bool)
             else:
                 inputs_dict[input] = np.random.uniform(lower, upper, inputs_dict[input]).astype(
                     np.float32)
@@ -260,7 +260,7 @@ def get_image_info_tensors(image_sizes, layer):
 
 def fill_tensors_with_random(layer):
     dtype = get_dtype(layer.element_type)
-    rand_min, rand_max = (0, 1) if dtype == np.bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
+    rand_min, rand_max = (0, 1) if dtype == bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
     # np.random.uniform excludes high: add 1 to have it generated
    if np.dtype(dtype).kind in ['i', 'u', 'b']:
        rand_max += 1
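dtype == bool compares equal whether get_dtype hands back the builtin or NumPy's boolean type, since they name the same dtype, and that dtype's kind is 'b', so the rand_max adjustment still fires:

    import numpy as np

    assert np.dtype(bool) == np.bool_ and np.dtype(bool).kind == 'b'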
@@ -528,7 +528,7 @@ def parse_scale_or_mean(parameter_string, input_info):
     if matches:
         for match in matches:
             input_name, value = match
-            f_value = np.array(value.split(",")).astype(np.float)
+            f_value = np.array(value.split(",")).astype(float)
             if input_name != '':
                 return_value[input_name] = f_value
             else:
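astype(float) still parses numeric strings, which is what this branch relies on:

    import numpy as np

    value = "127.5,127.5,127.5"
    f_value = np.array(value.split(",")).astype(float)
    assert f_value.tolist() == [127.5, 127.5, 127.5] and f_value.dtype == np.float64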
@@ -10,7 +10,7 @@ from openvino.tools.mo.utils.model_analysis import AnalyzeAction
 class TrainingPhaseAnalysis(AnalyzeAction):
 
     def analyze(self, graph: Graph):
-        nodes = graph.get_op_nodes(op='Parameter', data_type=np.bool)
+        nodes = graph.get_op_nodes(op='Parameter', data_type=bool)
         names = ""
         params = ""
         if not nodes:
@@ -46,10 +46,10 @@ class FreezePlaceholderValue(FrontReplacementSubgraph):
             data_type = SUPPORTED_DATA_TYPES[graph.graph['cmd_params'].data_type][0]
             string_value = graph.graph['freeze_placeholder'][name]
             try:
-                if data_type != np.bool:
+                if data_type != bool:
                     value = mo_array(string_value, dtype=data_type)
                 # TODO: investigate why boolean type is allowed only for TensorFlow
-                elif data_type == np.bool and graph.graph['fw'] == 'tf':
+                elif data_type == bool and graph.graph['fw'] == 'tf':
                     from openvino.tools.mo.front.tf.common import tf_data_type_cast
                     if isinstance(string_value, list):
                         casted_list = list()
@@ -22,7 +22,7 @@ class LSTMNonlinearityFrontExtractor(FrontExtractorOp):
         ifo_x_weights, ifo_x_weights_shape = read_binary_matrix(pb)
 
         try:
-            use_dropout = collect_until_token_and_read(pb, b'<UseDropout>', np.bool)
+            use_dropout = collect_until_token_and_read(pb, b'<UseDropout>', bool)
         except Error:
             # layer have not UseDropout attribute, so setup it to False
             use_dropout = False
@@ -324,7 +324,7 @@ def read_node(file_descr, graph, component_layer_map, layer_node_map):
     if tokens[0] == b'input-node':
         in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
         in_name = str(in_name).strip('b').replace('\'', "")
-        in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=np.int)
+        in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=int)
 
         if in_name not in layer_node_map:
             graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape)
@@ -258,7 +258,7 @@ def read_token_value(file_desc: io.BufferedReader, token: bytes = b'', value_type
     getters = {
         np.uint32: read_binary_integer32_token,
         np.uint64: read_binary_integer64_token,
-        np.bool: read_binary_bool_token
+        bool: read_binary_bool_token
     }
     current_token = collect_until_whitespace(file_desc)
     if token != b'' and token != current_token:
@@ -314,7 +314,7 @@ def collect_until_token_and_read(file_desc: io.BufferedReader, token, value_type
     getters = {
         np.uint32: read_binary_integer32_token,
         np.uint64: read_binary_integer64_token,
-        np.bool: read_binary_bool_token,
+        bool: read_binary_bool_token,
         np.string_: read_string
     }
     collect_until_token(file_desc, token)
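These getters tables dispatch on the requested value type; because np.bool literally was the builtin bool, callers that used to pass np.bool now pass bool and hit the same entry. A minimal sketch of the pattern (read_bool is a hypothetical stand-in for read_binary_bool_token):

    import io

    def read_bool(fd):
        # hypothetical reader: consume one byte, nonzero -> True
        return bool(fd.read(1)[0])

    getters = {bool: read_bool}
    assert getters[bool](io.BytesIO(b"\x01")) is True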
@@ -177,7 +177,7 @@ class RestrictedAttentionComponentReplacer(FrontReplacementPattern):
 
         split_2_node.out_port(0).connect(einsum_1_node.in_port(1))
 
-        mul_node = create_op_with_const_inputs(graph, Mul, {1: mo_array(key_scale, dtype=np.float)},
+        mul_node = create_op_with_const_inputs(graph, Mul, {1: mo_array(key_scale, dtype=float)},
                                                {'name': self.in_name + '/Mul'})
         reshape_helper_1_node.out_port(0).connect(mul_node.in_port(0))
 
@@ -39,7 +39,7 @@ class ONNXLoopNormalize(FrontReplacementSubgraph):
         # connect "execution condition" input if it is not connected with default value True
         if not loop_node.is_in_port_connected(1):
             loop_node.add_input_port(1, skip_if_exist=True)
-            Const(loop_node.graph, {'name': loop_name + '/execution_cond', 'value': mo_array(True, dtype=np.bool)}).\
+            Const(loop_node.graph, {'name': loop_name + '/execution_cond', 'value': mo_array(True, dtype=bool)}).\
                 create_node().out_port(0).connect(loop_node.in_port(1))

         # scan output needs Unsqueeze over axis 0
@@ -56,7 +56,7 @@ def get_onnx_opset_version(node: Node):
 def get_onnx_datatype_as_numpy(value):
     datatype_to_numpy = {
         1: np.float32,
-        9: np.bool,
+        9: bool,
         11: np.double,
         10: np.float16,
         5: np.int16,
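The integer keys are ONNX TensorProto.DataType codes (1 = FLOAT, 5 = INT16, 9 = BOOL, 10 = FLOAT16, 11 = DOUBLE), so code 9 now maps to the builtin bool. A hedged sketch of how such a lookup is typically wrapped; the error handling here is illustrative, not the function's actual behavior:

    import numpy as np

    datatype_to_numpy = {1: np.float32, 5: np.int16, 9: bool, 10: np.float16, 11: np.double}

    def get_onnx_datatype_as_numpy(value):
        # assumed fallback for unmapped codes
        if value not in datatype_to_numpy:
            raise ValueError('Unsupported ONNX data type: {}'.format(value))
        return datatype_to_numpy[value]

    assert get_onnx_datatype_as_numpy(9) is bool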
@@ -16,7 +16,7 @@ class ExperimentalDetectronGroupNorm(FrontExtractorOp):
     @classmethod
     def extract(cls, node):
         attrs = {
-            'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
+            'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=float),
             'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)),
         }
         GroupNorm.update_node_stat(node, attrs)
@@ -30,7 +30,7 @@ class GroupNormExtractor(FrontExtractorOp):
     @classmethod
     def extract(cls, node):
         attrs = {
-            'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=np.float),
+            'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=float),
             'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)),
         }
         GroupNorm.update_node_stat(node, attrs)
@@ -25,7 +25,7 @@ class SliceFrontExtractor(FrontExtractorOp):
         if len(starts) == 0 or len(ends) == 0:
             raise Error("starts or/and ends are not specified for the node {}".format(node.name))
         if len(axes) == 0:
-            axes = np.arange(len(starts), dtype=np.int)
+            axes = np.arange(len(starts), dtype=int)

         attrs = {'axes': axes, 'starts': starts, 'ends': ends}
         AttributedSlice.update_node_stat(node, attrs)
@@ -243,7 +243,7 @@ class MapFNOutputConcatenation(FrontReplacementSubgraph):
             if 'purpose' in record and record['purpose'] == 'execution_condition':
                 exec_cond_layer_id = record['internal_layer_id']
                 exec_cond_node = Loop.get_body_node_by_internal_id(loop_node, exec_cond_layer_id)
-                const_true = Const(body_graph, {'value': mo_array(True, dtype=np.bool)}).create_node()
+                const_true = Const(body_graph, {'value': mo_array(True, dtype=bool)}).create_node()
                 exec_cond_node.in_port(0).get_connection().set_source(const_true.out_port(0))

                 # remove back edge
@@ -34,7 +34,7 @@ class WhileNormalize(FrontReplacementSubgraph):

         # connect execution condition port
         exec_cond_node = Const(graph, {'name': loop_name + '/ExecutionConditionValue',
-                                       'value': mo_array(True, dtype=np.bool)}).create_node()
+                                       'value': mo_array(True, dtype=bool)}).create_node()
         loop_node.in_port(1).get_connection().set_source(exec_cond_node.out_port(0))

         loop_node.body.clean_up()
@@ -7,7 +7,7 @@ from tensorflow.core.framework import types_pb2 as tf_types # pylint: disable=n
 # Suppress false positive pylint warning about function with too many arguments
 # pylint: disable=E1121
 # mapping between TF data type and numpy data type and function to extract data from TF tensor
-_tf_np_mapping = [('DT_BOOL', np.bool, lambda pb: pb.bool_val, lambda x: bool_cast(x)),
+_tf_np_mapping = [('DT_BOOL', bool, lambda pb: pb.bool_val, lambda x: bool_cast(x)),
                   ('DT_INT8', np.int8, lambda pb: pb.int_val, lambda x: np.int8(x)),
                   ('DT_INT16', np.int16, lambda pb: pb.int_val, lambda x: np.int16(x)),
                   ('DT_INT32', np.int32, lambda pb: pb.int_val, lambda x: np.int32(x)),
@@ -19,7 +19,7 @@ _tf_np_mapping = [('DT_BOOL', np.bool, lambda pb: pb.bool_val, lambda x: bool_ca
                   ('DT_HALF', np.float16, lambda pb: np.uint16(pb.half_val).view(np.float16), lambda x: np.float16(x)),
                   ('DT_FLOAT', np.float32, lambda pb: pb.float_val, lambda x: np.float32(x)),
                   ('DT_DOUBLE', np.double, lambda pb: pb.double_val, lambda x: np.double(x)),
-                  ('DT_STRING', np.str, lambda pb: pb.string_val, lambda x: np.str(x)),
+                  ('DT_STRING', str, lambda pb: pb.string_val, lambda x: str(x)),
                   ]

 tf_data_type_decode = {getattr(tf_types, tf_dt): (np_type, func) for tf_dt, np_type, func, _ in _tf_np_mapping if
@@ -32,4 +32,4 @@ def bool_cast(x):
     if isinstance(x, str):
         return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast'
     else:
-        return np.bool(x)
+        return bool(x)
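bool_cast accepts strings as well as numeric values, and only its fallback branch changes, from np.bool(x) to bool(x), which is behavior-preserving since the alias called the builtin anyway. Expected behavior as a quick check, assuming bool_cast is imported from the module above:

    assert bool_cast('False') is False
    assert bool_cast('1') is True
    assert bool_cast(0) is False
    assert bool_cast('maybe') == 'unknown_boolean_cast'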
@@ -63,7 +63,7 @@ def tf_tensor_content(tf_dtype, shape, pb_tensor):
         value = mo_array(np.frombuffer(pb_tensor.tensor_content, type_helper[0]))
     else:
         # load typed value
-        if type_helper[0] != np.str:
+        if type_helper[0] != str:
             value = mo_array(type_helper[1](pb_tensor), dtype=type_helper[0])
         else:
             try:
@@ -40,7 +40,7 @@ SUPPORTED_DATA_TYPES = {
     'int8': (np.int8, 'I8', 'i8'),
     'int32': (np.int32, 'I32', 'i32'),
     'int64': (np.int64, 'I64', 'i64'),
-    'bool': (np.bool, 'BOOL', 'boolean'),
+    'bool': (bool, 'BOOL', 'boolean'),
     'uint8': (np.uint8, 'U8', 'u8'),
     'uint32': (np.uint32, 'U32', 'u32'),
    'uint64': (np.uint64, 'U64', 'u64'),
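Each SUPPORTED_DATA_TYPES entry appears to be a (numpy or builtin type, IR precision, npy-style name) triple, so the 'bool' row now carries the builtin. A small usage sketch under that assumption:

    np_type, precision, npy_name = SUPPORTED_DATA_TYPES['bool']
    assert np_type is bool and precision == 'BOOL' and npy_name == 'boolean'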
@@ -99,7 +99,7 @@ class QuantizeLinearResolver(MiddleReplacementPattern):
             format(quantize_node.soft_get('name', soft_get('id')))
         if axis is not None and len(scale_y_shape) > 0 and scale_y_shape[0] > 1:
             input_shape = fake_quantize.in_port(0).data.get_shape()
-            target_shape = np.ones(len(input_shape), np.int)
+            target_shape = np.ones(len(input_shape), int)
             target_shape[axis] = input_shape[axis]
             mul_low_reshape = create_op_with_const_inputs(graph, Reshape, {1: int64_array(target_shape)},
                                                           {'name': node_name + '/Reshape/Mul/Low'})
@@ -43,7 +43,7 @@ def reduce_helper(func: callable, x: np.array, axis: tuple, keepdims: bool):
     if is_fully_defined(x):
         return result
     else:
-        return np.ma.masked_array(result, mask=np.ones(result.shape, dtype=np.bool))
+        return np.ma.masked_array(result, mask=np.ones(result.shape, dtype=bool))


 def reduce_infer(node: Node):
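For partially defined inputs, reduce_helper returns a fully masked array, and np.ones(..., dtype=bool) builds the all-True mask exactly as the np.bool spelling did. A standalone illustration of that masking:

    import numpy as np

    result = np.zeros((2, 3))
    masked = np.ma.masked_array(result, mask=np.ones(result.shape, dtype=bool))
    assert masked.mask.all()      # every element is hidden
    assert masked.count() == 0    # no unmasked elements remain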
@@ -73,7 +73,7 @@ def reduce_infer(node: Node):
         value = reduce_helper(reduce_map[node.op], in_value.copy(), axis=tuple(axis), keepdims=node.keep_dims)
         node.out_port(0).data.set_value(value)
     else:
-        used_dims = np.zeros(len(in_shape), dtype=np.bool)
+        used_dims = np.zeros(len(in_shape), dtype=bool)
         output_shape = in_shape.copy()

         for dim in axis:
@@ -231,7 +231,7 @@ class LogicalNot(Activation):

     @staticmethod
     def type_infer(node: Node):
-        node.out_port(0).set_data_type(np.bool)
+        node.out_port(0).set_data_type(bool)


 class Log(Activation):
@@ -115,7 +115,7 @@ class LogicalElementwise(Elementwise):
     @staticmethod
     def type_infer(node):
         override_data_type_of_constant(node)
-        node.out_port(0).set_data_type(np.bool)
+        node.out_port(0).set_data_type(bool)


 class Greater(LogicalElementwise):
@@ -90,7 +90,7 @@ class Select(Op):
             # one of the branches is None (which is not selected)
             # if we use np.where for such cases then dtype of output_value will be object (non numeric type)
             # and subsequent numpy operation on such tensors will fail
-            output_value = resulting_tensors[not np.bool(condition_value.item(0))]
+            output_value = resulting_tensors[not bool(condition_value.item(0))]
             if output_value is None:
                 return
             if broadcast_rule == 'numpy':
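The index expression relies on bool being an int subclass: a truthy condition yields index 0 (the then-branch tensor), a falsy one index 1 (the else-branch tensor), so swapping np.bool for the builtin keeps the indexing intact. A standalone sketch of that selection:

    import numpy as np

    then_value, else_value = np.ones(3), np.zeros(3)
    resulting_tensors = [then_value, else_value]
    condition_value = np.array([True])

    # not bool(True) == False == 0, so the then-branch tensor is selected
    output_value = resulting_tensors[not bool(condition_value.item(0))]
    assert output_value is then_value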
@@ -81,7 +81,7 @@ class SparseSegmentMean(Op):
             "Some value in indices tensor is out of range"

         # infer
-        num_adds = np.zeros(num_segments, dtype=np.int)
+        num_adds = np.zeros(num_segments, dtype=int)
         output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float32)
         output_shape = output_value.shape
         for i in range(len(segment_ids_value)):
@@ -81,7 +81,7 @@ class SparseSegmentSqrtN(Op):
             "Some value in indices tensor is out of range"

         # infer
-        num_adds = np.zeros(num_segments, dtype=np.int)
+        num_adds = np.zeros(num_segments, dtype=int)
         output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float32)
         output_shape = output_value.shape
         for i in range(len(segment_ids_value)):
@@ -152,6 +152,6 @@ class Unique(Op):
         # write result to output nodes
         j = 0
         for out_node_ind in node.out_nodes():
-            node.out_node(out_node_ind).value = mo_array(unique_output[j], dtype=np.float)
+            node.out_node(out_node_ind).value = mo_array(unique_output[j], dtype=float)
             node.out_node(out_node_ind).shape = int64_array(node.out_node(out_node_ind).value.shape)
             j += 1
@@ -346,7 +346,7 @@ class IREngine(object):
             'U1': (1, np.uint8),
             'U4': (1, np.uint8),
             'I4': (1, np.uint8),
-            'BOOL': (1, np.bool),
+            'BOOL': (1, bool),
             'BIN': (1, np.uint8),
             'U64': (8, np.uint64)
         }
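Each entry in this IREngine table pairs an element size in bytes with the type used to reinterpret raw IR weight buffers; 'BOOL' stays one byte per element, and np.frombuffer accepts the builtin bool as a dtype. A standalone sketch over an excerpt of the table (the variable names are assumptions, not the class's real attributes):

    import numpy as np

    precisions_map = {'BOOL': (1, bool), 'U64': (8, np.uint64)}  # excerpt
    size, dtype = precisions_map['BOOL']
    flags = np.frombuffer(b'\x01\x00\x01', dtype=dtype)
    assert flags.tolist() == [True, False, True]
    assert flags.itemsize == size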
@@ -9,7 +9,7 @@ from openvino.tools.mo.graph.graph import Node
 from openvino.tools.mo.pipeline.common import convert_const_node_value_type
 from openvino.tools.mo.utils.error import Error

-np_map_cast = {np.bool: lambda x: bool_cast(x),
+np_map_cast = {bool: lambda x: bool_cast(x),
                np.int8: lambda x: np.int8(x),
                np.int16: lambda x: np.int16(x),
                np.int32: lambda x: np.int32(x),
@@ -21,14 +21,14 @@ np_map_cast = {np.bool: lambda x: bool_cast(x),
                np.float16: lambda x: np.float16(x),
                np.float32: lambda x: np.float32(x),
                np.double: lambda x: np.double(x),
-               np.str: lambda x: np.str(x)}
+               str: lambda x: str(x)}


 def bool_cast(x):
     if isinstance(x, str):
         return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast'
     else:
-        return np.bool(x)
+        return bool(x)


 def override_data_type_of_constant(node: Node, lhs_idx: int = 0, rhs_idx: int = 1):
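np_map_cast maps a target type to its casting callable, so both the bool and str rows now key on builtins. Expected behavior, assuming np_map_cast and bool_cast are imported from this module:

    import numpy as np

    assert np_map_cast[bool]('true') is True
    assert np_map_cast[np.float32](1) == np.float32(1.0)
    assert np_map_cast[str](3.5) == '3.5'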
@@ -1,6 +1,6 @@
 networkx~=2.5; python_version <= "3.6"
 networkx<=2.8.8; python_version > "3.6"
-numpy>=1.16.6,<=1.23.4
+numpy>=1.16.6,<=1.24.0
 protobuf>=3.18.1,<4.0.0
 defusedxml>=0.7.1
 requests>=2.25.1
@@ -1,6 +1,6 @@
 networkx~=2.5; python_version <= "3.6"
 networkx<=2.8.8; python_version > "3.6"
-numpy>=1.16.6,<=1.23.4
+numpy>=1.16.6,<=1.24.0
 defusedxml>=0.7.1
 requests>=2.25.1
 fastjsonschema~=2.15.1
@@ -2,7 +2,7 @@ mxnet~=1.2.0; sys_platform == 'win32'
 mxnet>=1.7.0.post2,<=1.9.1; sys_platform != 'win32'
 networkx~=2.5; python_version <= "3.6"
 networkx<=2.8.8; python_version > "3.6"
-numpy>=1.16.6,<=1.23.4
+numpy>=1.16.6,<=1.24.0
 defusedxml>=0.7.1
 urllib3>=1.26.4
 requests>=2.25.1
@@ -1,7 +1,7 @@
 onnx>=1.8.1,<=1.12
 networkx~=2.5; python_version <= "3.6"
 networkx<=2.8.8; python_version > "3.6"
-numpy>=1.16.6,<=1.23.4
+numpy>=1.16.6,<=1.24.0
 defusedxml>=0.7.1
 requests>=2.25.1
 fastjsonschema~=2.15.1
@@ -1,4 +1,4 @@
-numpy>=1.16.6,<=1.23.4
+numpy>=1.16.6,<=1.24.0
 tensorflow>=1.15.5,<=2.10.0
 networkx~=2.5; python_version <= "3.6"
 networkx<=2.8.8; python_version > "3.6"
@@ -1,4 +1,4 @@
-numpy>=1.16.6,<=1.23.4
+numpy>=1.16.6,<=1.24.0
 tensorflow>=2.5,<=2.10.0
 networkx~=2.5; python_version <= "3.6"
 networkx<=2.8.8; python_version > "3.6"
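All six requirements files get the same one-line change: the NumPy ceiling moves from 1.23.4 to 1.24.0, the release that actually removes the deprecated aliases. A standalone sanity check for an installed environment (not part of the patch):

    import numpy as np

    # NumpyVersion supports direct comparison against version strings
    if np.lib.NumpyVersion(np.__version__) >= '1.24.0':
        # the deprecated alias is gone, so the builtin must be used
        assert not hasattr(np, 'float')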
@@ -35,7 +35,7 @@ class RestrictedAttentionComponentReplacerTest(unittest.TestCase):
         **regular_op('reshape_helper_1', {'type': 'Reshape'}),
         **const('reshape_helper_1_shape', int64_array([10, 1])),
         **regular_op('mul', {'type': 'Multiply'}),
-        **const('mul_scale', mo_array(0.5, dtype=np.float)),
+        **const('mul_scale', mo_array(0.5, dtype=float)),
         **regular_op('add', {'type': 'Add'}),
         **regular_op('softmax', {'type': 'SoftMax'}),
         **regular_op('reshape_helper_3', {'type': 'Reshape'}),
@@ -46,8 +46,8 @@ class TestPriorBoxClusteredExt(unittest.TestCase):
         self.assertRaises(AttributeError, PriorBoxClusteredFrontExtractor.extract, None)

     def test_priorbox_clustered_ext_ideal_numbers(self):
-        node = self._create_priorbox_clustered_node(width= np.array([2, 3], dtype=np.float),
-                                                    height=np.array([4, 5], dtype=np.float),
+        node = self._create_priorbox_clustered_node(width= np.array([2, 3], dtype=float),
+                                                    height=np.array([4, 5], dtype=float),
                                                     variance=np.array([0.2, 0.3, 0.2, 0.3]),
                                                     img_size=300, step=5.0, offset=0.6, flip=True)

@@ -58,8 +58,8 @@ class TestPriorBoxClusteredExt(unittest.TestCase):
             'type': 'PriorBoxClustered',
             'clip': 0,
             'flip': 1,
-            'width': np.array([2, 3], dtype=np.float),
-            'height': np.array([4, 5], dtype=np.float),
+            'width': np.array([2, 3], dtype=float),
+            'height': np.array([4, 5], dtype=float),
             'variance': [0.2, 0.3, 0.2, 0.3],
             'img_size': 300,
             'img_h': 0,
@@ -47,7 +47,7 @@ class TestPriorBoxExt(unittest.TestCase):
         self.assertRaises(AttributeError, PriorBoxFrontExtractor.extract, None)

     def test_priorbox_ext_ideal_numbers(self):
-        node = self._create_priorbox_node(aspect_ratio=np.array([2, 3], dtype=np.float),
+        node = self._create_priorbox_node(aspect_ratio=np.array([2, 3], dtype=float),
                                           variance=np.array([0.2, 0.3, 0.2, 0.3]),
                                           img_size=300, step=5.0, offset=0.6, flip=True)

@@ -58,7 +58,7 @@ class TestPriorBoxExt(unittest.TestCase):
             'type': 'PriorBox',
             'clip': 0,
             'flip': 1,
-            'aspect_ratio': np.array([2, 3], dtype=np.float),
+            'aspect_ratio': np.array([2, 3], dtype=float),
             'variance': [0.2, 0.3, 0.2, 0.3],
             'img_size': 300,
             'img_h': 0,
@@ -25,8 +25,8 @@ from unit_tests.utils.graph import regular_op_with_empty_data, connect, result,
 @generator
 class TestIf(unittest.TestCase):
     @generate(*[
-        (np.array([True], dtype=np.bool), shape_array([3]), shape_array([3])),
-        (np.array([False], dtype=np.bool), shape_array([3]), shape_array([2])),
+        (np.array([True], dtype=bool), shape_array([3]), shape_array([3])),
+        (np.array([False], dtype=bool), shape_array([3]), shape_array([2])),
         (shape_array(dynamic_dimension_value), shape_array([3]), shape_array([dynamic_dimension_value])),
     ])
     def test_simple_shape_inf(self, cond, output_port_0_shape, output_port_1_shape):
@@ -107,7 +107,7 @@ class TestPadOps(unittest.TestCase):
             nodes_with_edges_only=True,
         )
         out_shape = (1, 1, 5, 8)
-        mask = np.zeros(out_shape, dtype=np.bool)
+        mask = np.zeros(out_shape, dtype=bool)
         mask[0][0][1][2] = True
         ref_value = np.ma.masked_array(np.zeros(out_shape, dtype=np.int64), mask=mask, dtype=np.int64)
         ref_value[0][0][1][3] = 3
@@ -60,16 +60,16 @@ class TestSelect(unittest.TestCase):

     def test_1(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([5, 6], dtype=bool),
-                                                      then_value=np.ones([5, 6], dtype=np.float),
-                                                      else_value=np.zeros([5, 6], dtype=np.float),
-                                                      out_value=np.ones([5, 6], dtype=np.float))
+                                                      then_value=np.ones([5, 6], dtype=float),
+                                                      else_value=np.zeros([5, 6], dtype=float),
+                                                      out_value=np.ones([5, 6], dtype=float))
         self.assertTrue(flag, msg)

     def test_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool),
-                                                      then_value=np.ones([15, 3, 5], dtype=np.float),
-                                                      else_value=np.zeros([15, 1, 5], dtype=np.float),
-                                                      out_value=np.ones([15, 3, 5], dtype=np.float))
+                                                      then_value=np.ones([15, 3, 5], dtype=float),
+                                                      else_value=np.zeros([15, 1, 5], dtype=float),
+                                                      out_value=np.ones([15, 3, 5], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_no_condition(self):
@@ -95,44 +95,44 @@ class TestSelect(unittest.TestCase):

     def test_select_infer_condition_true_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.array([True], dtype=bool),
-                                                      then_value=np.ones([15, 3, 5], dtype=np.float),
-                                                      else_value=np.zeros([15, 1, 5], dtype=np.float),
-                                                      out_value=np.ones([15, 3, 5], dtype=np.float))
+                                                      then_value=np.ones([15, 3, 5], dtype=float),
+                                                      else_value=np.zeros([15, 1, 5], dtype=float),
+                                                      out_value=np.ones([15, 3, 5], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_condition_true_then_and_else_are_scalars(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.array([True], dtype=bool),
-                                                      then_value=np.array(3, dtype=np.float),
-                                                      else_value=np.array(1, dtype=np.float),
-                                                      out_value=np.array([3], dtype=np.float))
+                                                      then_value=np.array(3, dtype=float),
+                                                      else_value=np.array(1, dtype=float),
+                                                      out_value=np.array([3], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_condition_true_then_and_else_are_scalars_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.array(True, dtype=bool),
-                                                      then_value=np.array(3, dtype=np.float),
-                                                      else_value=np.array(1, dtype=np.float),
-                                                      out_value=np.array(3, dtype=np.float))
+                                                      then_value=np.array(3, dtype=float),
+                                                      else_value=np.array(1, dtype=float),
+                                                      out_value=np.array(3, dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_condition_false_then_and_else_are_scalars(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.array([False], dtype=bool),
-                                                      then_value=np.array(3, dtype=np.float),
-                                                      else_value=np.array(1, dtype=np.float),
-                                                      out_value=np.array([1], dtype=np.float))
+                                                      then_value=np.array(3, dtype=float),
+                                                      else_value=np.array(1, dtype=float),
+                                                      out_value=np.array([1], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_condition_false_then_and_else_are_scalars_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.array(False, dtype=bool),
-                                                      then_value=np.array(3, dtype=np.float),
-                                                      else_value=np.array(1, dtype=np.float),
-                                                      out_value=np.array(1, dtype=np.float))
+                                                      then_value=np.array(3, dtype=float),
+                                                      else_value=np.array(1, dtype=float),
+                                                      out_value=np.array(1, dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_condition_false_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.array([False], dtype=bool),
-                                                      then_value=np.ones([15, 3, 5], dtype=np.float),
-                                                      else_value=np.zeros([15, 1, 5], dtype=np.float),
-                                                      out_value=np.zeros([15, 3, 5], dtype=np.float))
+                                                      then_value=np.ones([15, 3, 5], dtype=float),
+                                                      else_value=np.zeros([15, 1, 5], dtype=float),
+                                                      out_value=np.zeros([15, 3, 5], dtype=float))
         self.assertTrue(flag, msg)

     # if one of the branches is None then np.where shouldn't be used to avoid object dtype in output
@@ -142,57 +142,57 @@ class TestSelect(unittest.TestCase):
     def test_select_infer_None_then_branch_1(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.zeros([15, 3, 5], dtype=bool),
                                                       then_value=None, then_shape=[15, 3, 5],
-                                                      else_value=np.ones([15, 1, 5], dtype=np.float),
-                                                      out_value=np.ones([15, 3, 5], dtype=np.float))
+                                                      else_value=np.ones([15, 1, 5], dtype=float),
+                                                      out_value=np.ones([15, 3, 5], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_None_then_branch_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool),
                                                       then_value=None, then_shape=[15, 3, 5],
-                                                      else_value=np.ones([15, 1, 5], dtype=np.float),
+                                                      else_value=np.ones([15, 1, 5], dtype=float),
                                                       out_value=None)
         self.assertTrue(flag, msg)

     def test_select_infer_None_else_branch_1(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool),
-                                                      then_value=np.ones([15, 1, 5], dtype=np.float),
+                                                      then_value=np.ones([15, 1, 5], dtype=float),
                                                       else_value=None, else_shape=[15, 3, 5],
-                                                      out_value=np.ones([15, 3, 5], dtype=np.float))
+                                                      out_value=np.ones([15, 3, 5], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_infer_None_else_branch_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.zeros([15, 3, 5], dtype=bool),
-                                                      then_value=np.ones([15, 1, 5], dtype=np.float),
+                                                      then_value=np.ones([15, 1, 5], dtype=float),
                                                       else_value=None, else_shape=[15, 3, 5],
                                                       out_value=None)
         self.assertTrue(flag, msg)

     def test_select_broadcast_1(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 5], dtype=bool),
-                                                      then_value=np.ones([], dtype=np.float),
-                                                      else_value=np.zeros([2, 3, 4, 5], dtype=np.float),
-                                                      out_value=np.ones([2, 3, 4, 5], dtype=np.float))
+                                                      then_value=np.ones([], dtype=float),
+                                                      else_value=np.zeros([2, 3, 4, 5], dtype=float),
+                                                      out_value=np.ones([2, 3, 4, 5], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_broadcast_2(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 1], dtype=bool),
-                                                      then_value= np.ones([1, 3, 1, 5], dtype=np.float),
-                                                      else_value=np.zeros([2, 1, 1, 5], dtype=np.float),
-                                                      out_value=np.ones([2, 3, 4, 5], dtype=np.float))
+                                                      then_value= np.ones([1, 3, 1, 5], dtype=float),
+                                                      else_value=np.zeros([2, 1, 1, 5], dtype=float),
+                                                      out_value=np.ones([2, 3, 4, 5], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_broadcast_3(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 1, 1], dtype=bool),
-                                                      then_value= np.ones([2, 3, 4, 5], dtype=np.float),
-                                                      else_value=np.zeros([2, 1, 1, 5], dtype=np.float),
-                                                      out_value=np.ones([2, 3, 4, 5], dtype=np.float))
+                                                      then_value= np.ones([2, 3, 4, 5], dtype=float),
+                                                      else_value=np.zeros([2, 1, 1, 5], dtype=float),
+                                                      out_value=np.ones([2, 3, 4, 5], dtype=float))
         self.assertTrue(flag, msg)

     def test_select_broadcast_4(self):
         flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 5], dtype=bool),
-                                                      then_value= np.ones([5], dtype=np.float),
-                                                      else_value=np.zeros([2, 3, 4, 5], dtype=np.float),
-                                                      out_value=np.ones([2, 3, 4, 5], dtype=np.float))
+                                                      then_value= np.ones([5], dtype=float),
+                                                      else_value=np.zeros([2, 3, 4, 5], dtype=float),
+                                                      out_value=np.ones([2, 3, 4, 5], dtype=float))
         self.assertTrue(flag, msg)

     # when output shape is broadcasted from condition, then, and else shapes
@@ -40,9 +40,9 @@ edges2 = [('input_data', 'sparse_segment_mean_node', {'in': 0}),
           ('input_segment_ids', 'sparse_segment_mean_node', {'in': 2}),
           ('sparse_segment_mean_node', 'output_segments', {'out': 0})]

-inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float)},
-           'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=np.float)},
-           'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1, 2, 2], dtype=np.float)}}
+inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=float)},
+           'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=float)},
+           'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1, 2, 2], dtype=float)}}

 class TestSparseSegmentMean(unittest.TestCase):
     def test_partial_infer(self):
@@ -76,7 +76,7 @@ class TestSparseSegmentMean(unittest.TestCase):

         # prepare reference results
         ref_output_segments_shape = int64_array([3, 4])
-        ref_output_segments_value = np.array([[3, 4, 5, 6], [-1, -2, -3, -4], [2, 2, 2, 2]], dtype=np.float)
+        ref_output_segments_value = np.array([[3, 4, 5, 6], [-1, -2, -3, -4], [2, 2, 2, 2]], dtype=float)

         # get resulted shapes
         res_output_segments_shape = graph.node['output_segments']['shape']
@@ -40,9 +40,9 @@ edges2 = [('input_data', 'sparse_segment_sqrtn_node', {'in': 0}),
           ('input_segment_ids', 'sparse_segment_sqrtn_node', {'in': 2}),
           ('sparse_segment_sqrtn_node', 'output_segments', {'out': 0})]

-inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float)},
-           'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=np.float)},
-           'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 0, 0, 2], dtype=np.float)}}
+inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=float)},
+           'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=float)},
+           'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 0, 0, 2], dtype=float)}}

 class TestSparseSegmentSqrtN(unittest.TestCase):
     def test_partial_infer(self):
@@ -76,7 +76,7 @@ class TestSparseSegmentSqrtN(unittest.TestCase):

         # prepare reference results
         ref_output_segments_shape = int64_array([3, 4])
-        ref_output_segments_value = np.array([[2, 2, 2, 2], [0, 0, 0, 0], [5, 6, 7, 8]], dtype=np.float)
+        ref_output_segments_value = np.array([[2, 2, 2, 2], [0, 0, 0, 0], [5, 6, 7, 8]], dtype=float)

         # get resulted shapes
         res_output_segments_shape = graph.node['output_segments']['shape']
@@ -40,9 +40,9 @@ edges2 = [('input_data', 'sparse_segment_sum_node', {'in': 0}),
           ('input_segment_ids', 'sparse_segment_sum_node', {'in': 2}),
           ('sparse_segment_sum_node', 'output_segments', {'out': 0})]

-inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float)},
-           'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 1, 2], dtype=np.float)},
-           'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1], dtype=np.float)}}
+inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=float)},
+           'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 1, 2], dtype=float)},
+           'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1], dtype=float)}}

 class TestSparseSegmentSum(unittest.TestCase):
     def test_partial_infer(self):
@@ -76,7 +76,7 @@ class TestSparseSegmentSum(unittest.TestCase):

         # prepare reference results
         ref_output_segments_shape = int64_array([2, 4])
-        ref_output_segments_value = np.array([[0, 0, 0, 0], [5, 6, 7, 8]], dtype=np.float)
+        ref_output_segments_value = np.array([[0, 0, 0, 0], [5, 6, 7, 8]], dtype=float)

         # get resulted shapes
         res_output_segments_shape = graph.node['output_segments']['shape']
@@ -162,7 +162,7 @@ class TestUnique(unittest.TestCase):
              ('unique_node', 'output_indices', {'out': 1}),
              ('unique_node', 'output_counts', {'out': 2})]
     inputs_ = {'input': {'shape': int64_array([10]),
-                         'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=np.float)},
+                         'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=float)},
               'unique_node': {
                   'sorted': 'false',
                   'return_inverse': 'true',
@@ -175,11 +175,11 @@ class TestUnique(unittest.TestCase):

         # prepare reference results
         ref_output_uniques_shape = int64_array([5])
-        ref_output_uniques_value = np.array([8.0, 1.0, 2.0, 5.0, 0.0], dtype=np.float)
+        ref_output_uniques_value = np.array([8.0, 1.0, 2.0, 5.0, 0.0], dtype=float)
         ref_output_indices_shape = int64_array([10])
-        ref_output_indices_value = np.array([0.0, 1.0, 2.0, 1.0, 0.0, 3.0, 1.0, 3.0, 4.0, 4.0], dtype=np.float)
+        ref_output_indices_value = np.array([0.0, 1.0, 2.0, 1.0, 0.0, 3.0, 1.0, 3.0, 4.0, 4.0], dtype=float)
         ref_output_counts_shape = int64_array([5])
-        ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=np.float)
+        ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=float)

         # get resulted shapes
         res_output_uniques_shape = graph.node['output_uniques']['shape']
@@ -217,7 +217,7 @@ class TestUnique(unittest.TestCase):
              ('unique_node', 'output_indices', {'out': 1}),
              ('unique_node', 'output_counts', {'out': 2})]
     inputs_ = {'input': {'shape': int64_array([10]),
-                         'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=np.float)},
+                         'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=float)},
               'unique_node': {
                   'sorted': 'true',
                   'return_inverse': 'true',
@@ -230,11 +230,11 @@ class TestUnique(unittest.TestCase):

         # prepare reference results
         ref_output_uniques_shape = int64_array([5])
-        ref_output_uniques_value = np.array([0.0, 1.0, 2.0, 5.0, 8.0], dtype=np.float)
+        ref_output_uniques_value = np.array([0.0, 1.0, 2.0, 5.0, 8.0], dtype=float)
         ref_output_indices_shape = int64_array([10])
-        ref_output_indices_value = np.array([4.0, 1.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 0.0, 0.0], dtype=np.float)
+        ref_output_indices_value = np.array([4.0, 1.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 0.0, 0.0], dtype=float)
         ref_output_counts_shape = int64_array([5])
-        ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=np.float)
+        ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=float)

         # get resulted shapes
         res_output_uniques_shape = graph.node['output_uniques']['shape']