[nGraph] Python tests - move to int32 (#1966)
Parent: aeaffef11c
Commit: 757b1f0d9e
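Summary (not the original commit message; paraphrased from the diff below): the nGraph and ONNX importer Python tests switch their NumPy test data from default integer dtypes to explicit np.int32, replace or drop several blanket xfail markers (xfail_issue_34323, xfail_issue_35926) in favour of more specific xfail/skip reasons, remove the *_using_constants test variants, and pass constant-like secondary inputs (axes, shapes, indices, repeats) to run_op_node as extra arguments rather than as node inputs. The recurring dtype pattern, with illustrative values, looks like this:

    # before: NumPy picks the dtype (int64 on most platforms)
    input_data = np.array([1, 2, 3])
    # after: explicit 32-bit integer test data
    input_data = np.array([1, 2, 3], dtype=np.int32)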
@@ -119,9 +119,8 @@ def test_serialization():
     pass


-@xfail_issue_34323
 def test_broadcast_1():
-    input_data = np.array([1, 2, 3])
+    input_data = np.array([1, 2, 3], dtype=np.int32)

     new_shape = [3, 3]
     expected = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
@@ -129,18 +128,16 @@ def test_broadcast_1():
     assert np.allclose(result, expected)


-@xfail_issue_34323
 def test_broadcast_2():
-    input_data = np.arange(4)
+    input_data = np.arange(4, dtype=np.int32)
     new_shape = [3, 4, 2, 4]
     expected = np.broadcast_to(input_data, new_shape)
     result = run_op_node([input_data], ng.broadcast, new_shape)
     assert np.allclose(result, expected)


-@xfail_issue_34323
 def test_broadcast_3():
-    input_data = np.array([1, 2, 3])
+    input_data = np.array([1, 2, 3], dtype=np.int32)
     new_shape = [3, 3]
     axis_mapping = [0]
     expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
@@ -149,10 +146,10 @@ def test_broadcast_3():
     assert np.allclose(result, expected)


-@xfail_issue_34323
+@pytest.mark.xfail(reason="AssertionError: assert dtype('float32') == <class 'bool'")
 @pytest.mark.parametrize(
     "destination_type, input_data",
-    [(bool, np.zeros((2, 2), dtype=int)), ("boolean", np.zeros((2, 2), dtype=int))],
+    [(bool, np.zeros((2, 2), dtype=np.int32)), ("boolean", np.zeros((2, 2), dtype=np.int32))],
 )
 def test_convert_to_bool(destination_type, input_data):
     expected = np.array(input_data, dtype=bool)
@@ -179,7 +176,7 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type)
     assert np.array(result).dtype == expected_type


-@xfail_issue_34323
+@xfail_issue_35929
 @pytest.mark.parametrize(
     "destination_type, expected_type",
     [
@@ -202,7 +199,7 @@ def test_convert_to_int(destination_type, expected_type):
     assert np.array(result).dtype == expected_type


-@xfail_issue_34323
+@xfail_issue_35929
 @pytest.mark.parametrize(
     "destination_type, expected_type",
     [
@@ -290,7 +287,7 @@ def test_backend_config():

 @xfail_issue_34323
 def test_result():
-    node = [[11, 10], [1, 8], [3, 4]]
+    node = np.array([[11, 10], [1, 8], [3, 4]])
     result = run_op_node([node], ng.result)
     assert np.allclose(result, node)

@@ -14,11 +14,11 @@
 # limitations under the License.
 # ******************************************************************************
 import numpy as np
+import pytest

 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
-from tests import xfail_issue_35926, xfail_issue_34323


 def test_reverse_sequence():
@@ -166,14 +166,14 @@ def test_pad_edge():
     assert np.allclose(result, expected)


-@xfail_issue_35926
+@pytest.mark.xfail(reason="AssertionError")
 def test_pad_constant():
     input_data = np.arange(1, 13).reshape([3, 4])
     pads_begin = np.array([0, 1], dtype=np.int32)
     pads_end = np.array([2, 3], dtype=np.int32)

-    input_param = ng.parameter(input_data.shape, name="input", dtype=np.int64)
-    model = ng.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int64))
+    input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32)
+    model = ng.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int32))

     runtime = get_runtime()
     computation = runtime.computation(model, input_param)
@@ -191,12 +191,11 @@ def test_pad_constant():
     assert np.allclose(result, expected)


-@xfail_issue_34323
 def test_select():
-    cond = [[False, False], [True, False], [True, True]]
-    then_node = [[-1, 0], [1, 2], [3, 4]]
-    else_node = [[11, 10], [9, 8], [7, 6]]
-    excepted = [[11, 10], [1, 8], [3, 4]]
+    cond = np.array([[False, False], [True, False], [True, True]])
+    then_node = np.array([[-1, 0], [1, 2], [3, 4]], dtype=np.int32)
+    else_node = np.array([[11, 10], [9, 8], [7, 6]], dtype=np.int32)
+    excepted = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.int32)

     result = run_op_node([cond, then_node, else_node], ng.select)
     assert np.allclose(result, excepted)

@@ -58,13 +58,12 @@ def test_lrn():
     )


-@xfail_issue_34323
 def test_lrn_factory():
     alpha = 0.0002
     beta = 0.5
     bias = 2.0
     nsize = 3
-    axis = [1]
+    axis = np.array([1], dtype=np.int32)
     x = np.array(
         [
             [
@@ -99,20 +98,20 @@ def test_lrn_factory():
         ],
         dtype=np.float32,
     )
-    result = run_op_node([x, axis], ng.lrn, alpha, beta, bias, nsize)
+    result = run_op_node([x], ng.lrn, axis, alpha, beta, bias, nsize)

     assert np.allclose(result, excepted)


 @xfail_issue_35929
 def test_batch_norm_inference():
-    data = [[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]
-    gamma = [2.0, 3.0, 4.0]
-    beta = [0.0, 0.0, 0.0]
-    mean = [0.0, 0.0, 0.0]
-    variance = [1.0, 1.0, 1.0]
+    data = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]])
+    gamma = np.array([2.0, 3.0, 4.0])
+    beta = np.array([0.0, 0.0, 0.0])
+    mean = np.array([0.0, 0.0, 0.0])
+    variance = np.array([1.0, 1.0, 1.0])
     epsilon = 9.99e-06
-    excepted = [[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]]
+    excepted = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]])

     result = run_op_node([data, gamma, beta, mean, variance], ng.batch_norm_inference, epsilon)

@@ -21,7 +21,6 @@ import pytest
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
-from tests import xfail_issue_34323


 @pytest.mark.parametrize(
@@ -202,10 +201,9 @@ def test_binary_operators_with_scalar(operator, numpy_function):
     assert np.allclose(result, expected)


-@xfail_issue_34323
 def test_multiply():
-    A = np.arange(48).reshape((8, 1, 6, 1))
-    B = np.arange(35).reshape((7, 1, 5))
+    A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1))
+    B = np.arange(35, dtype=np.int32).reshape((7, 1, 5))

     expected = np.multiply(A, B)
     result = run_op_node([A, B], ng.multiply)

@@ -19,7 +19,7 @@ import pytest
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node, run_op_numeric_data
-from tests import xfail_issue_34323, xfail_issue_35929, xfail_issue_35926
+from tests import xfail_issue_34323, xfail_issue_35929


 def test_concat():
@@ -38,7 +38,9 @@ def test_concat():


 @xfail_issue_34323
-@pytest.mark.parametrize("val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))])
+@pytest.mark.parametrize(
+    "val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))]
+)
 def test_constant_from_bool(val_type, value):
     expected = np.array(value, dtype=val_type)
     result = run_op_numeric_data(value, ng.constant, val_type)
@@ -66,9 +68,13 @@ def test_constant_from_scalar(val_type, value):
     assert np.allclose(result, expected)


-@pytest.mark.parametrize("val_type",
-                         [pytest.param(np.float32, marks=xfail_issue_34323),
-                          pytest.param(np.float64, marks=xfail_issue_35929)])
+@pytest.mark.parametrize(
+    "val_type",
+    [
+        pytest.param(np.float32, marks=xfail_issue_34323),
+        pytest.param(np.float64, marks=xfail_issue_35929),
+    ],
+)
 def test_constant_from_float_array(val_type):
     np.random.seed(133391)
     input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
@@ -92,7 +98,9 @@ def test_constant_from_float_array(val_type):
 )
 def test_constant_from_integer_array(val_type, range_start, range_end):
     np.random.seed(133391)
-    input_data = np.array(np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type)
+    input_data = np.array(
+        np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type
+    )
     result = run_op_numeric_data(input_data, ng.constant, val_type)
     assert np.allclose(result, input_data)

@@ -102,7 +110,9 @@ def test_broadcast_numpy():
     target_shape_shape = [4]

     data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
-    target_shape_parameter = ng.parameter(target_shape_shape, name="Target_shape", dtype=np.int64)
+    target_shape_parameter = ng.parameter(
+        target_shape_shape, name="Target_shape", dtype=np.int64
+    )

     node = ng.broadcast(data_parameter, target_shape_parameter)

@@ -115,7 +125,9 @@ def test_broadcast_bidirectional():
     target_shape_shape = [4]

     data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
-    target_shape_parameter = ng.parameter(target_shape_shape, name="Target_shape", dtype=np.int64)
+    target_shape_parameter = ng.parameter(
+        target_shape_shape, name="Target_shape", dtype=np.int64
+    )

     node = ng.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL")

@@ -123,69 +135,68 @@ def test_broadcast_bidirectional():
     assert node.get_output_size() == 1


-@xfail_issue_35926
 def test_gather():
-    input_data = np.array([1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32).reshape((3, 3))
-    input_indices = np.array([0, 2], np.int64).reshape(1, 2)
-    input_axes = np.array([1], np.int64)
+    input_data = np.array(
+        [1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
+    ).reshape((3, 3))
+    input_indices = np.array([0, 2], np.int32).reshape(1, 2)
+    input_axes = np.array([1], np.int32)

-    expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape((3, 1, 2))
+    expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
+        (3, 1, 2)
+    )

-    result = run_op_node([input_data, input_indices, input_axes], ng.gather)
+    result = run_op_node([input_data], ng.gather, input_indices, input_axes)
     assert np.allclose(result, expected)


-@xfail_issue_34323
-def test_gather_using_constants():
-    input_data = np.array([1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32).reshape((3, 3))
-    input_indices = np.array([0, 2], np.int64).reshape(1, 2)
-    input_axes = np.array([1], np.int64)
-
-    expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape((3, 1, 2))
-
-    result = run_op_numeric_data(input_data, ng.gather, input_indices, input_axes)
-    assert np.allclose(result, expected)
-
-
-@xfail_issue_34323
 def test_transpose():
-    input_tensor = np.arange(3 * 3 * 224 * 224).reshape((3, 3, 224, 224))
-    input_order = np.array([0, 2, 3, 1])
+    input_tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape(
+        (3, 3, 224, 224)
+    )
+    input_order = np.array([0, 2, 3, 1], dtype=np.int32)

-    result = run_op_node([input_tensor, input_order], ng.transpose)
+    result = run_op_node([input_tensor], ng.transpose, input_order)

     expected = np.transpose(input_tensor, input_order)

     assert np.allclose(result, expected)


-@xfail_issue_34323
+@pytest.mark.xfail(
+    reason="Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation."
+)
 def test_tile():
-    input_tensor = np.arange(6).reshape((2, 1, 3))
-    repeats = np.array([2, 1])
+    input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3))
+    repeats = np.array([2, 1], dtype=np.int32)

-    result = run_op_node([input_tensor, repeats], ng.tile)
+    result = run_op_node([input_tensor], ng.tile, repeats)

     expected = np.array([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]).reshape((2, 2, 3))

     assert np.allclose(result, expected)


-@xfail_issue_34323
+@pytest.mark.xfail(
+    reason="RuntimeError: Check 'shape_size(get_input_shape(0)) == shape_size(output_shape)'"
+)
 def test_strided_slice():
     input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4))
-    begin = np.array([1, 0], dtype=np.int64)
-    end = np.array([0, 0], dtype=np.int64)
-    strides = np.array([1, 1], dtype=np.int64)
-    begin_mask = np.array([0, 0, 0], dtype=np.int64)
-    end_mask = np.array([0, 0, 0], dtype=np.int64)
-    new_axis_mask = np.array([0, 1, 0], dtype=np.int64)
-    shrink_axis_mask = np.array([1, 0, 0], dtype=np.int64)
-    ellipsis_mask = np.array([0, 0, 0], dtype=np.int64)
+    begin = np.array([1, 0], dtype=np.int32)
+    end = np.array([0, 0], dtype=np.int32)
+    strides = np.array([1, 1], dtype=np.int32)
+    begin_mask = np.array([0, 0, 0], dtype=np.int32)
+    end_mask = np.array([0, 0, 0], dtype=np.int32)
+    new_axis_mask = np.array([0, 1, 0], dtype=np.int32)
+    shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32)
+    ellipsis_mask = np.array([0, 0, 0], dtype=np.int32)

     result = run_op_node(
-        [input_tensor, begin, end, strides],
+        [input_tensor],
         ng.strided_slice,
+        begin,
+        end,
+        strides,
         begin_mask,
         end_mask,
         new_axis_mask,
@@ -193,20 +204,21 @@ def test_strided_slice():
         ellipsis_mask,
     )

-    expected = np.array([12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32).reshape((1, 3, 4))
+    expected = np.array(
+        [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32
+    ).reshape((1, 3, 4))

     assert np.allclose(result, expected)


-@xfail_issue_34323
 def test_reshape_v1():
     A = np.arange(1200, dtype=np.float32).reshape((2, 5, 5, 24))
-    shape = np.array([0, -1, 4])
+    shape = np.array([0, -1, 4], dtype=np.int32)
     special_zero = True

     expected_shape = np.array([2, 150, 4])
     expected = np.reshape(A, expected_shape)
-    result = run_op_node([A, shape], ng.reshape, special_zero)
+    result = run_op_node([A], ng.reshape, shape, special_zero)

     assert np.allclose(result, expected)

@@ -18,8 +18,8 @@ import pytest

 import ngraph as ng
 from ngraph.impl import Shape, Type
-from tests.test_ngraph.util import run_op_node, run_op_numeric_data
-from tests import xfail_issue_35929, xfail_issue_34323, xfail_issue_36483
+from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_35929, xfail_issue_36483


 @xfail_issue_35929
@@ -59,40 +59,6 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     assert np.allclose(result, expected, rtol=0.001)


-@xfail_issue_35929
-@pytest.mark.parametrize(
-    "ng_api_fn, numpy_fn, range_start, range_end",
-    [
-        (ng.absolute, np.abs, -1, 1),
-        (ng.abs, np.abs, -1, 1),
-        (ng.acos, np.arccos, -1, 1),
-        (ng.asin, np.arcsin, -1, 1),
-        (ng.atan, np.arctan, -100.0, 100.0),
-        (ng.ceiling, np.ceil, -100.0, 100.0),
-        (ng.ceil, np.ceil, -100.0, 100.0),
-        (ng.cos, np.cos, -100.0, 100.0),
-        (ng.cosh, np.cosh, -100.0, 100.0),
-        (ng.exp, np.exp, -100.0, 100.0),
-        (ng.floor, np.floor, -100.0, 100.0),
-        (ng.log, np.log, 0, 100.0),
-        (ng.relu, lambda x: np.maximum(0, x), -100.0, 100.0),
-        (ng.sign, np.sign, -100.0, 100.0),
-        (ng.sin, np.sin, -100.0, 100.0),
-        (ng.sinh, np.sinh, -100.0, 100.0),
-        (ng.sqrt, np.sqrt, 0.0, 100.0),
-        (ng.tan, np.tan, -1.0, 1.0),
-        (ng.tanh, np.tanh, -100.0, 100.0),
-    ],
-)
-def test_unary_op_array_using_constants(ng_api_fn, numpy_fn, range_start, range_end):
-    np.random.seed(133391)
-    input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)
-    expected = numpy_fn(input_data)
-
-    result = run_op_numeric_data(input_data, ng_api_fn)
-    assert np.allclose(result, expected, rtol=0.001)
-
-
 @pytest.mark.skip(reason="Segmentation fault")
 @pytest.mark.parametrize(
     "ng_api_fn, numpy_fn, input_data",
@@ -125,41 +91,6 @@ def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
     assert np.allclose(result, expected)


-@xfail_issue_34323
-@pytest.mark.parametrize(
-    "ng_api_fn, numpy_fn, input_data",
-    [
-        (ng.absolute, np.abs, np.float32(-3)),
-        (ng.abs, np.abs, np.float32(-3)),
-        (ng.acos, np.arccos, np.float32(-0.5)),
-        (ng.acosh, np.arccosh, np.float32(-0.5)),
-        (ng.asin, np.arcsin, np.float32(-0.5)),
-        (ng.asinh, np.arcsinh, np.float32(-0.5)),
-        (ng.atan, np.arctan, np.float32(-0.5)),
-        (ng.atanh, np.arctanh, np.float32(-0.5)),
-        (ng.ceiling, np.ceil, np.float32(1.5)),
-        (ng.ceil, np.ceil, np.float32(1.5)),
-        (ng.cos, np.cos, np.float32(np.pi / 4.0)),
-        (ng.cosh, np.cosh, np.float32(np.pi / 4.0)),
-        (ng.exp, np.exp, np.float32(1.5)),
-        (ng.floor, np.floor, np.float32(1.5)),
-        (ng.log, np.log, np.float32(1.5)),
-        (ng.relu, lambda x: np.maximum(0, x), np.float32(-0.125)),
-        (ng.sign, np.sign, np.float32(0.0)),
-        (ng.sin, np.sin, np.float32(np.pi / 4.0)),
-        (ng.sinh, np.sinh, np.float32(0.0)),
-        (ng.sqrt, np.sqrt, np.float32(3.5)),
-        (ng.tan, np.tan, np.float32(np.pi / 4.0)),
-        (ng.tanh, np.tanh, np.float32(0.1234)),
-    ],
-)
-def test_unary_op_scalar_using_constants(ng_api_fn, numpy_fn, input_data):
-    expected = numpy_fn(input_data)
-
-    result = run_op_numeric_data(input_data, ng_api_fn)
-    assert np.allclose(result, expected)
-
-
 @pytest.mark.parametrize(
     "input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))]
 )
@@ -170,17 +101,6 @@ def test_logical_not(input_data):
     assert np.allclose(result, expected)


-@xfail_issue_34323
-@pytest.mark.parametrize(
-    "input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))]
-)
-def test_logical_not_using_constants(input_data):
-    expected = np.logical_not(input_data)
-
-    result = run_op_numeric_data(input_data, ng.logical_not)
-    assert np.allclose(result, expected)
-
-
 def test_sigmoid():
     input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32)
     result = run_op_node([input_data], ng.sigmoid)
@@ -193,7 +113,7 @@ def test_sigmoid():
     assert np.allclose(result, expected)


-@xfail_issue_34323
+@pytest.mark.skip(reason="Wrong results are broadcasted along given axis")
 def test_softmax():
     axis = 0
     input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
@@ -213,15 +133,6 @@ def test_erf():
     assert np.allclose(result, expected)


-@xfail_issue_34323
-def test_erf_using_constants():
-    input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32)
-    expected = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0]
-
-    result = run_op_numeric_data(input_tensor, ng.erf)
-    assert np.allclose(result, expected)
-
-
 def test_hswish():
     float_dtype = np.float32
     data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data")

@@ -22,22 +22,21 @@ from tests.test_ngraph.util import run_op_node
 from tests import xfail_issue_34323


-@xfail_issue_34323
 @pytest.mark.parametrize(
     "ng_api_helper, numpy_function, reduction_axes",
     [
-        (ng.reduce_max, np.max, [0, 1, 2, 3]),
-        (ng.reduce_min, np.min, [0, 1, 2, 3]),
-        (ng.reduce_sum, np.sum, [0, 1, 2, 3]),
-        (ng.reduce_prod, np.prod, [0, 1, 2, 3]),
-        (ng.reduce_max, np.max, [0]),
-        (ng.reduce_min, np.min, [0]),
-        (ng.reduce_sum, np.sum, [0]),
-        (ng.reduce_prod, np.prod, [0]),
-        (ng.reduce_max, np.max, [0, 2]),
-        (ng.reduce_min, np.min, [0, 2]),
-        (ng.reduce_sum, np.sum, [0, 2]),
-        (ng.reduce_prod, np.prod, [0, 2]),
+        (ng.reduce_max, np.max, np.array([0, 1, 2, 3])),
+        (ng.reduce_min, np.min, np.array([0, 1, 2, 3])),
+        (ng.reduce_sum, np.sum, np.array([0, 1, 2, 3])),
+        (ng.reduce_prod, np.prod, np.array([0, 1, 2, 3])),
+        (ng.reduce_max, np.max, np.array([0])),
+        (ng.reduce_min, np.min, np.array([0])),
+        (ng.reduce_sum, np.sum, np.array([0])),
+        (ng.reduce_prod, np.prod, np.array([0])),
+        (ng.reduce_max, np.max, np.array([0, 2])),
+        (ng.reduce_min, np.min, np.array([0, 2])),
+        (ng.reduce_sum, np.sum, np.array([0, 2])),
+        (ng.reduce_prod, np.prod, np.array([0, 2])),
     ],
 )
 def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
@@ -46,20 +45,20 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     input_data = np.random.randn(*shape).astype(np.float32)

     expected = numpy_function(input_data, axis=tuple(reduction_axes))
-    result = run_op_node([input_data, reduction_axes], ng_api_helper)
+    result = run_op_node([input_data], ng_api_helper, reduction_axes)
     assert np.allclose(result, expected)


-@xfail_issue_34323
+@pytest.mark.xfail(reason="RuntimeError: Incorrect Reduce layer type")
 @pytest.mark.parametrize(
     "ng_api_helper, numpy_function, reduction_axes",
     [
-        (ng.reduce_logical_and, np.logical_and.reduce, [0]),
-        (ng.reduce_logical_or, np.logical_or.reduce, [0]),
-        (ng.reduce_logical_and, np.logical_and.reduce, [0, 2]),
-        (ng.reduce_logical_or, np.logical_or.reduce, [0, 2]),
-        (ng.reduce_logical_and, np.logical_and.reduce, [0, 1, 2, 3]),
-        (ng.reduce_logical_or, np.logical_or.reduce, [0, 1, 2, 3]),
+        (ng.reduce_logical_and, np.logical_and.reduce, np.array([0])),
+        (ng.reduce_logical_or, np.logical_or.reduce, np.array([0])),
+        (ng.reduce_logical_and, np.logical_and.reduce, np.array([0, 2])),
+        (ng.reduce_logical_or, np.logical_or.reduce, np.array([0, 2])),
+        (ng.reduce_logical_and, np.logical_and.reduce, np.array([0, 1, 2, 3])),
+        (ng.reduce_logical_or, np.logical_or.reduce, np.array([0, 1, 2, 3])),
     ],
 )
 def test_reduction_logical_ops(ng_api_helper, numpy_function, reduction_axes):
@@ -68,7 +67,7 @@ def test_reduction_logical_ops(ng_api_helper, numpy_function, reduction_axes):
     input_data = np.random.randn(*shape).astype(np.bool)

     expected = numpy_function(input_data, axis=tuple(reduction_axes))
-    result = run_op_node([input_data, reduction_axes], ng_api_helper)
+    result = run_op_node([input_data], ng_api_helper, reduction_axes)
     assert np.allclose(result, expected)


@@ -84,13 +83,12 @@ def test_topk():
     assert list(node.get_output_shape(1)) == [6, 3, 10, 24]


-@xfail_issue_34323
 @pytest.mark.parametrize(
     "ng_api_helper, numpy_function, reduction_axes",
     [
-        (ng.reduce_mean, np.mean, [0, 1, 2, 3]),
-        (ng.reduce_mean, np.mean, [0]),
-        (ng.reduce_mean, np.mean, [0, 2]),
+        (ng.reduce_mean, np.mean, np.array([0, 1, 2, 3])),
+        (ng.reduce_mean, np.mean, np.array([0])),
+        (ng.reduce_mean, np.mean, np.array([0, 2])),
     ],
 )
 def test_reduce_mean_op(ng_api_helper, numpy_function, reduction_axes):
@@ -99,7 +97,7 @@ def test_reduce_mean_op(ng_api_helper, numpy_function, reduction_axes):
     input_data = np.random.randn(*shape).astype(np.float32)

     expected = numpy_function(input_data, axis=tuple(reduction_axes))
-    result = run_op_node([input_data, reduction_axes], ng_api_helper)
+    result = run_op_node([input_data], ng_api_helper, reduction_axes)
     assert np.allclose(result, expected)

@@ -19,14 +19,21 @@ import pytest
 from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info

 from tests.runtime import get_runtime
-from tests.test_onnx.utils import all_arrays_equal, get_node_model, import_onnx_model, run_model, run_node
-from tests import xfail_issue_35926, xfail_issue_35927
+from tests.test_onnx.utils import (
+    all_arrays_equal,
+    get_node_model,
+    import_onnx_model,
+    run_model,
+    run_node,
+)
+from tests import xfail_issue_35927


-@xfail_issue_35926
 def test_reshape():
-    input_data = np.arange(2560).reshape([16, 4, 4, 10])
-    reshape_node = onnx.helper.make_node("Reshape", inputs=["x"], outputs=["y"], shape=(256, 10))
+    input_data = np.arange(2560, dtype=np.int32).reshape([16, 4, 4, 10])
+    reshape_node = onnx.helper.make_node(
+        "Reshape", inputs=["x"], outputs=["y"], shape=(256, 10)
+    )
     expected_output = input_data.reshape([256, 10])

     ng_results = run_node(reshape_node, [input_data], opset_version=4)
@@ -50,10 +57,15 @@ def test_reshape_opset5():
         inputs=[],
         outputs=["const_shape"],
         value=onnx.helper.make_tensor(
-            name="const_tensor", data_type=onnx.TensorProto.INT64, dims=shape.shape, vals=shape.flatten()
+            name="const_tensor",
+            data_type=onnx.TensorProto.INT64,
+            dims=shape.shape,
+            vals=shape.flatten(),
         ),
     )
-    reshape_node = onnx.helper.make_node("Reshape", inputs=["data", "const_shape"], outputs=["reshaped"])
+    reshape_node = onnx.helper.make_node(
+        "Reshape", inputs=["data", "const_shape"], outputs=["reshaped"]
+    )

     graph = make_graph(
         [const_node, reshape_node],
@@ -72,17 +84,16 @@ def test_reshape_opset5():
     assert np.array_equal(ng_results[0], expected_output)


-@xfail_issue_35926
+@pytest.mark.xfail(reason="RuntimeError: Reshape z has dynamic second input!")
 def test_reshape_opset5_param_err():
     original_shape = [2, 3, 4]
-    output_shape = np.array([4, 2, 3], dtype=np.int64)
+    output_shape = np.array([4, 2, 3], dtype=np.int32)
     input_data = np.random.random_sample(original_shape).astype(np.float32)
     reshape_node = onnx.helper.make_node("Reshape", inputs=["x", "y"], outputs=["z"])
     ng_result = run_node(reshape_node, [input_data, output_shape], opset_version=5)
     assert ng_result[0].shape == output_shape


-@xfail_issue_35926
 @pytest.mark.parametrize(
     "axis,expected_output",
     [
@@ -94,7 +105,7 @@ def test_reshape_opset5_param_err():
     ],
 )
 def test_flatten(axis, expected_output):
-    data = np.arange(120).reshape([2, 3, 4, 5])
+    data = np.arange(120, dtype=np.int32).reshape([2, 3, 4, 5])
     node = onnx.helper.make_node("Flatten", inputs=["x"], outputs=["y"], axis=axis)
     ng_results = run_node(node, [data])
     assert np.array_equal(ng_results, [expected_output])
@@ -108,16 +119,17 @@ def test_flatten_exception():
         run_node(node, [data])


-@xfail_issue_35926
 def test_transpose():
-    data = np.arange(120).reshape([2, 3, 4, 5])
+    data = np.arange(120, dtype=np.int32).reshape([2, 3, 4, 5])

     node = onnx.helper.make_node("Transpose", inputs=["x"], outputs=["y"])
     expected_output = data.T
     ng_results = run_node(node, [data])
     assert np.array_equal(ng_results, [expected_output])

-    node = onnx.helper.make_node("Transpose", inputs=["x"], outputs=["y"], perm=(3, 1, 0, 2))
+    node = onnx.helper.make_node(
+        "Transpose", inputs=["x"], outputs=["y"], perm=(3, 1, 0, 2)
+    )
     expected_output = np.transpose(data, axes=(3, 1, 0, 2))
     ng_results = run_node(node, [data])
     assert np.array_equal(ng_results, [expected_output])
@@ -172,10 +184,9 @@ def test_slice_opset1():
     assert np.array_equal(ng_results, [expected_output])


-@xfail_issue_35926
 def test_concat():
-    a = np.array([[1, 2], [3, 4]])
-    b = np.array([[5, 6]])
+    a = np.array([[1, 2], [3, 4]], dtype=np.int32)
+    b = np.array([[5, 6]], dtype=np.int32)

     node = onnx.helper.make_node("Concat", inputs=["x"], outputs=["z"], axis=0)
     ng_results = run_node(node, [a])
@@ -186,8 +197,8 @@ def test_concat():
     ng_results = run_node(node, [a, b])
     assert np.array_equal(ng_results, [expected_output])

-    a = np.array([[1, 2], [3, 4]])
-    b = np.array([[5, 6]]).T
+    a = np.array([[1, 2], [3, 4]], dtype=np.int32)
+    b = np.array([[5, 6]], dtype=np.int32).T
     expected_output = np.concatenate((a, b), axis=1)
     node = onnx.helper.make_node("Concat", inputs=["x", "y"], outputs=["z"], axis=1)
     ng_results = run_node(node, [a, b])
@@ -196,22 +207,29 @@ def test_concat():
     test_cases = {
         "1d": ([1, 2], [3, 4]),
         "2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]),
-        "3d": ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]),
+        "3d": (
+            [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
+            [[[9, 10], [11, 12]], [[13, 14], [15, 16]]],
+        ),
     }

     for _, values in test_cases.items():
         values = [np.asarray(v) for v in values]
         for i in range(len(values[0].shape)):
             in_args = ["value" + str(k) for k in range(len(values))]
-            node = onnx.helper.make_node("Concat", inputs=list(in_args), outputs=["output"], axis=i,)
+            node = onnx.helper.make_node(
+                "Concat",
+                inputs=list(in_args),
+                outputs=["output"],
+                axis=i,
+            )
             expected_output = np.concatenate(values, i)
-            ng_results = run_node(node, list(values))
+            ng_results = run_node(node, np.array(values, dtype=np.int32))
             assert np.array_equal(ng_results, [expected_output])


-@xfail_issue_35926
 def test_squeeze():
-    data = np.arange(6).reshape([1, 2, 3, 1])
+    data = np.arange(6, dtype=np.int32).reshape([1, 2, 3, 1])
     expected_output = data.reshape([2, 3])

     node = onnx.helper.make_node("Squeeze", inputs=["x"], outputs=["y"], axes=[0, 3])
@@ -243,39 +261,59 @@ def test_unsqueeze():
     assert np.array_equal(ng_results, [expected_output])


-@xfail_issue_35926
 @pytest.mark.parametrize(
     "node, expected_output",
     [
         # Split into 2 equal parts along axis=0
         (
             onnx.helper.make_node("Split", inputs=["x"], outputs=["y", "z"], axis=0),
-            [np.array([[0, 1, 2, 3]]), np.array([[4, 5, 6, 7]])],
+            [
+                np.array([[0, 1, 2, 3]], dtype=np.int32),
+                np.array([[4, 5, 6, 7]], dtype=np.int32),
+            ],
         ),
         # Default, split along axis=0 into 2 equal parts
         (
             onnx.helper.make_node("Split", inputs=["x"], outputs=["y", "z"]),
-            [np.array([[0, 1, 2, 3]]), np.array([[4, 5, 6, 7]])],
+            [
+                np.array([[0, 1, 2, 3]], dtype=np.int32),
+                np.array([[4, 5, 6, 7]], dtype=np.int32),
+            ],
         ),
         # Split into 2 equal parts along axis=1
         (
             onnx.helper.make_node("Split", inputs=["x"], outputs=["a", "b"], axis=1),
-            [np.array([[0, 1], [4, 5]]), np.array([[2, 3], [6, 7]])],
+            [
+                np.array([[0, 1], [4, 5]], dtype=np.int32),
+                np.array([[2, 3], [6, 7]], dtype=np.int32),
+            ],
         ),
         # Split into 4 equal parts along axis=1
         (
-            onnx.helper.make_node("Split", inputs=["x"], outputs=["a", "b", "c", "d"], axis=1),
-            [np.array([[0], [4]]), np.array([[1], [5]]), np.array([[2], [6]]), np.array([[3], [7]])],
+            onnx.helper.make_node(
+                "Split", inputs=["x"], outputs=["a", "b", "c", "d"], axis=1
+            ),
+            [
+                np.array([[0], [4]], dtype=np.int32),
+                np.array([[1], [5]], dtype=np.int32),
+                np.array([[2], [6]], dtype=np.int32),
+                np.array([[3], [7]], dtype=np.int32),
+            ],
         ),
         # Split into 2 unequal parts along axis=1
         (
-            onnx.helper.make_node("Split", inputs=["x"], outputs=["a", "b"], axis=1, split=(3, 1)),
-            [np.array([[0, 1, 2], [4, 5, 6]]), np.array([[3], [7]])],
+            onnx.helper.make_node(
+                "Split", inputs=["x"], outputs=["a", "b"], axis=1, split=(3, 1)
+            ),
+            [
+                np.array([[0, 1, 2], [4, 5, 6]], dtype=np.int32),
+                np.array([[3], [7]], dtype=np.int32),
+            ],
         ),
     ],
 )
 def test_split_2d(node, expected_output):
-    data = np.arange(8).reshape(2, 4)
+    data = np.arange(8, dtype=np.int32).reshape(2, 4)
     ng_results = run_node(node, [data])
     assert all_arrays_equal(ng_results, expected_output)

@@ -292,7 +330,9 @@ def test_split_1d():
     ng_results = run_node(node, [data])
     assert all_arrays_equal(ng_results, expected_outputs)

-    node = onnx.helper.make_node("Split", inputs=["input"], outputs=["y", "z", "w"], axis=0, split=[2, 3, 1])
+    node = onnx.helper.make_node(
+        "Split", inputs=["input"], outputs=["y", "z", "w"], axis=0, split=[2, 3, 1]
+    )
     expected_outputs = [
         np.array([1.0, 2.0]).astype(np.float32),
         np.array([3.0, 4.0, 5.0]).astype(np.float32),
@@ -313,7 +353,9 @@ def test_split_1d():
     ng_results = run_node(node, [data])
     assert all_arrays_equal(ng_results, expected_outputs)

-    node = onnx.helper.make_node("Split", inputs=["input"], outputs=["y", "z"], split=[2, 4])
+    node = onnx.helper.make_node(
+        "Split", inputs=["input"], outputs=["y", "z"], split=[2, 4]
+    )
     expected_outputs = [
         np.array([1.0, 2.0]).astype(np.float32),
         np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),
@@ -328,9 +370,13 @@ def test_depth_to_space():
     data = np.random.random_sample(shape).astype(np.float32)
     tmp = np.reshape(data, [b, blocksize, blocksize, c // (blocksize ** 2), h, w])
     tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
-    expected_output = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])
+    expected_output = np.reshape(
+        tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]
+    )

-    node = onnx.helper.make_node("DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blocksize)
+    node = onnx.helper.make_node(
+        "DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blocksize
+    )
     ng_results = run_node(node, [data])
     assert np.array_equal(ng_results, [expected_output])

@@ -347,7 +393,16 @@ def test_depth_to_space():
     ).astype(np.float32)
     # (1, 1, 4, 6) output tensor
     expected_output = np.array(
-        [[[[0, 6, 1, 7, 2, 8], [12, 18, 13, 19, 14, 20], [3, 9, 4, 10, 5, 11], [15, 21, 16, 22, 17, 23]]]]
+        [
+            [
+                [
+                    [0, 6, 1, 7, 2, 8],
+                    [12, 18, 13, 19, 14, 20],
+                    [3, 9, 4, 10, 5, 11],
+                    [15, 21, 16, 22, 17, 23],
+                ]
+            ]
+        ]
     ).astype(np.float32)

     ng_results = run_node(node, [data])

@@ -22,20 +22,18 @@ from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
 from ngraph.exceptions import NgraphTypeError
 from tests.runtime import get_runtime
 from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node
-from tests import (xfail_issue_35926,
-                   xfail_issue_35929,
+from tests import (xfail_issue_35929,
                    xfail_issue_34323,
                    xfail_issue_35930,
                    xfail_issue_35932)


-@xfail_issue_35926
 @pytest.mark.parametrize(
     "input_data",
     [
-        np.array([-4, 0, 5, -10]),
-        np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]]),
-        np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]),
+        np.array([-4, 0, 5, -10], dtype=np.float32),
+        np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]], dtype=np.float32),
+        np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]], dtype=np.float32),
     ],
 )
 def test_abs(input_data):
@@ -93,13 +91,12 @@ def test_log(input_data):
     assert np.allclose(ng_results, [expected_output])


-@xfail_issue_35926
 @pytest.mark.parametrize(
     "input_data",
     [
-        np.array([-4, 0, 5, -10]),
-        np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]]),
-        np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]),
+        np.array([-4, 0, 5, -10], dtype=np.float32),
+        np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]], dtype=np.float32),
+        np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]], dtype=np.float32),
     ],
 )
 def test_neg(input_data):

@@ -20,24 +20,35 @@ import onnx
 import pytest

 from tests.test_onnx.utils import run_node
-from tests import xfail_issue_35926


-@xfail_issue_35926
-@pytest.mark.parametrize("onnx_op,numpy_func", [("Sum", np.add), ("Min", np.minimum), ("Max", np.maximum)])
+@pytest.mark.parametrize(
+    "onnx_op,numpy_func", [("Sum", np.add), ("Min", np.minimum), ("Max", np.maximum)]
+)
 def test_variadic(onnx_op, numpy_func):
-    data = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])]
-    node = onnx.helper.make_node(onnx_op, inputs=["data_0", "data_1", "data_2"], outputs=["y"])
+    data = [
+        np.array([1, 2, 3], dtype=np.int32),
+        np.array([4, 5, 6], dtype=np.int32),
+        np.array([7, 8, 9], dtype=np.int32),
+    ]
+    node = onnx.helper.make_node(
+        onnx_op, inputs=["data_0", "data_1", "data_2"], outputs=["y"]
+    )
     expected_output = reduce(numpy_func, data)

     ng_results = run_node(node, data)
     assert np.array_equal(ng_results, [expected_output])


-@xfail_issue_35926
 def test_mean():
-    data = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])]
-    node = onnx.helper.make_node("Mean", inputs=["data_0", "data_1", "data_2"], outputs=["y"])
+    data = [
+        np.array([1, 2, 3], dtype=np.int32),
+        np.array([4, 5, 6], dtype=np.int32),
+        np.array([7, 8, 9], dtype=np.int32),
+    ]
+    node = onnx.helper.make_node(
+        "Mean", inputs=["data_0", "data_1", "data_2"], outputs=["y"]
+    )
     expected_output = reduce(np.add, data) / len(data)

     ng_results = run_node(node, data)