Enable ngraph python tests in OpenVINO-ONNX CI (#1603)
* Enable ngraph python tests
* Refactor and unify ngraph with onnx python tests
* Revert deprecated test cases
* Set ngraph and onnx python tests as one test suite execution
* Change unstrict Xfails to strict ones
* Update after review:
  - add model zoo to onnx tests
  - improvements of tests
* Revert mounting zoo models dir

Co-authored-by: Michał Karzyński <4430709+postrational@users.noreply.github.com>
parent f9023ff7da
commit 054a7cdf8d
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
+import pytest

 # test.BACKEND_NAME is a configuration variable determining which
 # nGraph backend tests will use. It's set during pytest configuration time.
@@ -24,3 +25,71 @@ BACKEND_NAME = None
 # configuration time. See `pytest_configure` hook in `conftest.py` for more
 # details.
+ADDITIONAL_MODELS_DIR = None
+
+
+def xfail_test(reason="Mark the test as expected to fail", strict=True):
+    return pytest.mark.xfail(reason=reason, strict=strict)
+
+
+xfail_issue_34314 = xfail_test(reason="RuntimeError: RNNCell operation has a form that is not "
+                                      "supported.RNNCell_21204 should be converted to RNNCellIE operation")
+skip_segfault = pytest.mark.skip(reason="Segmentation fault error")
+xfail_issue_34323 = xfail_test(reason="RuntimeError: data [value] doesn't exist")
+xfail_issue_34327 = xfail_test(reason="RuntimeError: '<value>' layer has different "
+                                      "IN and OUT channels number")
+xfail_issue_35893 = xfail_test(reason="ValueError: could not broadcast input array")
+xfail_issue_35911 = xfail_test(reason="Assertion error: Pad model mismatch error")
+xfail_issue_35912 = xfail_test(reason="RuntimeError: Error of validate layer: B with type: "
+                                      "Pad. Cannot parse parameter pads_end from IR for layer B. "
+                                      "Value -1,0 cannot be casted to int.")
+xfail_issue_35914 = xfail_test(reason="IndexError: too many indices for array: "
+                                      "array is 0-dimensional, but 1 were indexed")
+xfail_issue_35915 = xfail_test(reason="RuntimeError: Eltwise node with unsupported combination "
+                                      "of input and output types")
+xfail_issue_35916 = xfail_test(reason="RuntimeError: Unsupported input dims count for layer Z")
+xfail_issue_35917 = xfail_test(reason="RuntimeError: Unsupported input dims count for "
+                                      "layer MatMul")
+xfail_issue_35918 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
+                                      "Mismatched attribute type in 'test_node : alpha'")
+xfail_issue_35921 = xfail_test(reason="ValueError - shapes mismatch in gemm")
+
+xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported")
+xfail_issue_35924 = xfail_test(reason="Assertion error - elu results mismatch")
+xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch")
+xfail_issue_35926 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format I64 is "
+                                      "not supported yet...")
+xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
+xfail_issue_35929 = xfail_test(reason="RuntimeError: Incorrect precision f64!")
+xfail_issue_35930 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
+                                      "Required attribute 'to' is missing.")
+xfail_issue_35932 = xfail_test(reason="Assertion error - logsoftmax results mismatch")
+xfail_issue_36437 = xfail_test(reason="RuntimeError: Cannot find blob with name: y")
+xfail_issue_36476 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format U32 is "
+                                      "not supported yet...")
+xfail_issue_36478 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format U64 is "
+                                      "not supported yet...")
+xfail_issue_36479 = xfail_test(reason="Assertion error - basic computation on ndarrays mismatch")
+xfail_issue_36480 = xfail_test(reason="RuntimeError: [NOT_FOUND] Unsupported property dummy_option "
+                                      "by CPU plugin")
+xfail_issue_36481 = xfail_test(reason="TypeError: _get_node_factory() takes from 0 to 1 positional "
+                                      "arguments but 2 were given")
+xfail_issue_36483 = xfail_test(reason="RuntimeError: Unsupported primitive of type: "
+                                      "Ceiling name: Ceiling_22669")
+xfail_issue_36485 = xfail_test(reason="RuntimeError: Check 'm_group >= 1' failed at "
+                                      "/openvino/ngraph/src/ngraph/op/fused/shuffle_channels.cpp:77:")
+xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted "
+                                      "to HardSigmoid_IE")
+xfail_issue_36487 = xfail_test(reason="Assertion error - mvn operator computation mismatch")
+
+
+# Model Zoo issues:
+xfail_issue_36533 = xfail_test(reason="AssertionError: zoo models results mismatch")
+xfail_issue_36534 = xfail_test(reason="RuntimeError: node input index is out of range")
+xfail_issue_36535 = xfail_test(reason="RuntimeError: get_shape was called on a descriptor::Tensor "
+                                      "with dynamic shape")
+xfail_issue_36536 = xfail_test(reason="RuntimeError: can't protect")
+xfail_issue_36537 = xfail_test(reason="ngraph.exceptions.UserInputError: (Provided tensor's shape: "
+                                      "<value> does not match the expected: <value>")
+xfail_issue_36538 = xfail_test(reason="RuntimeError: Check 'PartialShape::broadcast_merge_into( pshape, "
+                                      "node->get_input_partial_shape(i), autob)' failed at "
+                                      "/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:")
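Note: the markers defined above are ordinary pytest marks, so a suite module can apply them either to a whole test or to a single parameter. A minimal sketch of both patterns (the test names and bodies here are illustrative, not part of the commit):

```python
import numpy as np
import pytest

from tests import xfail_issue_34323, xfail_issue_35929


@xfail_issue_34323  # strict by default: an unexpected pass fails the run
def test_whole_case():
    assert False  # placeholder body


@pytest.mark.parametrize("dtype", [
    np.float32,
    # only the float64 parameter is expected to fail
    pytest.param(np.float64, marks=xfail_issue_35929),
])
def test_single_parameter(dtype):
    assert np.dtype(dtype).kind == "f"
```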
@@ -23,6 +23,13 @@ from ngraph.exceptions import UserInputError
 from ngraph.impl import Function, PartialShape, Shape
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
+from tests import (xfail_issue_34323,
+                   xfail_issue_35929,
+                   xfail_issue_35926,
+                   xfail_issue_36476,
+                   xfail_issue_36478,
+                   xfail_issue_36479,
+                   xfail_issue_36480)


 def test_ngraph_function_api():
@@ -53,15 +60,15 @@ def test_ngraph_function_api():
     "dtype",
     [
         np.float32,
-        np.float64,
-        np.int8,
+        pytest.param(np.float64, marks=xfail_issue_35929),
+        pytest.param(np.int8, marks=xfail_issue_36479),
         np.int16,
         np.int32,
-        np.int64,
-        np.uint8,
-        np.uint16,
-        np.uint32,
-        np.uint64,
+        pytest.param(np.int64, marks=xfail_issue_35926),
+        pytest.param(np.uint8, marks=xfail_issue_36479),
+        pytest.param(np.uint16, marks=xfail_issue_36479),
+        pytest.param(np.uint32, marks=xfail_issue_36476),
+        pytest.param(np.uint64, marks=xfail_issue_36478),
     ],
 )
 def test_simple_computation_on_ndarrays(dtype):
@@ -107,6 +114,7 @@ def test_serialization():
     pass


+@xfail_issue_34323
 def test_broadcast_1():
     input_data = np.array([1, 2, 3])

@@ -116,6 +124,7 @@ def test_broadcast_1():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_broadcast_2():
     input_data = np.arange(4)
     new_shape = [3, 4, 2, 4]
@@ -124,6 +133,7 @@ def test_broadcast_2():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_broadcast_3():
     input_data = np.array([1, 2, 3])
     new_shape = [3, 3]
@@ -134,6 +144,7 @@ def test_broadcast_3():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "destination_type, input_data",
     [(bool, np.zeros((2, 2), dtype=int)), ("boolean", np.zeros((2, 2), dtype=int))],
@@ -148,10 +159,10 @@ def test_convert_to_bool(destination_type, input_data):
 @pytest.mark.parametrize(
     "destination_type, rand_range, in_dtype, expected_type",
     [
-        (np.float32, (-8, 8), np.int32, np.float32),
-        (np.float64, (-16383, 16383), np.int64, np.float64),
-        ("f32", (-8, 8), np.int32, np.float32),
-        ("f64", (-16383, 16383), np.int64, np.float64),
+        pytest.param(np.float32, (-8, 8), np.int32, np.float32, marks=xfail_issue_34323),
+        pytest.param(np.float64, (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
+        pytest.param("f32", (-8, 8), np.int32, np.float32, marks=xfail_issue_34323),
+        pytest.param("f64", (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
     ],
 )
 def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
@@ -163,6 +174,7 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type)
     assert np.array(result).dtype == expected_type


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "destination_type, expected_type",
     [
@@ -185,6 +197,7 @@ def test_convert_to_int(destination_type, expected_type):
     assert np.array(result).dtype == expected_type


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "destination_type, expected_type",
     [
@@ -262,6 +275,7 @@ def test_constant_get_data_unsigned_integer(data_type):
     assert np.allclose(input_data, retrieved_data)


+@xfail_issue_36480
 def test_backend_config():
     dummy_config = {"dummy_option": "dummy_value"}
     # Expect no throw
@@ -269,6 +283,7 @@ def test_backend_config():
     runtime.set_config(dummy_config)


+@xfail_issue_34323
 def test_result():
     node = [[11, 10], [1, 8], [3, 4]]
     result = run_op_node([node], ng.result)
@@ -20,8 +20,10 @@ import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.test_ops import convolution2d
 from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_34323


+@xfail_issue_34323
 def test_convolution_2d():

     # input_x should have shape N(batch) x C x H x W
@@ -212,6 +214,7 @@ def test_convolution_backprop_data():
     )


+@xfail_issue_34323
 def test_convolution_v1():
     input_tensor = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
     filters = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)
@@ -18,6 +18,7 @@ import numpy as np
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_35926, xfail_issue_34323


 def test_reverse_sequence():
@@ -165,6 +166,7 @@ def test_pad_edge():
     assert np.allclose(result, expected)


+@xfail_issue_35926
 def test_pad_constant():
     input_data = np.arange(1, 13).reshape([3, 4])
     pads_begin = np.array([0, 1], dtype=np.int32)
@@ -189,6 +191,7 @@ def test_pad_constant():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_select():
     cond = [[False, False], [True, False], [True, True]]
     then_node = [[-1, 0], [1, 2], [3, 4]]
@@ -19,8 +19,10 @@ import numpy as np
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_34323, xfail_issue_35929


+@xfail_issue_34323
 def test_lrn():
     input_image_shape = (2, 3, 2, 1)
     input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype("f")
@@ -56,6 +58,7 @@ def test_lrn():
     )


+@xfail_issue_34323
 def test_lrn_factory():
     alpha = 0.0002
     beta = 0.5
@@ -101,6 +104,7 @@ def test_lrn_factory():
     assert np.allclose(result, excepted)


+@xfail_issue_35929
 def test_batch_norm_inference():
     data = [[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]
     gamma = [2.0, 3.0, 4.0]
@@ -20,6 +20,7 @@ import ngraph as ng
 from ngraph.impl import AxisSet, Function, Shape, Type
 from ngraph.impl.op import Constant, Parameter
 from tests.runtime import get_runtime
+from tests import xfail_issue_36483, xfail_issue_34323


 def binary_op(op_str, a, b):
@@ -339,6 +340,7 @@ def test_atan():
     unary_op_exec(op_str, input_list)


+@xfail_issue_36483
 def test_ceiling():
     input_list = [0.5, 0, 0.4, 0.5]
     op_str = "Ceiling"
@@ -450,6 +452,7 @@ def test_broadcast():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_constant():
     element_type = Type.f32
     parameter_list = []
@@ -21,6 +21,7 @@ import pytest
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_34323


 @pytest.mark.parametrize(
@@ -201,6 +202,7 @@ def test_binary_operators_with_scalar(operator, numpy_function):
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_multiply():
     A = np.arange(48).reshape((8, 1, 6, 1))
     B = np.arange(35).reshape((7, 1, 5))
@@ -211,6 +213,7 @@ def test_multiply():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_power_v1():
     A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1))
     B = np.arange(20, dtype=np.float32).reshape((4, 1, 5))
@@ -18,8 +18,17 @@ import pytest

 import ngraph as ng
 from tests.runtime import get_runtime
+from tests import (xfail_issue_34323,
+                   skip_segfault,
+                   xfail_issue_34327,
+                   xfail_issue_36485,
+                   xfail_issue_35923,
+                   xfail_issue_36486,
+                   xfail_issue_34314,
+                   xfail_issue_36487)


+@xfail_issue_34323
 def test_elu_operator_with_scalar_and_array():
     runtime = get_runtime()

@@ -51,7 +60,7 @@ def test_elu_operator_with_scalar():
     assert np.allclose(result, expected)


-@pytest.mark.skip(reason="Causes segmentation fault")
+@skip_segfault
 def test_fake_quantize():
     runtime = get_runtime()

@@ -142,6 +151,7 @@ def test_depth_to_space():
     assert np.allclose(result, expected)


+@xfail_issue_34327
 def test_space_to_batch():
     runtime = get_runtime()

@@ -178,6 +188,7 @@ def test_space_to_batch():
     assert np.allclose(result, expected)


+@xfail_issue_34327
 def test_batch_to_space():
     runtime = get_runtime()

@@ -231,6 +242,7 @@ def test_gelu_operator_with_parameters():
     assert np.allclose(result, expected, 0.007, 0.007)


+@xfail_issue_34323
 def test_gelu_operator_with_array():
     runtime = get_runtime()

@@ -263,6 +275,7 @@ def test_clamp_operator():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_clamp_operator_with_array():
     runtime = get_runtime()

@@ -314,6 +327,7 @@ def test_squared_difference_operator():
     assert np.allclose(result, expected)


+@xfail_issue_36485
 def test_shuffle_channels_operator():
     runtime = get_runtime()

@@ -404,6 +418,7 @@ def test_grn_operator():
     assert np.allclose(result, expected)


+@xfail_issue_35923
 def test_prelu_operator():
     runtime = get_runtime()

@@ -441,6 +456,7 @@ def test_selu_operator():
     assert np.allclose(result, expected)


+@xfail_issue_36486
 def test_hard_sigmoid_operator():
     runtime = get_runtime()

@@ -462,6 +478,7 @@ def test_hard_sigmoid_operator():
     assert np.allclose(result, expected)


+@xfail_issue_36487
 def test_mvn_operator():
     runtime = get_runtime()

@@ -521,6 +538,7 @@ def test_mvn_operator():
     assert np.allclose(result, expected)


+@xfail_issue_34314
 def test_space_to_depth_operator():
     runtime = get_runtime()
@@ -18,8 +18,10 @@ import pytest

 import ngraph as ng
 from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_34323


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "shape_a, shape_b, transpose_a, transpose_b",
     [
@@ -17,8 +17,10 @@ import numpy as np

 import ngraph as ng
 from tests.runtime import get_runtime
+from tests import xfail_issue_34323


+@xfail_issue_34323
 def test_split():
     runtime = get_runtime()
     input_tensor = ng.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32))
@@ -32,6 +34,7 @@ def test_split():
     assert np.allclose(split_results, expected_results)


+@xfail_issue_34323
 def test_variadic_split():
     runtime = get_runtime()
     input_tensor = ng.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32))
@@ -19,6 +19,7 @@ import pytest
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node, run_op_numeric_data
+from tests import xfail_issue_34323, xfail_issue_35929


 def test_concat():
@@ -36,6 +37,7 @@ def test_concat():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 @pytest.mark.parametrize("val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))])
 def test_constant_from_bool(val_type, value):
     expected = np.array(value, dtype=val_type)
@@ -46,16 +48,16 @@ def test_constant_from_bool(val_type, value):
 @pytest.mark.parametrize(
     "val_type, value",
     [
-        (np.float32, np.float32(0.1234)),
-        (np.float64, np.float64(0.1234)),
-        (np.int8, np.int8(-63)),
-        (np.int16, np.int16(-12345)),
-        (np.int32, np.int32(-123456)),
-        (np.int64, np.int64(-1234567)),
-        (np.uint8, np.uint8(63)),
-        (np.uint16, np.uint16(12345)),
-        (np.uint32, np.uint32(123456)),
-        (np.uint64, np.uint64(1234567)),
+        pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_34323),
+        pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_35929),
+        pytest.param(np.int8, np.int8(-63), marks=xfail_issue_34323),
+        pytest.param(np.int16, np.int16(-12345), marks=xfail_issue_34323),
+        pytest.param(np.int32, np.int32(-123456), marks=xfail_issue_34323),
+        pytest.param(np.int64, np.int64(-1234567), marks=xfail_issue_34323),
+        pytest.param(np.uint8, np.uint8(63), marks=xfail_issue_34323),
+        pytest.param(np.uint16, np.uint16(12345), marks=xfail_issue_34323),
+        pytest.param(np.uint32, np.uint32(123456), marks=xfail_issue_34323),
+        pytest.param(np.uint64, np.uint64(1234567), marks=xfail_issue_34323),
     ],
 )
 def test_constant_from_scalar(val_type, value):
@@ -64,7 +66,9 @@ def test_constant_from_scalar(val_type, value):
     assert np.allclose(result, expected)


-@pytest.mark.parametrize("val_type", [np.float32, np.float64])
+@pytest.mark.parametrize("val_type",
+                         [pytest.param(np.float32, marks=xfail_issue_34323),
+                          pytest.param(np.float64, marks=xfail_issue_35929)])
 def test_constant_from_float_array(val_type):
     np.random.seed(133391)
     input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
@@ -72,6 +76,7 @@ def test_constant_from_float_array(val_type):
     assert np.allclose(result, input_data)


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "val_type, range_start, range_end",
     [
@@ -118,6 +123,7 @@ def test_broadcast_bidirectional():
     assert node.get_output_size() == 1


+@xfail_issue_34323
 def test_gather():
     input_data = np.array([1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32).reshape((3, 3))
     input_indices = np.array([0, 2], np.int64).reshape(1, 2)
@@ -132,6 +138,7 @@ def test_gather():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_transpose():
     input_tensor = np.arange(3 * 3 * 224 * 224).reshape((3, 3, 224, 224))
     input_order = np.array([0, 2, 3, 1])
@@ -143,6 +150,7 @@ def test_transpose():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_tile():
     input_tensor = np.arange(6).reshape((2, 1, 3))
     repeats = np.array([2, 1])
@@ -154,6 +162,7 @@ def test_tile():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_strided_slice():
     input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4))
     begin = np.array([1, 0], dtype=np.int64)
@@ -180,6 +189,7 @@ def test_strided_slice():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_reshape_v1():
     A = np.arange(1200, dtype=np.float32).reshape((2, 5, 5, 24))
     shape = np.array([0, -1, 4])
@@ -192,6 +202,7 @@ def test_reshape_v1():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_shape_of():
     input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)

@@ -18,8 +18,10 @@ import pytest

 import ngraph as ng
 from tests.test_ngraph.util import run_op_node, run_op_numeric_data
+from tests import xfail_issue_35929, xfail_issue_34323


+@xfail_issue_35929
 @pytest.mark.parametrize(
     "ng_api_fn, numpy_fn, range_start, range_end",
     [
@@ -56,6 +58,7 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
     assert np.allclose(result, expected, rtol=0.001)


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "ng_api_fn, numpy_fn, input_data",
     [
@@ -90,6 +93,7 @@ def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
     assert np.allclose(result, expected)


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "input_data", [(np.array([True, False, True, False])), (np.array(True)), (np.array(False))]
 )
@@ -103,6 +107,7 @@ def test_logical_not(input_data):
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_sigmoid():
     input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32)
     result = run_op_node([input_data], ng.sigmoid)
@@ -115,6 +120,7 @@ def test_sigmoid():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_softmax():
     axis = 0
     input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
@@ -126,6 +132,7 @@ def test_softmax():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_erf():
     input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32)
     expected = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0]
@@ -19,8 +19,10 @@ import pytest
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_34323


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "ng_api_helper, numpy_function, reduction_axes",
     [
@@ -48,6 +50,7 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
     assert np.allclose(result, expected)


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "ng_api_helper, numpy_function, reduction_axes",
     [
@@ -81,6 +84,7 @@ def test_topk():
     assert list(node.get_output_shape(1)) == [6, 3, 10, 24]


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "ng_api_helper, numpy_function, reduction_axes",
     [
@@ -158,6 +162,7 @@ def test_roi_align():
     assert list(node.get_output_shape(0)) == expected_shape


+@xfail_issue_34323
 @pytest.mark.parametrize(
     "input_shape, cumsum_axis, reverse",
     [([5, 2], 0, False), ([5, 2], 1, False), ([5, 2, 6], 2, False), ([5, 2], 0, True)],
@@ -177,6 +182,7 @@ def test_cum_sum(input_shape, cumsum_axis, reverse):
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_normalize_l2():
     input_shape = [1, 2, 3, 4]
     input_data = np.arange(np.prod(input_shape)).reshape(input_shape).astype(np.float32)
@@ -18,6 +18,7 @@ import numpy as np
 import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
+from tests import xfail_issue_34323


 def test_onehot():
@@ -32,6 +33,7 @@ def test_onehot():
     assert np.allclose(result, expected)


+@xfail_issue_34323
 def test_one_hot():
     data = np.array([0, 1, 2], dtype=np.int32)
     depth = 2
@@ -44,6 +46,7 @@ def test_one_hot():
     assert np.allclose(result, excepted)


+@xfail_issue_34323
 def test_range():
     start = 5
     stop = 35
@@ -17,7 +17,8 @@
 import numpy as np
 import onnx

-from tests.test_onnx.utils import run_node, xfail_issue_35893
+from tests.test_onnx.utils import run_node
+from tests import xfail_issue_35893


 def make_batch_norm_node(**node_attributes):
@@ -18,7 +18,8 @@ import onnx
 import pytest
 from onnx.helper import make_graph, make_model, make_tensor_value_info

-from tests.test_onnx.utils import run_model, skip_segfault
+from tests.test_onnx.utils import run_model
+from tests import skip_segfault


 def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **node_attributes):
@@ -19,13 +19,8 @@ import pytest
 from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info

 from tests.runtime import get_runtime
-from tests.test_onnx.utils import (get_node_model,
-                                   import_onnx_model,
-                                   run_model,
-                                   run_node,
-                                   xfail_issue_35911,
-                                   xfail_issue_35912
-                                   )
+from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node
+from tests import xfail_issue_35911, xfail_issue_35912


 @pytest.fixture
@@ -17,7 +17,8 @@ import numpy as np
 import onnx
 import pytest

-from tests.test_onnx.utils import run_node, xfail_issue_35914, xfail_issue_35915
+from tests.test_onnx.utils import run_node
+from tests import xfail_issue_35914, xfail_issue_35915


 @pytest.mark.parametrize(
@@ -16,16 +16,11 @@
 import numpy as np
 import onnx
 from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info
-import pytest

-from tests.runtime import get_runtime
-from tests.test_onnx.utils import (import_onnx_model,
-                                   xfail_issue_35916,
-                                   xfail_issue_35917,
-                                   xfail_issue_35918,
-                                   xfail_issue_35921
-                                   )
+import pytest
+from tests.test_onnx.utils import import_onnx_model
+from tests import xfail_issue_35916, xfail_issue_35917, xfail_issue_35918, xfail_issue_35921


 def make_onnx_model_for_matmul_op(input_left, input_right):
@@ -17,7 +17,8 @@ import numpy as np
 import onnx
 import pytest

-from tests.test_onnx.utils import run_node, xfail_issue_35918, xfail_issue_35923, xfail_issue_35924
+from tests.test_onnx.utils import run_node
+from tests import xfail_issue_35918, xfail_issue_35923, xfail_issue_35924


 def import_and_compute(op_type, input_data, **node_attrs):
@@ -17,10 +17,8 @@ import numpy as np
 import onnx
 import pytest

-from tests.test_onnx.utils import (run_node,
-                                   unstrict_xfail_issue_35925,
-                                   strict_xfail_issue_35925,
-                                   xfail_issue_36437)
+from tests.test_onnx.utils import run_node
+from tests import xfail_issue_35925, xfail_issue_36437

 reduce_data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
 reduce_axis_parameters = [
@@ -49,17 +47,34 @@ def import_and_compute(op_type, input_data, **node_attrs):
     return run_node(node, data_inputs).pop()


-@unstrict_xfail_issue_35925
 @pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters)
-@pytest.mark.parametrize("keepdims", [True, False])
 @pytest.mark.parametrize("axes", reduce_axis_parameters)
-def test_reduce_operation(operation, ref_operation, keepdims, axes):
+def test_reduce_operation_keepdims(operation, ref_operation, axes):
     if axes:
-        assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=keepdims),
-                              ref_operation(reduce_data, keepdims=keepdims, axis=axes))
+        assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=True),
+                              ref_operation(reduce_data, keepdims=True, axis=axes))
     else:
-        assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=keepdims),
-                              ref_operation(reduce_data, keepdims=keepdims))
+        assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=True),
+                              ref_operation(reduce_data, keepdims=True))
+
+
+@pytest.mark.parametrize("axes", [
+    pytest.param(None, marks=xfail_issue_35925),
+    (0,),
+    (1,),
+    (2,),
+    (0, 1),
+    (0, 2),
+    (1, 2),
+    pytest.param((0, 1, 2), marks=xfail_issue_35925)])
+@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters)
+def test_reduce_operation_no_keepdims(operation, ref_operation, axes):
+    if axes:
+        assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=False),
+                              ref_operation(reduce_data, keepdims=False, axis=axes))
+    else:
+        assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=False),
+                              ref_operation(reduce_data, keepdims=False))


 @pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
@@ -81,7 +96,7 @@ def test_reduce_l1(reduction_axes):
     assert np.allclose(expected, ng_result)


-@strict_xfail_issue_35925
+@xfail_issue_35925
 def test_reduce_l1_default_axes():
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
@@ -120,7 +135,7 @@ def test_reduce_l2(reduction_axes):
     assert np.allclose(expected, ng_result)


-@strict_xfail_issue_35925
+@xfail_issue_35925
 def test_reduce_l2_default_axes():
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
@@ -139,7 +154,6 @@ def test_reduce_l2_default_axes():
     assert np.allclose(expected, ng_result)


-@unstrict_xfail_issue_35925
 @pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
 def test_reduce_log_sum(reduction_axes):
     shape = [2, 4, 3, 2]
@@ -159,7 +173,7 @@ def test_reduce_log_sum(reduction_axes):
     assert np.allclose(expected, ng_result)


-@strict_xfail_issue_35925
+@xfail_issue_35925
 def test_reduce_log_sum_default_axes():
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
@@ -178,7 +192,7 @@ def test_reduce_log_sum_default_axes():
     assert np.allclose(expected, ng_result)


-@strict_xfail_issue_35925
+@xfail_issue_35925
 def test_reduce_log_sum_exp():
     def logsumexp(data, axis=None, keepdims=True):
         return np.log(np.sum(np.exp(data), axis=axis, keepdims=keepdims))
@@ -237,7 +251,7 @@ def test_reduce_sum_square(reduction_axes):
     assert np.allclose(expected, ng_result)


-@strict_xfail_issue_35925
+@xfail_issue_35925
 def test_reduce_sum_square_default_axes():
     shape = [2, 4, 3, 2]
     np.random.seed(133391)
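The split of `test_reduce_operation` into keepdims/no_keepdims variants above is what lets this file drop the non-strict marker: once the failing axis combinations are isolated as `pytest.param` entries, every remaining xfail can be strict. As a reminder of the semantics (a standalone sketch, not code from this commit): with `strict=True` an unexpected pass (XPASS) is reported as a failure, while `strict=False` tolerates it:

```python
import pytest


@pytest.mark.xfail(reason="known bug", strict=True)
def test_strict_xfail():
    # reported as XFAIL while broken; an unexpected pass fails the suite,
    # which forces the marker to be removed once the bug is fixed
    assert 1 + 1 == 3


@pytest.mark.xfail(reason="flaky mismatch", strict=False)
def test_non_strict_xfail():
    # an unexpected pass is only reported as XPASS, so regressions can hide
    assert 1 + 1 == 3
```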
@@ -19,14 +19,8 @@ import pytest
 from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info

 from tests.runtime import get_runtime
-from tests.test_onnx.utils import (all_arrays_equal,
-                                   get_node_model,
-                                   import_onnx_model,
-                                   run_model,
-                                   run_node,
-                                   xfail_issue_35926,
-                                   xfail_issue_35927
-                                   )
+from tests.test_onnx.utils import all_arrays_equal, get_node_model, import_onnx_model, run_model, run_node
+from tests import xfail_issue_35926, xfail_issue_35927


 @xfail_issue_35926
@@ -21,15 +21,12 @@ from onnx.helper import make_graph, make_model, make_node, make_tensor_value_inf

 from ngraph.exceptions import NgraphTypeError
 from tests.runtime import get_runtime
-from tests.test_onnx.utils import (get_node_model,
-                                   import_onnx_model,
-                                   run_model, run_node,
-                                   xfail_issue_35926,
-                                   xfail_issue_35929,
-                                   xfail_issue_34323,
-                                   xfail_issue_35930,
-                                   xfail_issue_35932
-                                   )
+from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node
+from tests import (xfail_issue_35926,
+                   xfail_issue_35929,
+                   xfail_issue_34323,
+                   xfail_issue_35930,
+                   xfail_issue_35932)


 @xfail_issue_35926
@@ -19,7 +19,8 @@ import numpy as np
 import onnx
 import pytest

-from tests.test_onnx.utils import run_node, xfail_issue_35926
+from tests.test_onnx.utils import run_node
+from tests import xfail_issue_35926


 @xfail_issue_35926
@@ -29,9 +29,15 @@
 # zoo_models.append({'model_name': '{}_opset{}'.format(model_name.replace('-', '_'), opset), 'url': url})
 #
 # sorted(zoo_models, key=itemgetter('model_name'))
-import tests
 from tests.test_onnx.utils import OpenVinoOnnxBackend
 from tests.test_onnx.utils.model_zoo_tester import ModelZooTestRunner
+from tests import (BACKEND_NAME,
+                   xfail_issue_36533,
+                   xfail_issue_36534,
+                   xfail_issue_35926,
+                   xfail_issue_36535,
+                   xfail_issue_36537,
+                   xfail_issue_36538)

 _GITHUB_MODELS_LTS = "https://media.githubusercontent.com/media/onnx/models/master/"

@@ -565,11 +571,58 @@ zoo_models = [
 ]

 # Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones.
-OpenVinoOnnxBackend.backend_name = tests.BACKEND_NAME
+OpenVinoOnnxBackend.backend_name = BACKEND_NAME

 # import all test cases at global scope to make them visible to pytest
 backend_test = ModelZooTestRunner(OpenVinoOnnxBackend, zoo_models, __name__)
+test_cases = backend_test.test_cases["OnnxBackendZooModelTest"]
+
+test_cases_list = [
+    test_cases.test_udnie_opset8_cpu,
+    test_cases.test_udnie_opset8_cpu,
+    test_cases.test_udnie_opset9_cpu,
+    test_cases.test_mosaic_opset8_cpu,
+    test_cases.test_vgg16_opset7_cpu,
+    test_cases.test_pointilism_opset9_cpu,
+    test_cases.test_vgg19_bn_opset7_cpu,
+    test_cases.test_candy_opset9_cpu,
+    test_cases.test_rain_princess_opset8_cpu,
+    test_cases.test_mosaic_opset9_cpu,
+    test_cases.test_pointilism_opset8_cpu,
+    test_cases.test_rain_princess_opset9_cpu,
+    test_cases.test_ssd_opset10_cpu,
+    test_cases.test_resnet152_v2_opset7_cpu,
+    test_cases.test_resnet50_v2_opset7_cpu,
+    test_cases.test_resnet18_v1_opset7_cpu,
+    test_cases.test_resnet18_v2_opset7_cpu,
+    test_cases.test_resnet34_v1_opset7_cpu,
+    test_cases.test_resnet34_v2_opset7_cpu,
+    test_cases.test_resnet101_v2_opset7_cpu,
+    test_cases.test_resnet101_v1_opset7_cpu,
+    test_cases.test_ResNet101_DUC_opset7_cpu,
+    test_cases.test_arcfaceresnet100_opset8_cpu,
+    test_cases.test_mobilenetv2_opset7_cpu,
+    test_cases.test_candy_opset8_cpu,
+    test_cases.test_resnet152_v1_opset7_cpu
+]
+
+xfail_issue_36534(test_cases.test_FasterRCNN_opset10_cpu)
+xfail_issue_36534(test_cases.test_MaskRCNN_opset10_cpu)
+
+xfail_issue_35926(test_cases.test_bertsquad_opset8_cpu)
+xfail_issue_35926(test_cases.test_bertsquad_opset10_cpu)
+
+xfail_issue_35926(test_cases.test_gpt2_opset10_cpu)
+
+xfail_issue_36535(test_cases.test_super_resolution_opset10_cpu)
+xfail_issue_36535(test_cases.test_tinyyolov2_opset7_cpu)
+xfail_issue_36535(test_cases.test_tinyyolov2_opset8_cpu)
+
+xfail_issue_36537(test_cases.test_shufflenet_v2_opset10_cpu)
+xfail_issue_36538(test_cases.test_yolov3_opset10_cpu)
+
+for test_case in test_cases_list:
+    xfail_issue_36533(test_case)
+
+del test_cases
 globals().update(backend_test.enable_report().test_cases)
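Because the zoo test cases are generated at runtime by ModelZooTestRunner, the markers above are applied as plain callables rather than decorators; a pytest mark applied to a function object is equivalent to decorating its definition. A minimal standalone sketch of that mechanism (the names here are illustrative):

```python
import pytest

xfail_known_bug = pytest.mark.xfail(reason="known bug", strict=True)


def test_generated():
    assert False  # placeholder for a dynamically generated test case


# same effect as writing @xfail_known_bug above the definition
test_generated = xfail_known_bug(test_generated)
```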
@@ -19,7 +19,6 @@ from typing import Any, Dict, Iterable, List, Optional, Text

 import numpy as np
 import onnx
-import pytest
 from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info

 import tests
@@ -28,43 +27,6 @@ from tests.test_onnx.utils.onnx_backend import OpenVinoOnnxBackend
 from tests.test_onnx.utils.onnx_helpers import import_onnx_model


-def xfail_test(reason="Mark the test as expected to fail", strict=True):
-    return pytest.mark.xfail(reason=reason, strict=strict)
-
-
-skip_segfault = pytest.mark.skip(reason="Segmentation fault error")
-xfail_issue_35893 = xfail_test(reason="ValueError: could not broadcast input array")
-xfail_issue_35911 = xfail_test(reason="Assertion error: Pad model mismatch error")
-xfail_issue_35912 = xfail_test(reason="RuntimeError: Error of validate layer: B with type: "
-                               "Pad. Cannot parse parameter pads_end from IR for layer B. "
-                               "Value -1,0 cannot be casted to int.")
-xfail_issue_35914 = xfail_test(reason="IndexError: too many indices for array: "
-                               "array is 0-dimensional, but 1 were indexed")
-xfail_issue_35915 = xfail_test(reason="RuntimeError: Eltwise node with unsupported combination "
-                               "of input and output types")
-xfail_issue_35916 = xfail_test(reason="RuntimeError: Unsupported input dims count for layer Z")
-xfail_issue_35917 = xfail_test(reason="RuntimeError: Unsupported input dims count for "
-                               "layer MatMul")
-xfail_issue_35918 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
-                               "Mismatched attribute type in 'test_node : alpha'")
-xfail_issue_35921 = xfail_test(reason="ValueError - shapes mismatch in gemm")
-
-xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported")
-xfail_issue_35924 = xfail_test(reason="Assertion error - elu results mismatch")
-unstrict_xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch",
-                                        strict=False)
-strict_xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch")
-xfail_issue_35926 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format I64 is "
-                               "not supported yet...")
-xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
-xfail_issue_35929 = xfail_test(reason="RuntimeError: Incorrect precision f64!")
-xfail_issue_34323 = xfail_test(reason="RuntimeError: data [value] doesn't exist")
-xfail_issue_35930 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
-                               "Required attribute 'to' is missing.")
-xfail_issue_35932 = xfail_test(reason="Assertion error - logsoftmax results mismatch")
-xfail_issue_36437 = xfail_test(reason="RuntimeError: Cannot find blob with name: y")
-
-
 def run_node(onnx_node, data_inputs, **kwargs):
     # type: (onnx.NodeProto, List[np.ndarray], Dict[Text, Any]) -> List[np.ndarray]
     """
@@ -17,14 +17,17 @@ setenv =
     PYBIND_HEADERS_PATH = {env:PYBIND_HEADERS_PATH:}
     NGRAPH_BACKEND = {env:NGRAPH_BACKEND:"CPU"}
+    PYTHONPATH = {env:PYTHONPATH}
+passenv =
+    http_proxy
+    https_proxy
 commands=
     {envbindir}/python setup.py bdist_wheel
     {envbindir}/pip install --no-index --pre --find-links=dist/ ngraph-core
     flake8 {posargs:src/ setup.py}
     flake8 --ignore=D100,D101,D102,D103,D104,D105,D107,W503 tests/  # ignore lack of docs in tests
     mypy --config-file=tox.ini {posargs:src/}
-    pytest --backend={env:NGRAPH_BACKEND} tests/test_ngraph/test_core.py -v
-    pytest --backend={env:NGRAPH_BACKEND} tests/test_onnx -v -k 'not test_zoo_models.py'
+    pytest --backend={env:NGRAPH_BACKEND} tests/test_ngraph tests/test_onnx -v -k 'not test_zoo_models.py'

 [testenv:devenv]
 envdir = devenv
 usedevelop = True