diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index 38b35d17623..9cc3c4a6491 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -121,14 +121,6 @@ jobs: # Tests # - - name: Python API 1.0 Tests - # if: fromJSON(inputs.affected-components).Python_API.test # Ticket: 127101 - run: | - python3 -m pytest -s ${INSTALL_TEST_DIR}/pyngraph \ - --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py - - name: Python API 2.0 Tests # if: ${{ fromJSON(inputs.affected-components).Python_API.test && runner.arch != 'ARM64' }} # Ticket: 126380, 127101 run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 93f5acb10df..312bc1a4f73 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -364,12 +364,6 @@ jobs: # TODO: replace with Python API tests requirements python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt - - name: Python API 1.0 Tests - #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 - shell: cmd - run: | - python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py - - name: Python API 2.0 Tests #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 shell: cmd diff --git a/src/bindings/python/docs/test_examples.md b/src/bindings/python/docs/test_examples.md index 80808fdbfd5..c7078c08cc8 100644 --- a/src/bindings/python/docs/test_examples.md +++ b/src/bindings/python/docs/test_examples.md @@ -23,11 +23,6 @@ To run OpenVINO Python API 2.0 tests: pytest tests/ ``` -To run OpenVINO Python API 1.0 tests, use this command: -``` -pytest tests_compatibility/ -``` - By default, tests are run on the CPU plugin. If you want to run them on a different plugin, you need to specify this environment variable: ``` @@ -147,10 +142,6 @@ Notice that the test name is shared between cases. In a real-life pull request, * ... or create reference values during runtime. Always use a good, trustworthy library for that! * Re-use common parts of the code (like multiple lines that create a helper object) and move them out to make tests easier to read. -### Difference between *tests* and *tests_compatibility* directories - -Someone could notice two similar folders [`tests`](./../tests/) and [`tests_compatibility`](./../tests_compatibility/). First one is the desired place for all upcoming features and tests. Compatibility layer is only supported in specific cases and any updates to it should be explicitly approved by OpenVINO™ reviewers. Please do not duplicate tests in both directories if not necessary.
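For context on what the retained `tests/` directory covers after the removal above, here is a minimal sketch of a Python API 2.0 style test. It is illustrative only and not part of this diff: the tiny model, the opset version, and names such as `test_add_two_tensors` are assumptions, not code from the OpenVINO test suite.

```python
import numpy as np
import pytest

import openvino.runtime as ov
from openvino.runtime import opset12 as ops


@pytest.mark.parametrize("dtype", [np.float32, np.int32])
def test_add_two_tensors(dtype):
    # Build a tiny model with the Python API 2.0 graph builders.
    param_a = ops.parameter([2, 2], dtype, name="a")
    param_b = ops.parameter([2, 2], dtype, name="b")
    model = ov.Model([ops.add(param_a, param_b)], [param_a, param_b], "add_model")

    compiled = ov.Core().compile_model(model, "CPU")
    a = np.ones((2, 2), dtype=dtype)
    b = np.full((2, 2), 2, dtype=dtype)

    # Reference values are computed at runtime with NumPy, as the guidelines above suggest.
    result = compiled([a, b])[compiled.output(0)]
    assert np.array_equal(result, a + b)
```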
- ## See also * [OpenVINO™ README](../../../../README.md) * [OpenVINO™ bindings README](../../README.md) diff --git a/src/bindings/python/setup.cfg b/src/bindings/python/setup.cfg index 5ea6fe46cd5..dd8b0a75c27 100644 --- a/src/bindings/python/setup.cfg +++ b/src/bindings/python/setup.cfg @@ -19,7 +19,6 @@ passenv = https_proxy commands= pytest tests -m "not template_extension" -v -k 'not _cuda' --ignore=tests/test_utils - pytest --backend={env:OV_BACKEND} tests_compatibility/test_ngraph -v -k 'not _cuda' --ignore=tests_compatibility/test_onnx/test_zoo_models.py pytest --backend={env:OV_BACKEND} /openvino/src/frontends/onnx/tests -v --ignore=/openvino/src/frontends/onnx/tests/tests_python/test_zoo_models.py [testenv:zoo_models] @@ -68,7 +67,6 @@ docstring-convention = google enable-extensions = G per-file-ignores = src/openvino/runtime/*/ops.py: VNE001,VNE003 - tests_compatibility/test_ngraph/*: C101,C812,C815,C816,C819,CCE001,D212,E800,ECE001,N400,N802,N806,P101,P103,PT001,PT005,PT006,PT011,PT019,PT023,RST201,S001,VNE002 src/compatibility/ngraph/*: C101,C812,C819,CCE001,E800,N806,P101,RST201,RST202,RST203,RST206,VNE001,VNE003 src/openvino/preprocess/torchvision/*: N801, VNE001 *__init__.py: F401 diff --git a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt index a5c41c2b613..8d3ac1ab0c0 100644 --- a/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt +++ b/src/bindings/python/src/compatibility/pyngraph/CMakeLists.txt @@ -83,8 +83,3 @@ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../ngraph COMPONENT ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion} ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL} USE_SOURCE_PERMISSIONS) - -install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/tests_compatibility - DESTINATION tests/${PROJECT_NAME} - COMPONENT tests - EXCLUDE_FROM_ALL) diff --git a/src/bindings/python/tests_compatibility/__init__.py b/src/bindings/python/tests_compatibility/__init__.py deleted file mode 100644 index 9556eb9b8fe..00000000000 --- a/src/bindings/python/tests_compatibility/__init__.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -# test.BACKEND_NAME is a configuration variable determining which -# nGraph backend tests will use. It's set during pytest configuration time. -# See `pytest_configure` hook in `conftest.py` for more details. -BACKEND_NAME = None - -# test.MODEL_ZOO_DIR is a configuration variable providing the path -# to the ZOO of ONNX models to test. It's set during pytest configuration time. -# See `pytest_configure` hook in `conftest.py` for more -# details. -MODEL_ZOO_DIR = None - -# test.MODEL_ZOO_XFAIL is a configuration variable which enable xfails for model zoo.
-MODEL_ZOO_XFAIL = False - - -def xfail_test(reason="Mark the test as expected to fail", strict=True): - return pytest.mark.xfail(reason=reason, strict=strict) - - -skip_segfault = pytest.mark.skip(reason="Segmentation fault error") -xfail_accuracy = xfail_test(reason="Accuracy") -xfail_unsupported_by_legacy_api = xfail_test(reason="RuntimeError: This feature is not supported via legacy API.") -xfail_issue_69444 = xfail_test(reason="ONNX Resize - AssertionError: Mismatched elements.") -xfail_issue_67415 = xfail_test(reason="RuntimeError: Unsupported data type for when filling blob!") -xfail_issue_33488 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "MaxUnpool") -skip_issue_38084 = pytest.mark.skip(reason="Aborted (core dumped) Assertion " - "`(layer->get_output_partial_shape(i).is_static())' failed.") -xfail_issue_33595 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "Unique") -xfail_issue_33596 = xfail_test(reason="RuntimeError: nGraph does not support different sequence operations: " - "ConcatFromSequence, SequenceConstruct, SequenceAt, SplitToSequence, " - "SequenceEmpty, SequenceInsert, SequenceErase, SequenceLength ") -xfail_issue_33606 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "Det") -xfail_issue_33651 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "TfIdfVectorizer") -xfail_issue_33581 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "GatherElements") -xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported") -xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable") -xfail_issue_38084 = xfail_test(reason="RuntimeError: AssertionFailed: layer->get_output_partial_shape(i)." - "is_static() nGraph operation with name: cannot be " - "converted to layer with name: because output " - "with index 0 contains dynamic shapes: {}. Try to use " - "CNNNetwork::reshape() method in order to specialize shapes " - "before the conversion.") -xfail_issue_38091 = xfail_test(reason="AssertionError: Mismatched elements") -xfail_issue_38699 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "ai.onnx.preview.training.Gradient") -xfail_issue_38701 = xfail_test(reason="RuntimeError: unsupported element type: STRING") -xfail_issue_38706 = xfail_test(reason="RuntimeError: output_3.0 has zero dimension which is not allowed") -xfail_issue_38708 = xfail_test(reason="RuntimeError: While validating ONNX node '': " - "Axes input must be constant") -xfail_issue_38710 = xfail_test(reason="RuntimeError: data has zero dimension which is not allowed") -xfail_issue_38713 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "ai.onnx.preview.training.Momentum") -xfail_issue_38724 = xfail_test(reason="RuntimeError: While validating ONNX node '': " - "tf_crop_and_resize - this type of coordinate transformation mode " - "is not supported. 
Choose one of the following modes: " - "tf_half_pixel_for_nn, asymmetric, align_corners, pytorch_half_pixel, " - "half_pixel") -xfail_issue_38725 = xfail_test(reason="RuntimeError: While validating ONNX node ''") -xfail_issue_99958 = xfail_test(reason="LogSoftmax - Results mismatch") -xfail_issue_99959 = xfail_test(reason="Mish function is not supported") -xfail_issue_99960 = xfail_test(reason="MVN - Results mismatch") -xfail_issue_99961 = xfail_test(reason="Optional has/get element operators are not supported)'") -xfail_issue_99962 = pytest.mark.skip(reason="ReduceL1/L2 - Unrecognized attribute: axes for operator ReduceL1/L2") -xfail_issue_99968 = xfail_test(reason="ReduceL1/L2 - Results mismatch or unsupported ReduceSum with " - "dynamic rank by CPU plugin") -xfail_issue_99969 = xfail_test(reason="Resize - Results mismatch / " - "RuntimeError: While validating ONNX node '' / " - "RuntimeError: Check '(false)' failed at onnx/frontend/src/op/resize.cpp") -xfail_issue_99970 = xfail_test(reason="Scatter and ScatterND - RuntimeError: Check '(reduction == none)' failed at " - "src/frontends/onnx/frontend/src/op/scatter_elements.cpp OR at " - "src/frontends/onnx/frontend/src/op/scatter_nd") -xfail_issue_99972 = xfail_test(reason="Softmax - Results mismatch") -xfail_issue_99973 = xfail_test(reason="Split - RuntimeError: While validating ONNX node " - "''") - -# Model MSFT issues: -xfail_issue_37957 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " - "com.microsoft.CropAndResize, com.microsoft.GatherND, " - "com.microsoft.Pad, com.microsoft.Range") -xfail_issue_39669 = xfail_test(reason="AssertionError: This model has no test data") -xfail_issue_36534 = xfail_test(reason="RuntimeError: node input index is out of range") -xfail_issue_36536 = xfail_test(reason="RuntimeError: can't protect") -xfail_issue_36538 = xfail_test(reason="RuntimeError: Check 'PartialShape::broadcast_merge_into( pshape, " - "node->get_input_partial_shape(i), autob)' failed at " - "/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:") -xfail_issue_39658 = xfail_test(reason="RuntimeError: Tile operation has a form that is not supported." 
- " z should be converted to TileIE operation.") - - -xfail_issue_37973 = xfail_test(reason="TF Inception V2 - AssertionError: zoo models results mismatch") -xfail_issue_47430 = xfail_test(reason="FCN ResNet models - AssertionError: zoo models results mismatch") -xfail_issue_47495 = xfail_test(reason="BertSquad-10 from MSFT - AssertionError: zoo models results mismatch") -xfail_issue_48145 = xfail_test(reason="BertSquad-8 - AssertionError: Items are not equal: ACTUAL: 4 " - "DESIRED: 3") -xfail_issue_48190 = xfail_test(reason="RobertaBase-11 - AssertionError: Items are not equal: " - "ACTUAL: dtype('float64') DESIRED: dtype('float32')") -xfail_issue_49752 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::Pad") -xfail_issue_49753 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::StridedSlice") -xfail_issue_49754 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::TopKIE") -xfail_issue_52463 = xfail_test(reason="test_operator_add_size1_singleton_broadcast_cpu - " - "Not equal to tolerance") -xfail_issue_58033 = xfail_test(reason="Einsum operation misses support for complex ellipsis equations") -xfail_issue_onnx_models_140 = xfail_test(reason="https://github.com/onnx/models/issues/140") - -xfail_issue_63033 = xfail_test(reason="BatchNormalization: Training mode is not supported") -xfail_issue_63036 = xfail_test(reason="Changes in ConvTranspose padding") -xfail_issue_63043 = xfail_test(reason="Recurrent node expects constants as W, R, B inputs.") - -skip_rng_tests = pytest.mark.skip(reason="Tests use random number generator with no seed.") -xfail_issue_63137 = xfail_test(reason="Unsupported operations: OptionalHasElement, OptionalGetElement") -xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support") - -xfail_issue_78843 = xfail_test(reason="Missing reference output files for ssd mobilenet models") -xfail_issue_78741 = xfail_test(reason="Cannot get dims for non-static shapes. " - "Requires dynamism support enabled.") - -xfail_issue_81976 = xfail_test(reason="RuntimeError: z node not found in graph cache") -xfail_issue_82038 = xfail_test(reason="ScatterElements, ScatterND, AssertionError: Result mismatch") -xfail_issue_82039 = xfail_test(reason="Unsupported data type Optional, RuntimeError: [ NOT_IMPLEMENTED ] " - "CPU plugin: Input image format UNSPECIFIED is not supported yet...") -xfail_issue_90649 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations:" - "BlackmanWindow, DFT, HammingWindow, HannWindow, LayerNormalization, " - "MelWeightMatrix, SequenceMap, STFT") -skip_issue_91151 = pytest.mark.skip(reason="RuntimeError: model input (shape={3,4}) and blob (shape=(1)) are incompatible") # Need to enable after bumping to 1.15 -xfail_issue_91490 = xfail_test(reason="y has zero dimension which is not allowed") -xfail_issue_101965 = xfail_test(reason="Mismatch with numpy-based expected results.") -xfail_issue_113506 = xfail_test(reason="Unsupported operation of type: LSTMSequence Node expects 7 inputs. Actual: 8") - -skip_dynamic_model = pytest.mark.skip(reason="CPU plug-in can't load a model with dynamic output shapes via legacy API") - -# ONNX 1.14 -xfail_issue_119896 = xfail_test(reason="Unsupported element type: FLOAT8") -xfail_issue_119900 = xfail_test(reason="While validating ONNX node '': " - "half_pixel_symmetric - this type of coordinate transformation mode " - "is not supported. 
Choose one of the following modes: " - "tf_half_pixel_for_nn, asymmetric, align_corners, pytorch_half_pixel, " - "half_pixel") -xfail_issue_119903 = xfail_test(reason="DeformConv operation is not supported") -xfail_issue_119906 = xfail_test(reason="LpPool operation is not supported") -xfail_issue_119919 = xfail_test(reason="While validating ONNX node '': Unsupported padding mode: [wrap]") -xfail_issue_119922 = xfail_test(reason="ai.onnx.ml operators domain isn't supported") -xfail_issue_119925 = xfail_test(reason="AveragePool AssertionError: Not equal to tolerance rtol=0.001, atol=1e-07") -xfail_issue_119926 = xfail_test(reason="ROIAlign AssertionError: Not equal to tolerance rtol=0.001, atol=1e-07") - -# ONNX 1.15 -xfail_issue_125485 = xfail_test(reason="AffineGrid operation is not supported") -xfail_issue_125486 = xfail_test(reason="Gelu operation is not supported") -xfail_issue_125488 = xfail_test(reason="ImageDecoder operation is not supported") -xfail_issue_125487 = xfail_test(reason="GridSample doesn't support cubic and linear modes, and 4D tensor") -skip_issue_125489 = pytest.mark.skip(reason="IsInf changed behavior since opset-20") # Need to enable after opset-20 will be released -xfail_issue_125491 = xfail_test(reason="AveragePool mismatch with differences in shapes") -xfail_issue_125492 = xfail_test(reason="DFT mismatch") -xfail_issue_125493 = xfail_test(reason="Reduce* mismatch") -xfail_issue_125495 = xfail_test(reason="ReduceMin/Max doesn't support boolean") -xfail_issue_127812 = xfail_test(reason="Reduce* doesn't support zero dimensions") -skip_misalignment = pytest.mark.skip(reason="Misalignment between onnx versions") # Need to enable after bumping to 1.15 diff --git a/src/bindings/python/tests_compatibility/conftest.py b/src/bindings/python/tests_compatibility/conftest.py deleted file mode 100644 index c45bb0ec32d..00000000000 --- a/src/bindings/python/tests_compatibility/conftest.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import pytest -import numpy as np - -import ngraph as ng -import tests_compatibility - -from pathlib import Path - - -def model_path(is_fp16=False): - base_path = os.path.dirname(__file__) - if is_fp16: - test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.xml") - test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.bin") - else: - test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.xml") - test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.bin") - return (test_xml, test_bin) - - -def model_onnx_path(): - base_path = os.path.dirname(__file__) - test_onnx = os.path.join(base_path, "test_utils", "utils", "test_model.onnx") - return test_onnx - - -def plugins_path(): - base_path = os.path.dirname(__file__) - plugins_xml = os.path.join(base_path, "test_utils", "utils", "plugins.xml") - plugins_win_xml = os.path.join(base_path, "test_utils", "utils", "plugins_win.xml") - plugins_osx_xml = os.path.join(base_path, "test_utils", "utils", "plugins_apple.xml") - return (plugins_xml, plugins_win_xml, plugins_osx_xml) - - -def _get_default_model_zoo_dir(): - return Path(os.getenv("ONNX_HOME", Path.home() / ".onnx/model_zoo")) - - -def pytest_addoption(parser): - parser.addoption( - "--backend", - default="CPU", - choices=["CPU", "GPU", "GNA", "HETERO", "TEMPLATE"], - help="Select target device", - ) - parser.addoption( - "--model_zoo_dir", - default=_get_default_model_zoo_dir(), - 
type=str, - help="location of the model zoo", - ) - parser.addoption( - "--model_zoo_xfail", - action="store_true", - help="treat model zoo known issues as xfails instead of failures", - ) - - -def pytest_configure(config): - backend_name = config.getvalue("backend") - tests_compatibility.BACKEND_NAME = backend_name - tests_compatibility.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir")) - tests_compatibility.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail") - - # register additional markers - config.addinivalue_line("markers", "skip_on_cpu: Skip test on CPU") - config.addinivalue_line("markers", "skip_on_gpu: Skip test on GPU") - config.addinivalue_line("markers", "skip_on_gna: Skip test on GNA") - config.addinivalue_line("markers", "skip_on_hetero: Skip test on HETERO") - config.addinivalue_line("markers", "skip_on_template: Skip test on TEMPLATE") - config.addinivalue_line("markers", "onnx_coverage: Collect ONNX operator coverage") - config.addinivalue_line("markers", "template_extension") - config.addinivalue_line("markers", "dynamic_library: Runs tests only in dynamic libraries case") - - -def pytest_collection_modifyitems(config, items): - backend_name = config.getvalue("backend") - tests_compatibility.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir")) - tests_compatibility.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail") - - keywords = { - "CPU": "skip_on_cpu", - "GPU": "skip_on_gpu", - "GNA": "skip_on_gna", - "HETERO": "skip_on_hetero", - "TEMPLATE": "skip_on_template", - } - - skip_markers = { - "CPU": pytest.mark.skip(reason="Skipping test on the CPU backend."), - "GPU": pytest.mark.skip(reason="Skipping test on the GPU backend."), - "GNA": pytest.mark.skip(reason="Skipping test on the GNA backend."), - "HETERO": pytest.mark.skip(reason="Skipping test on the HETERO backend."), - "TEMPLATE": pytest.mark.skip(reason="Skipping test on the TEMPLATE backend."), - } - - for item in items: - skip_this_backend = keywords[backend_name] - if skip_this_backend in item.keywords: - item.add_marker(skip_markers[backend_name]) - - -@pytest.fixture(scope="session") -def device(): - return os.environ.get("TEST_DEVICE") if os.environ.get("TEST_DEVICE") else "CPU" - - -def create_encoder(input_shape, levels=4): - # input - input_node = ng.parameter(input_shape, np.float32, name="data") - - padding_begin = padding_end = [0, 0] - strides = [1, 1] - dilations = [1, 1] - input_channels = [input_shape[1]] - last_output = input_node - - # convolution layers - for _ in range(levels): - input_c = input_channels[-1] - output_c = input_c * 2 - conv_w = np.random.uniform(0, 1, [output_c, input_c, 5, 5]).astype(np.float32) - conv_node = ng.convolution(last_output, conv_w, strides, padding_begin, padding_end, dilations) - input_channels.append(output_c) - last_output = conv_node - - # deconvolution layers - for _ in range(levels): - input_c = input_channels[-2] - output_c = input_channels.pop(-1) - deconv_w = np.random.uniform(0, 1, [output_c, input_c, 5, 5]).astype(np.float32) - deconv_node = ng.convolution_backprop_data(last_output, deconv_w, strides) - last_output = deconv_node - - # result - last_output.set_friendly_name("out") - result_node = ng.result(last_output) - return ng.Function(result_node, [input_node], "Encoder") - - -def create_relu(input_shape): - input_shape = ng.impl.PartialShape(input_shape) - param = ng.parameter(input_shape, dtype=np.float32, name="data") - result = ng.relu(param, name="out") - function = ng.Function(result, [param], "TestFunction") - return function diff 
--git a/src/bindings/python/tests_compatibility/runtime.py b/src/bindings/python/tests_compatibility/runtime.py deleted file mode 100644 index 4a295448532..00000000000 --- a/src/bindings/python/tests_compatibility/runtime.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -"""Provide a layer of abstraction for an OpenVINO runtime environment.""" - -import logging -from typing import Dict, List, Union - -import numpy as np -from openvino.inference_engine import IECore, IENetwork, Blob, DataPtr - -from ngraph.exceptions import UserInputError -from ngraph.impl import Function, Node, PartialShape, Type -from ngraph.opset1.ops import result -from ngraph.utils.types import NumericData, get_shape, get_dtype -from _pyngraph.util import get_ie_output_name - -import tests_compatibility - -log = logging.getLogger(__name__) - - -def runtime(backend_name: str = "CPU") -> "Runtime": - """Create a Runtime object (helper factory).""" - return Runtime(backend_name) - - -def get_runtime(): - """Return runtime object.""" - if tests_compatibility.BACKEND_NAME is not None: - return runtime(backend_name=tests_compatibility.BACKEND_NAME) - else: - return runtime() - - -def _convert_inputs(cnn_network: IENetwork) -> None: - """WA converts unsupported input images formats.""" - precision_map = { - "FP64": "FP32", - "U32": "I32", - } - - for cnn_input in cnn_network.input_info: - try: - _precision = precision_map[cnn_network.input_info[cnn_input].precision] - cnn_network.input_info[cnn_input].precision = _precision - except KeyError: - pass - - -def apply_ng_type(output: DataPtr, ng_type: Type): - ng_ie_supported_type_map = { - Type.boolean.to_string(): "BOOL", - Type.f32.to_string(): "FP32", - Type.i8.to_string(): "I8", - Type.i32.to_string(): "I32", - Type.u8.to_string(): "U8", - } - if ng_type.to_string() in ng_ie_supported_type_map: - output.precision = ng_ie_supported_type_map[ng_type.to_string()] - - -class Runtime(object): - """Represents an nGraph runtime environment.""" - - def __init__(self, backend_name: str) -> None: - self.backend_name = backend_name - log.debug("Creating Inference Engine for %s" % backend_name) - self.backend = IECore() - assert backend_name in self.backend.available_devices, ( - 'The requested device "' + backend_name + '" is not supported!' - ) - - def set_config(self, config: Dict[str, str]) -> None: - """Set the inference engine configuration.""" - self.backend.set_config(config, device_name=self.backend_name) - - def __repr__(self) -> str: - return "".format(self.backend_name) - - def computation(self, node_or_function: Union[Node, Function], *inputs: Node) -> "Computation": - """Return a callable Computation object.""" - if isinstance(node_or_function, Node): - ng_function = Function(node_or_function, inputs, node_or_function.name) - return Computation(self, ng_function) - elif isinstance(node_or_function, Function): - return Computation(self, node_or_function) - else: - raise TypeError( - "Runtime.computation must be called with an nGraph Function object " - "or an nGraph node object an optionally Parameter node objects. 
" - "Called with: %s", - node_or_function, - ) - - -class Computation(object): - """nGraph callable computation object.""" - - def __init__(self, runtime: Runtime, ng_function: Function) -> None: - self.runtime = runtime - self.function = ng_function - self.parameters = ng_function.get_parameters() - self.results = ng_function.get_results() - self.network_cache = {} - - def __repr__(self) -> str: - params_string = ", ".join([param.name for param in self.parameters]) - return "".format(self.function.get_name(), params_string) - - def _get_ie_output_blob_name(self, outputs: Dict, ng_result: result) -> str: - if len(self.results) == 1: - return next(iter(outputs.keys())) - else: - prev_layer_output = ng_result.input(0).get_source_output() - return get_ie_output_name(prev_layer_output) - - def _get_ie_output_blob_buffer(self, output_blobs: Dict[str, Blob], ng_result: result) -> np.ndarray: - out_name = self._get_ie_output_blob_name(output_blobs, ng_result) - out_blob = output_blobs[out_name] - - if out_blob.tensor_desc.layout == "SCALAR": - return out_blob.buffer.reshape(()) - else: - return out_blob.buffer - - def convert_buffers(self, source_buffers, target_dtypes): - converted_buffers = [] - for i in range(len(source_buffers)): - target_dtype = target_dtypes[i] - # custom conversion for bf16 - if self.results[i].get_output_element_type(0) == Type.bf16: - converted_buffers.append((source_buffers[i].view(np.uint32) >> 16).astype(np.uint16)) - else: - converted_buffers.append(source_buffers[i].astype(target_dtype)) - return converted_buffers - - def __call__(self, *input_values: NumericData) -> List[NumericData]: - """Run computation on input values and return result.""" - # Input validation - if len(input_values) < len(self.parameters): - raise UserInputError( - "Expected %s params, received not enough %s values.", len(self.parameters), len(input_values) - ) - # ignore not needed input values - input_values = input_values[:len(self.parameters)] - - input_values = [np.array(input_value) for input_value in input_values] - input_shapes = [get_shape(input_value) for input_value in input_values] - - param_names = [param.friendly_name for param in self.parameters] - - if self.network_cache.get(str(input_shapes)) is None: - capsule = Function.to_capsule(self.function) - cnn_network = IENetwork(capsule) - if self.function.is_dynamic(): - cnn_network.reshape(dict(zip(param_names, input_shapes))) - # Convert unsupported inputs of the network - _convert_inputs(cnn_network) - self.network_cache[str(input_shapes)] = cnn_network - else: - cnn_network = self.network_cache[str(input_shapes)] - - # set output blobs precission based on nG results - for ng_result in self.results: - ie_out_name = self._get_ie_output_blob_name(cnn_network.outputs, ng_result) - apply_ng_type(cnn_network.outputs[ie_out_name], ng_result.get_output_element_type(0)) - - executable_network = self.runtime.backend.load_network(cnn_network, self.runtime.backend_name) - - for parameter, input in zip(self.parameters, input_values): - parameter_shape = parameter.get_output_partial_shape(0) - input_shape = PartialShape(input.shape) - if len(input.shape) > 0 and not parameter_shape.compatible(input_shape): - raise UserInputError( - "Provided tensor's shape: %s does not match the expected: %s.", - input_shape, - parameter_shape, - ) - - request = executable_network.requests[0] - request.infer(dict(zip(param_names, input_values))) - - # Set order of output blobs compatible with nG Function - result_buffers = 
[self._get_ie_output_blob_buffer(request.output_blobs, result) - for result in self.results] - - # Since OV overwrite result data type we have to convert results to the original one. - original_dtypes = [get_dtype(result.get_output_element_type(0)) for result in self.results] - converted_buffers = self.convert_buffers(result_buffers, original_dtypes) - return converted_buffers diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/__init__.py b/src/bindings/python/tests_compatibility/test_inference_engine/__init__.py deleted file mode 100644 index 46a1a3756d2..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (C) 2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_Blob.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_Blob.py deleted file mode 100644 index 8f6c9c00934..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_Blob.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np -import os - -from openvino.inference_engine import TensorDesc, Blob, IECore -from tests_compatibility.conftest import model_path -from tests_compatibility.test_utils.test_utils import generate_image - - -def test_init_with_tensor_desc(): - tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NHWC") - blob = Blob(tensor_desc) - assert isinstance(blob.buffer, np.ndarray) - assert blob.tensor_desc == tensor_desc - - -@pytest.mark.parametrize("shape, layout", [ - ([1, 3, 127, 127], "NCHW"), - ([], "SCALAR"), -]) -def test_init_with_numpy(shape, layout): - tensor_desc = TensorDesc("FP32", shape, layout) - array = np.ones(shape=shape, dtype=np.float32) - blob = Blob(tensor_desc, array) - assert isinstance(blob.buffer, np.ndarray) - assert np.shares_memory(blob.buffer, array) - assert blob.tensor_desc == tensor_desc - - -def test_get_tensor_desc(): - tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC") - blob = Blob(tensor_desc) - assert blob.tensor_desc == tensor_desc - - -def test_get_buffer(): - tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NCHW") - array = np.ones(shape=(1, 3, 127, 127), dtype=np.float32) - blob = Blob(tensor_desc, array) - assert np.array_equal(blob.buffer, array) - - -@pytest.mark.parametrize("precision, numpy_precision", [ - ("FP32", np.float32), - ("FP64", np.float64), - ("FP16", np.float16), - ("I8", np.int8), - ("U8", np.uint8), - ("I32", np.int32), - ("I16", np.int16), - ("U16", np.uint16), - ("I64", np.int64), - ("BOOL", np.uint8), - ("BIN", np.int8), - ("BF16", np.float16), -]) -def test_write_to_buffer(precision, numpy_precision): - tensor_desc = TensorDesc(precision, [1, 3, 127, 127], "NCHW") - array = np.zeros(shape=(1, 3, 127, 127), dtype=numpy_precision) - blob = Blob(tensor_desc, array) - ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=numpy_precision) - blob.buffer[:] = ones_arr - assert np.array_equal(blob.buffer, ones_arr) - - -def test_write_numpy_scalar_int64(): - tensor_desc = TensorDesc("I64", [], "SCALAR") - scalar = np.array(0, dtype=np.int64) - blob = Blob(tensor_desc, scalar) - scalar_to_write = np.array(1, dtype=np.int64) - blob.buffer[:] = scalar_to_write - assert np.array_equal(blob.buffer, np.atleast_1d(scalar_to_write)) - - -def test_incompatible_array_and_td(): - tensor_desc = TensorDesc("FP32", [1, 3, 127, 
127], "NCHW") - array = np.zeros(shape=(1, 2, 3, 4), dtype=np.float32) - with pytest.raises(AttributeError) as e: - Blob(tensor_desc, array) - assert "Number of elements in provided numpy array 24 and " \ - "required by TensorDesc 48387 are not equal" in str(e.value) - - -def test_incompatible_input_precision(): - image = generate_image(dtype="float64") - tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") - with pytest.raises(ValueError) as e: - Blob(tensor_desc, image) - assert "Data type float64 of provided numpy array " \ - "doesn't match to TensorDesc precision FP32" in str(e.value) - - -# issue 49903 -@pytest.mark.skip(reason="Test will enable when CPU fix will be merge") -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test") -def test_buffer_values_after_add_outputs(device): - test_net_xml_fp16, test_net_bin_fp16 = model_path(is_fp16=True) - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml_fp16, weights=test_net_bin_fp16) - output_layer = "22" - net.add_outputs(output_layer) - exec_net = ie_core.load_network(net, device) - feed_dict = { - 'data': np.random.normal(0, 1, (1, 3, 32, 32)).astype(np.float32) - } - result = exec_net.infer(feed_dict) - assert np.all(abs(result[output_layer])<30) - assert result[output_layer].dtype == np.float16 - - -def test_set_shape(): - tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NHWC") - blob = Blob(tensor_desc) - blob.set_shape([1, 4, 128, 128]) - assert blob.tensor_desc.dims == [1, 4, 128, 128] - assert blob.buffer.shape == (1, 4, 128, 128) - - -def test_cannot_set_shape_preallocated_memory(): - tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NHWC") - array = np.ones([1, 3, 127, 127], dtype=np.float32) - blob = Blob(tensor_desc, array) - with pytest.raises(RuntimeError) as e: - blob.set_shape([1, 4, 128, 128]) - assert "Cannot call setShape for Blobs created on top of preallocated memory" in str(e.value) diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_CDataPtr.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_CDataPtr.py deleted file mode 100644 index ebfa391eb96..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_CDataPtr.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from openvino.inference_engine import CDataPtr, IECore -from tests_compatibility.conftest import model_path, create_relu -import ngraph as ng - - -test_net_xml, test_net_bin = model_path() - - -def test_name(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - assert isinstance(exec_net.outputs['fc_out'], CDataPtr) - assert exec_net.outputs['fc_out'].name == "fc_out", "Incorrect name for layer 'fc_out'" - - -def test_precision(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - assert isinstance(exec_net.outputs['fc_out'], CDataPtr) - assert exec_net.outputs['fc_out'].precision == "FP32", "Incorrect precision for layer 'fc_out'" - - -def test_no_precision_setter(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - with pytest.raises(AttributeError) as e: - exec_net.outputs['fc_out'].precision = "I8" - assert "attribute 'precision' of 
'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value) - - -def test_layout(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - assert exec_net.outputs['fc_out'].layout == "NC", "Incorrect layout for layer 'fc_out" - - -def test_no_layout_setter(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - with pytest.raises(AttributeError) as e: - exec_net.outputs['fc_out'].layout = "CN" - assert "attribute 'layout' of 'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value) - - -def test_initialized(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - assert exec_net.outputs['fc_out'].initialized, "Incorrect value for initialized property for layer 'fc_out" diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_DataPtr.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_DataPtr.py deleted file mode 100644 index 433e0300fd2..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_DataPtr.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from openvino.inference_engine import IECore, DataPtr -from tests_compatibility.conftest import model_path, create_relu -import ngraph as ng - - -test_net_xml, test_net_bin = model_path() - - -def layer_out_data(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - return net.outputs['fc_out'] - - -def test_name(): - assert layer_out_data().name == 'fc_out', "Incorrect name for layer 'fc_out'" - - -def test_precision(): - assert layer_out_data().precision == "FP32", "Incorrect precision for layer 'fc_out'" - - -def test_precision_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.outputs['fc_out'].precision = "I8" - assert net.outputs['fc_out'].precision == "I8", "Incorrect precision for layer 'fc_out'" - - -def test_incorrect_precision_setter(): - with pytest.raises(ValueError) as e: - layer_out_data().precision = "123" - assert "Unsupported precision 123! 
List of supported precisions:" in str(e.value) - - -def test_layout(): - assert layer_out_data().layout == "NC", "Incorrect layout for layer 'fc_out'" - - -def test_initialized(): - assert layer_out_data().initialized, "Incorrect value for initialized property for layer 'fc_out'" diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_ExecutableNetwork.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_ExecutableNetwork.py deleted file mode 100644 index 13a899520d5..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_ExecutableNetwork.py +++ /dev/null @@ -1,319 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import os -import pytest -import time - -from openvino.inference_engine import ie_api as ie -from tests_compatibility.conftest import model_path -from tests_compatibility.test_utils.test_utils import generate_image, generate_relu_model - - -test_net_xml, test_net_bin = model_path(False) - - -def test_infer(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device) - img = generate_image() - res = exec_net.infer({'parameter': img}) - assert np.argmax(res['relu'][0]) == 531 - del exec_net - del ie_core - - -def test_infer_wrong_input_name(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - img = generate_image() - with pytest.raises(AssertionError) as e: - exec_net.infer({'_data_': img}) - assert "No input with name _data_ found in network" in str(e.value) - del exec_net - del ie_core - - -def test_input_info(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device, num_requests=5) - assert isinstance(exec_net.input_info['data'], ie.InputInfoCPtr) - assert exec_net.input_info['data'].name == "data" - assert exec_net.input_info['data'].precision == "FP32" - assert isinstance(exec_net.input_info['data'].input_data, ie.DataPtr) - del exec_net - del ie_core - - -def test_outputs(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device, num_requests=5) - assert len(exec_net.outputs) == 1 - assert "fc_out" in exec_net.outputs - assert isinstance(exec_net.outputs['fc_out'], ie.CDataPtr) - del exec_net - del ie_core - - -def test_access_requests(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device, num_requests=5) - assert len(exec_net.requests) == 5 - assert isinstance(exec_net.requests[0], ie.InferRequest) - del exec_net - del ie_core - - -def test_async_infer_one_req(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request_handler = exec_net.start_async(request_id=0, inputs={'parameter': img}) - request_handler.wait() - res = request_handler.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - del exec_net - del ie_core - - -def test_async_infer_many_req(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=5) - img = generate_image() - for id in range(5): - request_handler = 
exec_net.start_async(request_id=id, inputs={'parameter': img}) - request_handler.wait() - res = request_handler.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - del exec_net - del ie_core - - -def test_async_infer_many_req_get_idle(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - num_requests = 5 - exec_net = ie_core.load_network(net, device, num_requests=num_requests) - img = generate_image() - check_id = set() - for id in range(2*num_requests): - request_id = exec_net.get_idle_request_id() - if request_id == -1: - status = exec_net.wait(num_requests=1, timeout=ie.WaitMode.RESULT_READY) - assert(status == ie.StatusCode.OK) - request_id = exec_net.get_idle_request_id() - assert(request_id >= 0) - request_handler = exec_net.start_async(request_id=request_id, inputs={'parameter': img}) - check_id.add(request_id) - status = exec_net.wait(timeout=ie.WaitMode.RESULT_READY) - assert status == ie.StatusCode.OK - for id in range(num_requests): - if id in check_id: - assert np.argmax(exec_net.requests[id].output_blobs['relu'].buffer) == 531 - del exec_net - del ie_core - - -def test_wait_before_start(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - num_requests = 5 - exec_net = ie_core.load_network(net, device, num_requests=num_requests) - img = generate_image() - requests = exec_net.requests - for id in range(num_requests): - status = requests[id].wait() - # Plugin API 2.0 has the different behavior will not return this status - # assert status == ie.StatusCode.INFER_NOT_STARTED - request_handler = exec_net.start_async(request_id=id, inputs={'parameter': img}) - status = requests[id].wait() - assert status == ie.StatusCode.OK - assert np.argmax(request_handler.output_blobs['relu'].buffer) == 531 - del exec_net - del ie_core - - -def test_wait_for_callback(device): - def callback(status, callbacks_info): - time.sleep(0.01) - callbacks_info['finished'] += 1 - - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - num_requests = 3 - exec_net = ie_core.load_network(net, device, num_requests=num_requests) - callbacks_info = {} - callbacks_info['finished'] = 0 - img = generate_image() - for request in exec_net.requests: - request.set_completion_callback(callback, callbacks_info) - request.async_infer({'data': img}) - - exec_net.wait(num_requests) - assert callbacks_info['finished'] == num_requests - - -def test_wrong_request_id(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - with pytest.raises(ValueError) as e: - exec_net.start_async(request_id=20, inputs={'data': img}) - assert "Incorrect request_id specified!" in str(e.value) - del exec_net - del ie_core - - -def test_wrong_num_requests(device): - with pytest.raises(ValueError) as e: - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - ie_core.load_network(net, device, num_requests=-1) - assert "Incorrect number of requests specified: -1. Expected positive integer number or zero for auto detection" \ - in str(e.value) - del ie_core - - -def test_wrong_num_requests_core(device): - with pytest.raises(ValueError) as e: - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device, num_requests=-1) - assert "Incorrect number of requests specified: -1. 
Expected positive integer number or zero for auto detection" \ - in str(e.value) - del ie_core - - -def test_plugin_accessible_after_deletion(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device) - img = generate_image() - res = exec_net.infer({'parameter': img}) - assert np.argmax(res['relu'][0]) == 531 - del exec_net - del ie_core - - -def test_exec_graph(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - img = generate_image() - res = exec_net.infer({'data': img}) - exec_graph = exec_net.get_exec_graph_info() - exec_graph_file = 'exec_graph.xml' - exec_graph.serialize(exec_graph_file) - assert os.path.exists(exec_graph_file) - os.remove(exec_graph_file) - del exec_net - del exec_graph - del ie_core - - -def test_export_import(device): - ie_core = ie.IECore() - if "EXPORT_IMPORT" not in ie_core.get_metric(device, "OPTIMIZATION_CAPABILITIES"): - pytest.skip(f"{ie_core.get_metric(device, 'FULL_DEVICE_NAME')} plugin due-to export, import model API isn't implemented.") - - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, "CPU") - exported_net_file = 'exported_model.bin' - exec_net.export(exported_net_file) - assert os.path.exists(exported_net_file) - exec_net = ie_core.import_network(exported_net_file, "CPU") - os.remove(exported_net_file) - img = generate_image() - res = exec_net.infer({'data': img}) - assert np.argmax(res['fc_out'][0]) == 9 - del exec_net - del ie_core - - -def test_multi_out_data(device): - # Regression test 23965 - # Check that CDataPtr for all output layers not copied between outputs map items - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - net.add_outputs(['28/Reshape']) - exec_net = ie_core.load_network(net, device) - assert "fc_out" in exec_net.outputs and "28/Reshape" in exec_net.outputs - assert isinstance(exec_net.outputs["fc_out"], ie.CDataPtr) - assert isinstance(exec_net.outputs["28/Reshape"], ie.CDataPtr) - assert exec_net.outputs["fc_out"].name == "fc_out" and exec_net.outputs["fc_out"].shape == [1, 10] - assert exec_net.outputs["28/Reshape"].name == "28/Reshape" and exec_net.outputs["28/Reshape"].shape == [1, 5184] - del ie_core - pass - - -def test_get_metric(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - network_name = exec_net.get_metric("NETWORK_NAME") - assert network_name == "test_model" - - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test") -def test_get_config(device): - ie_core = ie.IECore() - if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON": - pytest.skip("Can't run on ARM plugin due-to CPU dependent test") - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - config = exec_net.get_config("PERF_COUNT") - assert config == "NO" - - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "GNA", reason="Device dependent test") -def test_set_config(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - exec_net.set_config({"DEVICE_MODE" : "GNA_HW"}) - parameter = exec_net.get_config("DEVICE_MODE") - assert parameter == "GNA_HW" - 
exec_net.set_config({"DEVICE_MODE" : "GNA_SW_EXACT"}) - parameter = exec_net.get_config("DEVICE_MODE") - assert parameter == "GNA_SW_EXACT" - - -# issue 28996 -# checks that objects can deallocate in this order, if not - segfault happends -def test_input_info_deallocation(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - input_info = exec_net.input_info["data"] - del ie_core - del exec_net - del input_info - - -def test_outputs_deallocation(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - output = exec_net.outputs["fc_out"] - del ie_core - del exec_net - del output - - -def test_exec_graph_info_deallocation(device): - ie_core = ie.IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie_core.load_network(net, device) - exec_graph_info = exec_net.get_exec_graph_info() - del ie_core - del exec_net - del exec_graph_info diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_IECore.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_IECore.py deleted file mode 100644 index edda335c011..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_IECore.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import platform -import pytest -import sys -from pathlib import Path -from threading import Event, Thread -from time import sleep, time -from queue import Queue - -from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork -from tests_compatibility.conftest import model_path, plugins_path, model_onnx_path -import ngraph as ng - - -test_net_xml, test_net_bin = model_path() -test_net_onnx = model_onnx_path() -plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path() - - -def test_init_ie_core_no_cfg(): - ie = IECore() - assert isinstance(ie, IECore) - - -def test_init_ie_core_with_cfg(): - ie = IECore(plugins_xml) - assert isinstance(ie, IECore) - - -def test_get_version(device): - ie = IECore() - version = ie.get_versions(device) - assert isinstance(version, dict), "Returned version must be a dictionary" - assert device in version, "{} plugin version wasn't found in versions" - assert hasattr(version[device], "major"), "Returned version has no field 'major'" - assert hasattr(version[device], "minor"), "Returned version has no field 'minor'" - assert hasattr(version[device], "description"), "Returned version has no field 'description'" - assert hasattr(version[device], "build_number"), "Returned version has no field 'build_number'" - - -def test_load_network(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device) - assert isinstance(exec_net, ExecutableNetwork) - -def test_load_network_without_device(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net) - assert isinstance(exec_net, ExecutableNetwork) - -def test_load_network_from_file(device): - ie = IECore() - exec_net = ie.load_network(test_net_xml, device) - assert isinstance(exec_net, ExecutableNetwork) - -def test_load_network_from_file_without_device(): - ie = IECore() - exec_net = ie.load_network(test_net_xml) - assert isinstance(exec_net, ExecutableNetwork) - 
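The legacy `IECore.load_network` checks deleted above have counterparts built on `Core.read_model` and `Core.compile_model` in the Python API 2.0. The following is a rough sketch, not part of this diff; it reuses the `test_net_xml`/`test_net_bin` paths and the `device` fixture name from the deleted conftest purely for illustration.

```python
import openvino.runtime as ov


def test_compile_model(device):
    core = ov.Core()
    # read_model + compile_model replaces read_network + load_network.
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    assert isinstance(compiled, ov.CompiledModel)


def test_compile_model_from_file(device):
    core = ov.Core()
    # compile_model also accepts a model path directly, mirroring load_network(test_net_xml, device).
    compiled = core.compile_model(test_net_xml, device)
    assert isinstance(compiled, ov.CompiledModel)
```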
-@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test") -def test_load_network_wrong_device(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - with pytest.raises(RuntimeError) as e: - ie.load_network(net, "BLA") - assert 'Device with "BLA" name is not registered in the OpenVINO Runtime' in str(e.value) - - -def test_query_network(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - query_res = ie.query_network(net, device) - func_net = ng.function_from_cnn(net) - ops_net = func_net.get_ordered_ops() - ops_net_names = [op.friendly_name for op in ops_net] - assert [key for key in query_res.keys() if key not in ops_net_names] == [], \ - "Not all network layers present in query_network results" - assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers" - - -@pytest.mark.dynamic_library -def test_register_plugin(): - device = "TEST_DEVICE" - lib_name = "test_plugin" - full_lib_name = lib_name + ".dll" if sys.platform == "win32" else "lib" + lib_name + ".so" - - ie = IECore() - ie.register_plugin(lib_name, device) - with pytest.raises(RuntimeError) as e: - ie.get_versions(device) - assert f"Cannot load library '{full_lib_name}'" in str(e.value) - -@pytest.mark.dynamic_library -def test_register_plugins(): - device = "TEST_DEVICE" - lib_name = "test_plugin" - full_lib_name = lib_name + ".dll" if sys.platform == "win32" else "lib" + lib_name + ".so" - plugins_xml_path = os.path.join(os.getcwd(), "plugin_path.xml") - - plugin_xml = f""" - - - - - """ - - with open(plugins_xml_path, "w") as f: - f.write(plugin_xml) - - ie = IECore() - ie.register_plugins(plugins_xml_path) - os.remove(plugins_xml_path) - - with pytest.raises(RuntimeError) as e: - ie.get_versions(device) - assert f"Cannot load library '{full_lib_name}'" in str(e.value) - - -def test_unload_plugin(device): - ie = IECore() - # Trigger plugin loading - ie.get_versions(device) - # Unload plugin - ie.unregister_plugin(device) - - -def test_available_devices(device): - ie = IECore() - devices = ie.available_devices - assert device in devices, f"Current device '{device}' is not listed in available devices '{', '.join(devices)}'" - - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", - reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") -def test_get_metric_list_of_str(): - ie = IECore() - param = ie.get_metric("CPU", "OPTIMIZATION_CAPABILITIES") - assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \ - f"metric must be a list but {type(param)} is returned" - assert all(isinstance(v, str) for v in param), "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' " \ - "metric are strings!" - - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", - reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") -def test_get_metric_tuple_of_two_ints(): - ie = IECore() - if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON": - pytest.skip("Can't run on ARM plugin due-to unsupported device metric") - param = ie.get_metric("CPU", "RANGE_FOR_STREAMS") - assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \ - f"metric must be tuple but {type(param)} is returned" - assert all(isinstance(v, int) for v in param), "Not all of the parameter values for 'RANGE_FOR_STREAMS' " \ - "metric are integers!" 
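Similarly, the metric queries exercised in the deleted tests above map onto `Core.get_property` in the API 2.0. A hedged sketch, using the same property-name strings that appear in the legacy `get_metric` calls (the test name is illustrative):

```python
import openvino.runtime as ov


def test_device_properties():
    core = ov.Core()
    assert "CPU" in core.available_devices
    # OPTIMIZATION_CAPABILITIES and FULL_DEVICE_NAME are the same property names
    # queried by the legacy get_metric() calls above.
    capabilities = core.get_property("CPU", "OPTIMIZATION_CAPABILITIES")
    assert all(isinstance(item, str) for item in capabilities)
    assert isinstance(core.get_property("CPU", "FULL_DEVICE_NAME"), str)
```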
- - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", - reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") -def test_get_metric_tuple_of_three_ints(): - ie = IECore() - if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON": - pytest.skip("Can't run on ARM plugin due-to unsupported device metric") - param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS") - assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \ - f"metric must be tuple but {type(param)} is returned" - assert all(isinstance(v, int) for v in param), "Not all of the parameter values for " \ - "'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!" - - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", - reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") -def test_get_metric_str(): - ie = IECore() - param = ie.get_metric("CPU", "FULL_DEVICE_NAME") - assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \ - f"metric must be string but {type(param)} is returned" - - -def test_read_network_from_xml(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert isinstance(net, IENetwork) - - net = ie.read_network(model=test_net_xml) - assert isinstance(net, IENetwork) - - -def test_read_network_as_path(): - ie = IECore() - - net = ie.read_network(model=Path(test_net_xml), weights=test_net_bin) - assert isinstance(net, IENetwork) - - net = ie.read_network(model=test_net_xml, weights=Path(test_net_bin)) - assert isinstance(net, IENetwork) - - net = ie.read_network(model=Path(test_net_xml)) - assert isinstance(net, IENetwork) - - -def test_read_network_from_onnx(): - ie = IECore() - net = ie.read_network(model=test_net_onnx) - assert isinstance(net, IENetwork) - - -def test_read_network_from_onnx_as_path(): - ie = IECore() - net = ie.read_network(model=Path(test_net_onnx)) - assert isinstance(net, IENetwork) - - -def test_incorrect_xml(): - ie = IECore() - with pytest.raises(Exception) as e: - ie.read_network(model="./model.xml", weights=Path(test_net_bin)) - assert "Path to the model ./model.xml doesn't exist or it's a directory" in str(e.value) - - -def test_incorrect_bin(): - ie = IECore() - with pytest.raises(Exception) as e: - ie.read_network(model=test_net_xml, weights="./model.bin") - assert "Path to the weights ./model.bin doesn't exist or it's a directory" in str(e.value) - - -def test_read_net_from_buffer(): - ie = IECore() - with open(test_net_bin, 'rb') as f: - bin = f.read() - with open(model_path()[0], 'rb') as f: - xml = f.read() - net = ie.read_network(model=xml, weights=bin, init_from_buffer=True) - assert isinstance(net, IENetwork) - - -def test_net_from_buffer_valid(): - ie = IECore() - with open(test_net_bin, 'rb') as f: - bin = f.read() - with open(model_path()[0], 'rb') as f: - xml = f.read() - net = ie.read_network(model=xml, weights=bin, init_from_buffer=True) - ref_net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert net.name == ref_net.name - assert net.batch_size == ref_net.batch_size - ii_net = net.input_info - ii_net2 = ref_net.input_info - o_net = net.outputs - o_net2 = ref_net.outputs - assert ii_net.keys() == ii_net2.keys() - assert o_net.keys() == o_net2.keys() - - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE","CPU") != "GPU", reason=f"Device dependent test") -def test_load_network_release_gil(device): - running = True - message_queue = Queue() - def 
detect_long_gil_holds(): - sleep_time = 0.01 - latency_alert_threshold = 0.1 - # Send a message to indicate the thread is running and ready to detect GIL locks - message_queue.put("ready to detect") - while running: - start_sleep = time() - sleep(sleep_time) - elapsed = time() - start_sleep - if elapsed > latency_alert_threshold: - # Send a message to the testing thread that a long GIL lock occurred - message_queue.put(latency_alert_threshold) - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - # Wait for the GIL lock detector to be up and running - gil_hold_detection_thread = Thread(daemon=True, target=detect_long_gil_holds) - gil_hold_detection_thread.start() - # Wait to make sure the thread is started and checking for GIL holds - sleep(0.1) - assert message_queue.get(timeout=5) == "ready to detect" - # Run the function that should unlock the GIL - exec_net = ie.load_network(net, device) - # Ensure resources are closed - running = False - gil_hold_detection_thread.join(timeout=5) - # Assert there were never any long gil locks - assert message_queue.qsize() == 0, \ - f"More than 0 GIL locks occured! Latency: {message_queue.get()})" - - -def test_nogil_safe(device): - libc_name, libc_version = platform.libc_ver() - if libc_name == 'glibc': - version = tuple(int(x) for x in libc_version.split('.')) - if version < (2, 34): - pytest.skip("There is an issue in glibc for an older version.") - - call_thread_func = Event() - switch_interval = sys.getswitchinterval() - core = IECore() - net = core.read_network(model=test_net_xml, weights=test_net_bin) - - def thread_target(thread_func, thread_args): - call_thread_func.wait() - call_thread_func.clear() - thread_func(*thread_args) - - def main_thread_target(gil_release_func, args): - call_thread_func.set() - gil_release_func(*args) - - def test_run_parallel(gil_release_func, args, thread_func, thread_args): - thread = Thread(target=thread_target, args=[thread_func, thread_args]) - sys.setswitchinterval(1000) - thread.start() - main_thread_target(gil_release_func, args) - thread.join() - sys.setswitchinterval(switch_interval) - - main_targets = [{ - core.read_network: [test_net_xml, test_net_bin], - core.load_network: [net, device], - }, - { - core.load_network: [net, device], - }] - - thread_targets = [{ - core.get_versions: [device,], - core.read_network: [test_net_xml, test_net_bin], - core.load_network: [net, device], - core.query_network: [net, device], - getattr: [core, "available_devices"], - }, - { - getattr: [net, "name"], - getattr: [net, "input_info"], - getattr: [net, "outputs"], - getattr: [net, "batch_size"], - }] - - for main_target, custom_target in zip(main_targets, thread_targets): - for nogil_func, args in main_target.items(): - for thread_func, thread_args in custom_target.items(): - test_run_parallel(nogil_func, args, thread_func, thread_args) diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_IENetwork.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_IENetwork.py deleted file mode 100644 index 82040639671..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_IENetwork.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import pytest - -import ngraph as ng -from openvino.inference_engine import IECore, DataPtr, InputInfoPtr, PreProcessInfo -from tests_compatibility.conftest import model_path - - -test_net_xml, test_net_bin = 
model_path() - - -def test_name(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert net.name == "test_model" - - -def test_input_info(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert isinstance(net.input_info['data'], InputInfoPtr) - assert net.input_info['data'].layout == "NCHW" - assert net.input_info['data'].precision == "FP32" - assert isinstance(net.input_info['data'].input_data, DataPtr) - assert isinstance(net.input_info['data'].preprocess_info, PreProcessInfo) - - -def test_input_info_precision_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert net.input_info['data'].layout == "NCHW" - net.input_info['data'].layout = "NHWC" - assert net.input_info['data'].layout == "NHWC" - - -def test_input_input_info_layout_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert net.input_info['data'].precision == "FP32" - net.input_info['data'].precision = "I8" - assert net.input_info['data'].precision == "I8" - - -def test_input_unsupported_precision_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - with pytest.raises(ValueError) as e: - net.input_info['data'].precision = "BLA" - assert "Unsupported precision BLA! List of supported precisions: " in str(e.value) - - -def test_input_unsupported_layout_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - with pytest.raises(ValueError) as e: - net.input_info['data'].layout = "BLA" - assert "Unsupported layout BLA! List of supported layouts: " in str(e.value) - - -def test_outputs(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert isinstance(net.outputs['fc_out'], DataPtr) - assert net.outputs['fc_out'].layout == "NC" - assert net.outputs['fc_out'].precision == "FP32" - assert net.outputs['fc_out'].shape == [1, 10] - - -def test_output_precision_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert net.outputs['fc_out'].precision == "FP32" - net.outputs['fc_out'].precision = "I8" - assert net.outputs['fc_out'].precision == "I8" - - -def test_output_unsupported_precision_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - with pytest.raises(ValueError) as e: - net.outputs['fc_out'].precision = "BLA" - assert "Unsupported precision BLA! 
List of supported precisions: " in str(e.value) - - -def test_add_ouputs(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.add_outputs('28/Reshape') - net.add_outputs(['29/WithoutBiases']) - assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out'] - - -def test_add_outputs_with_port(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.add_outputs(('28/Reshape', 0)) - net.add_outputs([('29/WithoutBiases', 0)]) - assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out'] - - -def test_add_outputs_with_and_without_port(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.add_outputs('28/Reshape') - net.add_outputs([('29/WithoutBiases', 0)]) - assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out'] - - -def test_batch_size_getter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert net.batch_size == 1 - - -def test_batch_size_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.batch_size = 4 - assert net.batch_size == 4 - assert net.input_info['data'].input_data.shape == [4, 3, 32, 32] - - -def test_batch_size_after_reshape(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.reshape({'data': [4, 3, 32, 32]}) - assert net.batch_size == 4 - assert net.input_info['data'].input_data.shape == [4, 3, 32, 32] - net.reshape({'data': [8, 3, 32, 32]}) - assert net.batch_size == 8 - assert net.input_info['data'].input_data.shape == [8, 3, 32, 32] - - -def test_serialize(): - def run(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.serialize("./serialized_net.xml", "./serialized_net.bin") - serialized_net = ie.read_network(model="./serialized_net.xml", weights="./serialized_net.bin") - func_net = ng.function_from_cnn(net) - ops_net = func_net.get_ordered_ops() - ops_net_names = [op.friendly_name for op in ops_net] - func_serialized_net = ng.function_from_cnn(serialized_net) - ops_serialized_net = func_serialized_net.get_ordered_ops() - ops_serialized_net_names = [op.friendly_name for op in ops_serialized_net] - assert ops_serialized_net_names == ops_net_names - - run() - # xml/bin files shall not be acquired after by 'net' here, can be removed - os.remove("./serialized_net.xml") - os.remove("./serialized_net.bin") - - -def test_reshape(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.reshape({"data": (2, 3, 32, 32)}) - assert net.input_info["data"].input_data.shape == [2, 3, 32, 32] - - -def test_reshape_dynamic(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - with pytest.raises(ValueError) as e: - net.reshape({"data": (-1, 3, 32, 32)}) - assert "Detected dynamic dimension in the shape (-1, 3, 32, 32) of the `data` input" in str(e.value) - - -def test_net_from_buffer_valid(): - ie = IECore() - with open(test_net_bin, 'rb') as f: - bin = f.read() - with open(model_path()[0], 'rb') as f: - xml = f.read() - net = ie.read_network(model=xml, weights=bin, init_from_buffer=True) - ref_net = ie.read_network(model=test_net_xml, weights=test_net_bin) - assert net.name == ref_net.name - assert net.batch_size == ref_net.batch_size - ii_net = net.input_info - ii_net2 = ref_net.input_info - o_net = net.outputs - o_net2 = ref_net.outputs - assert ii_net.keys() == ii_net2.keys() - assert o_net.keys() == 
o_net2.keys() - - -def test_multi_out_data(): - # Regression test 23965 - # Check that DataPtr for all output layers not copied between outputs map items - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - net.add_outputs(['28/Reshape']) - assert "28/Reshape" in net.outputs and "fc_out" in net.outputs - assert isinstance(net.outputs["28/Reshape"], DataPtr) - assert isinstance(net.outputs["fc_out"], DataPtr) - assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184] - assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10] - pass - - -def test_tensor_names(): - model = """ - - - - - - - 1 - 3 - 22 - 22 - - - - - - - 1 - 3 - 22 - 22 - - - - - 1 - 3 - 22 - 22 - - - - - - - 1 - 3 - 22 - 22 - - - - - - - - - - """ - ie = IECore() - weights = b'' - net = ie.read_network(model=model.encode('utf-8'), weights=weights, init_from_buffer=True) - assert net.get_ov_name_for_tensor("relu_t") == "activation" - assert net.get_ov_name_for_tensor("identity_t") == "activation" - assert net.get_ov_name_for_tensor("input") == "in1" diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_InferRequest.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_InferRequest.py deleted file mode 100644 index 1eedac6a5f3..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_InferRequest.py +++ /dev/null @@ -1,517 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import os -import pytest -import threading -from datetime import datetime -import time - -from openvino.inference_engine import ie_api as ie -from tests_compatibility.conftest import model_path, create_encoder -from tests_compatibility.test_utils.test_utils import generate_image, generate_relu_model -import ngraph as ng -from ngraph.impl import Function, Type - -test_net_xml, test_net_bin = model_path() - - -def create_function_with_memory(input_shape, data_type): - input_data = ng.parameter(input_shape, name="input_data", dtype=data_type) - init_val = ng.constant(np.zeros(input_shape), data_type) - rv = ng.read_value(init_val, "var_id_667") - add = ng.add(rv, input_data, name="MemoryAdd") - node = ng.assign(add, "var_id_667") - res = ng.result(add, "res") - func = Function(results=[res], sinks=[node], parameters=[input_data], name="name") - caps = Function.to_capsule(func) - return caps - - -def load_sample_model(device, num_requests=1): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=num_requests) - return executable_network - - -def test_input_blobs(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=2) - td = ie.TensorDesc("FP32", (1, 3, 32, 32), "NCHW") - assert executable_network.requests[0].input_blobs['data'].tensor_desc == td - - -def test_output_blobs(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=2) - td = ie.TensorDesc("FP32", (1, 10), "NC") - assert executable_network.requests[0].output_blobs['fc_out'].tensor_desc == td - - -def test_inputs_list(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, 
device, num_requests=2) - - for req in executable_network.requests: - assert len(req._inputs_list) == 1 - assert "data" in req._inputs_list - del ie_core - - -def test_outputs_list(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=2) - - for req in executable_network.requests: - assert len(req._outputs_list) == 1 - assert "fc_out" in req._outputs_list - del ie_core - - -def test_access_input_buffer(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=1) - buffer = executable_network.requests[0]._get_blob_buffer("data".encode()).to_numpy() - assert buffer.shape == (1, 3, 32, 32) - assert buffer.strides == (12288, 4096, 128, 4) - assert buffer.dtype == np.float32 - del executable_network - del ie_core - del net - - -def test_access_output_buffer(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=1) - buffer = executable_network.requests[0]._get_blob_buffer("fc_out".encode()).to_numpy() - assert buffer.shape == (1, 10) - assert buffer.strides == (40, 4) - assert buffer.dtype == np.float32 - del executable_network - del ie_core - del net - - -def test_write_to_input_blobs_directly(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = executable_network.requests[0] - input_data = request.input_blobs["data"] - input_data.buffer[:] = img - assert np.array_equal(executable_network.requests[0].input_blobs["data"].buffer, img) - del executable_network - del ie_core - del net - - -def test_write_to_input_blobs_copy(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - executable_network = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = executable_network.requests[0] - request.input_blobs["data"].buffer[:] = img - assert np.allclose(executable_network.requests[0].input_blobs["data"].buffer, img) - del executable_network - del ie_core - del net - - -def test_infer(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - request.infer({'parameter': img}) - res = request.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - del exec_net - del ie_core - del net - - -def test_async_infer_default_timeout(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - request.async_infer({'parameter': img}) - request.wait() - res = request.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - del exec_net - del ie_core - del net - - -def test_async_infer_wait_finish(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - request.async_infer({'parameter': img}) - request.wait(ie.WaitMode.RESULT_READY) - res = request.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - del exec_net - del ie_core - del net - - -def 
test_async_infer_wait_time(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=2) - img = generate_image() - request = exec_net.requests[0] - request.async_infer({'parameter': img}) - start_time = datetime.utcnow() - status = request.wait(ie.WaitMode.RESULT_READY) - assert status == ie.StatusCode.OK - time_delta = datetime.utcnow() - start_time - latency_ms = (time_delta.microseconds / 1000) + (time_delta.seconds * 1000) - timeout = max(100, latency_ms) - request = exec_net.requests[1] - request.async_infer({'parameter': img}) - max_repeat = 10 - status = ie.StatusCode.REQUEST_BUSY - i = 0 - while i < max_repeat and status != ie.StatusCode.OK: - status = request.wait(timeout) - i += 1 - assert status == ie.StatusCode.OK - res = request.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - del exec_net - del ie_core - del net - - -def test_async_infer_wait_status(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - request.async_infer({'parameter': img}) - request.wait(ie.WaitMode.RESULT_READY) - res = request.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - status = request.wait(ie.WaitMode.STATUS_ONLY) - assert status == ie.StatusCode.OK - del exec_net - del ie_core - del net - - -def test_async_infer_fill_inputs(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - request.input_blobs['parameter'].buffer[:] = img - request.async_infer() - status_end = request.wait() - assert status_end == ie.StatusCode.OK - res = request.output_blobs['relu'].buffer - assert np.argmax(res[0]) == 531 - del exec_net - del ie_core - del net - - -def test_infer_modify_outputs(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - outputs0 = exec_net.infer({'parameter': img}) - status_end = request.wait() - assert status_end == ie.StatusCode.OK - assert np.argmax(outputs0['relu']) == 531 - outputs0['relu'][:] = np.zeros(shape=(1, 3, 32, 32), dtype=np.float32) - outputs1 = request.output_blobs - assert np.argmax(outputs1['relu'].buffer) == 531 - outputs1['relu'].buffer[:] = np.ones(shape=(1, 3, 32, 32), dtype=np.float32) - outputs2 = request.output_blobs - assert np.argmax(outputs2['relu'].buffer) == 531 - del exec_net - del ie_core - del net - - -def test_async_infer_callback(device): - def static_vars(**kwargs): - def decorate(func): - for k in kwargs: - setattr(func, k, kwargs[k]) - return func - - return decorate - - @static_vars(callback_called=0) - def callback(self, status): - callback.callback_called = 1 - - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - request.set_completion_callback(callback) - request.async_infer({'parameter': img}) - status = request.wait() - assert status == ie.StatusCode.OK - res = request.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - assert callback.callback_called == 1 - del exec_net - del ie_core - - -def test_async_infer_callback_wait_before_start(device): - def static_vars(**kwargs): - def 
decorate(func): - for k in kwargs: - setattr(func, k, kwargs[k]) - return func - return decorate - - @static_vars(callback_called=0) - def callback(self, status): - callback.callback_called = 1 - - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request = exec_net.requests[0] - request.set_completion_callback(callback) - status = request.wait() - # Plugin API 2.0 has the different behavior will not return this status - # assert status == ie.StatusCode.INFER_NOT_STARTED - request.async_infer({'parameter': img}) - status = request.wait() - assert status == ie.StatusCode.OK - res = request.output_blobs['relu'].buffer - assert np.argmax(res) == 531 - assert callback.callback_called == 1 - del exec_net - del ie_core - - -def test_async_infer_callback_wait_in_callback(device): - class InferReqWrap: - def __init__(self, request): - self.request = request - self.cv = threading.Condition() - self.request.set_completion_callback(self.callback) - self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY) - # Plugin API 2.0 has the different behavior will not return this status - # assert self.status_code == ie.StatusCode.INFER_NOT_STARTED - - def callback(self, statusCode, userdata): - self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY) - self.cv.acquire() - self.cv.notify() - self.cv.release() - - def execute(self, input_data): - self.request.async_infer(input_data) - self.cv.acquire() - self.cv.wait() - self.cv.release() - status = self.request.wait(ie.WaitMode.RESULT_READY) - assert status == ie.StatusCode.OK - assert self.status_code == ie.StatusCode.RESULT_NOT_READY - - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device, num_requests=1) - img = generate_image() - request_wrap = InferReqWrap(exec_net.requests[0]) - request_wrap.execute({'data': img}) - del exec_net - del ie_core - - -def test_async_infer_wait_while_callback_will_not_finish(device): - def callback(status, callback_status): - time.sleep(0.01) - callback_status['finished'] = True - - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device, num_requests=1) - callback_status = {} - callback_status['finished'] = False - request = exec_net.requests[0] - request.set_completion_callback(callback, py_data=callback_status) - img = generate_image() - request.async_infer({'data': img}) - request.wait() - assert callback_status['finished'] == True - - -def test_get_perf_counts(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - ie_core.set_config({"PERF_COUNT": "YES"}, device) - exec_net = ie_core.load_network(net, device) - img = generate_image() - request = exec_net.requests[0] - request.infer({'data': img}) - pc = request.get_perf_counts() - assert pc['29/WithoutBiases']["status"] == "EXECUTED" - del exec_net - del ie_core - del net - - -def test_blob_setter(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1) - - net.input_info['data'].layout = "NHWC" - exec_net_2 = ie_core.load_network(network=net, device_name=device, num_requests=1) - - img = generate_image() - res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out']) - - img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32) - tensor_desc = 
ie.TensorDesc("FP32", [1, 3, 32, 32], "NHWC") - img_blob = ie.Blob(tensor_desc, img) - request = exec_net_2.requests[0] - request.set_blob('data', img_blob) - request.infer() - res_2 = np.sort(request.output_blobs['fc_out'].buffer) - assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2) - - -def test_getting_preprocess(device): - ie_core = ie.IECore() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1) - request = exec_net.requests[0] - preprocess_info = request.preprocess_info["data"] - assert isinstance(preprocess_info, ie.PreProcessInfo) - assert preprocess_info.mean_variant == ie.MeanVariant.NONE - - -def test_resize_algorithm_work(device): - ie_core = ie.IECore() - net = generate_relu_model([1, 3, 32, 32]) - exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1) - - img = generate_image() - res_1 = np.sort(exec_net_1.infer({"parameter": img})['relu']) - - net.input_info['parameter'].preprocess_info.resize_algorithm = ie.ResizeAlgorithm.RESIZE_BILINEAR - - exec_net_2 = ie_core.load_network(net, device) - - tensor_desc = ie.TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW") - img_blob = ie.Blob(tensor_desc, img) - request = exec_net_2.requests[0] - assert request.preprocess_info["parameter"].resize_algorithm == ie.ResizeAlgorithm.RESIZE_BILINEAR - request.set_blob('parameter', img_blob) - request.infer() - res_2 = np.sort(request.output_blobs['relu'].buffer) - - assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2) - - -@pytest.mark.parametrize("mode", ["set_init_memory_state", "reset_memory_state", "normal"]) -@pytest.mark.parametrize("data_type", ["FP32", "FP16", "I32"]) -@pytest.mark.parametrize("input_shape", [[10], [10, 10], [10, 10, 10], [2, 10, 10, 10]]) -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", - reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, " - "Memory layers fully supported only on CPU") -def test_query_state_write_buffer(device, input_shape, data_type, mode): - ie_core = ie.IECore() - - layout = ["C", "HW", "CHW", "NCHW"] - - from openvino.inference_engine import TensorDesc, Blob, format_map - - net = ie.IENetwork(create_function_with_memory(input_shape, format_map[data_type])) - ie_core = ie.IECore() - exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1) - request = exec_net.requests[0] - mem_states = request.query_state() - mem_state = mem_states[0] - - assert mem_state.name == 'var_id_667' - # todo: Uncomment after fix 45611, - # CPU plugin returns outputs and memory state in FP32 in case of FP16 original precision - #assert mem_state.state.tensor_desc.precision == data_type - - for i in range(1, 10): - if mode == "set_init_memory_state": - # create initial value - const_init = 5 - init_array = np.full(input_shape, const_init, dtype=format_map[mem_state.state.tensor_desc.precision]) - tensor_desc = TensorDesc(mem_state.state.tensor_desc.precision, input_shape, layout[len(input_shape) - 1]) - blob = Blob(tensor_desc, init_array) - mem_state.state = blob - - res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])}) - expected_res = np.full(input_shape, 1 + const_init, dtype=format_map[data_type]) - elif mode == "reset_memory_state": - # reset initial state of ReadValue to zero - mem_state.reset() - res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])}) - - # always ones - expected_res = 
np.full(input_shape, 1, dtype=format_map[data_type]) - else: - res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])}) - expected_res = np.full(input_shape, i, dtype=format_map[data_type]) - - assert np.allclose(res['MemoryAdd'], expected_res, atol=1e-6), \ - "Expected values: {} \n Actual values: {} \n".format(expected_res, res) - - -def test_set_blob_with_incorrect_name(device): - function = create_encoder([4, 4, 20, 20]) - net = ng.function_to_cnn(function) - ie_core = ie.IECore() - exec_net = ie_core.load_network(net, device) - tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc - tensor_desc.dims = [4, 4, 20, 20] - blob = ie.Blob(tensor_desc) - with pytest.raises(RuntimeError) as e: - exec_net.requests[0].set_blob("incorrect_name", blob) - assert f"Failed to find input or output with name: 'incorrect_name'" in str(e.value) - - -def test_set_blob_with_incorrect_size(device): - function = create_encoder([4, 4, 20, 20]) - net = ng.function_to_cnn(function) - ie_core = ie.IECore() - exec_net = ie_core.load_network(net, device) - tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc - tensor_desc.dims = [tensor_desc.dims[0]*2, 4, 20, 20] - blob = ie.Blob(tensor_desc) - with pytest.raises(RuntimeError) as e: - exec_net.requests[0].set_blob("data", blob) - assert f"Can't set the input tensor" in str(e.value) - with pytest.raises(RuntimeError) as e: - exec_net.requests[0].set_blob("out", blob) - assert f"Can't set the output tensor" in str(e.value) diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_InputInfoCPtr.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_InputInfoCPtr.py deleted file mode 100644 index a34598914db..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_InputInfoCPtr.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from openvino.inference_engine import InputInfoCPtr, DataPtr, IECore, TensorDesc -from tests_compatibility.conftest import model_path - - -test_net_xml, test_net_bin = model_path() - - -def test_name(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - assert isinstance(exec_net.input_info['data'], InputInfoCPtr) - assert exec_net.input_info['data'].name == "data", "Incorrect name" - del ie - del exec_net - - -def test_precision(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - assert isinstance(exec_net.input_info['data'], InputInfoCPtr) - assert exec_net.input_info['data'].precision == "FP32", "Incorrect precision" - del ie - del exec_net - - -def test_no_precision_setter(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - with pytest.raises(AttributeError) as e: - exec_net.input_info['data'].precision = "I8" - assert "attribute 'precision' of 'openvino.inference_engine.ie_api.InputInfoCPtr' " \ - "objects is not writable" in str(e.value) - del ie - del exec_net - - -def test_input_data(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - assert isinstance(exec_net.input_info['data'], InputInfoCPtr) - assert 
isinstance(exec_net.input_info['data'].input_data, DataPtr), "Incorrect precision for layer 'fc_out'" - del ie - del exec_net - - -# issue 28996 -# checks that objects can deallocate in this order, if not - segfault happends -def test_input_data_deallocation(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device) - input_info = exec_net.input_info['data'] - input_data = input_info.input_data - del ie - del exec_net - del input_info - del input_data - - -def test_tensor_desc(device): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device, num_requests=5) - tensor_desc = exec_net.input_info['data'].tensor_desc - assert isinstance(tensor_desc, TensorDesc) - assert tensor_desc.layout == "NCHW" diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_InputInfoPtr.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_InputInfoPtr.py deleted file mode 100644 index f5d1f6efba7..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_InputInfoPtr.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from openvino.inference_engine import InputInfoPtr, PreProcessInfo, DataPtr, IECore, TensorDesc, ColorFormat -from tests_compatibility.conftest import model_path - - -test_net_xml, test_net_bin = model_path() - - -def get_input_info(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - return net.input_info["data"] - - -def test_input_info(): - assert isinstance(get_input_info(), InputInfoPtr) - - -def test_input_data(): - assert isinstance(get_input_info().input_data, DataPtr) - - -def test_input_data_setter(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - input_info = net.input_info["data"] - other_input_data = net.outputs["fc_out"] - input_info.input_data = other_input_data - assert input_info.input_data.name == "fc_out" - - -def test_incorrect_input_info_setter(): - with pytest.raises(TypeError) as e: - get_input_info().input_data = "dfds" - assert "Argument 'input_ptr' has incorrect type" in str(e.value) - - -def test_name(): - assert get_input_info().name == "data" - - -def test_precision(): - assert get_input_info().precision == "FP32" - - -def test_precision_setter(): - input_info = get_input_info() - input_info.precision = "I8" - assert input_info.precision == "I8", "Incorrect precision" - - -def test_incorrect_precision_setter(): - with pytest.raises(ValueError) as e: - get_input_info().precision = "123" - assert "Unsupported precision 123! List of supported precisions:" in str(e.value) - - -def test_layout(): - assert get_input_info().layout == "NCHW" - - -def test_layout_setter(): - input_info = get_input_info() - input_info.layout = "NHWC" - assert input_info.layout == "NHWC", "Incorrect layout" - - -def test_incorrect_layout_setter(): - with pytest.raises(ValueError) as e: - get_input_info().layout = "123" - assert "Unsupported layout 123! 
List of supported layouts:" in str(e.value) - - -def test_preprocess_info(): - input_info = get_input_info() - preprocess_info = input_info.preprocess_info - assert isinstance(preprocess_info, PreProcessInfo) - assert preprocess_info.color_format == ColorFormat.RAW - - -def test_tensor_desc(): - tensor_desc = get_input_info().tensor_desc - assert isinstance(tensor_desc, TensorDesc) - assert tensor_desc.layout == "NCHW" diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_NGraph.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_NGraph.py deleted file mode 100644 index 2096909f6c6..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_NGraph.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.inference_engine import IECore, IENetwork -import ngraph as ng -from ngraph.impl import Function - -from tests_compatibility.conftest import model_path, create_relu - - -test_net_xml, test_net_bin = model_path() - - -def test_create_IENetwork_from_nGraph(): - func = create_relu([1, 3, 22, 22]) - caps = Function.to_capsule(func) - cnnNetwork = IENetwork(caps) - assert cnnNetwork != None - func2 = ng.function_from_cnn(cnnNetwork) - assert func2 != None - assert len(func2.get_ops()) == 3 - - -def test_get_IENetwork_from_nGraph(): - func = create_relu([1, 3, 22, 22]) - caps = Function.to_capsule(func) - cnnNetwork = IENetwork(caps) - assert cnnNetwork != None - assert ng.function_from_cnn(cnnNetwork) != None - func2 = ng.function_from_cnn(cnnNetwork) - assert func2 != None - - -def test_get_ops_from_IENetwork(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - func = ng.function_from_cnn(net) - ops = func.get_ordered_ops() - ops_names = [op.friendly_name for op in ops] - assert len(ops_names) != 0 - assert 'data' in ops_names - - -def test_get_type_name(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - func = ng.function_from_cnn(net) - ops = func.get_ordered_ops() - assert ops[2].get_type_name() == "Convolution" - - -def test_getting_shapes(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - func = ng.function_from_cnn(net) - ops = func.get_ordered_ops() - shapes = [sh for sh in ops[2].shape] - assert shapes == [1, 16, 32, 32] - - -def test_get_set_rt_info(): - ie = IECore() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - func = ng.function_from_cnn(net) - ops = func.get_ordered_ops() - rt_info = ops[14].get_rt_info() - rt_info["affinity"] = "test_affinity" - assert ops[14].get_rt_info()["affinity"] == "test_affinity" diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_PreProcessInfo.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_PreProcessInfo.py deleted file mode 100644 index 32b30e8cdf0..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_PreProcessInfo.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from openvino.inference_engine import PreProcessInfo, IECore, TensorDesc, Blob, PreProcessChannel,\ - MeanVariant, ResizeAlgorithm, ColorFormat -from tests_compatibility.conftest import model_path - - -test_net_xml, test_net_bin = model_path() - - -def test_preprocess_info(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, 
weights=test_net_bin) - assert isinstance(net.input_info["data"].preprocess_info, PreProcessInfo) - - -def test_color_format(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - assert preprocess_info.color_format == ColorFormat.RAW - - -def test_color_format_setter(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - preprocess_info.color_format = ColorFormat.BGR - assert preprocess_info.color_format == ColorFormat.BGR - - -def test_resize_algorithm(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - assert preprocess_info.resize_algorithm == ResizeAlgorithm.NO_RESIZE - - -def test_resize_algorithm_setter(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR - assert preprocess_info.resize_algorithm == ResizeAlgorithm.RESIZE_BILINEAR - - -def test_mean_variant(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - assert preprocess_info.mean_variant == MeanVariant.NONE - - -def test_mean_variant_setter(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - preprocess_info.mean_variant = MeanVariant.MEAN_IMAGE - assert preprocess_info.mean_variant == MeanVariant.MEAN_IMAGE - - -def test_get_number_of_channels(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert net.input_info["data"].preprocess_info.get_number_of_channels() == 0 - - -def test_init(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - net.input_info['data'].preprocess_info.init(5) - assert net.input_info["data"].preprocess_info.get_number_of_channels() == 5 - - -def test_set_mean_image(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - tensor_desc = TensorDesc("FP32", [0, 127, 127], "CHW") - mean_image_blob = Blob(tensor_desc) - preprocess_info = net.input_info["data"].preprocess_info - preprocess_info.set_mean_image(mean_image_blob) - assert preprocess_info.mean_variant == MeanVariant.MEAN_IMAGE - - -def test_get_pre_process_channel(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - preprocess_info.init(1) - pre_process_channel = preprocess_info[0] - assert isinstance(pre_process_channel, PreProcessChannel) - - -def test_set_mean_image_for_channel(): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - tensor_desc = TensorDesc("FP32", [127, 127], "HW") - mean_image_blob = Blob(tensor_desc) - preprocess_info = net.input_info["data"].preprocess_info - preprocess_info.init(1) - preprocess_info.set_mean_image_for_channel(mean_image_blob, 0) - pre_process_channel = preprocess_info[0] - assert isinstance(pre_process_channel.mean_data, Blob) - assert pre_process_channel.mean_data.tensor_desc.dims == [127, 127] - assert preprocess_info.mean_variant == MeanVariant.MEAN_IMAGE - - -def 
test_resize_algorithm_set(device): - ie_core = IECore() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - preprocess_info = net.input_info["data"].preprocess_info - preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR - exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1) - request = exec_net.requests[0] - pp = request.preprocess_info["data"] - assert pp.resize_algorithm == ResizeAlgorithm.RESIZE_BILINEAR - - -def test_set_mean_variant_to_read_only_preprocess(device): - ie_core = IECore() - net = ie_core.read_network(test_net_xml) - exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1) - request = exec_net.requests[0] - preprocess_info = request.preprocess_info["data"] - assert isinstance(preprocess_info, PreProcessInfo) - with pytest.raises(TypeError) as e: - preprocess_info.mean_variant = MeanVariant.MEAN_IMAGE - assert "Cannot set mean image when called from constant" in str(e.value) - - -def test_set_resize_algorithm_to_read_only_preprocess(device): - ie_core = IECore() - net = ie_core.read_network(test_net_xml) - exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1) - request = exec_net.requests[0] - preprocess_info = request.preprocess_info["data"] - assert isinstance(preprocess_info, PreProcessInfo) - with pytest.raises(TypeError) as e: - preprocess_info.resize_algorithm = ResizeAlgorithm.RESIZE_BILINEAR - assert "Cannot set resize algorithm when called from constant" in str(e.value) - - -def test_set_color_format_to_read_only_preprocess(device): - ie_core = IECore() - net = ie_core.read_network(test_net_xml) - exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1) - request = exec_net.requests[0] - preprocess_info = request.preprocess_info["data"] - assert isinstance(preprocess_info, PreProcessInfo) - with pytest.raises(TypeError) as e: - preprocess_info.color_format = ColorFormat.BGR - assert "Cannot set color format when called from constant" in str(e.value) diff --git a/src/bindings/python/tests_compatibility/test_inference_engine/test_TensorDesc.py b/src/bindings/python/tests_compatibility/test_inference_engine/test_TensorDesc.py deleted file mode 100644 index 76ba71c5398..00000000000 --- a/src/bindings/python/tests_compatibility/test_inference_engine/test_TensorDesc.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from openvino.inference_engine import TensorDesc - - -def test_init(): - tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC") - assert isinstance(tensor_desc, TensorDesc) - - -def test_precision(): - tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC") - assert tensor_desc.precision == "FP32" - - -def test_layout(): - tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC") - assert tensor_desc.layout == "NHWC" - - -def test_dims(): - tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC") - assert tensor_desc.dims == [1, 127, 127, 3] - - -def test_incorrect_precision_setter(): - tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC") - with pytest.raises(ValueError) as e: - tensor_desc.precision = "123" - assert "Unsupported precision 123! List of supported precisions:" in str(e.value) - - -def test_incorrect_layout_setter(): - tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC") - with pytest.raises(ValueError) as e: - tensor_desc.layout = "123" - assert "Unsupported layout 123! 
List of supported layouts: " in str(e.value) - - -def test_init_incorrect_precision(): - with pytest.raises(ValueError) as e: - TensorDesc("123", [1, 127, 127, 3], "NHWC") - assert "Unsupported precision 123! List of supported precisions: " in str(e.value) - - -def test_eq_operator(): - tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NHWC") - tensor_desc_2 = TensorDesc("FP32", [1, 3, 127, 127], "NHWC") - assert tensor_desc == tensor_desc_2 - - -def test_ne_operator(): - tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NHWC") - tensor_desc_2 = TensorDesc("FP32", [1, 3, 127, 127], "NCHW") - assert tensor_desc != tensor_desc_2 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/__init__.py b/src/bindings/python/tests_compatibility/test_ngraph/__init__.py deleted file mode 100644 index 83f1374cd29..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# openvino.dll directory path visibility is needed to use _pyngraph module -# import below causes adding this path to os.environ["PATH"] -import ngraph # noqa: F401 'imported but unused' diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py b/src/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py deleted file mode 100644 index 8543fe55a51..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import numpy as np - -from ngraph.impl import Type - -def test_adaptive_avg_pool(): - input_parameter = ng.parameter((2, 3, 7), name="input_data", dtype=np.float32) - output_shape = ng.constant(np.array([3], dtype=np.int32)) - - adaptive_pool_node = ng.adaptive_avg_pool(input_parameter, output_shape) - assert adaptive_pool_node.get_type_name() == "AdaptiveAvgPool" - assert adaptive_pool_node.get_output_size() == 1 - assert adaptive_pool_node.get_output_element_type(0) == Type.f32 - assert list(adaptive_pool_node.get_output_shape(0)) == [2, 3, 3] - - -def test_adaptive_max_pool(): - input_parameter = ng.parameter((2, 3, 7), name="input_data", dtype=np.float32) - output_shape = ng.constant(np.array([3], dtype=np.int32)) - - adaptive_pool_node = ng.adaptive_max_pool(input_parameter, output_shape) - assert adaptive_pool_node.get_type_name() == "AdaptiveMaxPool" - assert adaptive_pool_node.get_output_size() == 2 - assert adaptive_pool_node.get_output_element_type(0) == Type.f32 - assert adaptive_pool_node.get_output_element_type(1) == Type.i64 - assert list(adaptive_pool_node.get_output_shape(0)) == [2, 3, 3] - assert list(adaptive_pool_node.get_output_shape(1)) == [2, 3, 3] diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_basic.py b/src/bindings/python/tests_compatibility/test_ngraph/test_basic.py deleted file mode 100644 index 5acc1a29fd2..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_basic.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -import ngraph as ng -from ngraph.impl import Function, PartialShape, Shape, Type -from ngraph.impl.op import Parameter -from ngraph.utils.types import get_element_type - - -def test_ngraph_function_api(): - shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=np.float32, 
name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") - parameter_c = ng.parameter(shape, dtype=np.float32, name="C") - model = (parameter_a + parameter_b) * parameter_c - function = Function(model, [parameter_a, parameter_b, parameter_c], "TestFunction") - - function.get_parameters()[1].set_partial_shape(PartialShape([3, 4, 5])) - - ordered_ops = function.get_ordered_ops() - op_types = [op.get_type_name() for op in ordered_ops] - assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"] - assert len(function.get_ops()) == 6 - assert function.get_output_size() == 1 - assert function.get_output_op(0).get_type_name() == "Result" - assert function.get_output_element_type(0) == parameter_a.get_element_type() - assert list(function.get_output_shape(0)) == [2, 2] - assert (function.get_parameters()[1].get_partial_shape()) == PartialShape([3, 4, 5]) - assert len(function.get_parameters()) == 3 - assert len(function.get_results()) == 1 - assert function.get_friendly_name() == "TestFunction" - - -@pytest.mark.parametrize( - "dtype", - [ - np.float32, - np.float64, - np.int8, - np.int16, - np.int32, - np.int64, - np.uint8, - np.uint16, - np.uint32, - np.uint64, - ], -) -def test_simple_computation_on_ndarrays(dtype): - shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=dtype, name="A") - parameter_b = ng.parameter(shape, dtype=dtype, name="B") - parameter_c = ng.parameter(shape, dtype=dtype, name="C") - model = (parameter_a + parameter_b) * parameter_c - assert model.get_type_name() == "Multiply" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == get_element_type(dtype) - assert list(model.get_output_shape(0)) == [2, 2] - - -def test_broadcast_1(): - input_data = np.array([1, 2, 3], dtype=np.int32) - - new_shape = [3, 3] - node = ng.broadcast(input_data, new_shape) - assert node.get_type_name() == "Broadcast" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.i32 - assert list(node.get_output_shape(0)) == [3, 3] - - -def test_broadcast_2(): - input_data = np.arange(4, dtype=np.int32) - new_shape = [3, 4, 2, 4] - node = ng.broadcast(input_data, new_shape) - assert node.get_type_name() == "Broadcast" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.i32 - assert list(node.get_output_shape(0)) == [3, 4, 2, 4] - - -def test_broadcast_3(): - input_data = np.array([1, 2, 3], dtype=np.int32) - new_shape = [3, 3] - axis_mapping = [0] - - node = ng.broadcast(input_data, new_shape, axis_mapping, "EXPLICIT") - assert node.get_type_name() == "Broadcast" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.i32 - assert list(node.get_output_shape(0)) == [3, 3] - - -@pytest.mark.parametrize( - "destination_type, input_data", - [(bool, np.zeros((2, 2), dtype=np.int32)), ("boolean", np.zeros((2, 2), dtype=np.int32))], -) -def test_convert_to_bool(destination_type, input_data): - node = ng.convert(input_data, destination_type) - assert node.get_type_name() == "Convert" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.boolean - assert list(node.get_output_shape(0)) == [2, 2] - - -@pytest.mark.parametrize( - "destination_type, rand_range, in_dtype, expected_type", - [ - pytest.param(np.float32, (-8, 8), np.int32, np.float32), - pytest.param(np.float64, (-16383, 16383), np.int64, np.float64), - pytest.param("f32", (-8, 8), np.int32, np.float32), - pytest.param("f64", (-16383, 16383), np.int64, 
np.float64), - ], -) -def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type): - np.random.seed(133391) - input_data = np.random.randint(*rand_range, size=(2, 2), dtype=in_dtype) - node = ng.convert(input_data, destination_type) - assert node.get_type_name() == "Convert" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == get_element_type(expected_type) - assert list(node.get_output_shape(0)) == [2, 2] - - -@pytest.mark.parametrize( - "destination_type, expected_type", - [ - (np.int8, np.int8), - (np.int16, np.int16), - (np.int32, np.int32), - (np.int64, np.int64), - ("i8", np.int8), - ("i16", np.int16), - ("i32", np.int32), - ("i64", np.int64), - ], -) -def test_convert_to_int(destination_type, expected_type): - np.random.seed(133391) - input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(np.float32) - node = ng.convert(input_data, destination_type) - assert node.get_type_name() == "Convert" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == get_element_type(expected_type) - assert list(node.get_output_shape(0)) == [2, 3, 4] - - -@pytest.mark.parametrize( - "destination_type, expected_type", - [ - (np.uint8, np.uint8), - (np.uint16, np.uint16), - (np.uint32, np.uint32), - (np.uint64, np.uint64), - ("u8", np.uint8), - ("u16", np.uint16), - ("u32", np.uint32), - ("u64", np.uint64), - ], -) -def test_convert_to_uint(destination_type, expected_type): - np.random.seed(133391) - input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(np.float32) - node = ng.convert(input_data, destination_type) - assert node.get_type_name() == "Convert" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == get_element_type(expected_type) - assert list(node.get_output_shape(0)) == [2, 3, 4] - - -def test_constant_get_data_bool(): - input_data = np.array([True, False, False, True]) - node = ng.constant(input_data, dtype=bool) - retrieved_data = node.get_data() - assert np.allclose(input_data, retrieved_data) - - -@pytest.mark.parametrize("data_type", [np.float32, np.float64]) -def test_constant_get_data_floating_point(data_type): - np.random.seed(133391) - input_data = np.random.randn(2, 3, 4).astype(data_type) - min_value = -1.0e20 - max_value = 1.0e20 - input_data = min_value + input_data * max_value * data_type(2) - node = ng.constant(input_data, dtype=data_type) - retrieved_data = node.get_data() - assert np.allclose(input_data, retrieved_data) - - -@pytest.mark.parametrize("data_type", [np.int64, np.int32, np.int16, np.int8]) -def test_constant_get_data_signed_integer(data_type): - np.random.seed(133391) - input_data = np.random.randint( - np.iinfo(data_type).min, np.iinfo(data_type).max, size=[2, 3, 4], dtype=data_type - ) - node = ng.constant(input_data, dtype=data_type) - retrieved_data = node.get_data() - assert np.allclose(input_data, retrieved_data) - - -@pytest.mark.parametrize("data_type", [np.uint64, np.uint32, np.uint16, np.uint8]) -def test_constant_get_data_unsigned_integer(data_type): - np.random.seed(133391) - input_data = np.random.randn(2, 3, 4).astype(data_type) - input_data = ( - np.iinfo(data_type).min + input_data * np.iinfo(data_type).max + input_data * np.iinfo(data_type).max - ) - node = ng.constant(input_data, dtype=data_type) - retrieved_data = node.get_data() - assert np.allclose(input_data, retrieved_data) - - -def test_set_argument(): - data1 = np.array([1, 2, 3]) - data2 = np.array([4, 5, 6]) - data3 = np.array([7, 8, 9]) - - node1 = ng.constant(data1, 
dtype=np.float32) - node2 = ng.constant(data2, dtype=np.float32) - node3 = ng.constant(data3, dtype=np.float64) - node4 = ng.constant(data3, dtype=np.float64) - node_add = ng.add(node1, node2) - - # Original arguments - node_inputs = node_add.inputs() - assert node_inputs[0].get_element_type() == Type.f32 - assert node_inputs[1].get_element_type() == Type.f32 - - # Arguments changed by set_argument - node_add.set_argument(0, node3.output(0)) - node_add.set_argument(1, node4.output(0)) - node_inputs = node_add.inputs() - assert node_inputs[0].get_element_type() == Type.f64 - assert node_inputs[1].get_element_type() == Type.f64 - - # Arguments changed by set_argument - node_add.set_argument(0, node1.output(0)) - node_add.set_argument(1, node2.output(0)) - assert node_inputs[0].get_element_type() == Type.f32 - assert node_inputs[1].get_element_type() == Type.f32 - - # Arguments changed by set_argument(OutputVector) - node_add.set_arguments([node3.output(0), node4.output(0)]) - assert node_inputs[0].get_element_type() == Type.f64 - assert node_inputs[1].get_element_type() == Type.f64 - - # Arguments changed by set_arguments(NodeVector) - node_add.set_arguments([node1, node2]) - assert node_inputs[0].get_element_type() == Type.f32 - assert node_inputs[1].get_element_type() == Type.f32 - - -def test_result(): - input_data = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.float32) - node = ng.result(input_data) - assert node.get_type_name() == "Result" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [3, 2] - - -def test_node_friendly_name(): - dummy_node = ng.parameter(shape=[1], name="dummy_name") - - assert(dummy_node.friendly_name == "dummy_name") - - dummy_node.set_friendly_name("changed_name") - - assert(dummy_node.get_friendly_name() == "changed_name") - - dummy_node.friendly_name = "new_name" - - assert(dummy_node.get_friendly_name() == "new_name") - - -def test_node_output(): - input_array = np.array([0, 1, 2, 3, 4, 5]) - splits = 3 - expected_shape = len(input_array) // splits - - input_tensor = ng.constant(input_array, dtype=np.int32) - axis = ng.constant(0, dtype=np.int64) - split_node = ng.split(input_tensor, axis, splits) - - split_node_outputs = split_node.outputs() - - assert len(split_node_outputs) == splits - assert [output_node.get_index() for output_node in split_node_outputs] == [0, 1, 2] - assert np.equal( - [output_node.get_element_type() for output_node in split_node_outputs], - input_tensor.get_element_type(), - ).all() - assert np.equal( - [output_node.get_shape() for output_node in split_node_outputs], - Shape([expected_shape]), - ).all() - assert np.equal( - [output_node.get_partial_shape() for output_node in split_node_outputs], - PartialShape([expected_shape]), - ).all() - - output0 = split_node.output(0) - output1 = split_node.output(1) - output2 = split_node.output(2) - - assert [output0.get_index(), output1.get_index(), output2.get_index()] == [0, 1, 2] - - -def test_node_input(): - shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=np.float32, name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") - - model = parameter_a + parameter_b - - model_inputs = model.inputs() - - assert len(model_inputs) == 2 - assert [input_node.get_index() for input_node in model_inputs] == [0, 1] - assert np.equal( - [input_node.get_element_type() for input_node in model_inputs], - model.get_element_type(), - ).all() - assert np.equal( - [input_node.get_shape() for input_node in 
model_inputs], Shape(shape) - ).all() - assert np.equal( - [input_node.get_partial_shape() for input_node in model_inputs], - PartialShape(shape), - ).all() - - input0 = model.input(0) - input1 = model.input(1) - - assert [input0.get_index(), input1.get_index()] == [0, 1] - - -def test_node_target_inputs_soruce_output(): - shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=np.float32, name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") - - model = parameter_a + parameter_b - - out_a = list(parameter_a.output(0).get_target_inputs())[0] - out_b = list(parameter_b.output(0).get_target_inputs())[0] - - assert out_a.get_node().name == model.name - assert out_b.get_node().name == model.name - assert np.equal([out_a.get_shape()], [model.get_output_shape(0)]).all() - assert np.equal([out_b.get_shape()], [model.get_output_shape(0)]).all() - - in_model0 = model.input(0).get_source_output() - in_model1 = model.input(1).get_source_output() - - assert in_model0.get_node().name == parameter_a.name - assert in_model1.get_node().name == parameter_b.name - assert np.equal([in_model0.get_shape()], [model.get_output_shape(0)]).all() - assert np.equal([in_model1.get_shape()], [model.get_output_shape(0)]).all() - - -def test_runtime_info(): - test_shape = PartialShape([1, 1, 1, 1]) - test_type = Type.f32 - test_param = Parameter(test_type, test_shape) - relu_node = ng.relu(test_param) - runtime_info = relu_node.get_rt_info() - runtime_info["affinity"] = "test_affinity" - relu_node.set_friendly_name("testReLU") - runtime_info_after = relu_node.get_rt_info() - - assert runtime_info_after["affinity"] == "test_affinity" - - -def test_mutiple_outputs(): - input_shape = [4, 4] - input_data = np.arange(-8, 8).reshape(input_shape) - - expected_output = np.split(input_data, 2, axis=1)[0] - expected_output[expected_output < 0] = 0 - - test_param = ng.parameter(input_shape, dtype=np.float32, name="A") - split = ng.split(test_param, axis=1, num_splits=2) - split_first_output = split.output(0) - relu = ng.relu(split_first_output) - - assert relu.get_type_name() == "Relu" - assert relu.get_output_size() == 1 - assert relu.get_output_element_type(0) == Type.f32 - assert list(relu.get_output_shape(0)) == [4, 2] - - -def test_sink_function_ctor(): - input_data = ng.parameter([2, 2], name="input_data", dtype=np.float32) - rv = ng.read_value(input_data, "var_id_667") - add = ng.add(rv, input_data, name="MemoryAdd") - node = ng.assign(add, "var_id_667") - res = ng.result(add, "res") - function = Function(results=[res], sinks=[node], parameters=[input_data], name="TestFunction") - - ordered_ops = function.get_ordered_ops() - op_types = [op.get_type_name() for op in ordered_ops] - assert op_types == ["Parameter", "ReadValue", "Add", "Assign", "Result"] - assert len(function.get_ops()) == 5 - assert function.get_output_size() == 1 - assert function.get_output_op(0).get_type_name() == "Result" - assert function.get_output_element_type(0) == input_data.get_element_type() - assert list(function.get_output_shape(0)) == [2, 2] - assert (function.get_parameters()[0].get_partial_shape()) == PartialShape([2, 2]) - assert len(function.get_parameters()) == 1 - assert len(function.get_results()) == 1 - assert function.get_friendly_name() == "TestFunction" - diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_convolution.py b/src/bindings/python/tests_compatibility/test_ngraph/test_convolution.py deleted file mode 100644 index 5c4873364e6..00000000000 --- 
a/src/bindings/python/tests_compatibility/test_ngraph/test_convolution.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -import ngraph as ng -from ngraph.impl import Type - - -@pytest.mark.parametrize(("strides", "pads_begin", "pads_end", "dilations", "expected_shape"), [ - (np.array([1, 1]), np.array([1, 1]), np.array([1, 1]), np.array([1, 1]), [1, 1, 9, 9]), - (np.array([1, 1]), np.array([0, 0]), np.array([0, 0]), np.array([1, 1]), [1, 1, 7, 7]), - (np.array([2, 2]), np.array([0, 0]), np.array([0, 0]), np.array([1, 1]), [1, 1, 4, 4]), - (np.array([1, 1]), np.array([0, 0]), np.array([0, 0]), np.array([2, 2]), [1, 1, 5, 5]), -]) -def test_convolution_2d(strides, pads_begin, pads_end, dilations, expected_shape): - - # input_x should have shape N(batch) x C x H x W - input_x = ng.parameter((1, 1, 9, 9), name="input_data", dtype=np.float32) - - # filter weights should have shape M x C x kH x kW - input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( - 1, 1, 3, 3 - ) - - node = ng.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations) - - assert node.get_type_name() == "Convolution" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 - - -def test_convolution_backprop_data(): - output_spatial_shape = [9, 9] - filter_shape = [1, 1, 3, 3] - data_shape = [1, 1, 7, 7] - strides = [1, 1] - - data_node = ng.parameter(shape=data_shape) - filter_node = ng.parameter(shape=filter_shape) - output_shape_node = ng.constant(np.array(output_spatial_shape, dtype=np.int64)) - - deconvolution = ng.convolution_backprop_data(data_node, filter_node, strides, output_shape_node) - assert deconvolution.get_type_name() == "ConvolutionBackpropData" - assert deconvolution.get_output_size() == 1 - assert list(deconvolution.get_output_shape(0)) == [1, 1, 9, 9] - assert deconvolution.get_output_element_type(0) == Type.f32 - - -def test_convolution_v1(): - input_tensor = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16) - filters = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3) - filters[0, 0, 0, 0] = -1 - filters[0, 0, 1, 1] = -1 - filters[0, 0, 2, 2] = -1 - filters[0, 0, 0, 2] = -1 - filters[0, 0, 2, 0] = -1 - strides = np.array([1, 1]) - pads_begin = np.array([0, 0]) - pads_end = np.array([0, 0]) - dilations = np.array([1, 1]) - - node = ng.convolution(input_tensor, filters, strides, pads_begin, pads_end, dilations) - - assert node.get_type_name() == "Convolution" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 1, 14, 14] - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_core.py b/src/bindings/python/tests_compatibility/test_ngraph/test_core.py deleted file mode 100644 index 949946ef842..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_core.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng -from ngraph.impl import Dimension, Function, PartialShape, Shape - - -def test_dimension(): - dim = Dimension() - assert dim.is_dynamic - assert not dim.is_static - assert repr(dim) == "" - - dim = Dimension.dynamic() - assert dim.is_dynamic - assert not dim.is_static - assert repr(dim) == "" - - dim 
= Dimension(10) - assert dim.is_static - assert len(dim) == 10 - assert dim.get_length() == 10 - assert dim.get_min_length() == 10 - assert dim.get_max_length() == 10 - assert repr(dim) == "" - - dim = Dimension(5, 15) - assert dim.is_dynamic - assert dim.get_min_length() == 5 - assert dim.get_max_length() == 15 - assert repr(dim) == "" - - -def test_dimension_comparisons(): - d1 = Dimension.dynamic() - d2 = Dimension.dynamic() - assert d1 == d2 - assert d1 == -1 - assert d1.refines(d2) - assert d1.relaxes(d2) - assert d2.refines(d1) - assert d2.relaxes(d1) - assert d2.compatible(d1) - assert d2.same_scheme(d1) - - d1 = Dimension.dynamic() - d2 = Dimension(3) - assert d1 != d2 - assert d2 == 3 - assert not d1.refines(d2) - assert d1.relaxes(d2) - assert d2.refines(d1) - assert not d2.relaxes(d1) - assert d2.compatible(d1) - assert not d2.same_scheme(d1) - - d1 = Dimension(3) - d2 = Dimension(3) - assert d1 == d2 - assert d1.refines(d2) - assert d1.relaxes(d2) - assert d2.refines(d1) - assert d2.relaxes(d1) - assert d2.compatible(d1) - assert d2.same_scheme(d1) - - d1 = Dimension(4) - d2 = Dimension(3) - assert d1 != d2 - assert not d1.refines(d2) - assert not d1.relaxes(d2) - assert not d2.refines(d1) - assert not d2.relaxes(d1) - assert not d2.compatible(d1) - assert not d2.same_scheme(d1) - - -def test_partial_shape(): - ps = PartialShape([1, 2, 3, 4]) - assert ps.is_static - assert not ps.is_dynamic - assert ps.rank == 4 - assert repr(ps) == "" - assert ps.get_dimension(0) == Dimension(1) - assert ps.get_dimension(1) == Dimension(2) - assert ps.get_dimension(2) == Dimension(3) - assert ps.get_dimension(3) == Dimension(4) - - shape = Shape([1, 2, 3]) - ps = PartialShape(shape) - assert ps.is_static - assert not ps.is_dynamic - assert ps.all_non_negative - assert ps.rank == 3 - assert list(ps.get_shape()) == [1, 2, 3] - assert list(ps.get_max_shape()) == [1, 2, 3] - assert list(ps.get_min_shape()) == [1, 2, 3] - assert list(ps.to_shape()) == [1, 2, 3] - assert repr(shape) == "" - assert repr(ps) == "" - - ps = PartialShape([Dimension(1), Dimension(2), Dimension(3), Dimension.dynamic()]) - assert not ps.is_static - assert ps.is_dynamic - assert ps.all_non_negative - assert ps.rank == 4 - assert list(ps.get_min_shape()) == [1, 2, 3, 0] - assert list(ps.get_max_shape())[3] > 1000000000 - assert repr(ps) == "" - assert ps.get_dimension(0) == Dimension(1) - assert ps.get_dimension(1) == Dimension(2) - assert ps.get_dimension(2) == Dimension(3) - assert ps.get_dimension(3) == Dimension.dynamic() - - ps = PartialShape([1, 2, 3, -1]) - assert not ps.is_static - assert ps.is_dynamic - assert ps.all_non_negative - assert ps.rank == 4 - assert list(ps.get_min_shape()) == [1, 2, 3, 0] - assert list(ps.get_max_shape())[3] > 1000000000 - assert repr(ps) == "" - - ps = PartialShape.dynamic() - assert not ps.is_static - assert ps.is_dynamic - assert ps.rank == Dimension.dynamic() - assert list(ps.get_min_shape()) == [] - assert list(ps.get_max_shape()) == [] - assert repr(ps) == "" - - ps = PartialShape.dynamic(r=Dimension(2)) - assert not ps.is_static - assert ps.is_dynamic - assert ps.rank == 2 - assert 2 == ps.rank - assert list(ps.get_min_shape()) == [0, 0] - assert list(ps.get_max_shape())[0] > 1000000000 - assert repr(ps) == "" - - -def test_partial_shape_compatible(): - ps1 = PartialShape.dynamic() - ps2 = PartialShape.dynamic() - assert ps1.compatible(ps2) - - ps1 = PartialShape([3]) - ps2 = PartialShape.dynamic() - assert ps1.compatible(ps2) - - ps1 = PartialShape.dynamic() - ps2 = 
PartialShape([4]) - assert ps1.compatible(ps2) - - ps1 = PartialShape([2, -1, 3, -1, 5]) - ps2 = PartialShape([2, -1, -1, 4, 5]) - assert ps1.compatible(ps2) - - ps1 = PartialShape([2, -1, 3, -1, 5]) - ps2 = PartialShape([1, -1, -1, 4, 5]) - assert not ps1.compatible(ps2) - - -def test_partial_shape_same_scheme(): - ps1 = PartialShape([1, 2, -1]) - ps2 = PartialShape([1, 3, -1]) - assert not ps1.same_scheme(ps2) - - ps1 = PartialShape([1, 2, -1]) - ps2 = PartialShape([1, 2, -1]) - assert ps1.same_scheme(ps2) - - ps1 = PartialShape([1, 2, 3]) - ps2 = PartialShape([1, 2, 3]) - assert ps1.same_scheme(ps2) - - ps1 = PartialShape([-1, 2, 3]) - ps2 = PartialShape([1, -1, 3]) - assert not ps1.same_scheme(ps2) - - ps1 = PartialShape.dynamic() - ps2 = PartialShape.dynamic() - assert ps1.same_scheme(ps2) - - -def test_partial_shape_refinement(): - ps1 = PartialShape.dynamic() - ps2 = PartialShape.dynamic() - assert ps1.refines(ps2) - assert ps1.relaxes(ps2) - assert ps2.refines(ps1) - assert ps2.relaxes(ps1) - - ps1 = PartialShape.dynamic() - ps2 = PartialShape([3, -1, 7, 9]) - assert not ps1.refines(ps2) - assert ps1.relaxes(ps2) - assert ps2.refines(ps1) - assert not ps2.relaxes(ps1) - - ps1 = PartialShape.dynamic() - ps2 = PartialShape([3, 5, 7, 9]) - assert not ps1.refines(ps2) - assert ps1.relaxes(ps2) - assert ps2.refines(ps1) - assert not ps2.relaxes(ps1) - - -def test_partial_shape_equals(): - ps1 = PartialShape.dynamic() - ps2 = PartialShape.dynamic() - assert ps1 == ps2 - - ps1 = PartialShape([1, 2, 3]) - ps2 = PartialShape([1, 2, 3]) - assert ps1 == ps2 - - shape = Shape([1, 2, 3]) - ps = PartialShape([1, 2, 3]) - assert shape == ps - - -def test_repr_dynamic_shape(): - shape = PartialShape([-1, 2]) - parameter_a = ng.parameter(shape, dtype=np.float32, name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") - model = parameter_a + parameter_b - function = Function(model, [parameter_a, parameter_b], "simple_dyn_shapes_graph") - - assert repr(function) == "" - - ops = function.get_ordered_ops() - for op in ops: - assert "[?,2]" in repr(op) - - -def test_discrete_type_info(): - data_shape = [6, 12, 10, 24] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - k = np.int32(3) - axis = np.int32(1) - n1 = ng.topk(data_parameter, k, axis, "max", "value") - n2 = ng.topk(data_parameter, k, axis, "max", "value") - n3 = ng.sin(0.2) - - assert n1.type_info.name == "TopK" - assert n3.type_info.name == "Sin" - assert n1.get_type_info().name == "TopK" - assert n3.get_type_info().name == "Sin" - assert n1.type_info.name == n2.type_info.name - assert n1.type_info.version_id == n2.type_info.version_id - assert n1.type_info.parent == n2.type_info.parent - assert n1.get_type_info().name == n2.get_type_info().name - assert n1.get_type_info().version_id == n2.get_type_info().version_id - assert n1.get_type_info().parent == n2.get_type_info().parent - assert n1.get_type_info().name != n3.get_type_info().name - assert n1.get_type_info().name > n3.get_type_info().name - assert n1.get_type_info().name >= n3.get_type_info().name - assert n3.get_type_info().name < n1.get_type_info().name - assert n3.get_type_info().name <= n1.get_type_info().name diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_create_op.py b/src/bindings/python/tests_compatibility/test_ngraph/test_create_op.py deleted file mode 100644 index 7ec5a26109a..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_create_op.py +++ /dev/null @@ -1,2427 +0,0 @@ -# Copyright 
(C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import pytest - -from _pyngraph import PartialShape, Dimension - -import ngraph as ng -import ngraph.opset1 as ng_opset1 -import ngraph.opset5 as ng_opset5 -import ngraph.opset10 as ng_opset10 -import ngraph.opset11 as ng_opset11 -from ngraph.utils.types import make_constant_node -from ngraph.exceptions import UserInputError -from ngraph.impl import Type - -np_types = [np.float32, np.int32] -integral_np_types = [ - np.int8, - np.int16, - np.int32, - np.int64, - np.uint8, - np.uint16, - np.uint32, - np.uint64, -] - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_adaptive_avg_pool(dtype): - data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype) - output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) - - node = ng.adaptive_avg_pool(data, output_shape) - - assert node.get_type_name() == "AdaptiveAvgPool" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 24, 16, 16] - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -@pytest.mark.parametrize("ind_type", ["i32", "i64"]) -def test_adaptive_max_pool(dtype, ind_type): - data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype) - output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) - - node = ng.adaptive_max_pool(data, output_shape, ind_type) - - assert node.get_type_name() == "AdaptiveMaxPool" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [2, 24, 16, 16] - assert list(node.get_output_shape(1)) == [2, 24, 16, 16] - assert node.get_output_element_type(1) == Type.i32 if ind_type == "i32" else Type.i64 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_binary_convolution(dtype): - strides = np.array([1, 1]) - pads_begin = np.array([0, 0]) - pads_end = np.array([0, 0]) - dilations = np.array([1, 1]) - mode = "xnor-popcount" - pad_value = 0.0 - - input0_shape = [1, 1, 9, 9] - input1_shape = [1, 1, 3, 3] - expected_shape = [1, 1, 7, 7] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - - node = ng.binary_convolution( - parameter_input0, parameter_input1, strides, pads_begin, pads_end, dilations, mode, pad_value, - ) - - assert node.get_type_name() == "BinaryConvolution" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_ctc_greedy_decoder(dtype): - input0_shape = [20, 8, 128] - input1_shape = [20, 8] - expected_shape = [8, 20, 1, 1] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - - node = ng.ctc_greedy_decoder(parameter_input0, parameter_input1) - - assert node.get_type_name() == "CTCGreedyDecoder" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index", - [ - (np.float32, np.int32, "i32", "i32", True, True), - (np.float32, np.int32, "i64", "i32", True, True), - (np.float32, np.int32, "i32", "i64", True, True), - (np.float32, np.int32, "i64", "i64", True, True), - (np.float64, np.int64, "i32", "i32", False, True), - (np.float64, np.int64, "i64", "i32", False, True), - (np.float64, np.int64, "i32", "i64", False, True), - (np.float64, np.int64, "i64", "i64", 
False, True), - (np.float32, np.int32, "i32", "i32", True, False), - (np.float32, np.int32, "i64", "i32", True, False), - (np.float32, np.int32, "i32", "i64", True, False), - (np.float32, np.int32, "i64", "i64", True, False), - (np.float64, np.int64, "i32", "i32", False, False), - (np.float64, np.int64, "i64", "i32", False, False), - (np.float64, np.int64, "i32", "i64", False, False), - (np.float64, np.int64, "i64", "i64", False, False) - ], ) -def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index): - input0_shape = [8, 20, 128] - input1_shape = [8] - input2_shape = [1] - expected_shape = [8, 20] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=fp_dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=int_dtype) - parameter_input2 = None - if blank_index: - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=int_dtype) - - node = ng.ctc_greedy_decoder_seq_len( - parameter_input0, parameter_input1, parameter_input2, merge_repeated, int_ci, int_sl - ) - - assert node.get_type_name() == "CTCGreedyDecoderSeqLen" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_deformable_convolution_opset1(dtype): - strides = np.array([1, 1]) - pads_begin = np.array([0, 0]) - pads_end = np.array([0, 0]) - dilations = np.array([1, 1]) - - input0_shape = [1, 1, 9, 9] - input1_shape = [1, 18, 7, 7] - input2_shape = [1, 1, 3, 3] - expected_shape = [1, 1, 7, 7] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) - - node = ng_opset1.deformable_convolution( - parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, - ) - - assert node.get_type_name() == "DeformableConvolution" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_deformable_convolution(dtype): - strides = np.array([1, 1]) - pads_begin = np.array([0, 0]) - pads_end = np.array([0, 0]) - dilations = np.array([1, 1]) - - input0_shape = [1, 1, 9, 9] - input1_shape = [1, 18, 7, 7] - input2_shape = [1, 1, 3, 3] - expected_shape = [1, 1, 7, 7] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) - - node = ng.deformable_convolution( - parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, - ) - - assert node.get_type_name() == "DeformableConvolution" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_deformable_convolution_mask(dtype): - strides = np.array([1, 1]) - pads_begin = np.array([0, 0]) - pads_end = np.array([0, 0]) - dilations = np.array([1, 1]) - - input0_shape = [1, 1, 9, 9] - input1_shape = [1, 18, 7, 7] - input2_shape = [1, 1, 3, 3] - input3_shape = [1, 9, 7, 7] - expected_shape = [1, 1, 7, 7] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", 
dtype=dtype) - parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype) - - node = ng.deformable_convolution( - parameter_input0, parameter_input1, parameter_input2, strides, - pads_begin, pads_end, dilations, parameter_input3 - ) - - assert node.get_type_name() == "DeformableConvolution" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_deformable_psroi_pooling(dtype): - output_dim = 8 - spatial_scale = 0.0625 - group_size = 7 - mode = "bilinear_deformable" - spatial_bins_x = 4 - spatial_bins_y = 4 - trans_std = 0.1 - part_size = 7 - - input0_shape = [1, 392, 38, 63] - input1_shape = [300, 5] - input2_shape = [300, 2, 7, 7] - expected_shape = [300, 8, 7, 7] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) - - node = ng.deformable_psroi_pooling( - parameter_input0, - parameter_input1, - output_dim, - spatial_scale, - group_size, - mode, - spatial_bins_x, - spatial_bins_y, - trans_std, - part_size, - offsets=parameter_input2, - ) - - assert node.get_type_name() == "DeformablePSROIPooling" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize( - ("data_shape", "rois", "batch_indices", "pooled_h", "pooled_w", "sampling_ratio", "spatial_scale", "mode", "aligned_mode", "expected_shape"), - [ - ([2, 3, 5, 6], [7, 4], [7], 2, 2, 1, 1.0, "avg", "asymmetric", [7, 3, 2, 2]), - ([10, 3, 5, 5], [7, 4], [7], 3, 4, 1, 1.0, "avg", "half_pixel_for_nn", [7, 3, 3, 4]), - ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, 1.0, "avg", "half_pixel", [3, 3, 3, 4]), - ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, np.float32(1), "avg", "half_pixel", [3, 3, 3, 4]), - ], -) -def test_roi_align(data_shape, rois, batch_indices, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode, aligned_mode, expected_shape): - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - rois_parameter = ng.parameter(rois, name="Rois", dtype=np.float32) - batch_indices_parameter = ng.parameter(batch_indices, name="Batch_indices", dtype=np.int32) - - node = ng.roi_align( - data_parameter, - rois_parameter, - batch_indices_parameter, - pooled_h, - pooled_w, - sampling_ratio, - np.float32(spatial_scale), - mode, - aligned_mode, - ) - - assert node.get_type_name() == "ROIAlign" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_floor_mod(dtype): - input0_shape = [8, 1, 6, 1] - input1_shape = [7, 1, 5] - expected_shape = [8, 7, 6, 5] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - - node = ng.floor_mod(parameter_input0, parameter_input1) - - assert node.get_type_name() == "FloorMod" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", np_types) -def test_gather_tree(dtype): - input0_shape = [100, 1, 10] - input1_shape = [100, 1, 10] - input2_shape = [1] - input3_shape = [] - expected_shape = [100, 1, 10] - - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, 
name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) - parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype) - - node = ng.gather_tree(parameter_input0, parameter_input1, parameter_input2, parameter_input3) - - assert node.get_type_name() == "GatherTree" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_cell_operator(dtype): - batch_size = 1 - input_size = 16 - hidden_size = 128 - - X_shape = [batch_size, input_size] - H_t_shape = [batch_size, hidden_size] - C_t_shape = [batch_size, hidden_size] - W_shape = [4 * hidden_size, input_size] - R_shape = [4 * hidden_size, hidden_size] - B_shape = [4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - expected_shape = [1, 128] - - node_default = ng.lstm_cell( - parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, - ) - - assert node_default.get_type_name() == "LSTMCell" - assert node_default.get_output_size() == 2 - assert list(node_default.get_output_shape(0)) == expected_shape - assert list(node_default.get_output_shape(1)) == expected_shape - - activations = ["tanh", "Sigmoid", "RELU"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 0.5 - - node_param = ng.lstm_cell( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "LSTMCell" - assert node_param.get_output_size() == 2 - assert list(node_param.get_output_shape(0)) == expected_shape - assert list(node_param.get_output_shape(1)) == expected_shape - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_cell_operator_opset1(dtype): - batch_size = 1 - input_size = 16 - hidden_size = 128 - - X_shape = [batch_size, input_size] - H_t_shape = [batch_size, hidden_size] - C_t_shape = [batch_size, hidden_size] - W_shape = [4 * hidden_size, input_size] - R_shape = [4 * hidden_size, hidden_size] - B_shape = [4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - expected_shape = [1, 128] - - node_default = ng_opset1.lstm_cell( - parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, - ) - - assert node_default.get_type_name() == "LSTMCell" - assert node_default.get_output_size() == 2 - assert list(node_default.get_output_shape(0)) == expected_shape - assert list(node_default.get_output_shape(1)) == expected_shape - - activations = ["tanh", "Sigmoid", "RELU"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 0.5 - - node_param = ng_opset1.lstm_cell( - parameter_X, - parameter_H_t, - parameter_C_t, - 
parameter_W, - parameter_R, - parameter_B, - hidden_size, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "LSTMCell" - assert node_param.get_output_size() == 2 - assert list(node_param.get_output_shape(0)) == expected_shape - assert list(node_param.get_output_shape(1)) == expected_shape - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_sequence_operator_bidirectional_opset1(dtype): - batch_size = 1 - input_size = 16 - hidden_size = 128 - num_directions = 2 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - C_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 4 * hidden_size, input_size] - R_shape = [num_directions, 4 * hidden_size, hidden_size] - B_shape = [num_directions, 4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "BIDIRECTIONAL" - node = ng_opset1.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node.get_type_name() == "LSTMSequence" - assert node.get_output_size() == 3 - - activations = ["RELU", "tanh", "Sigmoid"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - - node_param = ng_opset1.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "LSTMSequence" - assert node_param.get_output_size() == 3 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_sequence_operator_reverse_opset1(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - C_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 4 * hidden_size, input_size] - R_shape = [num_directions, 4 * hidden_size, hidden_size] - B_shape = [num_directions, 4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "REVERSE" - - node_default = ng_opset1.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "LSTMSequence" - assert node_default.get_output_size() == 3 - - activations = ["RELU", "tanh", 
"Sigmoid"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - - node_param = ng_opset1.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "LSTMSequence" - assert node_param.get_output_size() == 3 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_sequence_operator_forward_opset1(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - C_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 4 * hidden_size, input_size] - R_shape = [num_directions, 4 * hidden_size, hidden_size] - B_shape = [num_directions, 4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "forward" - - node_default = ng_opset1.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "LSTMSequence" - assert node_default.get_output_size() == 3 - - activations = ["RELU", "tanh", "Sigmoid"] - activation_alpha = [2.0] - activation_beta = [1.0] - clip = 0.5 - - node = ng_opset1.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node.get_type_name() == "LSTMSequence" - assert node.get_output_size() == 3 - - -def test_gru_cell_operator(): - batch_size = 1 - input_size = 16 - hidden_size = 128 - - X_shape = [batch_size, input_size] - H_t_shape = [batch_size, hidden_size] - W_shape = [3 * hidden_size, input_size] - R_shape = [3 * hidden_size, hidden_size] - B_shape = [3 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) - - expected_shape = [1, 128] - - node_default = ng.gru_cell(parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size) - - assert node_default.get_type_name() == "GRUCell" - assert node_default.get_output_size() == 1 - assert list(node_default.get_output_shape(0)) == expected_shape - - activations = ["tanh", "relu"] - activations_alpha = [1.0, 2.0] - activations_beta = [1.0, 2.0] - clip = 0.5 - linear_before_reset = True - - # If *linear_before_reset* is set True, then B tensor shape must be [4 * hidden_size] - B_shape = [4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) - - node_param = ng.gru_cell( - parameter_X, - 
parameter_H_t, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - activations, - activations_alpha, - activations_beta, - clip, - linear_before_reset, - ) - - assert node_param.get_type_name() == "GRUCell" - assert node_param.get_output_size() == 1 - assert list(node_param.get_output_shape(0)) == expected_shape - - -def test_gru_sequence(): - batch_size = 2 - input_size = 16 - hidden_size = 32 - seq_len = 8 - seq_lengths = [seq_len] * batch_size - num_directions = 1 - direction = "FORWARD" - - X_shape = [batch_size, seq_len, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - W_shape = [num_directions, 3 * hidden_size, input_size] - R_shape = [num_directions, 3 * hidden_size, hidden_size] - B_shape = [num_directions, 3 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) - - expected_shape_y = [batch_size, num_directions, seq_len, hidden_size] - expected_shape_h = [batch_size, num_directions, hidden_size] - - node_default = ng.gru_sequence( - parameter_X, - parameter_H_t, - seq_lengths, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "GRUSequence" - assert node_default.get_output_size() == 2 - assert list(node_default.get_output_shape(0)) == expected_shape_y - assert list(node_default.get_output_shape(1)) == expected_shape_h - - activations = ["tanh", "relu"] - activations_alpha = [1.0, 2.0] - activations_beta = [1.0, 2.0] - clip = 0.5 - linear_before_reset = True - - # If *linear_before_reset* is set True, then B tensor shape must be [4 * hidden_size] - B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) - - node_param = ng.gru_sequence( - parameter_X, - parameter_H_t, - seq_lengths, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activations_alpha, - activations_beta, - clip, - linear_before_reset, - ) - - assert node_param.get_type_name() == "GRUSequence" - assert node_param.get_output_size() == 2 - assert list(node_param.get_output_shape(0)) == expected_shape_y - assert list(node_param.get_output_shape(1)) == expected_shape_h - - -def test_rnn_sequence(): - batch_size = 2 - input_size = 16 - hidden_size = 32 - seq_len = 8 - seq_lengths = [seq_len] * batch_size - num_directions = 1 - direction = "FORWARD" - - X_shape = [batch_size, seq_len, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - W_shape = [num_directions, hidden_size, input_size] - R_shape = [num_directions, hidden_size, hidden_size] - B_shape = [num_directions, hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) - - expected_shape_y = [batch_size, num_directions, seq_len, hidden_size] - expected_shape_h = [batch_size, num_directions, hidden_size] - - node_default = ng.rnn_sequence( - parameter_X, - parameter_H_t, - seq_lengths, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert 
node_default.get_type_name() == "RNNSequence" - assert node_default.get_output_size() == 2 - assert list(node_default.get_output_shape(0)) == expected_shape_y - assert list(node_default.get_output_shape(1)) == expected_shape_h - - activations = ["relu"] - activations_alpha = [2.0] - activations_beta = [1.0] - clip = 0.5 - - node_param = ng.rnn_sequence( - parameter_X, - parameter_H_t, - seq_lengths, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activations_alpha, - activations_beta, - clip, - ) - - assert node_param.get_type_name() == "RNNSequence" - assert node_param.get_output_size() == 2 - assert list(node_param.get_output_shape(0)) == expected_shape_y - assert list(node_param.get_output_shape(1)) == expected_shape_h - - -def test_loop(): - from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, - ) - - condition = ng.constant(True, dtype=bool) - trip_count = ng.constant(16, dtype=np.int32) - # Body parameters - body_timestep = ng.parameter([], np.int32, "timestep") - body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in") - body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma") - body_const_one = ng.parameter([], np.int32, "body_const_one") - - # CMA = cumulative moving average - prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma) - curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0])) - elem_cnt = ng.add(body_const_one, body_timestep) - curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32")) - cma_hist = ng.unsqueeze(curr_cma, [0]) - - # TI inputs - data = ng.parameter([16, 2, 2], np.float32, "data") - # Iterations count - zero = ng.constant(0, dtype=np.int32) - one = ng.constant(1, dtype=np.int32) - initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) - iter_cnt = ng.range(zero, np.int32(16), np.int32(1)) - ti_inputs = [iter_cnt, data, initial_cma, one] - body_const_condition = ng.constant(True, dtype=bool) - - graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], - [curr_cma, cma_hist, body_const_condition]) - ti_slice_input_desc = [ - # timestep - # input_idx, body_param_idx, start, stride, part_size, end, axis - TensorIteratorSliceInputDesc(2, 0, 0, 1, 1, -1, 0), - # data - TensorIteratorSliceInputDesc(3, 1, 0, 1, 1, -1, 0), - ] - ti_merged_input_desc = [ - # body prev/curr_cma - TensorIteratorMergedInputDesc(4, 2, 0), - ] - ti_invariant_input_desc = [ - # body const one - TensorIteratorInvariantInputDesc(5, 3), - ] - - # TI outputs - ti_body_output_desc = [ - # final average - TensorIteratorBodyOutputDesc(0, 0, -1), - ] - ti_concat_output_desc = [ - # history of cma - TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0), - ] - - node = ng.loop( - trip_count, - condition, - ti_inputs, - graph_body, - ti_slice_input_desc, - ti_merged_input_desc, - ti_invariant_input_desc, - ti_body_output_desc, - ti_concat_output_desc, - 2, - -1, - ) - - assert node.get_type_name() == "Loop" - assert node.get_output_size() == 2 - # final average - assert list(node.get_output_shape(0)) == [2, 2] - # cma history - assert list(node.get_output_shape(1)) == [16, 2, 2] - - -def test_roi_pooling(): - inputs = ng.parameter([2, 3, 4, 5], dtype=np.float32) - coords = ng.parameter([150, 5], dtype=np.float32) - node = ng.roi_pooling(inputs, coords, [6, 6], 0.0625, "Max") - - 
assert node.get_type_name() == "ROIPooling"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [150, 3, 6, 6]
-    assert node.get_output_element_type(0) == Type.f32
-
-
-def test_psroi_pooling():
-    inputs = ng.parameter([1, 72, 4, 5], dtype=np.float32)
-    coords = ng.parameter([150, 5], dtype=np.float32)
-    node = ng.psroi_pooling(inputs, coords, 2, 6, 0.0625, 0, 0, "average")
-
-    assert node.get_type_name() == "PSROIPooling"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [150, 2, 6, 6]
-    assert node.get_output_element_type(0) == Type.f32
-
-
-def test_convert_like():
-    parameter_data = ng.parameter([1, 2, 3, 4], name="data", dtype=np.float32)
-    like = ng.constant(1, dtype=np.int8)
-
-    node = ng.convert_like(parameter_data, like)
-
-    assert node.get_type_name() == "ConvertLike"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [1, 2, 3, 4]
-    assert node.get_output_element_type(0) == Type.i8
-
-
-def test_bucketize():
-    data = ng.parameter([4, 3, 2, 1], name="data", dtype=np.float32)
-    buckets = ng.parameter([5], name="buckets", dtype=np.int64)
-
-    node = ng.bucketize(data, buckets, "i32")
-
-    assert node.get_type_name() == "Bucketize"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [4, 3, 2, 1]
-    assert node.get_output_element_type(0) == Type.i32
-
-
-def test_region_yolo():
-    data = ng.parameter([1, 125, 13, 13], name="input", dtype=np.float32)
-    num_coords = 4
-    num_classes = 80
-    num_regions = 1
-    mask = [6, 7, 8]
-    axis = 0
-    end_axis = 3
-    do_softmax = False
-
-    node = ng.region_yolo(data, num_coords, num_classes, num_regions, do_softmax, mask, axis, end_axis)
-
-    assert node.get_type_name() == "RegionYolo"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [1, (80 + 4 + 1) * 3, 13, 13]
-    assert node.get_output_element_type(0) == Type.f32
-
-
-def test_reorg_yolo():
-    data = ng.parameter([2, 24, 34, 62], name="input", dtype=np.int32)
-    stride = [2]
-
-    node = ng.reorg_yolo(data, stride)
-
-    assert node.get_type_name() == "ReorgYolo"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [2, 96, 17, 31]
-    assert node.get_output_element_type(0) == Type.i32
-
-
-def test_embedding_bag_offsets_sum_1():
-    emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
-    indices = ng.parameter([4], name="indices", dtype=np.int64)
-    offsets = ng.parameter([3], name="offsets", dtype=np.int64)
-    default_index = ng.parameter([], name="default_index", dtype=np.int64)
-
-    node = ng.embedding_bag_offsets_sum(emb_table, indices, offsets, default_index)
-
-    assert node.get_type_name() == "EmbeddingBagOffsetsSum"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == [3, 2]
-    assert node.get_output_element_type(0) == Type.f32
-
-
-def test_embedding_segments_sum_all_inputs():
-    emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
-    indices = ng.parameter([4], name="indices", dtype=np.int64)
-    segment_ids = ng.parameter([4], name="segment_ids", dtype=np.int64)
-    num_segments = ng.parameter([], name="num_segments", dtype=np.int64)
-    default_index = ng.parameter([], name="default_index", dtype=np.int64)
-    per_sample_weights = ng.parameter([4], name="per_sample_weights", dtype=np.float32)
-
-    node = ng.embedding_segments_sum(
-        emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights
-    )
-
-    assert node.get_type_name() == "EmbeddingSegmentsSum"
-    assert 
node.get_output_size() == 1 - assert node.get_output_partial_shape(0).same_scheme(PartialShape([-1, 2])) - assert node.get_output_element_type(0) == Type.f32 - - -def test_embedding_segments_sum_with_some_opt_inputs(): - emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32) - indices = ng.parameter([4], name="indices", dtype=np.int64) - segment_ids = ng.parameter([4], name="segment_ids", dtype=np.int64) - num_segments = ng.parameter([], name="num_segments", dtype=np.int64) - - # only 1 out of 3 optional inputs - node = ng.embedding_segments_sum(emb_table, indices, segment_ids, num_segments) - - assert node.get_type_name() == "EmbeddingSegmentsSum" - assert node.get_output_size() == 1 - assert node.get_output_partial_shape(0).same_scheme(PartialShape([-1, 2])) - assert node.get_output_element_type(0) == Type.f32 - - -def test_embedding_bag_packed_sum(): - emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32) - indices = ng.parameter([3, 3], name="indices", dtype=np.int64) - per_sample_weights = ng.parameter([3, 3], name="per_sample_weights", dtype=np.float32) - - # only 1 out of 3 optional inputs - node = ng.embedding_bag_packed_sum(emb_table, indices, per_sample_weights) - - assert node.get_type_name() == "EmbeddingBagPackedSum" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 2] - assert node.get_output_element_type(0) == Type.f32 - - -@pytest.mark.parametrize("dtype", integral_np_types) -def test_interpolate_opset1(dtype): - image_shape = [1, 3, 1024, 1024] - output_shape = [64, 64] - attributes = { - "axes": [2, 3], - "mode": "cubic", - "pads_begin": np.array([2, 2], dtype=dtype), - } - - image_node = ng.parameter(image_shape, dtype, name="Image") - - node = ng_opset1.interpolate(image_node, output_shape, attributes) - expected_shape = [1, 3, 64, 64] - - assert node.get_type_name() == "Interpolate" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize( - "int_dtype, fp_dtype", - [ - (np.int8, np.float32), - (np.int16, np.float32), - (np.int32, np.float32), - (np.int64, np.float32), - (np.uint8, np.float32), - (np.uint16, np.float32), - (np.uint32, np.float32), - (np.uint64, np.float32), - (np.int32, np.float16), - (np.int32, np.float64), - ], -) -def test_prior_box(int_dtype, fp_dtype): - image_shape = np.array([64, 64], dtype=int_dtype) - attributes = { - "offset": fp_dtype(0), - "min_size": np.array([2, 3], dtype=fp_dtype), - "aspect_ratio": np.array([1.5, 2.0, 2.5], dtype=fp_dtype), - "scale_all_sizes": False - } - - layer_shape = ng.constant(np.array([32, 32], dtype=int_dtype), int_dtype) - - node = ng.prior_box(layer_shape, image_shape, attributes) - - assert node.get_type_name() == "PriorBox" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 20480] - - -@pytest.mark.parametrize( - "int_dtype, fp_dtype", - [ - (np.int8, np.float32), - (np.int16, np.float32), - (np.int32, np.float32), - (np.int64, np.float32), - (np.uint8, np.float32), - (np.uint16, np.float32), - (np.uint32, np.float32), - (np.uint64, np.float32), - (np.int32, np.float16), - (np.int32, np.float64), - ], -) -def test_prior_box_clustered(int_dtype, fp_dtype): - image_size = np.array([64, 64], dtype=int_dtype) - attributes = { - "offset": fp_dtype(0.5), - "width": np.array([4.0, 2.0, 3.2], dtype=fp_dtype), - "height": np.array([1.0, 2.0, 1.0], dtype=fp_dtype), - } - - output_size = ng.constant(np.array([19, 19], dtype=int_dtype), int_dtype) - - node = 
ng.prior_box_clustered(output_size, image_size, attributes) - - assert node.get_type_name() == "PriorBoxClustered" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 4332] - - -@pytest.mark.parametrize( - "int_dtype, fp_dtype", - [ - (np.uint8, np.float32), - (np.uint16, np.float32), - (np.uint32, np.float32), - (np.uint64, np.float32), - (np.uint32, np.float16), - (np.uint32, np.float64), - ], -) -def test_proposal(int_dtype, fp_dtype): - attributes = { - "base_size": int_dtype(1), - "pre_nms_topn": int_dtype(20), - "post_nms_topn": int_dtype(64), - "nms_thresh": fp_dtype(0.34), - "feat_stride": int_dtype(16), - "min_size": int_dtype(32), - "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype), - "scale": np.array([2, 3, 3, 4], dtype=fp_dtype), - } - batch_size = 7 - - class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") - bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") - image_shape = ng.parameter([3], fp_dtype, "image_shape") - node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes) - - assert node.get_type_name() == "Proposal" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [batch_size * attributes["post_nms_topn"], 5] - - -def test_tensor_iterator(): - from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorSliceInputDesc, - TensorIteratorMergedInputDesc, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, - TensorIteratorConcatOutputDesc, - ) - - # Body parameters - body_timestep = ng.parameter([], np.int32, "timestep") - body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in") - body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma") - body_const_one = ng.parameter([], np.int32, "body_const_one") - - # CMA = cumulative moving average - prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma) - curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0])) - elem_cnt = ng.add(body_const_one, body_timestep) - curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32")) - cma_hist = ng.unsqueeze(curr_cma, [0]) - - # TI inputs - data = ng.parameter([16, 2, 2], np.float32, "data") - # Iterations count - zero = ng.constant(0, dtype=np.int32) - one = ng.constant(1, dtype=np.int32) - initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) - iter_cnt = ng.range(zero, np.int32(16), np.int32(1)) - ti_inputs = [iter_cnt, data, initial_cma, one] - - graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist]) - ti_slice_input_desc = [ - # timestep - # input_idx, body_param_idx, start, stride, part_size, end, axis - TensorIteratorSliceInputDesc(0, 0, 0, 1, 1, -1, 0), - # data - TensorIteratorSliceInputDesc(1, 1, 0, 1, 1, -1, 0), - ] - ti_merged_input_desc = [ - # body prev/curr_cma - TensorIteratorMergedInputDesc(2, 2, 0), - ] - ti_invariant_input_desc = [ - # body const one - TensorIteratorInvariantInputDesc(3, 3), - ] - - # TI outputs - ti_body_output_desc = [ - # final average - TensorIteratorBodyOutputDesc(0, 0, -1), - ] - ti_concat_output_desc = [ - # history of cma - TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0), - ] - - node = ng.tensor_iterator( - ti_inputs, - graph_body, - ti_slice_input_desc, - ti_merged_input_desc, - ti_invariant_input_desc, - ti_body_output_desc, - ti_concat_output_desc, - ) - - assert node.get_type_name() == "TensorIterator" - assert node.get_output_size() == 2 - # final average - 
assert list(node.get_output_shape(0)) == [2, 2] - # cma history - assert list(node.get_output_shape(1)) == [16, 2, 2] - - -def test_read_value_opset5(): - init_value = ng_opset5.parameter([2, 2], name="init_value", dtype=np.int32) - - node = ng_opset5.read_value(init_value, "var_id_667") - - assert node.get_type_name() == "ReadValue" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 2] - assert node.get_output_element_type(0) == Type.i32 - - -def test_assign_opset5(): - input_data = ng_opset5.parameter([5, 7], name="input_data", dtype=np.int32) - rv = ng_opset5.read_value(input_data, "var_id_667") - node = ng_opset5.assign(rv, "var_id_667") - - assert node.get_type_name() == "Assign" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [5, 7] - assert node.get_output_element_type(0) == Type.i32 - - -def test_read_value(): - init_value = ng.parameter([2, 2], name="init_value", dtype=np.int32) - - node = ng.read_value(init_value, "var_id_667") - - assert node.get_type_name() == "ReadValue" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 2] - assert node.get_output_element_type(0) == Type.i32 - - -def test_assign(): - input_data = ng.parameter([5, 7], name="input_data", dtype=np.int32) - rv = ng.read_value(input_data, "var_id_667") - node = ng.assign(rv, "var_id_667") - - assert node.get_type_name() == "Assign" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [5, 7] - assert node.get_output_element_type(0) == Type.i32 - - -def test_extract_image_patches(): - image = ng.parameter([64, 3, 10, 10], name="image", dtype=np.int32) - sizes = [3, 3] - strides = [5, 5] - rates = [1, 1] - padding = "VALID" - node = ng.extract_image_patches(image, sizes, strides, rates, padding) - - assert node.get_type_name() == "ExtractImagePatches" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [64, 27, 2, 2] - assert node.get_output_element_type(0) == Type.i32 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_sequence_operator_bidirectional(dtype): - batch_size = 1 - input_size = 16 - hidden_size = 128 - num_directions = 2 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - C_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 4 * hidden_size, input_size] - R_shape = [num_directions, 4 * hidden_size, hidden_size] - B_shape = [num_directions, 4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "BIDIRECTIONAL" - node = ng.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node.get_type_name() == "LSTMSequence" - assert node.get_output_size() == 3 - - activations = ["RELU", "tanh", "Sigmoid"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - - node_param = ng.lstm_sequence( - parameter_X, - parameter_H_t, - 
parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "LSTMSequence" - assert node_param.get_output_size() == 3 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_sequence_operator_reverse(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - C_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 4 * hidden_size, input_size] - R_shape = [num_directions, 4 * hidden_size, hidden_size] - B_shape = [num_directions, 4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "REVERSE" - - node_default = ng.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "LSTMSequence" - assert node_default.get_output_size() == 3 - - activations = ["RELU", "tanh", "Sigmoid"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - - node_param = ng.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "LSTMSequence" - assert node_param.get_output_size() == 3 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_lstm_sequence_operator_forward(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - C_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 4 * hidden_size, input_size] - R_shape = [num_directions, 4 * hidden_size, hidden_size] - B_shape = [num_directions, 4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "forward" - - node_default = ng.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "LSTMSequence" - assert node_default.get_output_size() == 3 - - activations = ["RELU", "tanh", "Sigmoid"] - activation_alpha = [2.0] - activation_beta = [1.0] - clip = 0.5 - - node = 
ng.lstm_sequence( - parameter_X, - parameter_H_t, - parameter_C_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node.get_type_name() == "LSTMSequence" - assert node.get_output_size() == 3 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_gru_sequence_operator_bidirectional(dtype): - batch_size = 1 - input_size = 16 - hidden_size = 128 - num_directions = 2 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 3 * hidden_size, input_size] - R_shape = [num_directions, 3 * hidden_size, hidden_size] - B_shape = [num_directions, 3 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "BIDIRECTIONAL" - node = ng.gru_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node.get_type_name() == "GRUSequence" - assert node.get_output_size() == 2 - - activations = ["RELU", "tanh"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - linear_before_reset = True - B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - node_param = ng.gru_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - linear_before_reset - ) - - assert node_param.get_type_name() == "GRUSequence" - assert node_param.get_output_size() == 2 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_gru_sequence_operator_reverse(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 3 * hidden_size, input_size] - R_shape = [num_directions, 3 * hidden_size, hidden_size] - B_shape = [num_directions, 3 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "REVERSE" - - node_default = ng.gru_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "GRUSequence" - assert node_default.get_output_size() == 2 - - activations = ["RELU", "tanh"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - linear_before_reset = True - B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - 
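The recurrent sequence checks being removed here (LSTMSequence, GRUSequence, RNNSequence) map almost one-to-one onto the Python API 2.0 surface. A minimal sketch of an equivalent API 2.0 check follows; the choice of `openvino.runtime.opset8` and the test name are illustrative assumptions, not the canonical replacements:
```
import numpy as np
import openvino.runtime.opset8 as ov  # assumption: any recent opset module exposes the same factories


def test_lstm_sequence_bidirectional_api20():
    batch_size, seq_length, input_size, hidden_size, num_directions = 1, 2, 16, 128, 2

    # Same parameter layout as the removed compatibility test.
    x = ov.parameter([batch_size, seq_length, input_size], dtype=np.float32, name="X")
    h_t = ov.parameter([batch_size, num_directions, hidden_size], dtype=np.float32, name="H_t")
    c_t = ov.parameter([batch_size, num_directions, hidden_size], dtype=np.float32, name="C_t")
    seq_len = ov.parameter([batch_size], dtype=np.int32, name="seq_len")
    w = ov.parameter([num_directions, 4 * hidden_size, input_size], dtype=np.float32, name="W")
    r = ov.parameter([num_directions, 4 * hidden_size, hidden_size], dtype=np.float32, name="R")
    b = ov.parameter([num_directions, 4 * hidden_size], dtype=np.float32, name="B")

    node = ov.lstm_sequence(x, h_t, c_t, seq_len, w, r, b, hidden_size, "BIDIRECTIONAL")

    # Construction-level checks only: type name and the three outputs (Y, Ho, Co).
    assert node.get_type_name() == "LSTMSequence"
    assert node.get_output_size() == 3
```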
node_param = ng.gru_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - linear_before_reset - ) - - assert node_param.get_type_name() == "GRUSequence" - assert node_param.get_output_size() == 2 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_gru_sequence_operator_forward(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, 3 * hidden_size, input_size] - R_shape = [num_directions, 3 * hidden_size, hidden_size] - B_shape = [num_directions, 3 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "forward" - - node_default = ng.gru_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "GRUSequence" - assert node_default.get_output_size() == 2 - - activations = ["RELU", "tanh"] - activation_alpha = [2.0] - activation_beta = [1.0] - clip = 0.5 - linear_before_reset = True - B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - node = ng.gru_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - linear_before_reset - ) - - assert node.get_type_name() == "GRUSequence" - assert node.get_output_size() == 2 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_rnn_sequence_operator_bidirectional(dtype): - batch_size = 1 - input_size = 16 - hidden_size = 128 - num_directions = 2 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, hidden_size, input_size] - R_shape = [num_directions, hidden_size, hidden_size] - B_shape = [num_directions, hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "BIDIRECTIONAL" - node = ng.rnn_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node.get_type_name() == "RNNSequence" - assert node.get_output_size() == 2 - - activations = ["RELU", "tanh"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - - node_param = ng.rnn_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - 
activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "RNNSequence" - assert node_param.get_output_size() == 2 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_rnn_sequence_operator_reverse(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, hidden_size, input_size] - R_shape = [num_directions, hidden_size, hidden_size] - B_shape = [num_directions, hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "REVERSE" - - node_default = ng.rnn_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "RNNSequence" - assert node_default.get_output_size() == 2 - - activations = ["RELU", "tanh"] - activation_alpha = [1.0, 2.0, 3.0] - activation_beta = [3.0, 2.0, 1.0] - clip = 1.22 - - node_param = ng.rnn_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node_param.get_type_name() == "RNNSequence" - assert node_param.get_output_size() == 2 - - -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_rnn_sequence_operator_forward(dtype): - batch_size = 2 - input_size = 4 - hidden_size = 3 - num_directions = 1 - seq_length = 2 - - X_shape = [batch_size, seq_length, input_size] - H_t_shape = [batch_size, num_directions, hidden_size] - seq_len_shape = [batch_size] - W_shape = [num_directions, hidden_size, input_size] - R_shape = [num_directions, hidden_size, hidden_size] - B_shape = [num_directions, hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) - - direction = "forward" - - node_default = ng.rnn_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - ) - - assert node_default.get_type_name() == "RNNSequence" - assert node_default.get_output_size() == 2 - - activations = ["RELU", "tanh"] - activation_alpha = [2.0] - activation_beta = [1.0] - clip = 0.5 - - node = ng.rnn_sequence( - parameter_X, - parameter_H_t, - parameter_seq_len, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - direction, - activations, - activation_alpha, - activation_beta, - clip, - ) - - assert node.get_type_name() == "RNNSequence" - assert node.get_output_size() == 2 - - -def test_multiclass_nms(): - boxes_data = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, - 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, - 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 
1.0, 101.0], dtype="float32") - boxes_data = boxes_data.reshape([1, 6, 4]) - box = ng.constant(boxes_data, dtype=np.float32) - scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3, - 0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32") - scores_data = scores_data.reshape([1, 2, 6]) - score = ng.constant(scores_data, dtype=np.float32) - - nms_node = ng.multiclass_nms(box, score, None, output_type="i32", nms_top_k=3, - iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid", - nms_eta=1.0) - - assert nms_node.get_type_name() == "MulticlassNms" - assert nms_node.get_output_size() == 3 - assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)]) - assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)]) - assert list(nms_node.outputs()[2].get_shape()) == [1, ] - assert nms_node.get_output_element_type(0) == Type.f32 - assert nms_node.get_output_element_type(1) == Type.i32 - assert nms_node.get_output_element_type(2) == Type.i32 - - boxes_data = np.array([[[7.55, 1.10, 18.28, 14.47], - [7.25, 0.47, 12.28, 17.77]], - [[4.06, 5.15, 16.11, 18.40], - [9.66, 3.36, 18.57, 13.26]], - [[6.50, 7.00, 13.33, 17.63], - [0.73, 5.34, 19.97, 19.97]]]).astype("float32") - box = ng.constant(boxes_data, dtype=np.float32) - scores_data = np.array([[0.34, 0.66], - [0.45, 0.61], - [0.39, 0.59]]).astype("float32") - score = ng.constant(scores_data, dtype=np.float32) - rois_num_data = np.array([3]).astype("int32") - roisnum = ng.constant(rois_num_data, dtype=np.int32) - nms_node = ng.multiclass_nms(box, score, roisnum, output_type="i32", nms_top_k=3, - iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid", - nms_eta=1.0) - - assert nms_node.get_type_name() == "MulticlassNms" - assert nms_node.get_output_size() == 3 - assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)]) - assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)]) - assert list(nms_node.outputs()[2].get_shape()) == [1, ] - assert nms_node.get_output_element_type(0) == Type.f32 - assert nms_node.get_output_element_type(1) == Type.i32 - assert nms_node.get_output_element_type(2) == Type.i32 - - -def test_matrix_nms(): - boxes_data = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1, - 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, - 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32") - boxes_data = boxes_data.reshape([1, 6, 4]) - box = ng.constant(boxes_data, dtype=np.float32) - scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3, - 0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32") - scores_data = scores_data.reshape([1, 2, 6]) - score = ng.constant(scores_data, dtype=np.float32) - - nms_node = ng.matrix_nms(box, score, output_type="i32", nms_top_k=3, - score_threshold=0.0, sort_result_type="score", background_class=0, - decay_function="linear", gaussian_sigma=2.0, post_threshold=0.0) - - assert nms_node.get_type_name() == "MatrixNms" - assert nms_node.get_output_size() == 3 - assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)]) - assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)]) - assert list(nms_node.outputs()[2].get_shape()) == [1, ] - assert nms_node.get_output_element_type(0) == Type.f32 - assert nms_node.get_output_element_type(1) == Type.i32 - assert nms_node.get_output_element_type(2) == Type.i32 - - -@pytest.mark.parametrize( - ("boxes_shape", "scores_shape", 
"max_output_boxes", "expected_shape"), - [ - ([1, 1000, 4], [1, 1, 1000], [1000], [PartialShape([Dimension(0, 1000), Dimension(3)]), PartialShape([Dimension(0, 1000), Dimension(3)])]), - ([1, 700, 4], [1, 1, 700], [600], [PartialShape([Dimension(0, 600), Dimension(3)]), PartialShape([Dimension(0, 600), Dimension(3)])]), - ([1, 300, 4], [1, 1, 300], [300], [PartialShape([Dimension(0, 300), Dimension(3)]), PartialShape([Dimension(0, 300), Dimension(3)])]), - ], -) -def test_non_max_suppression(boxes_shape, scores_shape, max_output_boxes, expected_shape): - boxes_parameter = ng.parameter(boxes_shape, name="Boxes", dtype=np.float32) - scores_parameter = ng.parameter(scores_shape, name="Scores", dtype=np.float32) - - node = ng.non_max_suppression(boxes_parameter, scores_parameter, make_constant_node(max_output_boxes, np.int64)) - assert node.get_type_name() == "NonMaxSuppression" - assert node.get_output_size() == 3 - assert node.get_output_partial_shape(0) == expected_shape[0] - assert node.get_output_partial_shape(1) == expected_shape[1] - assert list(node.get_output_shape(2)) == [1] - - -@pytest.mark.parametrize( - ("boxes_shape", "scores_shape", "max_output_boxes", "iou_threshold", "score_threshold", "soft_nms_sigma", "expected_shape"), - [ - ([1, 100, 4], [1, 1, 100], [100], 0.1, 0.4, 0.5, [PartialShape([Dimension(0, 100), Dimension(3)]), PartialShape([Dimension(0, 100), Dimension(3)])]), - ([1, 700, 4], [1, 1, 700], [600], 0.1, 0.4, 0.5, [PartialShape([Dimension(0, 600), Dimension(3)]), PartialShape([Dimension(0, 600), Dimension(3)])]), - ([1, 300, 4], [1, 1, 300], [300], 0.1, 0.4, 0.5, [PartialShape([Dimension(0, 300), Dimension(3)]), PartialShape([Dimension(0, 300), Dimension(3)])]), - ], -) -def test_non_max_suppression_non_default_args(boxes_shape, scores_shape, max_output_boxes, iou_threshold, score_threshold, soft_nms_sigma, expected_shape): - boxes_parameter = ng.parameter(boxes_shape, name="Boxes", dtype=np.float32) - scores_parameter = ng.parameter(scores_shape, name="Scores", dtype=np.float32) - - max_output_boxes = make_constant_node(max_output_boxes, np.int64) - iou_threshold = make_constant_node(iou_threshold, np.float32) - score_threshold = make_constant_node(score_threshold, np.float32) - soft_nms_sigma = make_constant_node(soft_nms_sigma, np.float32) - - node = ng.non_max_suppression(boxes_parameter, scores_parameter, max_output_boxes, iou_threshold, score_threshold, soft_nms_sigma) - assert node.get_type_name() == "NonMaxSuppression" - assert node.get_output_size() == 3 - assert node.get_output_partial_shape(0) == expected_shape[0] - assert node.get_output_partial_shape(1) == expected_shape[1] - assert list(node.get_output_shape(2)) == [1] - - -def test_slice(): - data_shape = [10, 7, 2, 13] - data = ng.parameter(data_shape, name="input", dtype=np.float32) - - start = ng.constant(np.array([2, 0, 0], dtype=np.int32)) - stop = ng.constant(np.array([9, 7, 2], dtype=np.int32)) - step = ng.constant(np.array([2, 1, 1], dtype=np.int32)) - - node_default_axes = ng.slice(data, start, stop, step) - - assert node_default_axes.get_type_name() == "Slice" - assert node_default_axes.get_output_size() == 1 - assert node_default_axes.get_output_element_type(0) == Type.f32 - assert tuple(node_default_axes.get_output_shape(0)) == np.zeros(data_shape)[2:9:2, ::, 0:2:1].shape - - start = ng.constant(np.array([0, 2], dtype=np.int32)) - stop = ng.constant(np.array([2, 9], dtype=np.int32)) - step = ng.constant(np.array([1, 2], dtype=np.int32)) - axes = ng.constant(np.array([-2, 0], 
dtype=np.int32)) - - node = ng.slice(data, start, stop, step, axes) - - assert node.get_type_name() == "Slice" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.f32 - assert tuple(node.get_output_shape(0)) == np.zeros(data_shape)[2:9:2, ::, 0:2:1].shape - - -def test_i420_to_bgr(): - expected_output_shape = [1, 480, 640, 3] - - # # Single plane (one arg) - arg_single_plane = ng.parameter([1, 720, 640, 1], name="input", dtype=np.float32) - node_single_plane = ng.i420_to_bgr(arg_single_plane) - - assert node_single_plane.get_type_name() == "I420toBGR" - assert node_single_plane.get_output_size() == 1 - assert node_single_plane.get_output_element_type(0) == Type.f32 - assert list(node_single_plane.get_output_shape(0)) == expected_output_shape - - # Separate planes (three args) - arg_y = ng.parameter([1, 480, 640, 1], name="input_y", dtype=np.float32) - arg_u = ng.parameter([1, 240, 320, 1], name="input_u", dtype=np.float32) - arg_v = ng.parameter([1, 240, 320, 1], name="input_v", dtype=np.float32) - - node_separate_planes = ng.i420_to_bgr(arg_y, arg_u, arg_v) - - assert node_separate_planes.get_type_name() == "I420toBGR" - assert node_separate_planes.get_output_size() == 1 - assert node_separate_planes.get_output_element_type(0) == Type.f32 - assert list(node_separate_planes.get_output_shape(0)) == expected_output_shape - - # Incorrect inputs number - with pytest.raises(UserInputError, match=r".*Operation I420toBGR*."): - node_separate_planes = ng.i420_to_bgr(arg_y, arg_v) - - with pytest.raises(UserInputError, match=r".*Operation I420toBGR*."): - node_separate_planes = ng.i420_to_bgr(arg_single_plane, None, arg_v) - - -def test_i420_to_rgb(): - expected_output_shape = [1, 480, 640, 3] - - # # Single plane (one arg) - arg_single_plane = ng.parameter([1, 720, 640, 1], name="input", dtype=np.float32) - node_single_plane = ng.i420_to_rgb(arg_single_plane) - - assert node_single_plane.get_type_name() == "I420toRGB" - assert node_single_plane.get_output_size() == 1 - assert node_single_plane.get_output_element_type(0) == Type.f32 - assert list(node_single_plane.get_output_shape(0)) == expected_output_shape - - # Separate planes (three args) - arg_y = ng.parameter([1, 480, 640, 1], name="input_y", dtype=np.float32) - arg_u = ng.parameter([1, 240, 320, 1], name="input_u", dtype=np.float32) - arg_v = ng.parameter([1, 240, 320, 1], name="input_v", dtype=np.float32) - - node_separate_planes = ng.i420_to_rgb(arg_y, arg_u, arg_v) - - assert node_separate_planes.get_type_name() == "I420toRGB" - assert node_separate_planes.get_output_size() == 1 - assert node_separate_planes.get_output_element_type(0) == Type.f32 - assert list(node_separate_planes.get_output_shape(0)) == expected_output_shape - - with pytest.raises(UserInputError, match=r".*Operation I420toRGB*."): - node_separate_planes = ng.i420_to_rgb(arg_y, arg_v) - - with pytest.raises(UserInputError, match=r".*Operation I420toRGB*."): - node_separate_planes = ng.i420_to_rgb(arg_single_plane, None, arg_v) - - -def test_nv12_to_bgr(): - expected_output_shape = [1, 480, 640, 3] - - # # Single plane (one arg) - arg_single_plane = ng.parameter([1, 720, 640, 1], name="input", dtype=np.float32) - node_single_plane = ng.nv12_to_bgr(arg_single_plane) - - assert node_single_plane.get_type_name() == "NV12toBGR" - assert node_single_plane.get_output_size() == 1 - assert node_single_plane.get_output_element_type(0) == Type.f32 - assert list(node_single_plane.get_output_shape(0)) == expected_output_shape - - # Separate planes 
(two args) - arg_y = ng.parameter([1, 480, 640, 1], name="input_y", dtype=np.float32) - arg_uv = ng.parameter([1, 240, 320, 2], name="input_uv", dtype=np.float32) - - node_separate_planes = ng.nv12_to_bgr(arg_y, arg_uv) - - assert node_separate_planes.get_type_name() == "NV12toBGR" - assert node_separate_planes.get_output_size() == 1 - assert node_separate_planes.get_output_element_type(0) == Type.f32 - assert list(node_separate_planes.get_output_shape(0)) == expected_output_shape - - -def test_nv12_to_rgb(): - expected_output_shape = [1, 480, 640, 3] - - # # Single plane (one arg) - arg_single_plane = ng.parameter([1, 720, 640, 1], name="input", dtype=np.float32) - node_single_plane = ng.nv12_to_rgb(arg_single_plane) - - assert node_single_plane.get_type_name() == "NV12toRGB" - assert node_single_plane.get_output_size() == 1 - assert node_single_plane.get_output_element_type(0) == Type.f32 - assert list(node_single_plane.get_output_shape(0)) == expected_output_shape - - # Separate planes (two args) - arg_y = ng.parameter([1, 480, 640, 1], name="input_y", dtype=np.float32) - arg_uv = ng.parameter([1, 240, 320, 2], name="input_uv", dtype=np.float32) - - node_separate_planes = ng.nv12_to_rgb(arg_y, arg_uv) - - assert node_separate_planes.get_type_name() == "NV12toRGB" - assert node_separate_planes.get_output_size() == 1 - assert node_separate_planes.get_output_element_type(0) == Type.f32 - assert list(node_separate_planes.get_output_shape(0)) == expected_output_shape - - -def test_softsign(): - input_shape = [2, 4, 8, 16] - - param = ng.parameter(input_shape, name="input") - node = ng.softsign(param, input_shape) - - assert node.get_type_name() == "SoftSign" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == input_shape - assert node.get_output_element_type(0) == Type.f32 - - -def test_rdft(): - param = ng.parameter([5, 3, 4], name="input") - axes = ng.constant(np.array([0, 1])) - signal_size = ng.constant(np.array([1, 2])) - node = ng.rdft(param, axes, signal_size) - - assert node.get_type_name() == "RDFT" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 2, 4, 2] - assert node.get_output_element_type(0) == Type.f32 - - -def test_irdft(): - param = ng.parameter([5, 3, 4, 2], name="input") - axes = ng.constant(np.array([0, 1])) - signal_size = ng.constant(np.array([1, 2])) - node = ng.irdft(param, axes, signal_size) - - assert node.get_type_name() == "IRDFT" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 2, 4] - assert node.get_output_element_type(0) == Type.f32 - - -def test_generate_proposals(): - im_info_shape = [1, 3] - anchors_shape = [4, 4, 3, 4] - deltas_shape = [1, 12, 4, 4] - scores_shape = [1, 3, 4, 4] - - im_info_param = ng.parameter(im_info_shape, name="im_info") - anchors_param = ng.parameter(anchors_shape, name="anchors") - deltas_param = ng.parameter(deltas_shape, name="deltas") - scores_param = ng.parameter(scores_shape, name="scores") - - node = ng.generate_proposals(im_info_param, - anchors_param, - deltas_param, - scores_param, - min_size=1.0, - nms_threshold=0.5, - pre_nms_count=200, - post_nms_count=100, - normalized=False, - nms_eta=1.0, - roi_num_type="i32") - - assert node.get_type_name() == "GenerateProposals" - assert node.get_output_size() == 3 - assert node.get_output_partial_shape(0).same_scheme(PartialShape([-1, 4])) - assert node.get_output_partial_shape(1).same_scheme(PartialShape([-1])) - assert node.get_output_partial_shape(2).same_scheme(PartialShape([1])) - 
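The color-conversion tests deleted above work purely at the graph-construction level, so porting them is mostly a matter of swapping the import. A hedged sketch against the API 2.0 opset (module name assumed) could look like:
```
import numpy as np
import openvino.runtime.opset8 as ov  # assumption: module name


def test_nv12_to_rgb_two_plane_api20():
    # Y plane at full resolution, interleaved UV plane at half resolution.
    arg_y = ov.parameter([1, 480, 640, 1], dtype=np.float32, name="input_y")
    arg_uv = ov.parameter([1, 240, 320, 2], dtype=np.float32, name="input_uv")

    node = ov.nv12_to_rgb(arg_y, arg_uv)

    assert node.get_type_name() == "NV12toRGB"
    assert list(node.get_output_shape(0)) == [1, 480, 640, 3]
```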
assert node.get_output_element_type(0) == Type.f32 - assert node.get_output_element_type(1) == Type.f32 - assert node.get_output_element_type(2) == Type.i32 - - -def test_grid_sample_default(): - img = ng.parameter([1, 3, 100, 100], dtype=np.int32, name="image") - grid = ng.parameter([1, 10, 10, 2], dtype=np.float32, name="grid") - - node = ng.grid_sample(img, grid, {}) - - assert node.get_type_name() == "GridSample" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 3, 10, 10] - assert node.get_output_element_type(0) == Type.i32 - - -def test_grid_sample_custom_attributes(): - img = ng.parameter([1, 3, 100, 100], dtype=np.int32, name="image") - grid = ng.parameter([1, 5, 6, 2], dtype=np.float32, name="grid") - - attributes = { - "align_corners": True, - "mode": "nearest", - "padding_mode": "reflection" - } - - node = ng.grid_sample(img, grid, attributes) - - assert node.get_type_name() == "GridSample" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 3, 5, 6] - assert node.get_output_element_type(0) == Type.i32 - - node_attributes = node.get_attributes() - assert node_attributes["align_corners"] is True - assert node_attributes["mode"] == "nearest" - assert node_attributes["padding_mode"] == "reflection" - - -@pytest.mark.parametrize( - ("expected_shape", "shape_calculation_mode"), - [ - ([1, 3, 64, 64], "scales"), - ([1, 3, 256, 256], "sizes"), - ], -) -@pytest.mark.parametrize("dtype", np_types) -def test_interpolate_opset10(dtype, expected_shape, shape_calculation_mode): - - image_shape = [1, 3, 1024, 1024] - image_node = ng.parameter(image_shape, dtype, name="Image") - output_shape = [256, 256] - scales = np.array([1 / 16, 1 / 16], dtype=np.float32) - axes = [2, 3] - mode = "cubic" - - node = ng_opset10.interpolate(image=image_node, output_shape=output_shape, scales=scales, - axes=axes,mode=mode, shape_calculation_mode=shape_calculation_mode) - assert node.get_type_name() == "Interpolate" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - - -@pytest.mark.parametrize( - ("expected_shape", "shape_calculation_mode", "input_value"), - [ - ([1, 3, 64, 64], "scales", np.array([1 / 16, 1 / 16], dtype=np.float32)), - ([1, 3, 256, 256], "sizes", np.array([256, 256], dtype=np.int32)), - ], -) -@pytest.mark.parametrize("dtype", np_types) -def test_interpolate_opset11(dtype, expected_shape, shape_calculation_mode, input_value): - - image_shape = [1, 3, 1024, 1024] - image_node = ng.parameter(image_shape, dtype, name="Image") - axes = [2, 3] - mode = "bilinear_pillow" - - node = ng_opset11.interpolate(image=image_node, scales_or_sizes=input_value, axes=axes, mode=mode, - shape_calculation_mode=shape_calculation_mode) - assert node.get_type_name() == "Interpolate" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - -def test_is_finite_opset10(): - input_shape = [1, 2, 3, 4] - input_node = ng.parameter(input_shape, np.float32, name="InputData") - node = ng_opset10.is_finite(input_node) - - assert node.get_type_name() == "IsFinite" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == input_shape - - -def test_is_inf_opset10_default(): - input_shape = [2, 2, 2, 2] - input_node = ng.parameter(input_shape, dtype=np.float32, name="InputData") - node = ng_opset10.is_inf(input_node) - - assert node.get_type_name() == "IsInf" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == input_shape - - 
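For the element-classification ops above (IsFinite/IsInf/IsNaN), an API 2.0 equivalent is a near-verbatim rewrite; the sketch below assumes `openvino.runtime.opset10` exposes the same factory names:
```
import numpy as np
import openvino.runtime.opset10 as ov  # assumption: module name
from openvino.runtime import Type


def test_is_nan_api20():
    data = ov.parameter([1, 2, 3, 4], dtype=np.float32, name="InputData")
    node = ov.is_nan(data)

    # IsNaN keeps the input shape and produces a boolean mask.
    assert node.get_type_name() == "IsNaN"
    assert list(node.get_output_shape(0)) == [1, 2, 3, 4]
    assert node.get_output_element_type(0) == Type.boolean
```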
node_attributes = node.get_attributes() - assert node_attributes["detect_positive"] is True - assert node_attributes["detect_negative"] is True - - -def test_is_inf_opset10_custom_attribute(): - input_shape = [2, 2, 2] - input_node = ng.parameter(input_shape, dtype=np.float32, name="InputData") - attributes = { - "detect_positive": False, - } - node = ng_opset10.is_inf(input_node, attributes) - - assert node.get_type_name() == "IsInf" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == input_shape - - node_attributes = node.get_attributes() - assert node_attributes["detect_positive"] is False - assert node_attributes["detect_negative"] is True - - -def test_is_inf_opset10_custom_all_attributes(): - input_shape = [2, 2, 2] - input_node = ng.parameter(input_shape, dtype=np.float32, name="InputData") - attributes = { - "detect_negative": False, - "detect_positive": True, - } - node = ng_opset10.is_inf(input_node, attributes) - - assert node.get_type_name() == "IsInf" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == input_shape - - node_attributes = node.get_attributes() - assert node_attributes["detect_positive"] is True - assert node_attributes["detect_negative"] is False - - -def test_is_nan_opset10(): - input_shape = [1, 2, 3, 4] - input_node = ng.parameter(input_shape, np.float32, name="InputData") - node = ng_opset10.is_nan(input_node) - - assert node.get_type_name() == "IsNaN" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == input_shape - assert node.get_output_element_type(0) == Type.boolean - - -def test_unique_opset10(): - input_shape = [1, 2, 3, 4] - input_node = ng.parameter(input_shape, np.float32, name="input_data") - axis = ng.constant([1], np.int32, [1]) - - node = ng_opset10.unique(input_node, axis, False, "i32") - - assert node.get_type_name() == "Unique" - assert node.get_sorted() is False - assert node.get_output_size() == 4 - - assert node.get_output_partial_shape(0) == PartialShape([Dimension(1), Dimension(1, 2), - Dimension(3), Dimension(4)]) - assert node.get_output_partial_shape(1) == PartialShape([Dimension(1, 24)]) - assert node.get_output_partial_shape(2) == PartialShape([2]) - assert node.get_output_partial_shape(3) == PartialShape([Dimension(1, 24)]) - - assert node.get_output_element_type(0) == Type.f32 - assert node.get_output_element_type(1) == Type.i32 - assert node.get_output_element_type(2) == Type.i32 - assert node.get_output_element_type(3) == Type.i64 - - # Axis default, means flattened result - node = ng_opset10.unique(input_node, None, False, "i32", "i32") - - assert node.get_type_name() == "Unique" - assert node.get_sorted() is False - assert node.get_output_size() == 4 - - assert node.get_output_partial_shape(0) == PartialShape([Dimension(1, 24)]) - assert node.get_output_partial_shape(1) == PartialShape([Dimension(1, 24)]) - assert node.get_output_partial_shape(2) == PartialShape([24]) - assert node.get_output_partial_shape(3) == PartialShape([Dimension(1, 24)]) - - assert node.get_output_element_type(0) == Type.f32 - assert node.get_output_element_type(1) == Type.i32 - assert node.get_output_element_type(2) == Type.i32 - assert node.get_output_element_type(3) == Type.i32 - - # All arguments default - node = ng_opset10.unique(input_node) - - assert node.get_type_name() == "Unique" - assert node.get_output_size() == 4 - assert node.get_sorted() is True - - assert node.get_output_partial_shape(0) == PartialShape([Dimension(1, 24)]) - assert 
node.get_output_partial_shape(1) == PartialShape([Dimension(1, 24)]) - assert node.get_output_partial_shape(2) == PartialShape([24]) - assert node.get_output_partial_shape(3) == PartialShape([Dimension(1, 24)]) - - assert node.get_output_element_type(0) == Type.f32 - assert node.get_output_element_type(1) == Type.i64 - assert node.get_output_element_type(2) == Type.i64 - assert node.get_output_element_type(3) == Type.i64 - - -def test_topk_opset11(): - data_shape = [1, 3, 256] - data = ng.parameter(data_shape, dtype=np.int32, name="Data") - k_val = np.int32(3) - axis = np.int32(-1) - node = ng_opset11.topk(data, k_val, axis, "min", "value", stable=True) - - assert node.get_type_name() == "TopK" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [1, 3, 3] - assert list(node.get_output_shape(1)) == [1, 3, 3] diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ctc_loss.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ctc_loss.py deleted file mode 100644 index f179cff1318..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ctc_loss.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng -from ngraph.impl import Type - - -def test_ctc_loss_props(): - ind_dtype = np.int32 - float_dtype = np.float32 - logits = ng.parameter([2, 100, 80], dtype=float_dtype, name="logits") - logit_length = ng.parameter([2], dtype=ind_dtype, name="logit_length") - labels = ng.parameter([2, 100], dtype=ind_dtype, name="labels") - label_length = ng.parameter([2], dtype=ind_dtype, name="label_length") - blank_index = ng.parameter([], dtype=ind_dtype, name="blank_index") - preprocess_collapse_repeated = False - ctc_merge_repeated = True - unique = False - - node = ng.ctc_loss(logits, logit_length, labels, label_length, blank_index, - preprocess_collapse_repeated, ctc_merge_repeated, unique) - assert node.get_type_name() == "CTCLoss" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2] - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_data_movement.py b/src/bindings/python/tests_compatibility/test_ngraph/test_data_movement.py deleted file mode 100644 index b6bfecbc3c0..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_data_movement.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng -from ngraph.impl import Type, Shape - - -def test_reverse_sequence(): - input_data = ng.parameter((2, 3, 4, 2), name="input_data", dtype=np.int32) - seq_lengths = np.array([1, 2, 1, 2], dtype=np.int32) - batch_axis = 2 - sequence_axis = 1 - - input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) - seq_lengths_param = ng.parameter(seq_lengths.shape, name="sequence lengths", dtype=np.int32) - model = ng.reverse_sequence(input_param, seq_lengths_param, batch_axis, sequence_axis) - - assert model.get_type_name() == "ReverseSequence" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 3, 4, 2] - assert model.get_output_element_type(0) == Type.i32 - - -def test_pad_edge(): - input_data = np.arange(1, 13).reshape([3, 4]) - pads_begin = np.array([0, 1], dtype=np.int32) - pads_end = np.array([2, 3], dtype=np.int32) - - input_param = ng.parameter(input_data.shape, 
name="input", dtype=np.int32) - model = ng.pad(input_param, pads_begin, pads_end, "edge") - - assert model.get_type_name() == "Pad" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [5, 8] - assert model.get_output_element_type(0) == Type.i32 - - -def test_pad_constant(): - input_data = np.arange(1, 13).reshape([3, 4]) - pads_begin = np.array([0, 1], dtype=np.int32) - pads_end = np.array([2, 3], dtype=np.int32) - - input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) - model = ng.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int32)) - - assert model.get_type_name() == "Pad" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [5, 8] - assert model.get_output_element_type(0) == Type.i32 - - -def test_select(): - cond = np.array([[False, False], [True, False], [True, True]]) - then_node = np.array([[-1, 0], [1, 2], [3, 4]], dtype=np.int32) - else_node = np.array([[11, 10], [9, 8], [7, 6]], dtype=np.int32) - - node = ng.select(cond, then_node, else_node) - assert node.get_type_name() == "Select" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 2] - assert node.get_output_element_type(0) == Type.i32 - - -def test_gather_nd(): - indices_type = np.int32 - data_dtype = np.float32 - data = ng.parameter([2, 10, 80, 30, 50], dtype=data_dtype, name="data") - indices = ng.parameter([2, 10, 30, 40, 2], dtype=indices_type, name="indices") - batch_dims = 2 - expected_shape = [20, 30, 40, 50] - - node = ng.opset5.gather_nd(data, indices, batch_dims) - assert node.get_type_name() == "GatherND" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 - - -def test_gather_v8_nd(): - indices_type = np.int32 - data_dtype = np.float32 - data = ng.parameter([2, 10, 80, 30, 50], dtype=data_dtype, name="data") - indices = ng.parameter([2, 10, 30, 40, 2], dtype=indices_type, name="indices") - batch_dims = 2 - expected_shape = [2, 10, 30, 40, 50] - - node = ng.gather_nd(data, indices, batch_dims) - assert node.get_type_name() == "GatherND" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 - - -def test_gather_elements(): - indices_type = np.int32 - data_dtype = np.float32 - data = ng.parameter(Shape([2, 5]), dtype=data_dtype, name="data") - indices = ng.parameter(Shape([2, 100]), dtype=indices_type, name="indices") - axis = 1 - expected_shape = [2, 100] - - node = ng.gather_elements(data, indices, axis) - assert node.get_type_name() == "GatherElements" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_detection_output.py b/src/bindings/python/tests_compatibility/test_ngraph/test_detection_output.py deleted file mode 100644 index b5874a5acab..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_detection_output.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import numpy as np -import pytest - -np_types = [np.float32, np.int32] -integral_np_types = [ - np.int8, - np.int16, - np.int32, - np.int64, - np.uint8, - np.uint16, - np.uint32, - np.uint64, -] - - -@pytest.mark.parametrize( - "int_dtype, 
fp_dtype", - [ - (np.int8, np.float32), - (np.int16, np.float32), - (np.int32, np.float32), - (np.int64, np.float32), - (np.uint8, np.float32), - (np.uint16, np.float32), - (np.uint32, np.float32), - (np.uint64, np.float32), - (np.int32, np.float16), - (np.int32, np.float64), - ], -) -def test_detection_output(int_dtype, fp_dtype): - attributes = { - "keep_top_k": np.array([64], dtype=int_dtype), - "nms_threshold": fp_dtype(0.645), - } - - box_logits = ng.parameter([4, 8], fp_dtype, "box_logits") - class_preds = ng.parameter([4, 170], fp_dtype, "class_preds") - proposals = ng.parameter([4, 2, 10], fp_dtype, "proposals") - aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds") - aux_box_preds = ng.parameter([4, 8], fp_dtype, "aux_box_preds") - - node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) - - assert node.get_type_name() == "DetectionOutput" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 1, 256, 7] - - -@pytest.mark.parametrize( - "int_dtype, fp_dtype", - [ - (np.int8, np.float32), - (np.int16, np.float32), - (np.int32, np.float32), - (np.int64, np.float32), - (np.uint8, np.float32), - (np.uint16, np.float32), - (np.uint32, np.float32), - (np.uint64, np.float32), - (np.int32, np.float16), - (np.int32, np.float64), - ], -) -def test_dynamic_get_attribute_value(int_dtype, fp_dtype): - attributes = { - "background_label_id": int_dtype(13), - "top_k": int_dtype(16), - "variance_encoded_in_target": True, - "keep_top_k": np.array([64, 32, 16, 8], dtype=int_dtype), - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "share_location": False, - "nms_threshold": fp_dtype(0.645), - "confidence_threshold": fp_dtype(0.111), - "clip_after_nms": True, - "clip_before_nms": False, - "decrease_label_id": True, - "normalized": True, - "input_height": int_dtype(86), - "input_width": int_dtype(79), - "objectness_score": fp_dtype(0.77), - } - - box_logits = ng.parameter([4, 680], fp_dtype, "box_logits") - class_preds = ng.parameter([4, 170], fp_dtype, "class_preds") - proposals = ng.parameter([4, 1, 8], fp_dtype, "proposals") - aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds") - aux_box_preds = ng.parameter([4, 680], fp_dtype, "aux_box_preds") - - node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) - - assert node.get_background_label_id() == int_dtype(13) - assert node.get_top_k() == int_dtype(16) - assert node.get_variance_encoded_in_target() - assert np.all(np.equal(node.get_keep_top_k(), np.array([64, 32, 16, 8], dtype=int_dtype))) - assert node.get_code_type() == "caffe.PriorBoxParameter.CENTER_SIZE" - assert not node.get_share_location() - assert np.isclose(node.get_nms_threshold(), fp_dtype(0.645)) - assert np.isclose(node.get_confidence_threshold(), fp_dtype(0.111)) - assert node.get_clip_after_nms() - assert not node.get_clip_before_nms() - assert node.get_decrease_label_id() - assert node.get_normalized() - assert node.get_input_height() == int_dtype(86) - assert node.get_input_width() == int_dtype(79) - assert np.isclose(node.get_objectness_score(), fp_dtype(0.77)) diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_dft.py b/src/bindings/python/tests_compatibility/test_ngraph/test_dft.py deleted file mode 100644 index 56223e92d11..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_dft.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - -from ngraph.impl import Type -import ngraph as ng -import numpy as np - - -def build_fft_input_data(): - np.random.seed(202104) - return np.random.uniform(0, 1, (2, 10, 10, 2)).astype(np.float32) - - -def test_dft_1d(): - input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([2], dtype=np.int64)) - np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), - axis=2).astype(np.complex64) - - dft_node = ng.dft(input_tensor, input_axes) - assert dft_node.get_type_name() == "DFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(np.stack((np_results.real, np_results.imag), axis=-1).shape) - assert dft_node.get_output_element_type(0) == Type.f32 - - -def test_dft_2d(): - input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) - - dft_node = ng.dft(input_tensor, input_axes) - assert dft_node.get_type_name() == "DFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == [2, 10, 10, 2] - assert dft_node.get_output_element_type(0) == Type.f32 - - -def test_dft_3d(): - input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) - - dft_node = ng.dft(input_tensor, input_axes) - assert dft_node.get_type_name() == "DFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == [2, 10, 10, 2] - assert dft_node.get_output_element_type(0) == Type.f32 - - -def test_dft_1d_signal_size(): - input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([-2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([20], dtype=np.int64)) - - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) - assert dft_node.get_type_name() == "DFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == [2, 20, 10, 2] - assert dft_node.get_output_element_type(0) == Type.f32 - - -def test_dft_2d_signal_size_1(): - input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) - - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) - assert dft_node.get_type_name() == "DFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == [4, 10, 5, 2] - assert dft_node.get_output_element_type(0) == Type.f32 - - -def test_dft_2d_signal_size_2(): - input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) - - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) - assert dft_node.get_type_name() == "DFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == [2, 4, 5, 2] - assert dft_node.get_output_element_type(0) == Type.f32 - - -def test_dft_3d_signal_size(): - input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5, 16], dtype=np.int64)) - - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) - assert dft_node.get_type_name() == "DFT" - assert 
dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == [4, 5, 16, 2] - assert dft_node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_dyn_attributes.py b/src/bindings/python/tests_compatibility/test_ngraph/test_dyn_attributes.py deleted file mode 100644 index 9cfee2b7760..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_dyn_attributes.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import numpy as np -import pytest - - -@pytest.fixture() -def _proposal_node(): - attributes = { - "base_size": np.uint16(1), - "pre_nms_topn": np.uint16(20), - "post_nms_topn": np.uint16(64), - "nms_thresh": np.float64(0.34), - "feat_stride": np.uint16(16), - "min_size": np.uint16(32), - "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float64), - "scale": np.array([2, 3, 3, 4], dtype=np.float64), - } - batch_size = 7 - - class_probs = ng.parameter([batch_size, 12, 34, 62], np.float64, "class_probs") - bbox_deltas = ng.parameter([batch_size, 24, 34, 62], np.float64, "bbox_deltas") - image_shape = ng.parameter([3], np.float64, "image_shape") - return ng.proposal(class_probs, bbox_deltas, image_shape, attributes) - - -def test_dynamic_attributes_softmax(): - axis = 2 - data = ng.parameter([1, 2, 3, 4], np.float32, "data_in") - node = ng.softmax(data, axis) - - assert node.get_axis() == axis - node.set_axis(3) - assert node.get_axis() == 3 - - -@pytest.mark.parametrize( - "int_dtype, fp_dtype", - [ - (np.uint8, np.float32), - (np.uint16, np.float32), - (np.uint32, np.float32), - (np.uint64, np.float32), - (np.uint32, np.float16), - (np.uint32, np.float64), - ], -) -def test_dynamic_set_attribute_value(int_dtype, fp_dtype): - attributes = { - "base_size": int_dtype(1), - "pre_nms_topn": int_dtype(20), - "post_nms_topn": int_dtype(64), - "nms_thresh": fp_dtype(0.34), - "feat_stride": int_dtype(16), - "min_size": int_dtype(32), - "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype), - "scale": np.array([2, 3, 3, 4], dtype=fp_dtype), - } - batch_size = 7 - - class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") - bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") - image_shape = ng.parameter([3], fp_dtype, "image_shape") - node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes) - - node.set_base_size(int_dtype(15)) - node.set_pre_nms_topn(int_dtype(7)) - node.set_post_nms_topn(int_dtype(33)) - node.set_nms_thresh(fp_dtype(1.55)) - node.set_feat_stride(int_dtype(8)) - node.set_min_size(int_dtype(123)) - node.set_ratio(np.array([1.1, 2.5, 3.0, 4.5], dtype=fp_dtype)) - node.set_scale(np.array([2.1, 3.2, 3.3, 4.4], dtype=fp_dtype)) - node.set_clip_before_nms(True) - node.set_clip_after_nms(True) - node.set_normalize(True) - node.set_box_size_scale(fp_dtype(1.34)) - node.set_box_coordinate_scale(fp_dtype(0.88)) - node.set_framework("OpenVINO") - - assert node.get_base_size() == int_dtype(15) - assert node.get_pre_nms_topn() == int_dtype(7) - assert node.get_post_nms_topn() == int_dtype(33) - assert np.isclose(node.get_nms_thresh(), fp_dtype(1.55)) - assert node.get_feat_stride() == int_dtype(8) - assert node.get_min_size() == int_dtype(123) - assert np.allclose(node.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=fp_dtype)) - assert np.allclose(node.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=fp_dtype)) - assert node.get_clip_before_nms() - 
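The dynamic-attribute tests above exercise construction-time attributes only, so a construction-level API 2.0 check is usually enough. A minimal sketch (opset module assumed; the 2.0 spelling of the typed attribute accessors is not shown in this diff, so they are omitted):
```
import numpy as np
import openvino.runtime.opset8 as ov  # assumption: module name


def test_softmax_axis_api20():
    data = ov.parameter([1, 2, 3, 4], dtype=np.float32, name="data_in")
    node = ov.softmax(data, axis=2)

    # Softmax is shape-preserving; axis handling is verified at construction time.
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [1, 2, 3, 4]
```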
assert node.get_clip_after_nms() - assert node.get_normalize() - assert np.isclose(node.get_box_size_scale(), fp_dtype(1.34)) - assert np.isclose(node.get_box_coordinate_scale(), fp_dtype(0.88)) - assert node.get_framework() == "OpenVINO" - - -def test_dynamic_attr_cache(_proposal_node): - node = _proposal_node - - assert not node._attr_cache_valid - node.set_nms_thresh(1.3453678102) - assert not node._attr_cache_valid - assert np.isclose(node.get_nms_thresh(), np.float64(1.3453678102)) - assert node._attr_cache_valid - - -def test_dynamic_attr_transitivity(_proposal_node): - node = _proposal_node - node2 = node - - node.set_ratio(np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64)) - assert np.allclose(node.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64)) - assert np.allclose(node2.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64)) - - node2.set_scale(np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64)) - assert np.allclose(node2.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64)) - assert np.allclose(node.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64)) - - -def test_dynamic_attributes_simple(): - batch_size = 1 - input_size = 16 - hidden_size = 128 - - X_shape = [batch_size, input_size] - H_t_shape = [batch_size, hidden_size] - W_shape = [3 * hidden_size, input_size] - R_shape = [3 * hidden_size, hidden_size] - B_shape = [4 * hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) - - activations = ["tanh", "relu"] - activations_alpha = [1.0, 2.0] - activations_beta = [1.0, 2.0] - clip = 0.5 - linear_before_reset = True - - node = ng.gru_cell( - parameter_X, - parameter_H_t, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - activations, - activations_alpha, - activations_beta, - clip, - linear_before_reset, - ) - - assert node.get_hidden_size() == hidden_size - assert all(map(lambda x, y: x == y, node.get_activations(), activations)) - assert all(np.equal(node.get_activations_alpha(), activations_alpha)) - assert all(np.equal(node.get_activations_beta(), activations_beta)) - assert node.get_linear_before_reset() == linear_before_reset - assert np.isclose(node.get_clip(), clip) diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_einsum.py b/src/bindings/python/tests_compatibility/test_ngraph/test_einsum.py deleted file mode 100644 index 2427d60f64d..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_einsum.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import numpy as np -import pytest - -from ngraph.utils.types import get_element_type -from tests_compatibility import xfail_issue_58033 - - -def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype, - seed=202104): - """Test Einsum operation for given input shapes, equation, and data type. - - It generates input data of given shapes and type, receives reference results using numpy, - and tests IE implementation by matching with reference numpy results. 
- :param input_shapes: a list of tuples with shapes - :param equation: Einsum equation - :param data_type: a type of input data - :param seed: a seed for random generation of input data - """ - np.random.seed(seed) - num_inputs = len(input_shapes) - - # generate input tensors - ng_inputs = [] - np_inputs = [] - for i in range(num_inputs): - input_i = np.random.randint(1, 10 + 1, size=input_shapes[i]).astype(data_type) - np_inputs.append(input_i) - ng_inputs.append(ng.parameter(input_i.shape, dtype=data_type)) - - expected_result = np.einsum(equation, *np_inputs) - einsum_model = ng.einsum(ng_inputs, equation) - - # check the output shape and type - assert einsum_model.get_type_name() == "Einsum" - assert einsum_model.get_output_size() == 1 - assert list(einsum_model.get_output_shape(0)) == list(expected_result.shape) - assert einsum_model.get_output_element_type(0) == get_element_type(data_type) - - -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_dot_product(data_type): - einsum_op_exec([5, 5], "i,i->", data_type) - - -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_matrix_multiplication(data_type): - einsum_op_exec([(2, 3), (3, 4)], "ab,bc->ac", data_type) - - -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_batch_trace(data_type): - einsum_op_exec([(2, 3, 3)], "kii->k", data_type) - - -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_diagonal_extraction(data_type): - einsum_op_exec([(6, 5, 5)], "kii->ki", data_type) - - -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_transpose(data_type): - einsum_op_exec([(1, 2, 3)], "ijk->kij", data_type) - - -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_multiple_multiplication(data_type): - einsum_op_exec([(2, 5), (5, 3, 6), (5, 3)], "ab,bcd,bc->ca", data_type) - - -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_simple_ellipsis(data_type): - einsum_op_exec([(5, 3, 4)], "a...->...", data_type) - - -@xfail_issue_58033 -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_multiple_ellipsis(data_type): - einsum_op_exec([(3, 5), 1], "a...,...->a...", data_type, with_value=True) - - -@xfail_issue_58033 -@pytest.mark.parametrize("data_type", [np.float32, np.int32]) -def test_broadcasting_ellipsis(data_type): - einsum_op_exec([(9, 1, 4, 3), (3, 11, 7, 1)], "a...b,b...->a...", data_type, with_value=True) diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_eye.py b/src/bindings/python/tests_compatibility/test_ngraph/test_eye.py deleted file mode 100644 index c14b4ebf9bb..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_eye.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import numpy as np -import pytest - -from ngraph.utils.types import get_element_type -from ngraph.utils.types import get_element_type_str - - -@pytest.mark.parametrize( - "num_rows, num_columns, diagonal_index, out_type", - [ - pytest.param(2, 5, 0, np.float32), - pytest.param(5, 3, 2, np.int64), - pytest.param(3, 3, -1, np.float16), - pytest.param(5, 5, -10, np.float32), - ], -) -def test_eye_rectangle(num_rows, num_columns, diagonal_index, out_type): - num_rows_array = np.array([num_rows], np.int32) - num_columns_array = np.array([num_columns], np.int32) - diagonal_index_array = np.array([diagonal_index], np.int32) - num_rows_tensor = ng.constant(num_rows_array) - 
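The Einsum coverage above reduces to shape and type checks on a built node; an API 2.0 sketch (assuming `openvino.runtime.opset8` and the same `einsum(inputs, equation)` signature) might be:
```
import numpy as np
import openvino.runtime.opset8 as ov  # assumption: module name
from openvino.runtime import Type


def test_einsum_matmul_api20():
    lhs = ov.parameter([2, 3], dtype=np.float32, name="lhs")
    rhs = ov.parameter([3, 4], dtype=np.float32, name="rhs")

    # "ab,bc->ac" is a plain matrix multiplication.
    node = ov.einsum([lhs, rhs], "ab,bc->ac")

    assert node.get_type_name() == "Einsum"
    assert list(node.get_output_shape(0)) == [2, 4]
    assert node.get_output_element_type(0) == Type.f32
```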
num_columns_tensor = ng.constant(num_columns_array) - diagonal_index_tensor = ng.constant(diagonal_index_array) - - # Create with param names - eye_node = ng.eye(num_rows=num_rows_tensor, - num_columns=num_columns_tensor, - diagonal_index=diagonal_index_tensor, - output_type=get_element_type_str(out_type)) - - # Create with default orded - eye_node = ng.eye(num_rows_tensor, - num_columns_tensor, - diagonal_index_tensor, - get_element_type_str(out_type)) - - expected_results = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32) - - assert eye_node.get_type_name() == "Eye" - assert eye_node.get_output_size() == 1 - assert eye_node.get_output_element_type(0) == get_element_type(out_type) - assert tuple(eye_node.get_output_shape(0)) == expected_results.shape - - -@pytest.mark.parametrize( - "num_rows, num_columns, diagonal_index, batch_shape, out_type", - [ - pytest.param(2, 5, 0, [1], np.float32), - pytest.param(5, 3, 2, [2, 2], np.int64), - pytest.param(3, 3, -1, [1, 3, 2], np.float16), - pytest.param(5, 5, -10, [1, 1], np.float32), - ], -) -def test_eye_batch_shape(num_rows, num_columns, diagonal_index, batch_shape, out_type): - num_rows_array = np.array([num_rows], np.int32) - num_columns_array = np.array([num_columns], np.int32) - diagonal_index_array = np.array([diagonal_index], np.int32) - batch_shape_array = np.array(batch_shape, np.int32) - num_rows_tensor = ng.constant(num_rows_array) - num_columns_tensor = ng.constant(num_columns_array) - diagonal_index_tensor = ng.constant(diagonal_index_array) - batch_shape_tensor = ng.constant(batch_shape_array) - - # Create with param names - eye_node = ng.eye(num_rows=num_rows_tensor, - num_columns=num_columns_tensor, - diagonal_index=diagonal_index_tensor, - batch_shape=batch_shape_tensor, - output_type=get_element_type_str(out_type)) - - # Create with default orded - eye_node = ng.eye(num_rows_tensor, - num_columns_tensor, - diagonal_index_tensor, - get_element_type_str(out_type), - batch_shape_tensor) - - output_shape = [*batch_shape, 1, 1] - one_matrix = np.eye(num_rows, M=num_columns, k=diagonal_index, dtype=np.float32) - expected_results = np.tile(one_matrix, output_shape) - - assert eye_node.get_type_name() == "Eye" - assert eye_node.get_output_size() == 1 - assert eye_node.get_output_element_type(0) == get_element_type(out_type) - assert tuple(eye_node.get_output_shape(0)) == expected_results.shape diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_gather.py b/src/bindings/python/tests_compatibility/test_ngraph/test_gather.py deleted file mode 100644 index 903b82d1a91..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_gather.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import numpy as np - - -def test_gather(): - input_data = ng.parameter((3, 3), name="input_data", dtype=np.float32) - input_indices = ng.parameter((1, 2), name="input_indices", dtype=np.int32) - input_axis = np.array([1], np.int32) - - node = ng.gather(input_data, input_indices, input_axis) - assert node.get_type_name() == "Gather" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 1, 2] - - -def test_gather_with_scalar_axis(): - input_data = ng.parameter((3, 3), name="input_data", dtype=np.float32) - input_indices = ng.parameter((1, 2), name="input_indices", dtype=np.int32) - input_axis = np.array(1, np.int32) - - node = ng.gather(input_data, input_indices, input_axis) - assert 
node.get_type_name() == "Gather" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 1, 2] - - -def test_gather_batch_dims_1(): - input_data = ng.parameter((2, 5), name="input_data", dtype=np.float32) - input_indices = ng.parameter((2, 3), name="input_indices", dtype=np.int32) - input_axis = np.array([1], np.int32) - batch_dims = 1 - - node = ng.gather(input_data, input_indices, input_axis, batch_dims) - assert node.get_type_name() == "Gather" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 3] - - -def test_gather_negative_indices(): - input_data = ng.parameter((3, 3), name="input_data", dtype=np.float32) - input_indices = ng.parameter((1, 2), name="input_indices", dtype=np.int32) - input_axis = np.array([1], np.int32) - - node = ng.gather(input_data, input_indices, input_axis) - assert node.get_type_name() == "Gather" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 1, 2] - - -def test_gather_batch_dims_1_negative_indices(): - input_data = ng.parameter((2, 5), name="input_data", dtype=np.float32) - input_indices = ng.parameter((2, 3), name="input_indices", dtype=np.int32) - input_axis = np.array([1], np.int32) - batch_dims = 1 - - node = ng.gather(input_data, input_indices, input_axis, batch_dims) - assert node.get_type_name() == "Gather" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 3] diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_idft.py b/src/bindings/python/tests_compatibility/test_ngraph/test_idft.py deleted file mode 100644 index a700eff23f4..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_idft.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import numpy as np - - -def get_data(): - np.random.seed(202104) - return np.random.uniform(0, 1, (2, 10, 10, 2)).astype(np.float32) - - -def test_idft_1d(): - expected_results = get_data() - complex_input_data = np.fft.fft(np.squeeze(expected_results.view(dtype=np.complex64), - axis=-1), axis=2).astype(np.complex64) - input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([2], dtype=np.int64)) - - dft_node = ng.idft(input_tensor, input_axes) - assert dft_node.get_type_name() == "IDFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(expected_results.shape) - - -def test_idft_2d(): - expected_results = get_data() - complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1), - axes=[1, 2]).astype(np.complex64) - input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) - - dft_node = ng.idft(input_tensor, input_axes) - assert dft_node.get_type_name() == "IDFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(expected_results.shape) - - -def test_idft_3d(): - expected_results = get_data() - complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1), - axes=[0, 1, 2]).astype(np.complex64) - input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) - - 
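    # The (I)DFT ops represent complex tensors as real tensors with a trailing
    # dimension of size 2 holding the real and imaginary parts, which is why the
    # NumPy reference data is packed with np.stack((real, imag), axis=-1) above.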
dft_node = ng.idft(input_tensor, input_axes) - assert dft_node.get_type_name() == "IDFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(expected_results.shape) - - -def test_idft_1d_signal_size(): - input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([-2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([20], dtype=np.int64)) - - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) - np_results = np.fft.ifft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20, - axis=-2).astype(np.complex64) - expected_results = np.stack((np_results.real, np_results.imag), axis=-1) - assert dft_node.get_type_name() == "IDFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(expected_results.shape) - - -def test_idft_2d_signal_size_1(): - input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) - - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) - np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], - axes=[0, 2]).astype(np.complex64) - expected_results = np.stack((np_results.real, np_results.imag), axis=-1) - assert dft_node.get_type_name() == "IDFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(expected_results.shape) - - -def test_idft_2d_signal_size_2(): - input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) - - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) - np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], - axes=[1, 2]).astype(np.complex64) - expected_results = np.stack((np_results.real, np_results.imag), axis=-1) - assert dft_node.get_type_name() == "IDFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(expected_results.shape) - - -def test_idft_3d_signal_size(): - input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5, 16], dtype=np.int64)) - - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) - np_results = np.fft.ifftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), - s=[4, 5, 16], axes=[0, 1, 2]).astype(np.complex64) - expected_results = np.stack((np_results.real, np_results.imag), axis=-1) - assert dft_node.get_type_name() == "IDFT" - assert dft_node.get_output_size() == 1 - assert list(dft_node.get_output_shape(0)) == list(expected_results.shape) diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_if.py b/src/bindings/python/tests_compatibility/test_ngraph/test_if.py deleted file mode 100644 index 5da6e8ec2c5..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_if.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -import pytest -import numpy as np -from ngraph.utils.tensor_iterator_types import ( - GraphBody, - TensorIteratorInvariantInputDesc, - TensorIteratorBodyOutputDesc, -) - - -def create_simple_if_with_two_outputs(condition_val): - condition = 
ng.constant(condition_val, dtype=bool) - - # then_body - X_t = ng.parameter([], np.float32, "X") - Y_t = ng.parameter([], np.float32, "Y") - Z_t = ng.parameter([], np.float32, "Z") - - add_t = ng.add(X_t, Y_t) - mul_t = ng.multiply(Y_t, Z_t) - then_body_res_1 = ng.result(add_t) - then_body_res_2 = ng.result(mul_t) - then_body = GraphBody([X_t, Y_t, Z_t], [then_body_res_1, then_body_res_2]) - then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1), - TensorIteratorInvariantInputDesc(3, 2)] - then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)] - - # else_body - X_e = ng.parameter([], np.float32, "X") - Z_e = ng.parameter([], np.float32, "Z") - W_e = ng.parameter([], np.float32, "W") - - add_e = ng.add(X_e, W_e) - pow_e = ng.power(W_e, Z_e) - else_body_res_1 = ng.result(add_e) - else_body_res_2 = ng.result(pow_e) - else_body = GraphBody([X_e, Z_e, W_e], [else_body_res_1, else_body_res_2]) - else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(3, 1), - TensorIteratorInvariantInputDesc(4, 2)] - else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)] - - X = ng.constant(15.0, dtype=np.float32) - Y = ng.constant(-5.0, dtype=np.float32) - Z = ng.constant(4.0, dtype=np.float32) - W = ng.constant(2.0, dtype=np.float32) - if_node = ng.if_op(condition, [X, Y, Z, W], (then_body, else_body), (then_body_inputs, else_body_inputs), - (then_body_outputs, else_body_outputs)) - return if_node - - -def create_diff_if_with_two_outputs(condition_val): - condition = ng.constant(condition_val, dtype=bool) - - # then_body - X_t = ng.parameter([2], np.float32, "X") - Y_t = ng.parameter([2], np.float32, "Y") - mmul_t = ng.matmul(X_t, Y_t, False, False) - mul_t = ng.multiply(Y_t, X_t) - then_body_res_1 = ng.result(mmul_t) - then_body_res_2 = ng.result(mul_t) - then_body = GraphBody([X_t, Y_t], [then_body_res_1, then_body_res_2]) - then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1)] - then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)] - - # else_body - X_e = ng.parameter([2], np.float32, "X") - Z_e = ng.parameter([], np.float32, "Z") - mul_e = ng.multiply(X_e, Z_e) - else_body_res_1 = ng.result(Z_e) - else_body_res_2 = ng.result(mul_e) - else_body = GraphBody([X_e, Z_e], [else_body_res_1, else_body_res_2]) - else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(3, 1)] - else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0), TensorIteratorBodyOutputDesc(1, 1)] - - X = ng.constant([3, 4], dtype=np.float32) - Y = ng.constant([2, 1], dtype=np.float32) - Z = ng.constant(4.0, dtype=np.float32) - if_node = ng.if_op(condition, [X, Y, Z], (then_body, else_body), (then_body_inputs, else_body_inputs), - (then_body_outputs, else_body_outputs)) - return if_node - - -def simple_if(condition_val): - condition = ng.constant(condition_val, dtype=bool) - # then_body - X_t = ng.parameter([2], np.float32, "X") - Y_t = ng.parameter([2], np.float32, "Y") - - then_mul = ng.multiply(X_t, Y_t) - then_body_res_1 = ng.result(then_mul) - then_body = GraphBody([X_t, Y_t], [then_body_res_1]) - then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1)] - then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)] - - # else_body - X_e = ng.parameter([2], np.float32, "X") - Y_e = ng.parameter([2], np.float32, "Y") - add_e = 
ng.add(X_e, Y_e) - else_body_res_1 = ng.result(add_e) - else_body = GraphBody([X_e, Y_e], [else_body_res_1]) - else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0), TensorIteratorInvariantInputDesc(2, 1)] - else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)] - - X = ng.constant([3, 4], dtype=np.float32) - Y = ng.constant([2, 1], dtype=np.float32) - if_node = ng.if_op(condition, [X, Y], (then_body, else_body), (then_body_inputs, else_body_inputs), - (then_body_outputs, else_body_outputs)) - relu = ng.relu(if_node) - return relu - - -def simple_if_without_parameters(condition_val): - condition = ng.constant(condition_val, dtype=bool) - - # then_body - then_constant = ng.constant(0.7, dtype=np.float32) - then_body_res_1 = ng.result(then_constant) - then_body = GraphBody([], [then_body_res_1]) - then_body_inputs = [] - then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)] - - # else_body - else_const = ng.constant(9.0, dtype=np.float32) - else_body_res_1 = ng.result(else_const) - else_body = GraphBody([], [else_body_res_1]) - else_body_inputs = [] - else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)] - - if_node = ng.if_op(condition, [], (then_body, else_body), (then_body_inputs, else_body_inputs), - (then_body_outputs, else_body_outputs)) - relu = ng.relu(if_node) - return relu - - -def check_results(results, expected_results): - assert len(results) == len(expected_results) - for id_result, res in enumerate(results): - assert np.allclose(res, expected_results[id_result]) - - -def check_if(if_model, cond_val, exp_results): - last_node = if_model(cond_val) - assert last_node.get_type_name() == exp_results[0] - assert last_node.get_output_size() == exp_results[1] - assert list(last_node.get_output_shape(0)) == exp_results[2] - - -def test_if_with_two_outputs(): - check_if(create_simple_if_with_two_outputs, True, - ["If", 2, []]) - check_if(create_simple_if_with_two_outputs, False, - ["If", 2, []]) - - -def test_diff_if_with_two_outputs(): - check_if(create_diff_if_with_two_outputs, True, - ["If", 2, []]) - check_if(create_diff_if_with_two_outputs, False, - ["If", 2, []]) - - -@pytest.mark.xfail(reason="ngraph.exceptions.NgraphTypeError: ('Unidentified data type %s', dtype('O'))") -def test_simple_if(): - check_if(simple_if, True, ["Relu", 1, [2]]) - check_if(simple_if, False, ["Relu", 1, [2]]) - - -@pytest.mark.xfail(reason="ngraph.exceptions.NgraphTypeError: ('Unidentified data type %s', dtype('O'))") -def test_simple_if_without_body_parameters(): - check_if(simple_if_without_parameters, True, ["Relu", 1, []]) - check_if(simple_if_without_parameters, False, ["Relu", 1, []]) diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_input_validation.py b/src/bindings/python/tests_compatibility/test_ngraph/test_input_validation.py deleted file mode 100644 index 43081b5b965..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_input_validation.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -from ngraph.exceptions import UserInputError -from ngraph.utils.input_validation import ( - _check_value, - check_valid_attribute, - check_valid_attributes, - is_non_negative_value, - is_positive_value, -) - - -@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64]) -def test_is_positive_value_signed_type(dtype): - assert is_positive_value(dtype(16)) - assert not is_positive_value(dtype(-16)) - - 
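The failure-path tests further down in this file express an expected `UserInputError` with a try/except/else block around `_check_value` and `check_valid_attribute`. A minimal sketch of the same check written with `pytest.raises` (assuming the `ngraph` compatibility package and its `UserInputError` are importable, as in the surrounding tests) could look like this:
```
import numpy as np
import pytest

from ngraph.exceptions import UserInputError
from ngraph.utils.input_validation import _check_value


def test_check_value_rejects_wrong_dtype():
    # An integer value validated against np.floating is expected to raise.
    with pytest.raises(UserInputError):
        _check_value("TestOp", "test_attr", np.int8(64), np.floating, None)
```
Both styles exercise the same validation path; the context-manager form simply keeps the failure expectation in one place.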
-@pytest.mark.parametrize("dtype", [np.uint8, np.uint16, np.uint32, np.uint64]) -def test_is_positive_value_unsigned_type(dtype): - assert is_positive_value(dtype(16)) - - -@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64]) -def test_is_non_negative_value_signed_type(dtype): - assert is_non_negative_value(dtype(16)) - assert is_non_negative_value(dtype(0)) - assert not is_non_negative_value(dtype(-1)) - assert not is_non_negative_value(dtype(-16)) - - -@pytest.mark.parametrize("dtype", [np.uint8, np.uint16, np.uint32, np.uint64]) -def test_is_non_negative_value_unsigned_type(dtype): - assert is_non_negative_value(dtype(16)) - assert is_non_negative_value(dtype(0)) - - -@pytest.mark.parametrize( - "value, val_type", - [ - (np.int8(64), np.integer), - (np.int16(64), np.integer), - (np.int32(64), np.integer), - (np.int64(64), np.integer), - (np.uint8(64), np.unsignedinteger), - (np.uint16(64), np.unsignedinteger), - (np.uint32(64), np.unsignedinteger), - (np.uint64(64), np.unsignedinteger), - (np.float32(64), np.floating), - (np.float64(64), np.floating), - ], -) -def test_check_value(value, val_type): - def is_even(x): - return x % 2 == 0 - - assert _check_value("TestOp", "test_attr", value, val_type, is_even) - - -@pytest.mark.parametrize( - "value, val_type", - [ - (np.int8(64), np.floating), - (np.int16(64), np.floating), - (np.int32(64), np.floating), - (np.int64(64), np.floating), - (np.uint8(64), np.floating), - (np.uint16(64), np.floating), - (np.uint32(64), np.floating), - (np.uint64(64), np.floating), - (np.float32(64), np.integer), - (np.float64(64), np.integer), - ], -) -def test_check_value_fail_type(value, val_type): - try: - _check_value("TestOp", "test_attr", value, val_type, None) - except UserInputError: - pass - else: - raise AssertionError("Type validation has unexpectedly passed.") - - -@pytest.mark.parametrize( - "value, val_type", - [ - (np.int8(61), np.integer), - (np.int16(61), np.integer), - (np.int32(61), np.integer), - (np.int64(61), np.integer), - (np.uint8(61), np.unsignedinteger), - (np.uint16(61), np.unsignedinteger), - (np.uint32(61), np.unsignedinteger), - (np.uint64(61), np.unsignedinteger), - (np.float32(61), np.floating), - (np.float64(61), np.floating), - ], -) -def test_check_value_fail_cond(value, val_type): - def is_even(x): - return x % 2 == 0 - - try: - _check_value("TestOp", "test_attr", value, val_type, is_even) - except UserInputError: - pass - else: - raise AssertionError("Condition validation has unexpectedly passed.") - - -def test_check_valid_attribute(): - attr_dict = { - "mode": "bilinear", - "coefficients": [1, 2, 3, 4, 5], - } - - assert check_valid_attribute("TestOp", attr_dict, "width", np.unsignedinteger, required=False) - assert check_valid_attribute("TestOp", attr_dict, "mode", np.str_, required=True) - assert check_valid_attribute("TestOp", attr_dict, "coefficients", np.integer, required=True) - - try: - check_valid_attribute("TestOp", attr_dict, "alpha", np.floating, required=True) - except UserInputError: - pass - else: - raise AssertionError("Validation of missing required attribute has unexpectedly passed.") - - -def test_check_valid_attributes(): - attr_dict = { - "mode": "bilinear", - "coefficients": [1, 2, 3, 4, 5], - } - - def _is_supported_mode(x): - return x in ["linear", "area", "cubic", "bilinear"] - - requirements = [ - ("width", False, np.unsignedinteger, None), - ("mode", True, np.str_, _is_supported_mode), - ("coefficients", True, np.integer, lambda x: x > 0), - 
("alpha", False, np.float64, None), - ] - - assert check_valid_attributes("TestOp", attr_dict, requirements) - - requirements[3] = ("alpha", True, np.float64, None) - try: - check_valid_attributes("TestOp", attr_dict, requirements) - except UserInputError: - pass - else: - raise AssertionError("Validation of missing required attribute has unexpectedly passed.") diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_log_softmax.py b/src/bindings/python/tests_compatibility/test_ngraph/test_log_softmax.py deleted file mode 100644 index 13da8ec95d7..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_log_softmax.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import ngraph as ng -from ngraph.impl import Shape, Type - - -def test_log_softmax(): - float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - - node = ng.log_softmax(data, 1) - assert node.get_type_name() == "LogSoftmax" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 10] - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_manager.py b/src/bindings/python/tests_compatibility/test_ngraph/test_manager.py deleted file mode 100644 index d9c0010d5e9..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_manager.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# flake8: noqa - - -import numpy as np - -import ngraph as ng -from ngraph.impl import Function -from ngraph.impl.passes import Manager -from tests_compatibility.test_ngraph.util import count_ops_of_type - - -def test_constant_folding(): - node_constant = ng.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32)) - node_ceil = ng.ceiling(node_constant) - func = Function(node_ceil, [], "TestFunction") - - assert count_ops_of_type(func, node_ceil) == 1 - assert count_ops_of_type(func, node_constant) == 1 - - pass_manager = Manager() - pass_manager.register_pass("ConstantFolding") - pass_manager.run_passes(func) - - assert count_ops_of_type(func, node_ceil) == 0 - assert count_ops_of_type(func, node_constant) == 1 - - new_const = func.get_results()[0].input(0).get_source_output().get_node() - - values_out = new_const.get_vector() - values_expected = [0.0, 1.0, 0.0, -2.0, 3.0, 3.0] - - assert np.allclose(values_out, values_expected) diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py b/src/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py deleted file mode 100644 index c4b0c54e051..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import ngraph as ng -from ngraph.exceptions import UserInputError -from ngraph.utils.node_factory import NodeFactory -from _pyngraph import NodeFactory as _NodeFactory - - -def test_node_factory_add(): - shape = [2, 2] - dtype = np.int8 - parameter_a = ng.parameter(shape, dtype=dtype, name="A") - parameter_b = ng.parameter(shape, dtype=dtype, name="B") - - factory = _NodeFactory("opset1") - arguments = NodeFactory._arguments_as_outputs([parameter_a, parameter_b]) - node = factory.create("Add", arguments, {}) - - assert node.get_type_name() == "Add" - 
assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 2] - - -def test_node_factory_wrapper_add(): - shape = [2, 2] - dtype = np.int8 - parameter_a = ng.parameter(shape, dtype=dtype, name="A") - parameter_b = ng.parameter(shape, dtype=dtype, name="B") - - node = ng.add(parameter_a, parameter_b, name="TestNode") - - assert node.get_type_name() == "Add" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 2] - assert node.friendly_name == "TestNode" - - -def test_node_factory_topk(): - dtype = np.int32 - data = ng.parameter([2, 10], dtype=dtype, name="A") - k = ng.constant(3, dtype=dtype, name="B") - factory = _NodeFactory("opset1") - arguments = NodeFactory._arguments_as_outputs([data, k]) - node = factory.create( - "TopK", arguments, {"axis": 1, "mode": "max", "sort": "value"} - ) - attributes = node.get_attributes() - - assert node.get_type_name() == "TopK" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [2, 3] - assert attributes["axis"] == 1 - assert attributes["mode"] == "max" - assert attributes["sort"] == "value" - - -def test_node_factory_empty_topk(): - factory = NodeFactory("opset1") - node = factory.create("TopK") - - assert node.get_type_name() == "TopK" - - -def test_node_factory_empty_topk_with_args_and_attrs(): - dtype = np.int32 - data = ng.parameter([2, 10], dtype=dtype, name="A") - k = ng.constant(3, dtype=dtype, name="B") - factory = NodeFactory("opset1") - arguments = NodeFactory._arguments_as_outputs([data, k]) - node = factory.create("TopK", None, None) - node.set_arguments(arguments) - node.set_attribute("axis", 1) - node.set_attribute("mode", "max") - node.set_attribute("sort", "value") - node.validate() - - assert node.get_type_name() == "TopK" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [2, 3] - - -def test_node_factory_validate_missing_arguments(): - factory = NodeFactory("opset1") - - try: - factory.create( - "TopK", None, {"axis": 1, "mode": "max", "sort": "value"} - ) - except UserInputError: - pass - else: - raise AssertionError("Validation of missing arguments has unexpectedly passed.") diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_normalization.py b/src/bindings/python/tests_compatibility/test_ngraph/test_normalization.py deleted file mode 100644 index c231a1592d8..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_normalization.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng -from ngraph.impl import Type - - -def test_lrn(): - input_image_shape = (2, 3, 2, 1) - input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype("f") - axes = np.array([1], dtype=np.int64) - model = ng.lrn(ng.constant(input_image), ng.constant(axes), alpha=1.0, beta=2.0, bias=1.0, size=3) - assert model.get_type_name() == "LRN" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 3, 2, 1] - assert model.get_output_element_type(0) == Type.f32 - - # Test LRN default parameter values - model = ng.lrn(ng.constant(input_image), ng.constant(axes)) - assert model.get_type_name() == "LRN" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 3, 2, 1] - assert model.get_output_element_type(0) == Type.f32 - - -def test_lrn_factory(): - alpha = 0.0002 - beta = 0.5 - bias = 2.0 - nsize = 3 - axis = np.array([1], 
dtype=np.int32) - inputs = ng.parameter((1, 2, 3, 4), name="inputs", dtype=np.float32) - node = ng.lrn(inputs, axis, alpha, beta, bias, nsize) - - assert node.get_type_name() == "LRN" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 2, 3, 4] - assert node.get_output_element_type(0) == Type.f32 - - -def test_batch_norm_inference(): - data = ng.parameter((2, 3), name="data", dtype=np.float32) - gamma = ng.parameter((3,), name="gamma", dtype=np.float32) - beta = ng.parameter((3,), name="beta", dtype=np.float32) - mean = ng.parameter((3,), name="mean", dtype=np.float32) - variance = ng.parameter((3,), name="variance", dtype=np.float32) - epsilon = 9.99e-06 - - node = ng.batch_norm_inference(data, gamma, beta, mean, variance, epsilon) - - assert node.get_type_name() == "BatchNormInference" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 3] - assert node.get_output_element_type(0) == Type.f32 - - -def test_mvn(): - data = ng.parameter((1, 3, 3, 3), name="data", dtype=np.float32) - axes = np.array([2, 3], dtype=np.int64) - epsilon = 1e-9 - normalize_variance = True - eps_mode = "outside_sqrt" - - node = ng.mvn(data, axes, normalize_variance, epsilon, eps_mode) - - assert node.get_type_name() == "MVN" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 3, 3, 3] - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops.py deleted file mode 100644 index 0a470d87306..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops.py +++ /dev/null @@ -1,775 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# flake8: noqa - -import numpy as np - -import ngraph as ng -from ngraph.impl import AxisSet, Shape, Type -from ngraph.impl.op import Constant, Parameter - - -def binary_op(op_str, a, b): - - if op_str == "+": - return a + b - elif op_str == "Add": - return ng.add(a, b) - elif op_str == "-": - return a - b - elif op_str == "Sub": - return ng.subtract(a, b) - elif op_str == "*": - return a * b - elif op_str == "Mul": - return ng.multiply(a, b) - elif op_str == "/": - return a / b - elif op_str == "Div": - return ng.divide(a, b) - elif op_str == "Equal": - return ng.equal(a, b) - elif op_str == "Greater": - return ng.greater(a, b) - elif op_str == "GreaterEq": - return ng.greater_equal(a, b) - elif op_str == "Less": - return ng.less(a, b) - elif op_str == "LessEq": - return ng.less_equal(a, b) - elif op_str == "Maximum": - return ng.maximum(a, b) - elif op_str == "Minimum": - return ng.minimum(a, b) - elif op_str == "NotEqual": - return ng.not_equal(a, b) - elif op_str == "Power": - return ng.power(a, b) - - -def binary_op_ref(op_str, a, b): - - if op_str == "+" or op_str == "Add": - return a + b - elif op_str == "-" or op_str == "Sub": - return a - b - elif op_str == "*" or op_str == "Mul": - return a * b - elif op_str == "/" or op_str == "Div": - return a / b - elif op_str == "Dot": - return np.dot(a, b) - elif op_str == "Equal": - return np.equal(a, b) - elif op_str == "Greater": - return np.greater(a, b) - elif op_str == "GreaterEq": - return np.greater_equal(a, b) - elif op_str == "Less": - return np.less(a, b) - elif op_str == "LessEq": - return np.less_equal(a, b) - elif op_str == "Maximum": - return np.maximum(a, b) - elif op_str == "Minimum": - return np.minimum(a, b) - elif op_str == "NotEqual": - 
return np.not_equal(a, b) - elif op_str == "Power": - return np.power(a, b) - - -def binary_op_exec(op_str, expected_ov_str=None): - if not expected_ov_str: - expected_ov_str = op_str - - element_type = Type.f32 - shape = Shape([2, 2]) - A = Parameter(element_type, shape) - B = Parameter(element_type, shape) - - node = binary_op(op_str, A, B) - - assert node.get_type_name() == expected_ov_str - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 2] - assert node.get_output_element_type(0) == Type.f32 - - -def binary_op_comparison(op_str, expected_ov_str=None): - if not expected_ov_str: - expected_ov_str = op_str - - element_type = Type.f32 - shape = Shape([2, 2]) - A = Parameter(element_type, shape) - B = Parameter(element_type, shape) - - node = binary_op(op_str, A, B) - - assert node.get_type_name() == expected_ov_str - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 2] - assert node.get_output_element_type(0) == Type.boolean - - -def test_add(): - binary_op_exec("+", "Add") - - -def test_add_op(): - binary_op_exec("Add") - - -def test_sub(): - binary_op_exec("-", "Subtract") - - -def test_sub_op(): - binary_op_exec("Sub", "Subtract") - - -def test_mul(): - binary_op_exec("*", "Multiply") - - -def test_mul_op(): - binary_op_exec("Mul", "Multiply") - - -def test_div(): - binary_op_exec("/", "Divide") - - -def test_div_op(): - binary_op_exec("Div", "Divide") - - -def test_maximum(): - binary_op_exec("Maximum") - - -def test_minimum(): - binary_op_exec("Minimum") - - -def test_power(): - binary_op_exec("Power") - - -def test_greater(): - binary_op_comparison("Greater") - - -def test_greater_eq(): - binary_op_comparison("GreaterEq", "GreaterEqual") - - -def test_less(): - binary_op_comparison("Less") - - -def test_less_eq(): - binary_op_comparison("LessEq", "LessEqual") - - -def test_not_equal(): - binary_op_comparison("NotEqual") - - -def test_add_with_mul(): - - element_type = Type.f32 - shape = Shape([4]) - A = Parameter(element_type, shape) - B = Parameter(element_type, shape) - C = Parameter(element_type, shape) - node = ng.multiply(ng.add(A, B), C) - - assert node.get_type_name() == "Multiply" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [4] - assert node.get_output_element_type(0) == Type.f32 - - -def unary_op(op_str, a): - if op_str == "Abs": - return ng.abs(a) - elif op_str == "Acos": - return ng.acos(a) - elif op_str == "Acosh": - return ng.acosh(a) - elif op_str == "Asin": - return ng.asin(a) - elif op_str == "Asinh": - return ng.asinh(a) - elif op_str == "Atan": - return ng.atan(a) - elif op_str == "Atanh": - return ng.atanh(a) - elif op_str == "Ceiling": - return ng.ceiling(a) - elif op_str == "Cos": - return ng.cos(a) - elif op_str == "Cosh": - return ng.cosh(a) - elif op_str == "Floor": - return ng.floor(a) - elif op_str == "log": - return ng.log(a) - elif op_str == "exp": - return ng.exp(a) - elif op_str == "negative": - return ng.negative(a) - elif op_str == "Sign": - return ng.sign(a) - elif op_str == "Sin": - return ng.sin(a) - elif op_str == "Sinh": - return ng.sinh(a) - elif op_str == "Sqrt": - return ng.sqrt(a) - elif op_str == "Tan": - return ng.tan(a) - elif op_str == "Tanh": - return ng.tanh(a) - - -def unary_op_ref(op_str, a): - if op_str == "Abs": - return np.abs(a) - elif op_str == "Acos": - return np.arccos(a) - elif op_str == "Acosh": - return np.arccosh(a) - elif op_str == "Asin": - return np.arcsin(a) - elif op_str == "Asinh": - return np.arcsinh(a) - elif op_str 
== "Atan": - return np.arctan(a) - elif op_str == "Atanh": - return np.arctanh(a) - elif op_str == "Ceiling": - return np.ceil(a) - elif op_str == "Cos": - return np.cos(a) - elif op_str == "Cosh": - return np.cosh(a) - elif op_str == "Floor": - return np.floor(a) - elif op_str == "log": - return np.log(a) - elif op_str == "exp": - return np.exp(a) - elif op_str == "negative": - return np.negative(a) - elif op_str == "Reverse": - return np.fliplr(a) - elif op_str == "Sign": - return np.sign(a) - elif op_str == "Sin": - return np.sin(a) - elif op_str == "Sinh": - return np.sinh(a) - elif op_str == "Sqrt": - return np.sqrt(a) - elif op_str == "Tan": - return np.tan(a) - elif op_str == "Tanh": - return np.tanh(a) - - -def unary_op_exec(op_str, input_list, expected_ov_str=None): - """ - input_list needs to have deep length of 4 - """ - if not expected_ov_str: - expected_ov_str = op_str - element_type = Type.f32 - shape = Shape(np.array(input_list).shape) - A = Parameter(element_type, shape) - node = unary_op(op_str, A) - - assert node.get_type_name() == expected_ov_str - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == list(shape) - assert node.get_output_element_type(0) == Type.f32 - - -def test_abs(): - input_list = [-1, 0, 1, 2] - op_str = "Abs" - unary_op_exec(op_str, input_list) - - -def test_acos(): - input_list = [-1, 0, 0.5, 1] - op_str = "Acos" - unary_op_exec(op_str, input_list) - - -def test_acosh(): - input_list = [2., 3., 1.5, 1.0] - op_str = "Acosh" - unary_op_exec(op_str, input_list) - - -def test_asin(): - input_list = [-1, 0, 0.5, 1] - op_str = "Asin" - unary_op_exec(op_str, input_list) - - -def test_asinh(): - input_list = [-1, 0, 0.5, 1] - op_str = "Asinh" - unary_op_exec(op_str, input_list) - - -def test_atan(): - input_list = [-1, 0, 0.5, 1] - op_str = "Atan" - unary_op_exec(op_str, input_list) - - -def test_atanh(): - input_list = [-1, 0, 0.5, 1] - op_str = "Atanh" - unary_op_exec(op_str, input_list) - - -def test_ceiling(): - input_list = [0.5, 0, 0.4, 0.5] - op_str = "Ceiling" - unary_op_exec(op_str, input_list) - - -def test_cos(): - input_list = [0, 0.7, 1.7, 3.4] - op_str = "Cos" - unary_op_exec(op_str, input_list) - - -def test_cosh(): - input_list = [-1, 0.0, 0.5, 1] - op_str = "Cosh" - unary_op_exec(op_str, input_list) - - -def test_floor(): - input_list = [-0.5, 0, 0.4, 0.5] - op_str = "Floor" - unary_op_exec(op_str, input_list) - - -def test_log(): - input_list = [1, 2, 3, 4] - op_str = "log" - unary_op_exec(op_str, input_list, "Log") - - -def test_exp(): - input_list = [-1, 0, 1, 2] - op_str = "exp" - unary_op_exec(op_str, input_list, "Exp") - - -def test_negative(): - input_list = [-1, 0, 1, 2] - op_str = "negative" - unary_op_exec(op_str, input_list, "Negative") - - -def test_sign(): - input_list = [-1, 0, 0.5, 1] - op_str = "Sign" - unary_op_exec(op_str, input_list) - - -def test_sin(): - input_list = [0, 0.7, 1.7, 3.4] - op_str = "Sin" - unary_op_exec(op_str, input_list) - - -def test_sinh(): - input_list = [-1, 0.0, 0.5, 1] - op_str = "Sinh" - unary_op_exec(op_str, input_list) - - -def test_sqrt(): - input_list = [0.0, 0.5, 1, 2] - op_str = "Sqrt" - unary_op_exec(op_str, input_list) - - -def test_tan(): - input_list = [-np.pi / 4, 0, np.pi / 8, np.pi / 8] - op_str = "Tan" - unary_op_exec(op_str, input_list) - - -def test_tanh(): - input_list = [-1, 0, 0.5, 1] - op_str = "Tanh" - unary_op_exec(op_str, input_list) - - -def test_reshape(): - - element_type = Type.f32 - shape = Shape([2, 3]) - A = Parameter(element_type, shape) - 
node = ng.reshape(A, Shape([3, 2]), special_zero=False) - - assert node.get_type_name() == "Reshape" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 2] - assert node.get_output_element_type(0) == element_type - - -def test_broadcast(): - - element_type = Type.f32 - A = Parameter(element_type, Shape([3])) - node = ng.broadcast(A, [3, 3]) - assert node.get_type_name() == "Broadcast" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 3] - assert node.get_output_element_type(0) == element_type - - -def test_constant(): - element_type = Type.f32 - node = Constant(element_type, Shape([3, 3]), list(range(9))) - assert node.get_type_name() == "Constant" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 3] - assert node.get_output_element_type(0) == element_type - - -def test_concat(): - - element_type = Type.f32 - node = Constant(element_type, Shape([3, 3]), list(range(9))) - assert node.get_type_name() == "Constant" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 3] - assert node.get_output_element_type(0) == element_type - - -def test_axisset(): - - set_axisset = AxisSet({1, 2, 3}) - list_axisset = AxisSet([1, 2, 3]) - tuple_axisset = AxisSet((1, 2, 3)) - - assert len(set_axisset) == 3 - assert set(set_axisset) == {1, 2, 3} - - assert len(list_axisset) == 3 - assert set(list_axisset) == set(set_axisset) - - assert len(tuple_axisset) == 3 - assert set(tuple_axisset) == set(set_axisset) - - -def test_select(): - element_type = Type.f32 - A = Parameter(Type.boolean, Shape([1, 2])) - B = Parameter(element_type, Shape([1, 2])) - C = Parameter(element_type, Shape([1, 2])) - node = ng.select(A, B, C) - assert node.get_type_name() == "Select" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [1, 2] - assert node.get_output_element_type(0) == element_type - - -def test_max_pool_1d(): - element_type = Type.f32 - shape = Shape([1, 1, 10]) - window_shape = [3] - A = Parameter(element_type, shape) - - strides = [1] * len(window_shape) - dilations = [1] * len(window_shape) - pads_begin = [0] * len(window_shape) - pads_end = [0] * len(window_shape) - rounding_type = "floor" - auto_pad = "explicit" - idx_elem_type = "i32" - - model = ng.max_pool( - A, - strides, - dilations, - pads_begin, - pads_end, - window_shape, - rounding_type, - auto_pad, - idx_elem_type, - ) - assert model.get_type_name() == "MaxPool" - assert model.get_output_size() == 2 - assert list(model.get_output_shape(0)) == [1, 1, 8] - assert list(model.get_output_shape(1)) == [1, 1, 8] - assert model.get_output_element_type(0) == element_type - assert model.get_output_element_type(1) == Type.i32 - - -def test_max_pool_1d_with_strides(): - element_type = Type.f32 - shape = Shape([1, 1, 10]) - A = Parameter(element_type, shape) - window_shape = [3] - strides = [2] - pads_begin = [0] * len(window_shape) - dilations = [1] * len(window_shape) - pads_end = [0] * len(window_shape) - rounding_type = "floor" - auto_pad = "explicit" - idx_elem_type = "i32" - - model = ng.max_pool( - A, - strides, - dilations, - pads_begin, - pads_end, - window_shape, - rounding_type, - auto_pad, - idx_elem_type, - ) - assert model.get_type_name() == "MaxPool" - assert model.get_output_size() == 2 - assert list(model.get_output_shape(0)) == [1, 1, 4] - assert list(model.get_output_shape(1)) == [1, 1, 4] - assert model.get_output_element_type(0) == element_type - assert model.get_output_element_type(1) == 
Type.i32 - - -def test_max_pool_2d(): - element_type = Type.f32 - shape = Shape([1, 1, 10, 10]) - A = Parameter(element_type, shape) - parameter_list = [A] - - input_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10) - window_shape = [3, 3] - rounding_type = "floor" - auto_pad = "explicit" - idx_elem_type = "i32" - - strides = [1, 1] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - - model = ng.max_pool( - A, - strides, - dilations, - pads_begin, - pads_end, - window_shape, - rounding_type, - auto_pad, - idx_elem_type, - ) - assert model.get_type_name() == "MaxPool" - assert model.get_output_size() == 2 - assert list(model.get_output_shape(0)) == [1, 1, 8, 8] - assert list(model.get_output_shape(1)) == [1, 1, 8, 8] - assert model.get_output_element_type(0) == element_type - assert model.get_output_element_type(1) == Type.i32 - - -def test_max_pool_2d_with_strides(): - element_type = Type.f32 - shape = Shape([1, 1, 10, 10]) - A = Parameter(element_type, shape) - strides = [2, 2] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - window_shape = [3, 3] - rounding_type = "floor" - auto_pad = "explicit" - idx_elem_type = "i32" - - model = ng.max_pool( - A, - strides, - dilations, - pads_begin, - pads_end, - window_shape, - rounding_type, - auto_pad, - idx_elem_type, - ) - assert model.get_type_name() == "MaxPool" - assert model.get_output_size() == 2 - assert list(model.get_output_shape(0)) == [1, 1, 4, 4] - assert list(model.get_output_shape(1)) == [1, 1, 4, 4] - assert model.get_output_element_type(0) == element_type - assert model.get_output_element_type(1) == Type.i32 - - -def convolution2d( - image, - filterit, - strides=(1, 1), - dilation=(1, 1), - padding_below=(0, 0), - padding_above=(0, 0), - data_dilation=(1, 1), -): - def dilate(arr, dil=(1, 1)): - m, n = arr.shape - new_m, new_n = (m - 1) * dil[0] + 1, (n - 1) * dil[1] + 1 - new_arr = np.zeros(new_m * new_n, dtype=np.float32).reshape(new_m, new_n) - for i in range(m): - for j in range(n): - new_arr[dil[0] * i][dil[1] * j] = arr[i][j] - return new_arr - - i_m, i_n = image.shape - new_image = np.zeros( - (i_m + padding_below[0] + padding_above[0]) * (i_n + padding_below[1] + padding_above[1]), - dtype=np.float32, - ).reshape(i_m + padding_below[0] + padding_above[0], i_n + padding_below[1] + padding_above[1]) - new_image[padding_below[0] : padding_below[0] + i_m, padding_below[1] : padding_below[1] + i_n] = image - image = new_image - image = image if data_dilation[0] == data_dilation[1] == 1 else dilate(image, data_dilation) - i_m, i_n = image.shape - - filterit = filterit if dilation[0] == dilation[1] == 1 else dilate(filterit, dilation) - f_m, f_n = filterit.shape - - # result_shape - r_m = i_m - f_m + 1 - r_n = i_n - f_n + 1 - r_m //= strides[0] - r_n //= strides[1] - - result = np.zeros(r_m * r_n, dtype=np.float32).reshape(r_m, r_n) - - for i in range(r_m): - for j in range(r_n): - sub_m = image[i * strides[0] : i * strides[0] + f_m, j * strides[1] : j * strides[1] + f_n] - result[i][j] = np.sum(sub_m * filterit) - return result - - -def test_convolution_simple(): - - element_type = Type.f32 - image_shape = Shape([1, 1, 16, 16]) - filter_shape = Shape([1, 1, 3, 3]) - data = Parameter(element_type, image_shape) - filters = Parameter(element_type, filter_shape) - filter_arr = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3) - filter_arr[0][0][0][0] = -1 - filter_arr[0][0][1][1] = -1 - filter_arr[0][0][2][2] = -1 - filter_arr[0][0][0][2] = -1 - filter_arr[0][0][2][0] = -1 - - strides = [1, 1] 
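    # With a 16x16 input, a 3x3 kernel, unit strides and dilations, and no
    # padding, the spatial output size is (16 - 3) / 1 + 1 = 14, which matches
    # the [1, 1, 14, 14] shape asserted below.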
- pads_begin = [0, 0] - pads_end = [0, 0] - dilations = [1, 1] - - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) - assert model.get_type_name() == "Convolution" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 1, 14, 14] - assert model.get_output_element_type(0) == element_type - - -def test_convolution_with_strides(): - - element_type = Type.f32 - image_shape = Shape([1, 1, 10, 10]) - filter_shape = Shape([1, 1, 3, 3]) - data = Parameter(element_type, image_shape) - filters = Parameter(element_type, filter_shape) - filter_arr = np.zeros(9, dtype=np.float32).reshape([1, 1, 3, 3]) - filter_arr[0][0][1][1] = 1 - strides = [2, 2] - pads_begin = [0, 0] - pads_end = [0, 0] - dilations = [1, 1] - - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) - assert model.get_type_name() == "Convolution" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 1, 4, 4] - assert model.get_output_element_type(0) == element_type - - -def test_convolution_with_filter_dilation(): - - element_type = Type.f32 - image_shape = Shape([1, 1, 10, 10]) - filter_shape = Shape([1, 1, 3, 3]) - data = Parameter(element_type, image_shape) - filters = Parameter(element_type, filter_shape) - strides = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - dilations = [2, 2] - - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) - assert model.get_type_name() == "Convolution" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 1, 6, 6] - assert model.get_output_element_type(0) == element_type - - -def test_convolution_with_padding(): - - element_type = Type.f32 - image_shape = Shape([1, 1, 10, 10]) - filter_shape = Shape([1, 1, 3, 3]) - data = Parameter(element_type, image_shape) - filters = Parameter(element_type, filter_shape) - filter_arr = np.zeros(9, dtype=np.float32).reshape(1, 1, 3, 3) - filter_arr[0][0][1][1] = 1 - strides = [1, 1] - dilations = [2, 2] - pads_begin = [0, 0] - pads_end = [0, 0] - - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) - assert model.get_type_name() == "Convolution" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 1, 6, 6] - assert model.get_output_element_type(0) == element_type - - -def test_convolution_with_non_zero_padding(): - element_type = Type.f32 - image_shape = Shape([1, 1, 10, 10]) - filter_shape = Shape([1, 1, 3, 3]) - data = Parameter(element_type, image_shape) - filters = Parameter(element_type, filter_shape) - filter_arr = (np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)) * -1 - filter_arr[0][0][1][1] = 1 - strides = [1, 1] - dilations = [2, 2] - pads_begin = [2, 1] - pads_end = [1, 2] - - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) - assert model.get_type_name() == "Convolution" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 1, 9, 9] - assert model.get_output_element_type(0) == element_type diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_binary.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops_binary.py deleted file mode 100644 index d18376ddc61..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_binary.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import operator - -import numpy as np -import pytest - 
-import ngraph as ng -from ngraph.impl import Type - - -@pytest.mark.parametrize( - ("ng_api_helper", "expected_type"), - [ - (ng.add, Type.f32), - (ng.divide, Type.f32), - (ng.multiply, Type.f32), - (ng.subtract, Type.f32), - (ng.minimum, Type.f32), - (ng.maximum, Type.f32), - (ng.mod, Type.f32), - (ng.equal, Type.boolean), - (ng.not_equal, Type.boolean), - (ng.greater, Type.boolean), - (ng.greater_equal, Type.boolean), - (ng.less, Type.boolean), - (ng.less_equal, Type.boolean), - ], -) -def test_binary_op(ng_api_helper, expected_type): - shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) - parameter_b = ng.parameter(shape, name="B", dtype=np.float32) - - model = ng_api_helper(parameter_a, parameter_b) - - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == expected_type - - -@pytest.mark.parametrize( - ("ng_api_helper", "expected_type"), - [ - (ng.add, Type.f32), - (ng.divide, Type.f32), - (ng.multiply, Type.f32), - (ng.subtract, Type.f32), - (ng.minimum, Type.f32), - (ng.maximum, Type.f32), - (ng.mod, Type.f32), - (ng.equal, Type.boolean), - (ng.not_equal, Type.boolean), - (ng.greater, Type.boolean), - (ng.greater_equal, Type.boolean), - (ng.less, Type.boolean), - (ng.less_equal, Type.boolean), - ], -) -def test_binary_op(ng_api_helper, expected_type): - value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) - - shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) - - model = ng_api_helper(parameter_a, value_b) - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == expected_type - - -@pytest.mark.parametrize( - "ng_api_helper", - [ng.logical_and, ng.logical_or, ng.logical_xor], -) -def test_binary_logical_op_parameter_inputs(ng_api_helper): - shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=bool) - parameter_b = ng.parameter(shape, name="B", dtype=bool) - - model = ng_api_helper(parameter_a, parameter_b) - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == Type.boolean - - -@pytest.mark.parametrize( - "ng_api_helper", - [ng.logical_and, ng.logical_or, ng.logical_xor], -) -def test_binary_logical_numpy_input(ng_api_helper): - value_b = np.array([[False, True], [False, True]], dtype=bool) - - shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=bool) - - model = ng_api_helper(parameter_a, value_b) - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == Type.boolean - - -@pytest.mark.parametrize( - ("operator", "expected_type"), - [ - (operator.add, Type.f32), - (operator.sub, Type.f32), - (operator.mul, Type.f32), - (operator.truediv, Type.f32), - (operator.eq, Type.boolean), - (operator.ne, Type.boolean), - (operator.gt, Type.boolean), - (operator.ge, Type.boolean), - (operator.lt, Type.boolean), - (operator.le, Type.boolean), - ], -) -def test_binary_operators(operator, expected_type): - value_b = np.array([[4, 5], [1, 7]], dtype=np.float32) - - shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) - - model = operator(parameter_a, value_b) - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == expected_type - - -@pytest.mark.parametrize( - ("operator", "expected_type"), - [ - 
(operator.add, Type.f32), - (operator.sub, Type.f32), - (operator.mul, Type.f32), - (operator.truediv, Type.f32), - (operator.eq, Type.boolean), - (operator.ne, Type.boolean), - (operator.gt, Type.boolean), - (operator.ge, Type.boolean), - (operator.lt, Type.boolean), - (operator.le, Type.boolean), - ], -) -def test_binary_operators_with_scalar(operator, expected_type): - value_b = np.array(3, dtype=np.float32) - - shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) - - model = operator(parameter_a, value_b) - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == expected_type - - -def test_multiply(): - A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1)) - B = np.arange(35, dtype=np.int32).reshape((7, 1, 5)) - - node = ng.multiply(A, B) - - assert node.get_type_name() == "Multiply" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [8, 7, 6, 5] - assert node.get_output_element_type(0) == Type.i32 - - -def test_power_v1(): - A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1)) - B = np.arange(20, dtype=np.float32).reshape((4, 1, 5)) - - node = ng.power(A, B) - - assert node.get_type_name() == "Power" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [8, 4, 6, 5] - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_fused.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops_fused.py deleted file mode 100644 index 4cd2bbcba2f..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_fused.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng -from ngraph.impl import Type - - -def test_elu_operator_with_scalar_and_array(): - data_value = ng.parameter((2, 2), name="data_value", dtype=np.float32) - alpha_value = np.float32(3) - - model = ng.elu(data_value, alpha_value) - assert model.get_type_name() == "Elu" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == Type.f32 - - -def test_elu_operator_with_scalar(): - parameter_data = ng.parameter([2, 2], name="Data", dtype=np.float32) - alpha_value = np.float32(3) - - data_shape = [2, 2] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.elu(parameter_data, alpha_value) - assert model.get_type_name() == "Elu" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == Type.f32 - - -def test_fake_quantize(): - levels = np.int32(4) - - data_shape = [1, 2, 3, 4] - bound_shape = [] - parameter_data = ng.parameter(data_shape, name="data", dtype=np.float32) - parameter_input_low = ng.parameter(bound_shape, name="input_low", dtype=np.float32) - parameter_input_high = ng.parameter(bound_shape, name="input_high", dtype=np.float32) - parameter_output_low = ng.parameter(bound_shape, name="output_low", dtype=np.float32) - parameter_output_high = ng.parameter(bound_shape, name="output_high", dtype=np.float32) - - model = ng.fake_quantize( - parameter_data, - parameter_input_low, - parameter_input_high, - parameter_output_low, - parameter_output_high, - levels, - ) - assert model.get_type_name() == "FakeQuantize" - assert model.get_output_size() == 1 - assert 
list(model.get_output_shape(0)) == [1, 2, 3, 4] - assert model.get_output_element_type(0) == Type.f32 - - -def test_depth_to_space(): - data_shape = [1, 4, 2, 3] - mode = "blocks_first" - block_size = np.int32(2) - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.depth_to_space(parameter_data, mode, block_size) - assert model.get_type_name() == "DepthToSpace" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 1, 4, 6] - assert model.get_output_element_type(0) == Type.f32 - - -def test_space_to_batch(): - data_value = np.array([[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32) - data_shape = [1, 2, 2, 3] - data_shape = data_value.shape - - block_shape = np.array([1, 2, 3, 2], dtype=np.int64) - pads_begin = np.array([0, 0, 1, 0], dtype=np.int64) - pads_end = np.array([0, 0, 0, 1], dtype=np.int64) - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.space_to_batch(parameter_data, block_shape, pads_begin, pads_end) - assert model.get_type_name() == "SpaceToBatch" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [12, 1, 1, 2] - assert model.get_output_element_type(0) == Type.f32 - - -def test_batch_to_space(): - data_shape = [12, 1, 1, 2] - - block_shape = np.array([1, 2, 3, 2], dtype=np.int64) - crops_begin = np.array([0, 0, 1, 0], dtype=np.int64) - crops_end = np.array([0, 0, 0, 1], dtype=np.int64) - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.batch_to_space(parameter_data, block_shape, crops_begin, crops_end) - assert model.get_type_name() == "BatchToSpace" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 2, 2, 3] - assert model.get_output_element_type(0) == Type.f32 - - -def test_clamp_operator(): - data_shape = [2, 2] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - min_value = np.float32(3) - max_value = np.float32(12) - - model = ng.clamp(parameter_data, min_value, max_value) - assert model.get_type_name() == "Clamp" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == Type.f32 - - -def test_clamp_operator_with_array(): - data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32) - min_value = np.float32(3) - max_value = np.float32(12) - - model = ng.clamp(data_value, min_value, max_value) - assert model.get_type_name() == "Clamp" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [2, 2] - assert model.get_output_element_type(0) == Type.f32 - - -def test_squeeze_operator(): - data_shape = [1, 2, 1, 3, 1, 1] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - axes = [2, 4] - model = ng.squeeze(parameter_data, axes) - assert model.get_type_name() == "Squeeze" - assert model.get_output_size() == 1 - assert list(model.get_output_shape(0)) == [1, 2, 3, 1] - assert model.get_output_element_type(0) == Type.f32 - - -def test_squared_difference_operator(): - x1_shape = [1, 2, 3, 4] - x2_shape = [2, 3, 4] - - parameter_x1 = ng.parameter(x1_shape, name="x1", dtype=np.float32) - parameter_x2 = ng.parameter(x2_shape, name="x2", dtype=np.float32) - - model = ng.squared_difference(parameter_x1, parameter_x2) - assert model.get_type_name() == "SquaredDifference" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) 
== [1, 2, 3, 4] - - -def test_shuffle_channels_operator(): - data_shape = [1, 15, 2, 2] - axis = 1 - groups = 5 - - parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.shuffle_channels(parameter, axis, groups) - assert model.get_type_name() == "ShuffleChannels" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [1, 15, 2, 2] - - -def test_unsqueeze(): - - data_shape = [3, 4, 5] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - axes = [0, 4] - model = ng.unsqueeze(parameter_data, axes) - assert model.get_type_name() == "Unsqueeze" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [1, 3, 4, 5, 1] - - -def test_grn_operator(): - bias = np.float32(1e-6) - - data_shape = [1, 2, 3, 4] - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.grn(parameter_data, bias) - assert model.get_type_name() == "GRN" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == data_shape - - -def test_prelu_operator(): - data_shape = [1, 2, 3, 4] - slope_shape = [2, 3, 1] - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - parameter_slope = ng.parameter(slope_shape, name="Slope", dtype=np.float32) - - model = ng.prelu(parameter_data, parameter_slope) - assert model.get_type_name() == "PRelu" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [1, 2, 3, 4] - - -def test_selu_operator(): - data_shape = [4, 2, 3, 1] - - alpha = np.array(1.6733, dtype=np.float32) - lambda_value = np.array(1.0507, dtype=np.float32) - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.selu(parameter_data, alpha, lambda_value) - assert model.get_type_name() == "Selu" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [4, 2, 3, 1] - - -def test_hard_sigmoid_operator(): - data_shape = [3] - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - parameter_alpha = ng.parameter([], name="Alpha", dtype=np.float32) - parameter_beta = ng.parameter([], name="Beta", dtype=np.float32) - - model = ng.hard_sigmoid(parameter_data, parameter_alpha, parameter_beta) - assert model.get_type_name() == "HardSigmoid" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [3] - - -def test_mvn_operator(): - data_shape = [3, 3, 3, 1] - axes = [0, 2, 3] - normalize_variance = True - eps = np.float32(1e-9) - eps_mode = "outside_sqrt" - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.mvn(parameter_data, axes, normalize_variance, eps, eps_mode) - assert model.get_type_name() == "MVN" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == data_shape - - -def test_space_to_depth_operator(): - data_shape = [1, 2, 4, 4] - mode = "blocks_first" - block_size = 2 - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.space_to_depth(parameter_data, mode, block_size) - assert model.get_type_name() == "SpaceToDepth" - assert model.get_output_size() 
== 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [1, 8, 2, 2] - - -def test_rnn_cell_operator(): - batch_size = 2 - input_size = 3 - hidden_size = 3 - - X_shape = [batch_size, input_size] - H_t_shape = [batch_size, hidden_size] - W_shape = [hidden_size, input_size] - R_shape = [hidden_size, hidden_size] - B_shape = [hidden_size] - - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) - - activations = ["sigmoid"] - activation_alpha = [] - activation_beta = [] - clip = 2.88 - - model = ng.rnn_cell( - parameter_X, - parameter_H_t, - parameter_W, - parameter_R, - parameter_B, - hidden_size, - activations, - activation_alpha, - activation_beta, - clip, - ) - assert model.get_type_name() == "RNNCell" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [batch_size, hidden_size] - - -def test_group_convolution_operator(): - data_shape = [1, 4, 2, 2] - filters_shape = [2, 1, 2, 1, 1] - - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - parameter_filters = ng.parameter(filters_shape, name="Filters", dtype=np.float32) - - strides = [1, 1] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - - model = ng.group_convolution(parameter_data, parameter_filters, strides, pads_begin, pads_end, dilations) - assert model.get_type_name() == "GroupConvolution" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [1, 2, 2, 2] - - -def test_group_convolution_backprop_data(): - data_shape = [1, 1, 3, 3] - filters_shape = [1, 1, 1, 3, 3] - strides = [2, 2] - output_padding = [1, 1] - pads_begin = [1, 1] - pads_end = [1, 1] - - data_node = ng.parameter(data_shape, name="Data", dtype=np.float32) - filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32) - model = ng.group_convolution_backprop_data( - data_node, filters_node, strides, None, pads_begin, pads_end, output_padding=output_padding - ) - - assert model.get_type_name() == "GroupConvolutionBackpropData" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [1, 1, 6, 6] - - -def test_group_convolution_backprop_data_output_shape(): - data_shape = [1, 1, 1, 10] - filters_shape = [1, 1, 1, 1, 5] - strides = [1, 1] - - data_node = ng.parameter(data_shape, name="Data", dtype=np.float32) - filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32) - output_shape_node = ng.constant(np.array([1, 14], dtype=np.int64)) - - model = ng.group_convolution_backprop_data( - data_node, filters_node, strides, output_shape_node, auto_pad="same_upper" - ) - assert model.get_type_name() == "GroupConvolutionBackpropData" - assert model.get_output_size() == 1 - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [1, 1, 1, 14] diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_matmul.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops_matmul.py deleted file mode 100644 index 1d9ce2b5837..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_matmul.py +++ /dev/null
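The deleted files in this area all follow the same pattern: build a node through the legacy `ngraph` module and assert its basic properties, namely type name, output count, element type, and inferred output shape. For readers porting such checks, a minimal sketch of the equivalent property test written against the `openvino.runtime` opset API is shown below. The opset number, import paths, and test name are assumptions chosen for illustration and may differ between OpenVINO releases; the snippet is not part of this patch and not the maintained suite's actual code.

```python
# Illustrative sketch only (assumption: an OpenVINO 2023.x release exposing
# openvino.runtime.opset13). It mirrors the property-style checks of the removed
# legacy tests, but uses the openvino.runtime opset API instead of `ngraph`.
import numpy as np

import openvino.runtime.opset13 as ops
from openvino.runtime import Type


def test_matmul_props():
    # Graph inputs: a 2x4 and a 4x2 float32 parameter.
    left = ops.parameter([2, 4], dtype=np.float32, name="A")
    right = ops.parameter([4, 2], dtype=np.float32, name="B")

    # Build the MatMul node; shape and type inference happen on construction.
    node = ops.matmul(left, right, transpose_a=False, transpose_b=False)

    # Property checks mirroring the removed nGraph tests.
    assert node.get_type_name() == "MatMul"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [2, 2]
    assert node.get_output_element_type(0) == Type.f32
```

A test written this way runs with plain `pytest`, in the same manner as the compatibility tests removed here.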
@@ -1,37 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -import ngraph as ng -from ngraph.impl import Type - - -@pytest.mark.parametrize( - ("shape_a", "shape_b", "transpose_a", "transpose_b", "expected_shape"), - [ - # matrix, vector - ([2, 4], [4], False, False, [2]), - ([4], [4, 2], False, False, [2]), - # matrix, matrix - ([2, 4], [4, 2], False, False, [2, 2]), - # tensor, vector - ([2, 4, 5], [5], False, False, [2, 4]), - # # tensor, matrix - ([2, 4, 5], [5, 4], False, False, [2, 4, 4]), - # # tensor, tensor - ([2, 2, 4], [2, 4, 2], False, False, [2, 2, 2]), - ], -) -def test_matmul(shape_a, shape_b, transpose_a, transpose_b, expected_shape): - np.random.seed(133391) - left_input = np.random.rand(*shape_a).astype(np.float32) - right_input = np.random.rand(*shape_b).astype(np.float32) - - node = ng.matmul(left_input, right_input, transpose_a, transpose_b) - - assert node.get_output_size() == 1 - assert node.get_type_name() == "MatMul" - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py deleted file mode 100644 index 82aeed43d35..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng -from ngraph.impl import Type - - -def test_split(): - input_tensor = ng.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)) - axis = ng.constant(0, dtype=np.int64) - splits = 3 - - split_node = ng.split(input_tensor, axis, splits) - assert split_node.get_type_name() == "Split" - assert split_node.get_output_size() == 3 - assert list(split_node.get_output_shape(0)) == [2] - assert list(split_node.get_output_shape(1)) == [2] - assert list(split_node.get_output_shape(2)) == [2] - assert split_node.get_output_element_type(0) == Type.i32 - assert split_node.get_output_element_type(1) == Type.i32 - assert split_node.get_output_element_type(2) == Type.i32 - - -def test_variadic_split(): - input_tensor = ng.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32)) - axis = ng.constant(1, dtype=np.int64) - splits = ng.constant(np.array([2, 4], dtype=np.int64)) - - v_split_node = ng.variadic_split(input_tensor, axis, splits) - assert v_split_node.get_type_name() == "VariadicSplit" - assert v_split_node.get_output_size() == 2 - assert list(v_split_node.get_output_shape(0)) == [2, 2] - assert list(v_split_node.get_output_shape(1)) == [2, 4] - assert v_split_node.get_output_element_type(0) == Type.i32 - assert v_split_node.get_output_element_type(1) == Type.i32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_reshape.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops_reshape.py deleted file mode 100644 index 5ae83cc47d3..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_reshape.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -import ngraph as ng -from ngraph.impl import Type -from ngraph.utils.types import get_element_type - - -def test_concat(): - a = np.array([[1, 2], [3, 4]]) - b = np.array([[5, 6]]) - axis = 0 - - 
parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32) - parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32) - node = ng.concat([parameter_a, parameter_b], axis) - assert node.get_type_name() == "Concat" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 2] - assert node.get_output_element_type(0) == Type.f32 - - -@pytest.mark.parametrize( - ("val_type", "value", "output_shape"), [(bool, False, []), (bool, np.empty((2, 2), dtype=bool), [2, 2])] -) -def test_constant_from_bool(val_type, value, output_shape): - node = ng.constant(value, val_type) - assert node.get_type_name() == "Constant" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.boolean - assert list(node.get_output_shape(0)) == output_shape - - -@pytest.mark.parametrize( - "val_type, value", - [ - pytest.param(np.int16, np.int16(-12345)), - pytest.param(np.int64, np.int64(-1234567)), - pytest.param(np.uint16, np.uint16(12345)), - pytest.param(np.uint32, np.uint32(123456)), - pytest.param(np.uint64, np.uint64(1234567)), - pytest.param(np.float64, np.float64(0.1234)), - pytest.param(np.float32, np.float32(0.1234)), - pytest.param(np.int8, np.int8(-63)), - pytest.param(np.int32, np.int32(-123456)), - pytest.param(np.uint8, np.uint8(63)), - ], -) -def test_constant_from_scalar(val_type, value): - node = ng.constant(value, val_type) - assert node.get_type_name() == "Constant" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == get_element_type(val_type) - assert list(node.get_output_shape(0)) == [] - - -@pytest.mark.parametrize( - "val_type", - [ - pytest.param(np.float64), - pytest.param(np.float32), - ], -) -def test_constant_from_float_array(val_type): - np.random.seed(133391) - input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type) - node = ng.constant(input_data, val_type) - assert node.get_type_name() == "Constant" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == get_element_type(val_type) - assert list(node.get_output_shape(0)) == [2, 3, 4] - - -@pytest.mark.parametrize( - "val_type, range_start, range_end", - [ - pytest.param(np.int16, -64, 64), - pytest.param(np.int64, -16383, 16383), - pytest.param(np.uint16, 0, 64), - pytest.param(np.uint32, 0, 1024), - pytest.param(np.uint64, 0, 16383), - pytest.param(np.int8, -8, 8), - pytest.param(np.int32, -1024, 1024), - pytest.param(np.uint8, 0, 8), - ], -) -def test_constant_from_integer_array(val_type, range_start, range_end): - np.random.seed(133391) - input_data = np.array( - np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type - ) - node = ng.constant(input_data, val_type) - assert node.get_type_name() == "Constant" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == get_element_type(val_type) - assert list(node.get_output_shape(0)) == [2, 2] - - -def test_broadcast_numpy(): - data_shape = [16, 1, 1] - target_shape_shape = [4] - - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - target_shape_parameter = ng.parameter( - target_shape_shape, name="Target_shape", dtype=np.int64 - ) - - node = ng.broadcast(data_parameter, target_shape_parameter) - - assert node.get_type_name() == "Broadcast" - assert node.get_output_size() == 1 - - -def test_broadcast_bidirectional(): - data_shape = [16, 1, 1] - target_shape_shape = [4] - - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - target_shape_parameter = ng.parameter( - 
target_shape_shape, name="Target_shape", dtype=np.int64 - ) - - node = ng.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL") - - assert node.get_type_name() == "Broadcast" - assert node.get_output_size() == 1 - - -def test_transpose(): - input_tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape( - (3, 3, 224, 224) - ) - input_order = np.array([0, 2, 3, 1], dtype=np.int32) - - node = ng.transpose(input_tensor, input_order) - assert node.get_type_name() == "Transpose" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.i32 - assert list(node.get_output_shape(0)) == [3, 224, 224, 3] - - -def test_tile(): - input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3)) - repeats = np.array([2, 1], dtype=np.int32) - - node = ng.tile(input_tensor, repeats) - - assert node.get_type_name() == "Tile" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.i32 - assert list(node.get_output_shape(0)) == [2, 2, 3] - - -def test_strided_slice(): - input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4)) - begin = np.array([1, 0], dtype=np.int32) - end = np.array([0, 0], dtype=np.int32) - strides = np.array([1, 1], dtype=np.int32) - begin_mask = np.array([0, 0, 0], dtype=np.int32) - end_mask = np.array([0, 0, 0], dtype=np.int32) - new_axis_mask = np.array([0, 1, 0], dtype=np.int32) - shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32) - ellipsis_mask = np.array([0, 0, 0], dtype=np.int32) - - node = ng.strided_slice( - input_tensor, - begin, - end, - strides, - begin_mask, - end_mask, - new_axis_mask, - shrink_axis_mask, - ellipsis_mask, - ) - - assert node.get_type_name() == "StridedSlice" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [1, 3, 4] - - -def test_reshape_v1(): - A = np.arange(1200, dtype=np.float32).reshape((2, 5, 5, 24)) - shape = np.array([0, -1, 4], dtype=np.int32) - special_zero = True - - node = ng.reshape(A, shape, special_zero) - assert node.get_type_name() == "Reshape" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [2, 150, 4] - - -def test_shape_of(): - input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32) - - node = ng.shape_of(input_tensor) - assert node.get_type_name() == "ShapeOf" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.i64 - assert list(node.get_output_shape(0)) == [2] diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py deleted file mode 100644 index 0faba0b6558..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng -from ngraph.impl import Type - - -def test_scatter_update_props(): - dtype = np.int8 - parameter_r = ng.parameter([2, 3, 4], dtype=dtype, name="data") - parameter_i = ng.parameter([2, 1], dtype=dtype, name="indices") - parameter_u = ng.parameter([2, 2, 1, 4], dtype=dtype, name="updates") - axis = np.array([1], dtype=np.int8) - - node = ng.scatter_update(parameter_r, parameter_i, parameter_u, axis) - assert node.get_type_name() == "ScatterUpdate" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == 
[2, 3, 4] - assert node.get_output_element_type(0) == Type.i8 - - -def test_scatter_update_elements_props(): - dtype = np.int8 - parameter_r = ng.parameter([2, 4, 5, 7], dtype=dtype, name="data") - parameter_i = ng.parameter([2, 2, 2, 2], dtype=dtype, name="indices") - parameter_u = ng.parameter([2, 2, 2, 2], dtype=dtype, name="updates") - axis = np.array([1], dtype=np.int8) - - node = ng.scatter_elements_update(parameter_r, parameter_i, parameter_u, axis) - assert node.get_type_name() == "ScatterElementsUpdate" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [2, 4, 5, 7] - assert node.get_output_element_type(0) == Type.i8 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_unary.py b/src/bindings/python/tests_compatibility/test_ngraph/test_ops_unary.py deleted file mode 100644 index aafbd97af8d..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_ops_unary.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -import ngraph as ng -from ngraph.impl import Shape, Type - -R_TOLERANCE = 1e-6 # global relative tolerance - - -@pytest.mark.parametrize( - ("graph_api_fn", "type_name"), - [ - (ng.absolute, "Abs"), - (ng.abs, "Abs"), - (ng.acos, "Acos"), - (ng.acosh, "Acosh"), - (ng.asin, "Asin"), - (ng.asinh, "Asinh"), - (ng.atan, "Atan"), - (ng.atanh, "Atanh"), - (ng.ceiling, "Ceiling"), - (ng.ceil, "Ceiling"), - (ng.cos, "Cos"), - (ng.cosh, "Cosh"), - (ng.exp, "Exp"), - (ng.floor, "Floor"), - (ng.log, "Log"), - (ng.relu, "Relu"), - (ng.sign, "Sign"), - (ng.sin, "Sin"), - (ng.sinh, "Sinh"), - (ng.sqrt, "Sqrt"), - (ng.tan, "Tan"), - (ng.tanh, "Tanh"), - ], -) -def test_unary_op_array(graph_api_fn, type_name): - np.random.seed(133391) - input_data = np.random.rand(2, 3, 4).astype(np.float32) - node = graph_api_fn(input_data) - assert node.get_output_size() == 1 - assert node.get_type_name() == type_name - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [2, 3, 4] - - -@pytest.mark.parametrize( - ("graph_api_fn", "input_data"), - [ - pytest.param(ng.absolute, np.float32(-3)), - pytest.param(ng.abs, np.float32(-3)), - pytest.param(ng.acos, np.float32(-0.5)), - pytest.param(ng.asin, np.float32(-0.5)), - pytest.param(ng.atan, np.float32(-0.5)), - pytest.param(ng.ceiling, np.float32(1.5)), - pytest.param(ng.ceil, np.float32(1.5)), - pytest.param(ng.cos, np.float32(np.pi / 4.0)), - pytest.param(ng.cosh, np.float32(np.pi / 4.0)), - pytest.param(ng.exp, np.float32(1.5)), - pytest.param(ng.floor, np.float32(1.5)), - pytest.param(ng.log, np.float32(1.5)), - pytest.param(ng.relu, np.float32(-0.125)), - pytest.param(ng.sign, np.float32(0.0)), - pytest.param(ng.sin, np.float32(np.pi / 4.0)), - pytest.param(ng.sinh, np.float32(0.0)), - pytest.param(ng.sqrt, np.float32(3.5)), - pytest.param(ng.tan, np.float32(np.pi / 4.0)), - pytest.param(ng.tanh, np.float32(0.1234)), - ], -) -def test_unary_op_scalar(graph_api_fn, input_data): - node = graph_api_fn(input_data) - - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [] - - -@pytest.mark.parametrize( - "input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))] -) -def test_logical_not(input_data): - node = ng.logical_not(input_data) - assert node.get_output_size() == 1 - assert node.get_type_name() == "LogicalNot" - assert 
node.get_output_element_type(0) == Type.boolean - assert list(node.get_output_shape(0)) == list(input_data.shape) - - -def test_sigmoid(): - input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32) - node = ng.sigmoid(input_data) - - assert node.get_output_size() == 1 - assert node.get_type_name() == "Sigmoid" - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [5] - - -def test_softmax(): - axis = 1 - input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) - - node = ng.softmax(input_tensor, axis) - assert node.get_output_size() == 1 - assert node.get_type_name() == "Softmax" - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [2, 3] - - -def test_erf(): - input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32) - node = ng.erf(input_tensor) - assert node.get_output_size() == 1 - assert node.get_type_name() == "Erf" - assert node.get_output_element_type(0) == Type.f32 - assert list(node.get_output_shape(0)) == [6] - - -def test_hswish(): - float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - - node = ng.hswish(data) - assert node.get_type_name() == "HSwish" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 10] - assert node.get_output_element_type(0) == Type.f32 - - -def test_round_even(): - float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - - node = ng.round(data, "HALF_TO_EVEN") - assert node.get_type_name() == "Round" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 10] - assert node.get_output_element_type(0) == Type.f32 - - -def test_hsigmoid(): - float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - - node = ng.hsigmoid(data) - assert node.get_type_name() == "HSigmoid" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 10] - assert node.get_output_element_type(0) == Type.f32 - - -def test_gelu_operator_with_parameters(): - data_shape = [2, 2] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.gelu(parameter_data, "erf") - assert model.get_output_size() == 1 - assert model.get_type_name() == "Gelu" - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [2, 2] - - -def test_gelu_operator_with_array(): - data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) - - model = ng.gelu(data_value, "erf") - assert model.get_output_size() == 1 - assert model.get_type_name() == "Gelu" - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [2, 2] - - -def test_gelu_tanh_operator_with_parameters(): - data_shape = [2, 2] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - - model = ng.gelu(parameter_data, "tanh") - assert model.get_output_size() == 1 - assert model.get_type_name() == "Gelu" - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [2, 2] - - -def test_gelu_tanh_operator_with_array(): - data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) - - model = ng.gelu(data_value, "tanh") - assert model.get_output_size() == 1 - assert model.get_type_name() == "Gelu" - assert model.get_output_element_type(0) == Type.f32 - assert list(model.get_output_shape(0)) == [2, 2] diff --git 
a/src/bindings/python/tests_compatibility/test_ngraph/test_pooling.py b/src/bindings/python/tests_compatibility/test_ngraph/test_pooling.py deleted file mode 100644 index bec88064ce9..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_pooling.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -import ngraph as ng -from ngraph.impl import Type - - -@pytest.fixture -def _ndarray_1x1x4x4(): - return np.arange(11, 27, dtype=np.float32).reshape(1, 1, 4, 4) - - -def test_avg_pool_2d(_ndarray_1x1x4x4): - input_data = _ndarray_1x1x4x4 - param = ng.parameter(input_data.shape, name="A", dtype=np.float32) - - kernel_shape = [2, 2] - spatial_dim_count = len(kernel_shape) - pads_begin = [0] * spatial_dim_count - pads_end = [0] * spatial_dim_count - strides = [2, 2] - exclude_pad = True - - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) - assert avg_pool_node.get_type_name() == "AvgPool" - assert avg_pool_node.get_output_size() == 1 - assert list(avg_pool_node.get_output_shape(0)) == [1, 1, 2, 2] - assert avg_pool_node.get_output_element_type(0) == Type.f32 - - -def test_avg_pooling_3d(_ndarray_1x1x4x4): - data = _ndarray_1x1x4x4 - data = np.broadcast_to(data, (1, 1, 4, 4, 4)) - param = ng.parameter(list(data.shape)) - kernel_shape = [2, 2, 2] - strides = [2, 2, 2] - spatial_dim_count = len(kernel_shape) - pads_begin = [0] * spatial_dim_count - pads_end = [0] * spatial_dim_count - exclude_pad = True - - avgpool = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) - assert avgpool.get_type_name() == "AvgPool" - assert avgpool.get_output_size() == 1 - assert list(avgpool.get_output_shape(0)) == [1, 1, 2, 2, 2] - assert avgpool.get_output_element_type(0) == Type.f32 - - -def test_max_pool_basic(): - data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) - strides = [1, 1] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - kernel_shape = [2, 2] - rounding_type = "floor" - auto_pad = None - index_et = "i32" - - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) - assert maxpool_node.get_type_name() == "MaxPool" - assert maxpool_node.get_output_size() == 2 - assert list(maxpool_node.get_output_shape(0)) == [1, 1, 3, 3] - assert list(maxpool_node.get_output_shape(1)) == [1, 1, 3, 3] - assert maxpool_node.get_output_element_type(0) == Type.f32 - assert maxpool_node.get_output_element_type(1) == Type.i32 - - -def test_max_pool_strides(): - data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) - strides = [2, 1] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - kernel_shape = [2, 2] - rounding_type = "floor" - auto_pad = None - index_et = "i32" - - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) - assert maxpool_node.get_type_name() == "MaxPool" - assert maxpool_node.get_output_size() == 2 - assert list(maxpool_node.get_output_shape(0)) == [1, 1, 2, 3] - assert list(maxpool_node.get_output_shape(1)) == [1, 1, 2, 3] - assert maxpool_node.get_output_element_type(0) == Type.f32 - assert maxpool_node.get_output_element_type(1) == 
Type.i32 - - -def test_max_pool_kernel_shape1x1(): - data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) - strides = [1, 1] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - kernel_shape = [1, 1] - rounding_type = "floor" - auto_pad = None - index_et = "i32" - - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) - assert maxpool_node.get_type_name() == "MaxPool" - assert maxpool_node.get_output_size() == 2 - assert list(maxpool_node.get_output_shape(0)) == [1, 1, 4, 4] - assert list(maxpool_node.get_output_shape(1)) == [1, 1, 4, 4] - assert maxpool_node.get_output_element_type(0) == Type.f32 - assert maxpool_node.get_output_element_type(1) == Type.i32 - - -def test_max_pool_kernel_shape3x3(): - data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) - strides = [1, 1] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - kernel_shape = [3, 3] - rounding_type = "floor" - auto_pad = None - index_et = "i32" - - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) - assert maxpool_node.get_type_name() == "MaxPool" - assert maxpool_node.get_output_size() == 2 - assert list(maxpool_node.get_output_shape(0)) == [1, 1, 2, 2] - assert list(maxpool_node.get_output_shape(1)) == [1, 1, 2, 2] - assert maxpool_node.get_output_element_type(0) == Type.f32 - assert maxpool_node.get_output_element_type(1) == Type.i32 - - -def test_max_pool_non_zero_pads(): - data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) - strides = [1, 1] - dilations = [1, 1] - pads_begin = [1, 1] - pads_end = [1, 1] - kernel_shape = [2, 2] - rounding_type = "floor" - auto_pad = None - index_et = "i32" - - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) - assert maxpool_node.get_type_name() == "MaxPool" - assert maxpool_node.get_output_size() == 2 - assert list(maxpool_node.get_output_shape(0)) == [1, 1, 5, 5] - assert list(maxpool_node.get_output_shape(1)) == [1, 1, 5, 5] - assert maxpool_node.get_output_element_type(0) == Type.f32 - assert maxpool_node.get_output_element_type(1) == Type.i32 - - -def test_max_pool_same_upper_auto_pads(): - data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) - strides = [1, 1] - dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - kernel_shape = [2, 2] - auto_pad = "same_upper" - rounding_type = "floor" - index_et = "i32" - - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) - assert maxpool_node.get_type_name() == "MaxPool" - assert maxpool_node.get_output_size() == 2 - assert list(maxpool_node.get_output_shape(0)) == [1, 1, 4, 4] - assert list(maxpool_node.get_output_shape(1)) == [1, 1, 4, 4] - assert maxpool_node.get_output_element_type(0) == Type.f32 - assert maxpool_node.get_output_element_type(1) == Type.i32 - - -def test_max_pool_same_lower_auto_pads(): - data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) - strides = [1, 1] - 
dilations = [1, 1] - pads_begin = [0, 0] - pads_end = [0, 0] - kernel_shape = [2, 2] - auto_pad = "same_lower" - rounding_type = "floor" - index_et = "i32" - - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( - data_node, - strides, - dilations, - pads_begin, - pads_end, - kernel_shape, - rounding_type, - auto_pad, - index_et, - ) - assert maxpool_node.get_type_name() == "MaxPool" - assert maxpool_node.get_output_size() == 2 - assert list(maxpool_node.get_output_shape(0)) == [1, 1, 4, 4] - assert list(maxpool_node.get_output_shape(1)) == [1, 1, 4, 4] - assert maxpool_node.get_output_element_type(0) == Type.f32 - assert maxpool_node.get_output_element_type(1) == Type.i32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_proposal.py b/src/bindings/python/tests_compatibility/test_ngraph/test_proposal.py deleted file mode 100644 index 81c3159ae6e..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_proposal.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import ngraph as ng -from ngraph.impl import Shape, Type - - -def test_proposal_props(): - float_dtype = np.float32 - batch_size = 1 - post_nms_topn = 20 - probs = ng.parameter(Shape([batch_size, 8, 255, 255]), dtype=float_dtype, name="probs") - deltas = ng.parameter(Shape([batch_size, 16, 255, 255]), dtype=float_dtype, name="bbox_deltas") - im_info = ng.parameter(Shape([4]), dtype=float_dtype, name="im_info") - - attrs = { - "base_size": np.uint32(85), - "pre_nms_topn": np.uint32(10), - "post_nms_topn": np.uint32(post_nms_topn), - "nms_thresh": np.float32(0.34), - "feat_stride": np.uint32(16), - "min_size": np.uint32(32), - "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float32), - "scale": np.array([2, 3, 3, 4], dtype=np.float32), - } - - node = ng.proposal(probs, deltas, im_info, attrs) - - assert node.get_type_name() == "Proposal" - assert node.get_output_size() == 2 - - assert list(node.get_output_shape(0)) == [batch_size * post_nms_topn, 5] - assert list(node.get_output_shape(1)) == [batch_size * post_nms_topn] - assert node.get_output_element_type(0) == Type.f32 - assert node.get_output_element_type(1) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py b/src/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py deleted file mode 100644 index c9da0afe153..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -from ngraph.impl import Type - -import numpy as np - - -def test_random_uniform(): - input_tensor = ng.constant(np.array([2, 4, 3], dtype=np.int32)) - min_val = ng.constant(np.array([-2.7], dtype=np.float32)) - max_val = ng.constant(np.array([3.5], dtype=np.float32)) - - random_uniform_node = ng.random_uniform(input_tensor, min_val, max_val, - output_type="f32", global_seed=7461, - op_seed=1546) - random_uniform_node = ng.random_uniform(input_tensor, min_val, max_val, - output_type="f32", global_seed=7461, - op_seed=1546) - assert random_uniform_node.get_output_size() == 1 - assert random_uniform_node.get_type_name() == "RandomUniform" - assert random_uniform_node.get_output_element_type(0) == Type.f32 - assert list(random_uniform_node.get_output_shape(0)) == [2, 4, 3] diff --git 
a/src/bindings/python/tests_compatibility/test_ngraph/test_reduction.py b/src/bindings/python/tests_compatibility/test_ngraph/test_reduction.py deleted file mode 100644 index e1bac52b738..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_reduction.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import pytest - -import ngraph as ng -from ngraph.impl import Type - - -@pytest.mark.parametrize( - ("ng_api_helper", "reduction_axes", "expected_shape"), - [ - (ng.reduce_max, np.array([0, 1, 2, 3]), []), - (ng.reduce_min, np.array([0, 1, 2, 3]), []), - (ng.reduce_sum, np.array([0, 1, 2, 3]), []), - (ng.reduce_prod, np.array([0, 1, 2, 3]), []), - (ng.reduce_max, np.array([0]), [4, 3, 2]), - (ng.reduce_min, np.array([0]), [4, 3, 2]), - (ng.reduce_sum, np.array([0]), [4, 3, 2]), - (ng.reduce_prod, np.array([0]), [4, 3, 2]), - (ng.reduce_max, np.array([0, 2]), [4, 2]), - (ng.reduce_min, np.array([0, 2]), [4, 2]), - (ng.reduce_sum, np.array([0, 2]), [4, 2]), - (ng.reduce_prod, np.array([0, 2]), [4, 2]), - ], -) -def test_reduction_ops(ng_api_helper, reduction_axes, expected_shape): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.randn(*shape).astype(np.float32) - - node = ng_api_helper(input_data, reduction_axes) - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 - - -@pytest.mark.parametrize( - ("ng_api_helper", "reduction_axes", "expected_shape"), - [ - (ng.reduce_logical_and, np.array([0]), [4, 3, 2]), - (ng.reduce_logical_or, np.array([0]), [4, 3, 2]), - (ng.reduce_logical_and, np.array([0, 2]), [4, 2]), - (ng.reduce_logical_or, np.array([0, 2]), [4, 2]), - (ng.reduce_logical_and, np.array([0, 1, 2, 3]), []), - (ng.reduce_logical_or, np.array([0, 1, 2, 3]), []), - ], -) -def test_reduction_logical_ops(ng_api_helper, reduction_axes, expected_shape): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.randn(*shape).astype(bool) - - node = ng_api_helper(input_data, reduction_axes) - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.boolean - - -def test_topk(): - data_shape = [6, 12, 10, 24] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - K = np.int32(3) - axis = np.int32(1) - node = ng.topk(data_parameter, K, axis, "max", "value") - assert node.get_type_name() == "TopK" - assert node.get_output_size() == 2 - assert list(node.get_output_shape(0)) == [6, 3, 10, 24] - assert list(node.get_output_shape(1)) == [6, 3, 10, 24] - assert node.get_output_element_type(0) == Type.f32 - assert node.get_output_element_type(1) == Type.i32 - - -@pytest.mark.parametrize( - ("ng_api_helper", "reduction_axes", "expected_shape"), - [ - (ng.reduce_mean, np.array([0, 1, 2, 3]), []), - (ng.reduce_mean, np.array([0]), [4, 3, 2]), - (ng.reduce_mean, np.array([0, 2]), [4, 2]), - ], -) -def test_reduce_mean_op(ng_api_helper, reduction_axes, expected_shape): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.randn(*shape).astype(np.float32) - - node = ng_api_helper(input_data, reduction_axes) - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 - - -def test_non_zero(): - - data_shape = [3, 10, 100, 200] - - data_parameter = 
ng.parameter(data_shape, name="Data", dtype=np.float32) - - node = ng.non_zero(data_parameter) - - assert node.get_type_name() == "NonZero" - assert node.get_output_size() == 1 - assert node.get_output_element_type(0) == Type.i64 - - -def test_roi_align(): - - data_shape = [7, 256, 200, 200] - rois = [1000, 4] - batch_indices = [1000] - expected_shape = [1000, 256, 6, 6] - - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - rois_parameter = ng.parameter(rois, name="Rois", dtype=np.float32) - batch_indices_parameter = ng.parameter(batch_indices, name="Batch_indices", dtype=np.int32) - pooled_h = 6 - pooled_w = 6 - sampling_ratio = 2 - spatial_scale = np.float32(16) - mode = "avg" - - node = ng.roi_align( - data_parameter, - rois_parameter, - batch_indices_parameter, - pooled_h, - pooled_w, - sampling_ratio, - spatial_scale, - mode, - ) - - assert node.get_type_name() == "ROIAlign" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == expected_shape - assert node.get_output_element_type(0) == Type.f32 - - -@pytest.mark.parametrize( - "input_shape, cumsum_axis, reverse", - [([5, 2], 0, False), ([5, 2], 1, False), ([5, 2, 6], 2, False), ([5, 2], 0, True)], -) -def test_cum_sum(input_shape, cumsum_axis, reverse): - input_data = np.arange(np.prod(input_shape), dtype=np.int64).reshape(input_shape) - - node = ng.cum_sum(input_data, cumsum_axis, reverse=reverse) - assert node.get_output_size() == 1 - assert node.get_type_name() == "CumSum" - assert list(node.get_output_shape(0)) == input_shape - assert node.get_output_element_type(0) == Type.i64 - - -def test_normalize_l2(): - input_shape = [1, 2, 3, 4] - input_data = np.arange(np.prod(input_shape)).reshape(input_shape).astype(np.float32) - input_data += 1 - axes = np.array([1, 2, 3]).astype(np.int64) - eps = 1e-6 - eps_mode = "add" - - node = ng.normalize_l2(input_data, axes, eps, eps_mode) - assert node.get_output_size() == 1 - assert node.get_type_name() == "NormalizeL2" - assert list(node.get_output_shape(0)) == input_shape - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_roll.py b/src/bindings/python/tests_compatibility/test_ngraph/test_roll.py deleted file mode 100644 index b912eb5ebc3..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_roll.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ngraph as ng -from ngraph.impl import Type - -import numpy as np - - -def test_roll(): - input = np.reshape(np.arange(10, dtype=np.int64), (2, 5)) - input_tensor = ng.constant(input) - input_shift = ng.constant(np.array([-10, 7], dtype=np.int32)) - input_axes = ng.constant(np.array([-1, 0], dtype=np.int32)) - - roll_node = ng.roll(input_tensor, input_shift, input_axes) - assert roll_node.get_output_size() == 1 - assert roll_node.get_type_name() == "Roll" - assert list(roll_node.get_output_shape(0)) == [2, 5] - assert roll_node.get_output_element_type(0) == Type.i64 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_sequence_processing.py b/src/bindings/python/tests_compatibility/test_ngraph/test_sequence_processing.py deleted file mode 100644 index e5e85480060..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_sequence_processing.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -import ngraph as ng 
-from ngraph.impl import Type - - -def test_onehot(): - param = ng.parameter([3], dtype=np.int32) - # output type is derived from 'on_value' and 'off_value' element types - # Need to set explicitly 'on_value' and 'off_value' types. - # If we don't do it explicitly, depending on OS/packages versions types can be unpredictably either int32 or int64 - on_value = np.array(1, dtype=np.int64) - off_value = np.array(0, dtype=np.int64) - depth = 3 - axis = 0 - model = ng.one_hot(param, depth, on_value, off_value, axis) - assert model.get_output_size() == 1 - assert model.get_type_name() == "OneHot" - assert list(model.get_output_shape(0)) == [3, 3] - assert model.get_output_element_type(0) == Type.i64 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_swish.py b/src/bindings/python/tests_compatibility/test_ngraph/test_swish.py deleted file mode 100644 index 87e5b1d5eae..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_swish.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import ngraph as ng -from ngraph.impl import Shape, Type - - -def test_swish_props_with_beta(): - float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - beta = ng.parameter(Shape([]), dtype=float_dtype, name="beta") - - node = ng.swish(data, beta) - assert node.get_type_name() == "Swish" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 10] - assert node.get_output_element_type(0) == Type.f32 - - -def test_swish_props_without_beta(): - float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - - node = ng.swish(data) - assert node.get_type_name() == "Swish" - assert node.get_output_size() == 1 - assert list(node.get_output_shape(0)) == [3, 10] - assert node.get_output_element_type(0) == Type.f32 diff --git a/src/bindings/python/tests_compatibility/test_ngraph/test_utils.py b/src/bindings/python/tests_compatibility/test_ngraph/test_utils.py deleted file mode 100644 index c67ab867742..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/test_utils.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import ngraph as ng -from ngraph.impl import Shape - - -def test_get_constant_from_source_success(): - dtype = np.int32 - input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1") - input2 = ng.parameter(Shape([25]), dtype=dtype, name="input_2") - shape_of = ng.shape_of(input2, name="shape_of") - reshape = ng.reshape(input1, shape_of, special_zero=True) - folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) - - assert folded_const is not None - assert folded_const.get_vector() == [25] - - -def test_get_constant_from_source_failed(): - dtype = np.int32 - input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1") - input2 = ng.parameter(Shape([1]), dtype=dtype, name="input_2") - reshape = ng.reshape(input1, input2, special_zero=True) - folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) - - assert folded_const is None diff --git a/src/bindings/python/tests_compatibility/test_ngraph/util.py b/src/bindings/python/tests_compatibility/test_ngraph/util.py deleted file mode 100644 index 8008ce1a3d9..00000000000 --- a/src/bindings/python/tests_compatibility/test_ngraph/util.py +++ /dev/null @@ -1,10 +0,0 
@@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -def count_ops_of_type(func, op_type): - count = 0 - for op in func.get_ops(): - if (type(op) is type(op_type)): - count += 1 - return count diff --git a/src/bindings/python/tests_compatibility/test_onnx/__init__.py b/src/bindings/python/tests_compatibility/test_onnx/__init__.py deleted file mode 100644 index cddd115d397..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 diff --git a/src/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh b/src/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh deleted file mode 100755 index 064798dfe70..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/bin/bash - -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -set -e - -# default ONNX Model Zoo commit hash ID: -ONNX_SHA=d58213534f2a4d1c4b19ba62b3bb5f544353256e - -MODELS_DIR="$HOME/.onnx/model_zoo" -ENABLE_ONNX_MODELS_ZOO=false -ENABLE_MSFT_MODELS=false -FORCE_MODE=false - -function print_help { - echo "Model preprocessing options:" - echo " -h display this help message" - echo " -d set location of the models (for onnx model ZOO and MSFT models)" - echo " By default the models location is: $HOME/.onnx/model_zoo" - echo " -o update Onnx Model Zoo models" - echo " -s Onnx Model Zoo commit SHA" - echo " -m update MSFT models" - echo " -f force update of a chosen model" - echo "" - echo "Note: This script requires wget, GNU tar (not bsdtar) and git with LFS support." -} - -while getopts "homfd:s:" opt; do - case ${opt} in - h ) - print_help - ;; - \? 
) - print_help - ;; - : ) - print_help - ;; - d ) - MODELS_DIR="$OPTARG" - ;; - o ) - ENABLE_ONNX_MODELS_ZOO=true - ;; - s ) - ONNX_SHA="$OPTARG" - ;; - m ) - ENABLE_MSFT_MODELS=true - ;; - f ) - FORCE_MODE=true - ;; - esac -done -shift $((OPTIND -1)) - -MODEL_ZOO_DIR="$MODELS_DIR/model_zoo" -ONNX_MODELS_DIR="$MODEL_ZOO_DIR/onnx_model_zoo_$ONNX_SHA" -MSFT_MODELS_DIR="$MODEL_ZOO_DIR/MSFT" - -function pull_and_postprocess_onnx_model_zoo() { - git fetch - git reset HEAD --hard - - git checkout -f "$ONNX_SHA" - - echo "Pulling models data via Git LFS for onnx model zoo repository" - git lfs pull --include="*" --exclude="*.onnx" - find "$ONNX_MODELS_DIR" -name "*.onnx" | while read -r filename; do rm "$filename"; done; - - printf "Extracting tar.gz archives into %s\n" "$ONNX_MODELS_DIR" - find "$ONNX_MODELS_DIR" -name '*.tar.gz' \ - -execdir sh -c 'BASEDIR=$(basename "$1" .tar.gz) && rm -rf $BASEDIR && mkdir -p $BASEDIR' shell {} \; \ - -execdir sh -c 'BASEDIR=$(basename "$1" .tar.gz) && tar --warning=no-unknown-keyword -xvzf "$1" -C $BASEDIR' shell {} \; - - echo "Postprocessing of ONNX Model Zoo models:" - - echo "Fix roberta model" - cd "$ONNX_MODELS_DIR/text/machine_comprehension/roberta/model/roberta-sequence-classification-9/roberta-sequence-classification-9" - mkdir -p test_data_set_0 - mv ./*.pb test_data_set_0/ - - rm -f "$MODEL_ZOO_DIR/executing_$ONNX_SHA" -} - -function update_onnx_models() { - if test "$(find "$MODEL_ZOO_DIR/executing_$ONNX_SHA" -mmin +60 2>/dev/null)" ; then - rm -rf "$ONNX_MODELS_DIR" - rm -f "$MODEL_ZOO_DIR/executing_$ONNX_SHA" - fi - - while [[ -f $MODEL_ZOO_DIR/executing_$ONNX_SHA ]]; - do - echo "Onnx Models update are currently executing - sleeping 5 minutes" - sleep 300 - done - - if [[ ! -d $ONNX_MODELS_DIR ]] ; then - touch "$MODEL_ZOO_DIR/executing_$ONNX_SHA" - trap 'rm -f "$MODEL_ZOO_DIR/executing_$ONNX_SHA"' EXIT INT TERM - echo "The ONNX Model Zoo repository doesn't exist on your filesystem then will be cloned" - git clone https://github.com/onnx/models.git "$ONNX_MODELS_DIR" - cd "$ONNX_MODELS_DIR" - pull_and_postprocess_onnx_model_zoo - else - # Check if ONNX Model Zoo directory consists of proper git repo - git_remote_url=$(git -C "$ONNX_MODELS_DIR" config --local remote.origin.url 2> /dev/null 2>&1) - printf "ONNX Model Zoo repository exists: %s\n" "$ONNX_MODELS_DIR" - if [[ $git_remote_url = "https://github.com/onnx/models.git" ]]; then - printf "The proper github repository detected: %s\n" "$git_remote_url" - else - echo "The ONNX Model Zoo repository doesn't exist then will be cloned" - git clone https://github.com/onnx/models.git "$ONNX_MODELS_DIR" - fi - fi -} - -function update_msft_models() { - wget https://onnxruntimetestdata.blob.core.windows.net/models/20191107.zip -O "$MSFT_MODELS_DIR.zip" - unzip "$MSFT_MODELS_DIR.zip" -d "$MSFT_MODELS_DIR" && rm "$MSFT_MODELS_DIR.zip" - -} - -function postprocess_msft_models() { - echo "Postprocessing of MSFT models:" - - echo "Fix LSTM_Seq_lens_unpacked" - mv "$MSFT_MODELS_DIR"/opset9/LSTM_Seq_lens_unpacked/seq_lens_sorted "$MSFT_MODELS_DIR"/opset9/LSTM_Seq_lens_unpacked/test_data_set_0 - mv "$MSFT_MODELS_DIR"/opset9/LSTM_Seq_lens_unpacked/seq_lens_unsorted "$MSFT_MODELS_DIR"/opset9/LSTM_Seq_lens_unpacked/test_data_set_1 -} - -if [[ $ENABLE_ONNX_MODELS_ZOO = false ]] && [[ $ENABLE_MSFT_MODELS = false ]] ; then - echo "Please choose an option to update chosen model: - -o to update ONNX Model ZOO - -m to update MSFT models" - exit 170 -fi - -if [[ $MODELS_DIR = false ]] ; then - printf "Unknown location 
of the general models directory (onnx model ZOO and MSFT models) - Please specify the location using -d flag" - exit 170 -fi - - -# check if general model zoo directory exists (directory to store ONNX model zoo and MSFT models) -if [[ ! -d $MODEL_ZOO_DIR ]] ; then - printf "The general model directory: %s doesn't exist on your filesystem, it will be created \n" "$MODEL_ZOO_DIR" - mkdir -p "$MODEL_ZOO_DIR" -else - printf "The general model directory: %s found\n" "$MODEL_ZOO_DIR" -fi - -if [[ $ENABLE_ONNX_MODELS_ZOO = true ]] ; then - if [[ $FORCE_MODE = true ]]; then - rm -rf "$ONNX_MODELS_DIR" - fi - update_onnx_models -fi - -if [[ $ENABLE_MSFT_MODELS = true ]] ; then - if [[ $FORCE_MODE = true ]]; then - rm -rf "$MSFT_MODELS_DIR" - fi - update_msft_models - postprocess_msft_models -fi diff --git a/src/bindings/python/tests_compatibility/test_onnx/models/add_abc.onnx b/src/bindings/python/tests_compatibility/test_onnx/models/add_abc.onnx deleted file mode 100644 index 5c2da5dcc0b..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/models/add_abc.onnx +++ /dev/null @@ -1,24 +0,0 @@ -ngraph ONNXImporter:† - -A -BX add_node1"Add - -X -CY add_node2"Add -test_graphZ -A - - -Z -B - - -Z -C - - -b -Y - - -B \ No newline at end of file diff --git a/src/bindings/python/tests_compatibility/test_onnx/models/data/tensor.data b/src/bindings/python/tests_compatibility/test_onnx/models/data/tensor.data deleted file mode 100644 index 5116510eebc..00000000000 Binary files a/src/bindings/python/tests_compatibility/test_onnx/models/data/tensor.data and /dev/null differ diff --git a/src/bindings/python/tests_compatibility/test_onnx/models/external_data.onnx b/src/bindings/python/tests_compatibility/test_onnx/models/external_data.onnx deleted file mode 100644 index 9cd1ae3bb10..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/models/external_data.onnx +++ /dev/null @@ -1,22 +0,0 @@ -nGraph ONNX Importer:Á -& -data_a -data_b -data_cresult"Meantest_mean_example*,Bdata_cj -locationdata/tensor.datapZ -data_a - - -Z -data_b - - -Z -data_c - - -b -result - - -B \ No newline at end of file diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py b/src/bindings/python/tests_compatibility/test_onnx/test_backend.py deleted file mode 100644 index e8ff2196d4f..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_backend.py +++ /dev/null @@ -1,935 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging - -import onnx.backend.test -from tests_compatibility import ( - BACKEND_NAME, - skip_rng_tests, - xfail_unsupported_by_legacy_api, - xfail_issue_33488, - xfail_issue_33581, - xfail_issue_33595, - xfail_issue_33596, - xfail_issue_33606, - xfail_issue_33651, - xfail_issue_38091, - xfail_issue_38699, - xfail_issue_38701, - xfail_issue_38706, - xfail_issue_38710, - xfail_issue_38713, - xfail_issue_38724, - xfail_issue_38734, - xfail_issue_38735, - xfail_issue_39658, - xfail_issue_44858, - xfail_issue_44965, - xfail_issue_45180, - xfail_issue_47323, - xfail_issue_73538, - xfail_issue_48052, - xfail_issue_52463, - xfail_issue_58033, - xfail_issue_63033, - xfail_issue_63036, - xfail_issue_63043, - xfail_issue_63137, - xfail_issue_63138, - xfail_issue_69444, - xfail_issue_78741, - xfail_issue_81976, - skip_segfault, - xfail_issue_82038, - xfail_issue_82039, - xfail_issue_90649, - skip_issue_91151, - xfail_issue_91490, - xfail_issue_101965, - skip_bitwise_ui64, - xfail_issue_99949, - xfail_issue_99950, 
- xfail_issue_99952, - xfail_issue_99954, - xfail_issue_99955, - xfail_issue_99957, - xfail_issue_99958, - xfail_issue_99959, - xfail_issue_99960, - xfail_issue_99961, - xfail_issue_99968, - xfail_issue_99969, - xfail_issue_99970, - xfail_issue_99972, - xfail_issue_99973, - xfail_issue_113506, - skip_dynamic_model, - xfail_issue_119896, - xfail_issue_119900, - xfail_issue_119903, - xfail_issue_119906, - xfail_issue_119919, - xfail_issue_119922, - xfail_issue_119925, - xfail_issue_119926, - xfail_issue_125485, - xfail_issue_125486, - xfail_issue_125488, - xfail_issue_125487, - skip_issue_125489, - xfail_issue_125491, - xfail_issue_125492, - xfail_issue_125493, - xfail_issue_125495, - xfail_issue_127812, - skip_misalignment, -) -from tests_compatibility.test_onnx.utils.onnx_backend import OpenVinoTestBackend - - -def expect_fail(test_case_path, xfail): # type: (str) -> None - """Mark the test as expected to fail.""" - module_name, test_name = test_case_path.split(".") - module = globals().get(module_name) - if hasattr(module, test_name): - xfail(getattr(module, test_name)) - else: - logging.getLogger().warning( - "Could not mark test as XFAIL, not found: %s", test_case_path - ) - - -OpenVinoTestBackend.backend_name = BACKEND_NAME - -# This is a pytest magic variable to load extra plugins -# Uncomment the line below to enable the ONNX compatibility report -# pytest_plugins = "onnx.backend.test.report", - -# import all test cases at global scope to make them visible to python.unittest -backend_test = onnx.backend.test.BackendTest(OpenVinoTestBackend, __name__) - -skip_tests_general = [ - # Big model tests (see test_zoo_models.py): - "test_bvlc_alexnet", - "test_densenet121", - "test_inception_v1", - "test_inception_v2", - "test_resnet50", - "test_shufflenet", - "test_squeezenet", - "test_vgg19", - "test_zfnet512", -] - -for test in skip_tests_general: - backend_test.exclude(test) - -# NOTE: ALL backend_test.exclude CALLS MUST BE PERFORMED BEFORE THE CALL TO globals().update - -OnnxBackendNodeModelTest = None -OnnxBackendSimpleModelTest = None -OnnxBackendPyTorchOperatorModelTest = None -OnnxBackendPyTorchConvertedModelTest = None -globals().update(backend_test.enable_report().test_cases) - -tests_expected_to_fail = [ - ( - xfail_issue_39658, - "OnnxBackendNodeModelTest.test_tile_cpu", - ), - ( - xfail_issue_38091, - "OnnxBackendNodeModelTest.test_dynamicquantizelinear_cpu", - "OnnxBackendNodeModelTest.test_dynamicquantizelinear_expanded_cpu", - ), - ( - xfail_issue_52463, - "OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_singleton_broadcast_cpu", - ), - ( - xfail_issue_47323, - "OnnxBackendPyTorchOperatorModelTest.test_operator_add_broadcast_cpu", - "OnnxBackendPyTorchOperatorModelTest.test_operator_addconstant_cpu", - "OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_right_broadcast_cpu", - ), - ( - xfail_issue_38699, - "OnnxBackendSimpleModelTest.test_gradient_of_add_and_mul_cpu", - "OnnxBackendSimpleModelTest.test_gradient_of_add_cpu", - ), - ( - xfail_issue_33596, - "OnnxBackendSimpleModelTest.test_sequence_model5_cpu", - "OnnxBackendSimpleModelTest.test_sequence_model7_cpu", - "OnnxBackendSimpleModelTest.test_sequence_model1_cpu", - "OnnxBackendSimpleModelTest.test_sequence_model3_cpu", - "OnnxBackendSimpleModelTest.test_sequence_model6_cpu", - "OnnxBackendSimpleModelTest.test_sequence_model8_cpu", - "OnnxBackendSimpleModelTest.test_sequence_model4_cpu", - "OnnxBackendSimpleModelTest.test_sequence_model2_cpu", - 
"OnnxBackendNodeModelTest.test_identity_sequence_cpu", - "OnnxBackendNodeModelTest.test_if_seq_cpu", - "OnnxBackendNodeModelTest.test_if_opt_cpu", # Optional, SequenceConstruct - "OnnxBackendNodeModelTest.test_split_to_sequence_1_cpu", - "OnnxBackendNodeModelTest.test_split_to_sequence_2_cpu", - "OnnxBackendNodeModelTest.test_split_to_sequence_nokeepdims_cpu", - ), - ( - xfail_issue_38701, - "OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_nochangecase_cpu", - "OnnxBackendSimpleModelTest.test_strnorm_model_nostopwords_nochangecase_cpu", - "OnnxBackendSimpleModelTest.test_strnorm_model_monday_empty_output_cpu", - "OnnxBackendSimpleModelTest.test_strnorm_model_monday_insensintive_upper_twodim_cpu", - "OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_lower_cpu", - "OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_upper_cpu", - "OnnxBackendNodeModelTest.test_strnormalizer_nostopwords_nochangecase_cpu", - "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_nochangecase_cpu", - "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_insensintive_upper_twodim_cpu", - "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_lower_cpu", - "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_empty_output_cpu", - "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_upper_cpu", - "OnnxBackendNodeModelTest.test_cast_STRING_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT_to_STRING_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_STRING_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_STRING_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_STRING_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_castlike_STRING_to_FLOAT_expanded_cpu", - "OnnxBackendNodeModelTest.test_equal_string_broadcast_cpu", - "OnnxBackendNodeModelTest.test_equal_string_cpu", - "OnnxBackendNodeModelTest.test_regex_full_match_basic_cpu", - "OnnxBackendNodeModelTest.test_regex_full_match_email_domain_cpu", - "OnnxBackendNodeModelTest.test_regex_full_match_empty_cpu", - "OnnxBackendNodeModelTest.test_string_concat_broadcasting_cpu", - "OnnxBackendNodeModelTest.test_string_concat_cpu", - "OnnxBackendNodeModelTest.test_string_concat_empty_string_cpu", - "OnnxBackendNodeModelTest.test_string_concat_utf8_cpu", - "OnnxBackendNodeModelTest.test_string_concat_zero_dimensional_cpu", - "OnnxBackendNodeModelTest.test_string_split_basic_cpu", - "OnnxBackendNodeModelTest.test_string_split_consecutive_delimiters_cpu", - "OnnxBackendNodeModelTest.test_string_split_empty_string_delimiter_cpu", - "OnnxBackendNodeModelTest.test_string_split_empty_tensor_cpu", - "OnnxBackendNodeModelTest.test_string_split_maxsplit_cpu", - "OnnxBackendNodeModelTest.test_string_split_no_delimiter_cpu", - ), - ( - xfail_issue_33595, - "OnnxBackendNodeModelTest.test_unique_not_sorted_without_axis_cpu", - "OnnxBackendNodeModelTest.test_unique_sorted_with_negative_axis_cpu", - "OnnxBackendNodeModelTest.test_unique_sorted_with_axis_cpu", - "OnnxBackendNodeModelTest.test_unique_sorted_with_axis_3d_cpu", - "OnnxBackendNodeModelTest.test_unique_sorted_without_axis_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_add_1_sequence_1_tensor_expanded_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_add_2_sequences_expanded_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_extract_shapes_expanded_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_identity_1_sequence_1_tensor_expanded_cpu", - 
"OnnxBackendNodeModelTest.test_sequence_map_identity_1_sequence_expanded_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_identity_2_sequences_expanded_cpu", - ), - ( - xfail_issue_33651, - "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu", - "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu", - "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu", - "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu", - "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_only_bigrams_skip0_cpu", - "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu", - "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu", - ), - ( - xfail_issue_38706, - "OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu", - ), - ( - xfail_issue_33581, - "OnnxBackendNodeModelTest.test_gather_elements_negative_indices_cpu", - ), - ( - xfail_issue_38713, - "OnnxBackendNodeModelTest.test_momentum_cpu", - "OnnxBackendNodeModelTest.test_nesterov_momentum_cpu", - "OnnxBackendNodeModelTest.test_momentum_multiple_cpu", - ), - ( - xfail_issue_73538, - "OnnxBackendNodeModelTest.test_onehot_negative_indices_cpu", - ), - ( - xfail_issue_33488, - "OnnxBackendNodeModelTest.test_maxunpool_export_with_output_shape_cpu", - "OnnxBackendNodeModelTest.test_maxunpool_export_without_output_shape_cpu", - ), - (xfail_issue_38724, "OnnxBackendNodeModelTest.test_resize_tf_crop_and_resize_cpu"), - ( - xfail_issue_33606, - "OnnxBackendNodeModelTest.test_det_2d_cpu", - "OnnxBackendNodeModelTest.test_det_nd_cpu", - ), - ( - xfail_issue_38734, - "OnnxBackendNodeModelTest.test_adam_multiple_cpu", - "OnnxBackendNodeModelTest.test_adam_cpu", - ), - ( - xfail_issue_38735, - "OnnxBackendNodeModelTest.test_adagrad_multiple_cpu", - "OnnxBackendNodeModelTest.test_adagrad_cpu", - ), - ( - xfail_issue_48052, - "OnnxBackendNodeModelTest.test_training_dropout_cpu", - "OnnxBackendNodeModelTest.test_training_dropout_mask_cpu", - "OnnxBackendNodeModelTest.test_training_dropout_default_cpu", - "OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_cpu", - "OnnxBackendNodeModelTest.test_training_dropout_default_mask_cpu", - "OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_mask_cpu", - ), - ( - xfail_issue_45180, - "OnnxBackendNodeModelTest.test_reduce_sum_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_do_not_keepdims_random_cpu", - ), - ( - xfail_issue_44858, - "OnnxBackendNodeModelTest.test_unsqueeze_axis_0_cpu", - "OnnxBackendNodeModelTest.test_unsqueeze_axis_1_cpu", - "OnnxBackendNodeModelTest.test_unsqueeze_axis_2_cpu", - "OnnxBackendNodeModelTest.test_unsqueeze_negative_axes_cpu", - "OnnxBackendNodeModelTest.test_unsqueeze_three_axes_cpu", - "OnnxBackendNodeModelTest.test_unsqueeze_two_axes_cpu", - "OnnxBackendNodeModelTest.test_unsqueeze_unsorted_axes_cpu", - ), - ( - xfail_issue_44965, - "OnnxBackendNodeModelTest.test_loop13_seq_cpu", - "OnnxBackendNodeModelTest.test_sequence_insert_at_back_cpu", - "OnnxBackendNodeModelTest.test_sequence_insert_at_front_cpu", - ), - (xfail_issue_58033, "OnnxBackendNodeModelTest.test_einsum_batch_diagonal_cpu"), - ( - xfail_issue_63033, - "OnnxBackendNodeModelTest.test_batchnorm_epsilon_training_mode_cpu", - "OnnxBackendNodeModelTest.test_batchnorm_example_training_mode_cpu", - ), - (xfail_issue_63036, "OnnxBackendNodeModelTest.test_convtranspose_autopad_same_cpu"), - ( - xfail_issue_63043, - "OnnxBackendNodeModelTest.test_gru_batchwise_cpu", - 
"OnnxBackendNodeModelTest.test_lstm_batchwise_cpu", - "OnnxBackendNodeModelTest.test_simple_rnn_batchwise_cpu", - ), - ( - xfail_issue_38710, - "OnnxBackendNodeModelTest.test_reshape_allowzero_reordered_cpu", - ), - ( - xfail_issue_91490, - "OnnxBackendNodeModelTest.test_tril_zero_cpu", - "OnnxBackendNodeModelTest.test_triu_zero_cpu", - ), - ( - skip_dynamic_model, - "OnnxBackendNodeModelTest.test_triu_one_row_cpu", - "OnnxBackendNodeModelTest.test_squeeze_cpu", - "OnnxBackendNodeModelTest.test_squeeze_negative_axes_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_negative_axes_hwc_expanded_cpu", - "OnnxBackendNodeModelTest.test_constant_pad_negative_axes_cpu", - ), - ( - skip_rng_tests, - "OnnxBackendNodeModelTest.test_bernoulli_cpu", - "OnnxBackendNodeModelTest.test_bernoulli_double_cpu", - "OnnxBackendNodeModelTest.test_bernoulli_double_expanded_cpu", - "OnnxBackendNodeModelTest.test_bernoulli_expanded_cpu", - "OnnxBackendNodeModelTest.test_bernoulli_seed_cpu", - "OnnxBackendNodeModelTest.test_bernoulli_seed_expanded_cpu", - ), - ( - xfail_issue_63137, - "OnnxBackendNodeModelTest.test_optional_get_element_cpu", - "OnnxBackendNodeModelTest.test_optional_get_element_sequence_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_empty_cpu", - "OnnxBackendNodeModelTest.test_loop16_seq_none_cpu", # OptionalHasElement, SequenceInsert - ), - ( - xfail_issue_63138, - "OnnxBackendNodeModelTest.test_shape_end_1_cpu", - "OnnxBackendNodeModelTest.test_shape_end_negative_1_cpu", - "OnnxBackendNodeModelTest.test_shape_start_1_cpu", - "OnnxBackendNodeModelTest.test_shape_start_1_end_2_cpu", - "OnnxBackendNodeModelTest.test_shape_start_1_end_negative_1_cpu", - "OnnxBackendNodeModelTest.test_shape_start_negative_1_cpu", - ), - ( - xfail_issue_69444, - "OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_align_corners_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_align_corners_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu", - ), - ( - xfail_issue_78741, - "OnnxBackendNodeModelTest.test_compress_0_cpu", - "OnnxBackendNodeModelTest.test_compress_1_cpu", - "OnnxBackendNodeModelTest.test_compress_default_axis_cpu", - "OnnxBackendNodeModelTest.test_compress_negative_axis_cpu", - "OnnxBackendNodeModelTest.test_constant_pad_cpu", - "OnnxBackendNodeModelTest.test_constantofshape_float_ones_cpu", - "OnnxBackendNodeModelTest.test_constantofshape_int_shape_zero_cpu", - "OnnxBackendNodeModelTest.test_constantofshape_int_zeros_cpu", - "OnnxBackendNodeModelTest.test_edge_pad_cpu", - "OnnxBackendNodeModelTest.test_expand_dim_changed_cpu", - "OnnxBackendNodeModelTest.test_expand_dim_unchanged_cpu", - "OnnxBackendNodeModelTest.test_loop11_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_center_point_box_format_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_flipped_coordinates_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_identical_boxes_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_limit_output_size_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_single_box_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_cpu", - "OnnxBackendNodeModelTest.test_nonmaxsuppression_two_batches_cpu", - 
"OnnxBackendNodeModelTest.test_nonmaxsuppression_two_classes_cpu", - "OnnxBackendNodeModelTest.test_nonzero_example_cpu", - "OnnxBackendNodeModelTest.test_onehot_with_axis_cpu", - "OnnxBackendNodeModelTest.test_onehot_with_negative_axis_cpu", - "OnnxBackendNodeModelTest.test_onehot_without_axis_cpu", - "OnnxBackendNodeModelTest.test_range_float_type_positive_delta_cpu", - "OnnxBackendNodeModelTest.test_range_float_type_positive_delta_expanded_cpu", - "OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_cpu", - "OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_negative_axes_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reflect_pad_cpu", - "OnnxBackendNodeModelTest.test_reshape_extended_dims_cpu", - "OnnxBackendNodeModelTest.test_reshape_negative_dim_cpu", - "OnnxBackendNodeModelTest.test_reshape_negative_extended_dims_cpu", - "OnnxBackendNodeModelTest.test_reshape_one_dim_cpu", - "OnnxBackendNodeModelTest.test_reshape_reduced_dims_cpu", - "OnnxBackendNodeModelTest.test_reshape_reordered_all_dims_cpu", - "OnnxBackendNodeModelTest.test_reshape_reordered_last_dims_cpu", - "OnnxBackendNodeModelTest.test_reshape_zero_and_negative_dim_cpu", - "OnnxBackendNodeModelTest.test_reshape_zero_dim_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_scales_nearest_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_cubic_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_align_corners_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_asymmetric_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_align_corners_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_nearest_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_cubic_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_floor_align_corners_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu", - "OnnxBackendNodeModelTest.test_slice_cpu", - "OnnxBackendNodeModelTest.test_slice_default_axes_cpu", - "OnnxBackendNodeModelTest.test_slice_default_steps_cpu", - "OnnxBackendNodeModelTest.test_slice_end_out_of_bounds_cpu", - "OnnxBackendNodeModelTest.test_slice_neg_cpu", - "OnnxBackendNodeModelTest.test_slice_neg_steps_cpu", - "OnnxBackendNodeModelTest.test_slice_negative_axes_cpu", - "OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_1d_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_2d_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_default_axis_cpu", - "OnnxBackendNodeModelTest.test_tile_precomputed_cpu", - 
"OnnxBackendNodeModelTest.test_top_k_cpu", - "OnnxBackendNodeModelTest.test_top_k_negative_axis_cpu", - "OnnxBackendNodeModelTest.test_top_k_smallest_cpu", - "OnnxBackendNodeModelTest.test_upsample_nearest_cpu", - "OnnxBackendSimpleModelTest.test_expand_shape_model1_cpu", - "OnnxBackendSimpleModelTest.test_expand_shape_model2_cpu", - "OnnxBackendSimpleModelTest.test_expand_shape_model3_cpu", - "OnnxBackendSimpleModelTest.test_expand_shape_model4_cpu", - ), - ( - skip_segfault, - "OnnxBackendNodeModelTest.test_sce_NCd1d2d3d4d5_mean_weight_cpu", # ticket: 81976 - "OnnxBackendNodeModelTest.test_sce_NCd1d2d3d4d5_mean_weight_log_prob_cpu", # ticket: 81976 - "OnnxBackendNodeModelTest.test_sce_NCd1d2d3d4d5_none_no_weight_cpu", # ticket: 81976 - "OnnxBackendNodeModelTest.test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_cpu", # ticket: 81976 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis0_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis_negative_1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis_negative_2_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis0_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis1_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis2_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_1_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_2_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_3_epsilon_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis0_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis2_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis3_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_1_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_2_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_3_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_4_cpu", # ticket: 90649 - "OnnxBackendNodeModelTest.test_layer_normalization_default_axis_cpu", # ticket: 90649 - ), - ( - xfail_issue_81976, # SoftmaxCrossEntropyLoss operator - "OnnxBackendNodeModelTest.test_sce_mean_3d_cpu", - "OnnxBackendNodeModelTest.test_sce_mean_3d_log_prob_cpu", - ), - ( - xfail_issue_82038, - "OnnxBackendNodeModelTest.test_scatternd_add_cpu", - "OnnxBackendNodeModelTest.test_scatternd_multiply_cpu", - ), - ( - xfail_issue_82039, - "OnnxBackendNodeModelTest.test_identity_opt_cpu", - ), - ( - xfail_issue_90649, - "OnnxBackendNodeModelTest.test_blackmanwindow_cpu", - "OnnxBackendNodeModelTest.test_blackmanwindow_symmetric_cpu", - "OnnxBackendNodeModelTest.test_hammingwindow_cpu", - "OnnxBackendNodeModelTest.test_hammingwindow_symmetric_cpu", - "OnnxBackendNodeModelTest.test_hannwindow_cpu", - "OnnxBackendNodeModelTest.test_hannwindow_symmetric_cpu", - "OnnxBackendNodeModelTest.test_melweightmatrix_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_add_1_sequence_1_tensor_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_add_2_sequences_cpu", - 
"OnnxBackendNodeModelTest.test_sequence_map_extract_shapes_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_identity_1_sequence_1_tensor_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_identity_1_sequence_cpu", - "OnnxBackendNodeModelTest.test_sequence_map_identity_2_sequences_cpu", - "OnnxBackendNodeModelTest.test_stft_cpu", - "OnnxBackendNodeModelTest.test_stft_with_window_cpu", - ), - ( - skip_issue_91151, - "OnnxBackendNodeModelTest.test_castlike_BFLOAT16_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_BFLOAT16_cpu", - ), - ( - xfail_issue_101965, - "OnnxBackendNodeModelTest.test_dft_axis_cpu", - "OnnxBackendNodeModelTest.test_dft_cpu", - "OnnxBackendNodeModelTest.test_dft_inverse_cpu", - ), - ( - xfail_unsupported_by_legacy_api, - "OnnxBackendNodeModelTest.test_blackmanwindow_expanded_cpu", - "OnnxBackendNodeModelTest.test_blackmanwindow_symmetric_expanded_cpu", - "OnnxBackendNodeModelTest.test_hammingwindow_expanded_cpu", - "OnnxBackendNodeModelTest.test_hammingwindow_symmetric_expanded_cpu", - "OnnxBackendNodeModelTest.test_hannwindow_expanded_cpu", - "OnnxBackendNodeModelTest.test_hannwindow_symmetric_expanded_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_and_pad_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_keep_dims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_keep_dims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_expanded_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_pad_expanded_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_1d_opset13_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_1d_opset18_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_2d_opset13_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_2d_opset18_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_default_axis_opset13_cpu", - "OnnxBackendNodeModelTest.test_split_variable_parts_default_axis_opset18_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_keepdims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_negative_axes_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_keepdims_random_expanded_cpu", - ), - ( - skip_bitwise_ui64, - "OnnxBackendNodeModelTest.test_bitwise_and_ui64_bcast_3v1d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_or_ui64_bcast_3v1d_cpu", - ), - ( - xfail_issue_99949, - "OnnxBackendNodeModelTest.test_bitwise_not_2d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_not_3d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_not_4d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_xor_ui8_bcast_4v3d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_xor_i16_3d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_xor_i32_2d_cpu", - "OnnxBackendNodeModelTest.test_bitwise_xor_ui64_bcast_3v1d_cpu", - ), - ( - xfail_issue_99950, - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_and_pad_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_chw_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_chw_expanded_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_hwc_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_hwc_expanded_cpu", - 
"OnnxBackendNodeModelTest.test_center_crop_pad_crop_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_pad_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_negative_axes_hwc_cpu", - "OnnxBackendNodeModelTest.test_center_crop_pad_crop_negative_axes_hwc_expanded_cpu", - ), - ( - xfail_issue_99952, - "OnnxBackendNodeModelTest.test_col2im_5d_cpu", - "OnnxBackendNodeModelTest.test_col2im_cpu", - "OnnxBackendNodeModelTest.test_col2im_dilations_cpu", - "OnnxBackendNodeModelTest.test_col2im_pads_cpu", - "OnnxBackendNodeModelTest.test_col2im_strides_cpu", - ), - ( - xfail_issue_99954, - "OnnxBackendNodeModelTest.test_constant_pad_axes_cpu", - ), - ( - xfail_issue_99955, - "OnnxBackendNodeModelTest.test_group_normalization_epsilon_expanded_cpu", - "OnnxBackendNodeModelTest.test_group_normalization_example_expanded_cpu", - ), - ( - xfail_issue_99957, - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis1_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_2d_axis_negative_1_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis1_epsilon_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis2_epsilon_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_1_epsilon_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_3d_axis_negative_2_epsilon_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis1_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis2_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis3_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_1_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_2_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_4d_axis_negative_3_expanded_ver18_cpu", - "OnnxBackendNodeModelTest.test_layer_normalization_default_axis_expanded_ver18_cpu", - ), - ( - xfail_issue_99958, - "OnnxBackendNodeModelTest.test_logsoftmax_large_number_expanded_ver18_cpu", - ), - ( - xfail_issue_99959, - "OnnxBackendNodeModelTest.test_mish_cpu", - ), - ( - xfail_issue_99960, - "OnnxBackendNodeModelTest.test_mvn_expanded_ver18_cpu", - ), - ( - xfail_issue_99961, - "OnnxBackendNodeModelTest.test_optional_get_element_optional_sequence_cpu", - "OnnxBackendNodeModelTest.test_optional_get_element_optional_tensor_cpu", - "OnnxBackendNodeModelTest.test_optional_get_element_tensor_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_empty_no_input_name_optional_input_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_empty_no_input_name_tensor_input_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_empty_no_input_optional_input_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_empty_no_input_tensor_input_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_empty_optional_input_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_optional_input_cpu", - "OnnxBackendNodeModelTest.test_optional_has_element_tensor_input_cpu", - ), - ( - xfail_issue_99968, - "OnnxBackendNodeModelTest.test_reduce_l1_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_do_not_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_do_not_keepdims_random_expanded_cpu", - 
"OnnxBackendNodeModelTest.test_reduce_l1_keep_dims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_keep_dims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_asc_axes_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_asc_axes_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_desc_axes_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_desc_axes_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_do_not_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_do_not_keepdims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_negative_axes_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_negative_axes_cpu", - "OnnxBackendNodeModelTest.test_reduce_max_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_max_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_max_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_max_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_max_negative_axes_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_max_negative_axes_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_mean_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_negative_axes_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_mean_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_mean_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_mean_negative_axes_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_mean_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_mean_negative_axes_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_negative_axes_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_negative_axes_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_prod_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_prod_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_prod_keepdims_example_cpu", - 
"OnnxBackendNodeModelTest.test_reduce_prod_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_prod_negative_axes_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_prod_negative_axes_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_do_not_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_do_not_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_do_not_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_do_not_keepdims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_negative_axes_keepdims_example_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_negative_axes_keepdims_random_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l1_negative_axes_keep_dims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_negative_axes_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_negative_axes_keepdims_example_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_negative_axes_keepdims_random_expanded_cpu", - ), - ( - xfail_issue_99969, - "OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_antialias_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_antialias_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_cubic_antialias_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_linear_antialias_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_not_smaller_cpu", - "OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_not_larger_cpu", - "OnnxBackendNodeModelTest.test_resize_tf_crop_and_resize_axes_2_3_cpu", - "OnnxBackendNodeModelTest.test_resize_tf_crop_and_resize_axes_3_2_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_nearest_axes_2_3_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_nearest_axes_3_2_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_axes_2_3_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_axes_3_2_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_not_larger_cpu", - ), - ( - xfail_issue_99970, - "OnnxBackendNodeModelTest.test_scatternd_max_cpu", - "OnnxBackendNodeModelTest.test_scatternd_min_cpu", - ), - ( - xfail_issue_99972, - "OnnxBackendNodeModelTest.test_softmax_large_number_expanded_ver18_cpu", - ), - ( - xfail_issue_99973, - "OnnxBackendNodeModelTest.test_split_1d_uneven_split_opset18_cpu", - "OnnxBackendNodeModelTest.test_split_2d_uneven_split_opset18_cpu", - "OnnxBackendNodeModelTest.test_split_zero_size_splits_opset13_cpu", - "OnnxBackendNodeModelTest.test_split_zero_size_splits_opset18_cpu", - ), - ( - xfail_issue_113506, - "OnnxBackendNodeModelTest.test_lstm_with_peepholes_cpu", - ), - ( - xfail_issue_119896, - "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT8E4M3FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT8E4M3FN_cpu", - 
"OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT8E5M2FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT8E5M2_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E4M3FNUZ_to_FLOAT16_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E4M3FNUZ_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E4M3FN_to_FLOAT16_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E4M3FN_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E5M2FNUZ_to_FLOAT16_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E5M2FNUZ_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E5M2_to_FLOAT16_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT8E5M2_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT8E4M3FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT8E4M3FN_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT8E5M2FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT8E5M2_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT16_to_FLOAT8E4M3FN_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT16_to_FLOAT8E5M2_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT_to_FLOAT8E4M3FN_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT_to_FLOAT8E5M2FNUZ_cpu", - "OnnxBackendNodeModelTest.test_cast_no_saturate_FLOAT_to_FLOAT8E5M2_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E4M3FNUZ_to_FLOAT_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E4M3FN_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E4M3FN_to_FLOAT_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E5M2FNUZ_to_FLOAT_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E5M2_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT8E5M2_to_FLOAT_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E4M3FNUZ_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E4M3FN_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E4M3FN_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E5M2FNUZ_expanded_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E5M2_cpu", - "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT8E5M2_expanded_cpu", - "OnnxBackendNodeModelTest.test_dequantizelinear_e4m3fn_cpu", - "OnnxBackendNodeModelTest.test_dequantizelinear_e5m2_cpu", - "OnnxBackendNodeModelTest.test_quantizelinear_e4m3fn_cpu", - "OnnxBackendNodeModelTest.test_quantizelinear_e5m2_cpu", - "OnnxBackendNodeModelTest.test_dequantizelinear_e4m3fn_float16_cpu", - "OnnxBackendNodeModelTest.test_dequantizelinear_e4m3fn_zero_point_cpu", - ), - ( - xfail_issue_119900, - "OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_half_pixel_symmetric_cpu", - "OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_half_pixel_symmetric_cpu", - ), - ( - xfail_issue_119903, - "OnnxBackendNodeModelTest.test_basic_deform_conv_with_padding_cpu", - "OnnxBackendNodeModelTest.test_basic_deform_conv_without_padding_cpu", - 
"OnnxBackendNodeModelTest.test_deform_conv_with_mask_bias_cpu", - "OnnxBackendNodeModelTest.test_deform_conv_with_multiple_offset_groups_cpu", - ), - ( - xfail_issue_119906, - "OnnxBackendNodeModelTest.test_lppool_1d_default_cpu", - "OnnxBackendNodeModelTest.test_lppool_2d_default_cpu", - "OnnxBackendNodeModelTest.test_lppool_2d_dilations_cpu", - "OnnxBackendNodeModelTest.test_lppool_2d_pads_cpu", - "OnnxBackendNodeModelTest.test_lppool_2d_same_lower_cpu", - "OnnxBackendNodeModelTest.test_lppool_2d_same_upper_cpu", - "OnnxBackendNodeModelTest.test_lppool_2d_strides_cpu", - "OnnxBackendNodeModelTest.test_lppool_3d_default_cpu", - ), - ( - xfail_issue_119919, - "OnnxBackendNodeModelTest.test_wrap_pad_cpu", - ), - ( - xfail_issue_119922, - "OnnxBackendNodeModelTest.test_ai_onnx_ml_array_feature_extractor_cpu", - "OnnxBackendNodeModelTest.test_ai_onnx_ml_binarizer_cpu", - "OnnxBackendNodeModelTest.test_ai_onnx_ml_label_encoder_string_int_cpu", - "OnnxBackendNodeModelTest.test_ai_onnx_ml_label_encoder_string_int_no_default_cpu", - "OnnxBackendNodeModelTest.test_ai_onnx_ml_label_encoder_tensor_mapping_cpu", - "OnnxBackendNodeModelTest.test_ai_onnx_ml_label_encoder_tensor_value_only_mapping_cpu", - ), - ( - xfail_issue_119925, - "OnnxBackendNodeModelTest.test_averagepool_2d_dilations_cpu", - ), - ( - xfail_issue_119926, - "OnnxBackendNodeModelTest.test_roialign_mode_max_cpu", - ), - ( - xfail_issue_125485, - "OnnxBackendNodeModelTest.test_affine_grid_2d_align_corners_cpu", - "OnnxBackendNodeModelTest.test_affine_grid_2d_align_corners_expanded_cpu", - "OnnxBackendNodeModelTest.test_affine_grid_2d_cpu", - "OnnxBackendNodeModelTest.test_affine_grid_2d_expanded_cpu", - "OnnxBackendNodeModelTest.test_affine_grid_3d_align_corners_cpu", - "OnnxBackendNodeModelTest.test_affine_grid_3d_align_corners_expanded_cpu", - "OnnxBackendNodeModelTest.test_affine_grid_3d_cpu", - "OnnxBackendNodeModelTest.test_affine_grid_3d_expanded_cpu", - ), - ( - xfail_issue_125486, - "OnnxBackendNodeModelTest.test_gelu_default_1_cpu", - "OnnxBackendNodeModelTest.test_gelu_default_2_cpu", - "OnnxBackendNodeModelTest.test_gelu_tanh_1_cpu", - "OnnxBackendNodeModelTest.test_gelu_tanh_2_cpu", - ), - ( - xfail_issue_125488, - "OnnxBackendNodeModelTest.test_image_decoder_decode_bmp_rgb_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_jpeg2k_rgb_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_jpeg_bgr_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_jpeg_grayscale_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_jpeg_rgb_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_png_rgb_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_pnm_rgb_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_tiff_rgb_cpu", - "OnnxBackendNodeModelTest.test_image_decoder_decode_webp_rgb_cpu", - ), - ( - xfail_issue_125487, - "OnnxBackendNodeModelTest.test_gridsample_aligncorners_true_cpu", - "OnnxBackendNodeModelTest.test_gridsample_bicubic_align_corners_0_additional_1_cpu", - "OnnxBackendNodeModelTest.test_gridsample_bicubic_align_corners_1_additional_1_cpu", - "OnnxBackendNodeModelTest.test_gridsample_bicubic_cpu", - "OnnxBackendNodeModelTest.test_gridsample_bilinear_align_corners_0_additional_1_cpu", - "OnnxBackendNodeModelTest.test_gridsample_bilinear_align_corners_1_additional_1_cpu", - "OnnxBackendNodeModelTest.test_gridsample_bilinear_cpu", - "OnnxBackendNodeModelTest.test_gridsample_cpu", - "OnnxBackendNodeModelTest.test_gridsample_volumetric_bilinear_align_corners_0_cpu", - 
"OnnxBackendNodeModelTest.test_gridsample_volumetric_bilinear_align_corners_1_cpu", - "OnnxBackendNodeModelTest.test_gridsample_volumetric_nearest_align_corners_0_cpu", - "OnnxBackendNodeModelTest.test_gridsample_volumetric_nearest_align_corners_1_cpu", - ), - ( - skip_issue_125489, - "OnnxBackendNodeModelTest.test_isinf_float16_cpu", - ), - ( - xfail_issue_125491, - "OnnxBackendNodeModelTest.test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False_cpu", - "OnnxBackendNodeModelTest.test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True_cpu", - "OnnxBackendNodeModelTest.test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False_cpu", - "OnnxBackendNodeModelTest.test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True_cpu", - "OnnxBackendNodeModelTest.test_averagepool_3d_dilations_small_cpu", - ), - ( - xfail_issue_125492, - "OnnxBackendNodeModelTest.test_dft_axis_opset19_cpu", - "OnnxBackendNodeModelTest.test_dft_inverse_opset19_cpu", - "OnnxBackendNodeModelTest.test_dft_opset19_cpu", - ), - ( - xfail_issue_125493, - "OnnxBackendNodeModelTest.test_reduce_l1_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_prod_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_empty_set_cpu", - ), - ( - xfail_issue_125495, - "OnnxBackendNodeModelTest.test_reduce_max_bool_inputs_cpu", - "OnnxBackendNodeModelTest.test_reduce_min_bool_inputs_cpu", - ), - ( - xfail_issue_127812, - "OnnxBackendNodeModelTest.test_reduce_l1_empty_set_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_l2_empty_set_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_empty_set_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_empty_set_non_reduced_axis_zero_cpu", - "OnnxBackendNodeModelTest.test_reduce_sum_square_empty_set_expanded_cpu", - ), - ( - skip_misalignment, - "OnnxBackendNodeModelTest.test_gelu_default_2_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_empty_set_expanded_cpu", - "OnnxBackendNodeModelTest.test_reduce_log_sum_exp_empty_set_expanded_cpu", - ), -] - -for test_group in tests_expected_to_fail: - for test_case in test_group[1:]: - expect_fail("{}".format(test_case), test_group[0]) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py b/src/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py deleted file mode 100644 index 025c438fedf..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import platform -import os - -import numpy as np -import ngraph as ng -import pytest -from openvino.inference_engine import IECore - -from tests_compatibility.runtime import get_runtime - - -@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', - reason='Ticket - 122712') -def test_import_onnx_with_external_data(): - model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx") - ie = IECore() - ie_network = ie.read_network(model=model_path) - - ng_function 
= ng.function_from_cnn(ie_network) - - dtype = np.float32 - value_a = np.array([1.0, 3.0, 5.0], dtype=dtype) - value_b = np.array([3.0, 5.0, 1.0], dtype=dtype) - # third input [5.0, 1.0, 3.0] read from external file - - runtime = get_runtime() - computation = runtime.computation(ng_function) - result = computation(value_a, value_b) - assert np.allclose(result, np.array([3.0, 3.0, 3.0], dtype=dtype)) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_onnx_import.py b/src/bindings/python/tests_compatibility/test_onnx/test_onnx_import.py deleted file mode 100644 index 7fe63152a70..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_onnx_import.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os - -import numpy as np -import ngraph as ng -import onnx -from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info -from openvino.inference_engine import IECore - -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model - - -def test_import_onnx_function(): - model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx") - ie = IECore() - ie_network = ie.read_network(model=model_path) - - ng_function = ng.function_from_cnn(ie_network) - - dtype = np.float32 - value_a = np.array([1.0], dtype=dtype) - value_b = np.array([2.0], dtype=dtype) - value_c = np.array([3.0], dtype=dtype) - - runtime = get_runtime() - computation = runtime.computation(ng_function) - result = computation(value_a, value_b, value_c) - assert np.allclose(result, np.array([6], dtype=dtype)) - - -def test_simple_graph(): - node1 = make_node("Add", ["A", "B"], ["X"], name="add_node1") - node2 = make_node("Add", ["X", "C"], ["Y"], name="add_node2") - graph = make_graph( - [node1, node2], - "test_graph", - [ - make_tensor_value_info("A", onnx.TensorProto.FLOAT, [1]), - make_tensor_value_info("B", onnx.TensorProto.FLOAT, [1]), - make_tensor_value_info("C", onnx.TensorProto.FLOAT, [1]), - ], - [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [1])], - ) - model = make_model(graph, producer_name="ngraph ONNX Importer") - - ng_model_function = import_onnx_model(model) - - runtime = get_runtime() - computation = runtime.computation(ng_model_function) - assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32)) - assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32)) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_batchnorm.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_batchnorm.py deleted file mode 100644 index e9e610c8eac..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_batchnorm.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx - -from tests_compatibility.test_onnx.utils import run_node - - -def make_batch_norm_node(**node_attributes): - return onnx.helper.make_node( - "BatchNormalization", inputs=["X", "scale", "B", "mean", "var"], outputs=["Y"], **node_attributes - ) - - -def test_batch_norm_test_node(): - data = np.arange(48).reshape((1, 3, 4, 4)).astype(np.float32) - scale = np.ones((3,)).astype(np.float32) # Gamma - bias = np.zeros((3,)).astype(np.float32) # Beta - mean = np.mean(data, axis=(0, 2, 3)) - var = np.var(data, axis=(0, 2, 3)) - - expected_output = np.array( - [ - [ - [ - 
[-1.62694025, -1.41001487, -1.19308949, -0.97616416], - [-0.75923878, -0.54231346, -0.32538807, -0.10846269], - [0.10846269, 0.32538807, 0.54231334, 0.75923872], - [0.9761641, 1.19308949, 1.41001487, 1.62694025], - ], - [ - [-1.62694049, -1.41001511, -1.19308972, -0.97616434], - [-0.7592392, -0.54231358, -0.32538843, -0.10846281], - [0.10846233, 0.32538795, 0.5423131, 0.75923872], - [0.97616386, 1.19308949, 1.41001463, 1.62694025], - ], - [ - [-1.62694025, -1.41001511, -1.19308949, -0.97616434], - [-0.75923872, -0.54231358, -0.32538795, -0.10846233], - [0.10846233, 0.32538795, 0.54231358, 0.7592392], - [0.97616386, 1.19308949, 1.41001511, 1.62694073], - ], - ] - ], - dtype=np.float32, - ) - - node = make_batch_norm_node() - result = run_node(node, [data, scale, bias, mean, var])[0] - assert np.allclose(result, expected_output, rtol=1e-04, atol=1e-08) - - scale = np.broadcast_to(0.1, (3,)).astype(np.float32) # Gamma - bias = np.broadcast_to(1, (3,)).astype(np.float32) # Beta - - expected_output = np.array( - [ - [ - [ - [0.83730596, 0.85899848, 0.88069105, 0.90238357], - [0.92407608, 0.94576865, 0.96746117, 0.98915374], - [1.01084626, 1.03253877, 1.05423129, 1.07592392], - [1.09761643, 1.11930895, 1.14100146, 1.16269398], - ], - [ - [0.83730596, 0.85899854, 0.88069105, 0.90238357], - [0.92407608, 0.94576865, 0.96746117, 0.98915374], - [1.01084626, 1.03253877, 1.05423141, 1.07592392], - [1.09761643, 1.11930895, 1.14100146, 1.16269398], - ], - [ - [0.83730596, 0.85899848, 0.88069105, 0.90238357], - [0.92407614, 0.94576865, 0.96746117, 0.98915374], - [1.01084626, 1.03253877, 1.05423141, 1.07592392], - [1.09761643, 1.11930895, 1.14100146, 1.16269398], - ], - ] - ], - dtype=np.float32, - ) - - node = make_batch_norm_node() - result = run_node(node, [data, scale, bias, mean, var])[0] - assert np.allclose(result, expected_output, rtol=1e-04, atol=1e-08) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_binary.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_binary.py deleted file mode 100644 index 437a2ac44a1..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_binary.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import pytest -from onnx.helper import make_graph, make_model, make_tensor_value_info - -from tests_compatibility.test_onnx.utils import run_model - - -def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **node_attributes): - inputs = [np.array(input_data_left), np.array(input_data_right)] - onnx_node = onnx.helper.make_node(op_type, inputs=["x", "y"], outputs=["z"], **node_attributes) - - input_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) - for name, value in zip(onnx_node.input, inputs) - ] - output_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, ()) for name in onnx_node.output] - - graph = make_graph([onnx_node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="ngraph ONNX Importer") - model.opset_import[0].version = opset - return run_model(model, inputs)[0] - - -def test_add_opset4(): - assert np.array_equal(import_and_compute("Add", 1, 2, opset=4), np.array(3, dtype=np.float32)) - - assert np.array_equal(import_and_compute("Add", [1], [2], opset=4), np.array([3], dtype=np.float32)) - - assert np.array_equal( - import_and_compute("Add", [1, 2], [3, 4], opset=4), np.array([4, 6], dtype=np.float32) - ) 
- - assert np.array_equal( - import_and_compute("Add", [1, 2, 3], [4, 5, 6], opset=4), np.array([5, 7, 9], dtype=np.float32) - ) - - assert np.array_equal( - import_and_compute("Add", [[1, 2, 3], [4, 5, 6]], [7, 8, 9], broadcast=1, opset=4), - np.array([[8, 10, 12], [11, 13, 15]], dtype=np.float32), - ) - - # shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar - left_operand = np.ones((2, 3, 4, 5)).astype(np.float32) - assert np.array_equal(import_and_compute("Add", left_operand, 8, broadcast=1, opset=4), left_operand + 8) - - # shape(A) = (2, 3, 4, 5), shape(B) = (5,) - left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) - right_operand = np.random.rand(5).astype(np.float32) - import_and_compute("Add", left_operand, right_operand, broadcast=1, opset=4) - - # shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) - left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) - right_operand = np.random.rand(4, 5).astype(np.float32) - assert np.array_equal( - import_and_compute("Add", left_operand, right_operand, broadcast=1, opset=4), - left_operand + right_operand, - ) - - # shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 - left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) - right_operand = np.random.rand(3, 4).astype(np.float32) - assert np.array_equal( - import_and_compute("Add", left_operand, right_operand, broadcast=1, axis=1, opset=4), - left_operand + right_operand.reshape(1, 3, 4, 1), - ) - - # shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 - left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) - right_operand = np.random.rand(2).astype(np.float32) - assert np.array_equal( - import_and_compute("Add", left_operand, right_operand, broadcast=1, axis=0, opset=4), - left_operand + right_operand.reshape(2, 1, 1, 1), - ) - - -@pytest.mark.parametrize( - "left_shape,right_shape", - [ - ((1,), (1,)), - ((256, 256, 3), (3,)), - ((5, 4), (1,)), - ((5, 4), (4,)), - ((15, 3, 5), (3, 5)), - ((15, 3, 5), (15, 1, 5)), - ((15, 3, 5), (3, 1)), - ((8, 1, 6, 1), (7, 1, 5)), - ], -) -def test_add_opset7(left_shape, right_shape): - """Test Add-7 operator, which uses numpy-style broadcasting.""" - left_input = np.ones(left_shape) - right_input = np.ones(right_shape) - assert np.array_equal(import_and_compute("Add", left_input, right_input), left_input + right_input) - - -def test_sub(): - assert np.array_equal(import_and_compute("Sub", 20, 1), np.array(19, dtype=np.float32)) - - assert np.array_equal(import_and_compute("Sub", [20], [1]), np.array([19], dtype=np.float32)) - - assert np.array_equal(import_and_compute("Sub", [20, 19], [1, 2]), np.array([19, 17], dtype=np.float32)) - - assert np.array_equal( - import_and_compute("Sub", [[1, 2, 3], [4, 5, 6]], [7, 8, 9], opset=6, broadcast=1), - np.array([[-6, -6, -6], [-3, -3, -3]], dtype=np.float32), - ) - - -def test_mul(): - assert np.array_equal(import_and_compute("Mul", 2, 3), np.array(6, dtype=np.float32)) - - assert np.array_equal(import_and_compute("Mul", [2], [3]), np.array([6], dtype=np.float32)) - - assert np.array_equal(import_and_compute("Mul", [2, 3], [4, 5]), np.array([8, 15], dtype=np.float32)) - - assert np.array_equal( - import_and_compute("Mul", [[1, 2, 3], [4, 5, 6]], [7, 8, 9], opset=6, broadcast=1), - np.array([[7, 16, 27], [28, 40, 54]], dtype=np.float32), - ) - - -def test_div(): - assert np.array_equal(import_and_compute("Div", 6, 3), np.array(2, dtype=np.float32)) - - assert np.array_equal(import_and_compute("Div", [6], [3]), np.array([2], dtype=np.float32)) - - assert np.array_equal(import_and_compute("Div", [6, 8], 
[3, 2]), np.array([2, 4], dtype=np.float32)) - - assert np.array_equal( - import_and_compute("Div", [[10, 20, 30], [40, 50, 60]], [2, 5, 6], opset=6, broadcast=1), - np.array([[5, 4, 5], [20, 10, 10]], dtype=np.float32), - ) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_convpool.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_convpool.py deleted file mode 100644 index 5e43a10a152..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_convpool.py +++ /dev/null @@ -1,402 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import pytest -from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info -from onnx.onnx_cpp2py_export.checker import ValidationError - -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node - - -@pytest.fixture -def ndarray_1x1x4x4(): - return np.array( - [[11, 12, 13, 14], [15, 16, 17, 18], [19, 20, 21, 22], [23, 24, 25, 26]], dtype=np.float32 - ).reshape([1, 1, 4, 4]) - - -def make_onnx_model_for_conv_op(x_shape, weights_shape, transpose=False, **attributes): - output_shape = () # We don't need output shape to be accurate for these tests - - if transpose: - node_op = "ConvTranspose" - else: - node_op = "Conv" - - node = make_node(node_op, ["X", "weight"], ["Y"], name="test_node", **attributes) - graph = make_graph( - [node], - "test_graph", - [ - make_tensor_value_info("X", onnx.TensorProto.FLOAT, x_shape), - make_tensor_value_info("weight", onnx.TensorProto.FLOAT, weights_shape), - ], - [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, output_shape)], - ) - model = make_model(graph, producer_name="ngraph ONNXImporter") - return model - - -def import_and_compute_conv(x, weights, transpose=False, **attributes): - x, weights = np.array(x), np.array(weights) - onnx_model = make_onnx_model_for_conv_op(x.shape, weights.shape, transpose=transpose, **attributes) - ng_model_function = import_onnx_model(onnx_model) - computation = get_runtime().computation(ng_model_function) - return computation(x, weights)[0] - - -def test_2d_conv(): - # x should have shape N(batch) x C x H x W - input_x = np.array( - [ - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - dtype=np.float32, - ).reshape(1, 1, 9, 9) - - # filter weights should have shape M x C x kH x kW - input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( - [1, 1, 3, 3] - ) - - # convolution with padding=1 should produce 9 x 9 output: - result = import_and_compute_conv(input_x, input_filter, pads=(1, 1, 1, 1), strides=(1, 1)) - assert np.array_equal( - result, - np.array( - [ - [ - [ - [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, 
-20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], - ] - ] - ], - dtype=np.float32, - ), - ) - - # convolution with padding=0 should produce 7 x 7 output: - result = import_and_compute_conv(input_x, input_filter, pads=(0, 0, 0, 0), strides=(1, 1)) - assert np.array_equal( - result, - np.array( - [ - [ - [ - [-20, -20, 20, 20, 0, 0, 0], - [-20, -20, 20, 20, 0, 0, 0], - [-20, -20, 20, 20, 0, 0, 0], - [-20, -20, 20, 20, 0, 0, 0], - [-20, -20, 20, 20, 0, 0, 0], - [-20, -20, 20, 20, 0, 0, 0], - [-20, -20, 20, 20, 0, 0, 0], - ] - ] - ], - dtype=np.float32, - ), - ) - - # convolution with strides=2 should produce 4 x 4 output: - result = import_and_compute_conv(input_x, input_filter, pads=(0, 0, 0, 0), strides=(2, 2)) - assert np.array_equal( - result, - np.array( - [ - [ - [ - [-20.0, 20.0, 0.0, 0.0], - [-20.0, 20.0, 0.0, 0.0], - [-20.0, 20.0, 0.0, 0.0], - [-20.0, 20.0, 0.0, 0.0], - ] - ] - ], - dtype=np.float32, - ), - ) - - # convolution with dilations=2 should produce 5 x 5 output: - result = import_and_compute_conv(input_x, input_filter, dilations=(2, 2)) - assert np.array_equal( - result, - np.array( - [ - [ - [ - [0, 0, 20, 20, 0], - [0, 0, 20, 20, 0], - [0, 0, 20, 20, 0], - [0, 0, 20, 20, 0], - [0, 0, 20, 20, 0], - ] - ] - ], - dtype=np.float32, - ), - ) - - -def test_3d_conv(): - # x should have shape N(batch) x C x H x W x D - input_x = np.array( - [ - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], - ], - dtype=np.float32, - ).reshape([1, 1, 9, 9, 1]) - input_x = np.broadcast_to(input_x, (1, 1, 9, 9, 4)) - - # filter weights should have shape M x C x kH x kW x kD - input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( - [1, 1, 3, 3, 1] - ) - input_filter = np.broadcast_to(input_filter, (1, 1, 3, 3, 3)) - - # convolution with padding=0 should produce 7 x 7 x 2 output: - result = import_and_compute_conv( - input_x, input_filter, dilations=(1, 1, 1), pads=(0, 0, 0, 0, 0, 0), strides=(1, 1, 1) - ) - - assert np.array_equal( - np.moveaxis(result.squeeze(), (0, 1, 2), (1, 2, 0)), - np.array( - [ - [ - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - ], - [ - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], - ], - ], - dtype=np.float32, - ), - ) - - -def test_2d_conv_transpose(): - # x should have shape N(batch) x C x H x W - input_x = np.array( - [ - [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 
20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], - [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], - ], - dtype=np.float32, - ).reshape([1, 1, 9, 9]) - - # filter weights should have shape M x C x kH x kW - input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( - [1, 1, 3, 3] - ) - - # deconvolution with padding=1 should produce 9 x 9 output: - result = import_and_compute_conv(input_x, input_filter, transpose=True, pads=(1, 1, 1, 1), strides=(1, 1)) - - assert np.array_equal( - result.reshape([9, 9]), - np.array( - [ - [-50.0, -50.0, 100.0, 100.0, -50.0, -50.0, 0.0, 0.0, 0.0], - [-75.0, -75.0, 150.0, 150.0, -75.0, -75.0, 0.0, 0.0, 0.0], - [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], - [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], - [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], - [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], - [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], - [-75.0, -75.0, 150.0, 150.0, -75.0, -75.0, 0.0, 0.0, 0.0], - [-50.0, -50.0, 100.0, 100.0, -50.0, -50.0, 0.0, 0.0, 0.0], - ], - dtype=np.float32, - ), - ) - - -def test_pad_opset_1(): - x = np.ones((2, 2), dtype=np.float32) - y = np.pad(x, pad_width=1, mode="constant") - - model = get_node_model("Pad", x, paddings=[1, 1, 1, 1]) - ng_results = run_model(model, [x]) - assert np.array_equal(ng_results, [y]) - - x = np.random.randn(1, 3, 4, 5).astype(np.float32) - y = np.pad(x, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode="constant") - - model = get_node_model("Pad", x, mode="constant", paddings=[0, 0, 1, 3, 0, 0, 2, 4]) - ng_results = run_model(model, [x]) - assert np.array_equal(ng_results, [y]) - - # incorrect paddings rank - x = np.ones((2, 2), dtype=np.float32) - model = get_node_model("Pad", x, paddings=[0, 1, 1, 3, 1, 2]) - with pytest.raises(RuntimeError): - run_model(model, [x]) - - # no paddings arttribute - model = get_node_model("Pad", x) - with pytest.raises(ValidationError): - import_onnx_model(model) - - -def test_pad_opset_2(): - x = np.ones((2, 2), dtype=np.float32) - y = np.pad(x, pad_width=1, mode="constant") - - model = get_node_model("Pad", x, opset=2, pads=[1, 1, 1, 1]) - ng_results = run_model(model, [x]) - assert np.array_equal(ng_results, [y]) - - x = np.random.randn(1, 3, 4, 5).astype(np.float32) - y = np.pad(x, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode="constant") - - model = get_node_model("Pad", x, opset=2, mode="constant", pads=[0, 0, 1, 3, 0, 0, 2, 4]) - ng_results = run_model(model, [x]) - assert np.array_equal(ng_results, [y]) - - # incorrect pads rank - x = np.ones((2, 2), dtype=np.float32) - model = get_node_model("Pad", x, opset=2, pads=[0, 1, 1, 3, 1, 2]) - with pytest.raises(RuntimeError): - run_model(model, [x]) - - -def test_pad_negative_values_begin(): - x = np.ones((2, 2), dtype=np.float32) - - # Axis 1 begin - model = get_node_model("Pad", x, opset=2, pads=[-1, 0, 0, 0]) - ng_result = run_model(model, [x])[0] - assert np.array_equal(ng_result, np.array([[1, 1]])) - - # Axis 2 begin - model = get_node_model("Pad", x, opset=2, pads=[0, -1, 0, 0]) - ng_result = run_model(model, [x])[0] - assert np.array_equal(ng_result, np.array([[1], [1]])) - - -def test_pad_negative_values_end(): - x = np.ones((2, 2), dtype=np.float32) - - # Axis 1 end - model = 
get_node_model("Pad", x, opset=2, pads=[0, 0, -1, 0]) - ng_result = run_model(model, [x])[0] - assert np.array_equal(ng_result, np.array([[1.0, 1.0]])) - - # Axis 2 end - model = get_node_model("Pad", x, opset=2, pads=[0, 0, 0, -1]) - ng_result = run_model(model, [x])[0] - assert np.array_equal(ng_result, np.array([[1], [1]])) - - -def test_pool_average(ndarray_1x1x4x4): - x = ndarray_1x1x4x4 - node = onnx.helper.make_node( - "AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2) - ) - y = np.array([[13.5, 15.5], [21.5, 23.5]], dtype=np.float32).reshape([1, 1, 2, 2]) - ng_results = run_node(node, [x]) - assert np.array_equal(ng_results, [y]) - - node = onnx.helper.make_node( - "AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2), pads=(1, 1, 1, 1) - ) - y = np.array([[11, 12.5, 14], [17, 18.5, 20], [23, 24.5, 26]], dtype=np.float32).reshape([1, 1, 3, 3]) - ng_results = run_node(node, [x]) - assert np.array_equal(ng_results, [y]) - - -def test_pool_average_3d(ndarray_1x1x4x4): - x = np.broadcast_to(ndarray_1x1x4x4, (1, 1, 4, 4, 4)) - node = onnx.helper.make_node( - "AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2, 2), strides=(2, 2, 2) - ) - y = np.array([[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]], dtype=np.float32).reshape( - [1, 1, 2, 2, 2] - ) - ng_results = run_node(node, [x]) - assert np.array_equal(ng_results, [y]) - - -def test_pool_max(ndarray_1x1x4x4): - node = onnx.helper.make_node("MaxPool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2)) - - x = ndarray_1x1x4x4 - y = np.array([[16, 18], [24, 26]], dtype=np.float32).reshape([1, 1, 2, 2]) - - ng_results = run_node(node, [x]) - assert np.array_equal(ng_results, [y]) - - -def test_pool_global_max(ndarray_1x1x4x4): - node = onnx.helper.make_node("GlobalMaxPool", inputs=["x"], outputs=["y"]) - - x = ndarray_1x1x4x4 - y = np.array([26], dtype=np.float32).reshape([1, 1, 1, 1]) - - ng_results = run_node(node, [x]) - assert np.array_equal(ng_results, [y]) - - -def test_pool_global_average(ndarray_1x1x4x4): - node = onnx.helper.make_node("GlobalAveragePool", inputs=["x"], outputs=["y"]) - - x = ndarray_1x1x4x4 - y = np.array([18.5], dtype=np.float32).reshape([1, 1, 1, 1]) - - ng_results = run_node(node, [x]) - assert np.array_equal(ng_results, [y]) - - -def test_pool_global_average_3d(ndarray_1x1x4x4): - x = np.broadcast_to(ndarray_1x1x4x4, (1, 1, 4, 4, 4)) - - node = onnx.helper.make_node("GlobalAveragePool", inputs=["x"], outputs=["y"]) - y = np.array([18.5], dtype=np.float32).reshape([1, 1, 1, 1, 1]) - ng_results = run_node(node, [x]) - assert np.array_equal(ng_results, [y]) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_logical.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_logical.py deleted file mode 100644 index 4e98600a605..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_logical.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import pytest - -from tests_compatibility.test_onnx.utils import run_node - - -@pytest.mark.parametrize( - "onnx_op, numpy_func, data_type", - [ - pytest.param("And", np.logical_and, bool), - pytest.param("Or", np.logical_or, bool), - pytest.param("Xor", np.logical_xor, bool), - pytest.param("Equal", np.equal, np.int32), - pytest.param("Greater", np.greater, np.int32), - pytest.param("Less", np.less, np.int32), - ], -) -def 
test_logical(onnx_op, numpy_func, data_type): - node = onnx.helper.make_node(onnx_op, inputs=["A", "B"], outputs=["C"], broadcast=1) - - input_a = np.array([[0, 1, -1], [0, 1, -1], [0, 1, -1]]).astype(data_type) - input_b = np.array([[0, 0, 0], [1, 1, 1], [-1, -1, -1]]).astype(data_type) - expected_output = numpy_func(input_a, input_b) - ng_results = run_node(node, [input_a, input_b], opset_version=4) - assert np.array_equal(ng_results, [expected_output]) - - input_a = np.array([[0, 1, -1], [0, 1, -1], [0, 1, -1]]).astype(data_type) - input_b = np.array(1).astype(data_type) - expected_output = numpy_func(input_a, input_b) - ng_results = run_node(node, [input_a, input_b], opset_version=4) - assert np.array_equal(ng_results, [expected_output]) - - -def test_logical_not(): - input_data = np.array([[False, True, True], [False, True, False], [False, False, True]]) - expected_output = np.logical_not(input_data) - - node = onnx.helper.make_node("Not", inputs=["X"], outputs=["Y"]) - ng_results = run_node(node, [input_data]) - assert np.array_equal(ng_results, [expected_output]) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_matmul.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_matmul.py deleted file mode 100644 index 5f1ed8a5952..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_matmul.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info -import pytest - -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils import import_onnx_model - - -def make_onnx_model_for_matmul_op(input_left, input_right): - output_shape = np.matmul(input_left, input_right).shape - node = make_node("MatMul", ["X", "Y"], ["Z"], name="test_node") - graph = make_graph( - [node], - "test_graph", - [ - make_tensor_value_info("X", onnx.TensorProto.FLOAT, input_left.shape), - make_tensor_value_info("Y", onnx.TensorProto.FLOAT, input_right.shape), - ], - [make_tensor_value_info("Z", onnx.TensorProto.FLOAT, output_shape)], - ) - model = make_model(graph, producer_name="ngraph ONNXImporter") - return model - - -def import_and_compute_matmul(input_left, input_right): - input_data_left = np.array(input_left) - input_data_right = np.array(input_right) - onnx_model = make_onnx_model_for_matmul_op(input_data_left, input_data_right) - transformer = get_runtime() - ng_model_function = import_onnx_model(onnx_model) - computation = transformer.computation(ng_model_function) - return computation(input_data_left, input_data_right)[0] - - -def numpy_gemm(input_a, input_b, input_c, alpha=1, beta=1, trans_a=False, trans_b=False, broadcast=False): - input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c) - if trans_a: - input_a = input_a.T - if trans_b: - input_b = input_b.T - - return (alpha * np.dot(input_a, input_b)) + (beta * input_c) - - -def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs): - input_a_for_output = input_a - input_b_for_output = input_b - if kwargs.get("transA"): - input_a_for_output = input_a.T - if kwargs.get("transB"): - input_b_for_output = input_b.T - - output_shape = np.dot(input_a_for_output, input_b_for_output).shape - node = make_node("Gemm", ["A", "B", "C"], ["Y"], name="test_node", **kwargs) - graph = make_graph( - [node], - "test_graph", - [ - make_tensor_value_info("A", 
onnx.TensorProto.FLOAT, input_a.shape), - make_tensor_value_info("B", onnx.TensorProto.FLOAT, input_b.shape), - make_tensor_value_info("C", onnx.TensorProto.FLOAT, input_c.shape), - ], - [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, output_shape)], - ) - model = make_model(graph, producer_name="ngraph ONNXImporter") - return model - - -def import_and_compute_gemm(input_a, input_b, input_c, **kwargs): - input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c) - - if kwargs.get("trans_a"): - kwargs["transA"] = kwargs["trans_a"] - del kwargs["trans_a"] - - if kwargs.get("trans_b"): - kwargs["transB"] = kwargs["trans_b"] - del kwargs["trans_b"] - - onnx_model = make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs) - transformer = get_runtime() - ng_model_function = import_onnx_model(onnx_model) - computation = transformer.computation(ng_model_function) - return computation(input_a, input_b, input_c)[0] - - -@pytest.mark.parametrize( - "data, description", - [ - pytest.param(([1, 2], [1, 3]), "vector and vector 1"), - (([1, 2, 3], [[4], [5], [6]]), "vector and vector 2"), - (([[1, 2, 3]], [1, 2, 3]), "vector and vector 3"), - (([1, 2, 3], [[4, 5], [6, 7], [8, 9]]), "vector and matrix"), - (([[1, 2, 3], [4, 5, 6]], [[7], [8], [9]]), "matrix and vector"), - (([[1, 2], [3, 4]], [[5, 6], [7, 8]]), "matrix and matrix 1"), - (([[1, 2, 3], [4, 5, 6]], [[7, 8], [9, 10], [11, 12]]), "matrix and matrix 2"), - (([[1, 2], [3, 4], [5, 6]], [[7, 8, 9], [10, 11, 12]]), "matrix and matrix 3") - ], -) -def test_op_matmul(data, description): - assert np.allclose(import_and_compute_matmul(*data), np.matmul(*data)) - - -def test_op_matmul_3d(): - # 3d tensor @ 3d tensor - data = ([[[1, 2], [3, 4]], [[1, 2], [3, 4]]], [[[5, 6], [7, 8]], [[5, 6], [7, 8]]]) - assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data)) - - data = (np.ones((5, 2, 3)), (np.ones((5, 3, 2)) + 2)) - assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data)) - - -@pytest.mark.parametrize( - "data, kwargs, description", - [ - pytest.param(([1, 2], [1, 3], [1, 4]), {}, "vectors"), - pytest.param(([1, 2], [1, 3], 1), {}, "vectors and scalar"), - pytest.param(([1, 2], [1, 3], [1]), {}, "vectors and identity vector"), - pytest.param(([1, 2], [1, 3], [1, 4]), {"alpha": 7.0, "beta": 9.0}, - "vectors with alpha and beta"), - pytest.param(([1, 2, 3, 4], [1, 3, 5, 7], [1, 4]), {"alpha": 7.0, "beta": 9.0}, - "longer vectors with alpha and beta") - ], -) -def test_gemm(data, kwargs, description): - assert np.allclose(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) - - -@pytest.mark.parametrize( - "data, kwargs, description", - [ - pytest.param(([1, 2], [1, 3], [1, 4]), {"trans_a": True, "trans_b": True}, - "vectors with trans_a/trans_b"), - pytest.param(([[1, 2], [1, 2]], [[1, 3], [1, 3]], [4, 1]), - {"trans_a": True, "trans_b": True, "alpha": 7.0, "beta": 9.0}, - "matrices and vector with trans_b and alpha/beta"), - pytest.param(([[1, 2]], [[1, 3]], 1), {"trans_b": True, "alpha": 7.0, "beta": 9.0}, - "matrices and scalar with trans_b and alpha/beta"), - pytest.param(([[1], [2]], [[1], [3]], 1), {"trans_a": True, "alpha": 7.0, "beta": 9.0}, - "matrices and scalar with trans_a and alpha/beta"), - ], -) -def test_gemm_transpositions(data, kwargs, description): - assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) - - -def test_gemm_flatten(): - # input_a.shape is (4,1) - data = ([[1], [2], [3], [4]], [1, 3, 5, 7], [1, 
4]) - kwargs = {"alpha": 7.0, "beta": 9.0, "trans_a": True} - assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py deleted file mode 100644 index 7b1ebc7295c..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import platform - -import numpy as np -import onnx -import pytest - -from tests_compatibility.test_onnx.utils import run_node - - -def import_and_compute(op_type, input_data, **node_attrs): - data_inputs = [np.array(input_data)] - node = onnx.helper.make_node(op_type, inputs=["x"], outputs=["y"], **node_attrs) - return run_node(node, data_inputs).pop() - - -def assert_onnx_import_equals_callable(onnx_op_type, python_function, data, **kwargs): - data = np.array(data, dtype=np.float32) - assert np.allclose(import_and_compute(onnx_op_type, data, **kwargs), python_function(data, **kwargs)) - - -def test_sigmoid(): - def sigmoid(x): - return 1 / (1 + np.exp(-x)) - - assert_onnx_import_equals_callable("Sigmoid", sigmoid, [-2, -1.0, 0.0, 1.0, 2.0]) - assert_onnx_import_equals_callable("Sigmoid", sigmoid, [0.0]) - assert_onnx_import_equals_callable("Sigmoid", sigmoid, [-2, -1.0, 0.0, 1.0, 2.0]) - - -def test_tanh(): - assert_onnx_import_equals_callable("Tanh", np.tanh, [-2, -1.0, 0.0, 1.0, 2.0]) - assert_onnx_import_equals_callable("Tanh", np.tanh, [0.0]) - assert_onnx_import_equals_callable("Tanh", np.tanh, [-2, -1.0, 0.0, 1.0, 2.0]) - - -def test_relu(): - def relu(x): - return np.maximum(x, 0) - - assert_onnx_import_equals_callable("Relu", relu, [-2, -1.0, 0.0, 1.0, 2.0]) - assert_onnx_import_equals_callable("Relu", relu, [0.0]) - assert_onnx_import_equals_callable("Relu", relu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1]) - assert_onnx_import_equals_callable("Relu", relu, [[1, 2, 3], [4, 5, 6]]) - assert_onnx_import_equals_callable("Relu", relu, [[-3, -2, -1], [1, 2, 3]]) - - -@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', - reason='Ticket - 122712') -def test_leaky_relu(): - def leaky_relu(x, alpha=0.01): - return np.maximum(alpha * x, x) - - assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [-2, -1.0, 0.0, 1.0, 2.0], alpha=0.5) - assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [0.0]) - assert_onnx_import_equals_callable( - "LeakyRelu", leaky_relu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], alpha=1.0 - ) - assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [[1, 2, 3], [4, 5, 6]], alpha=0.2) - assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [[-3, -2, -1], [1, 2, 3]]) - - -@pytest.mark.parametrize( - "x, slope", - [ - ([-2, -1.0, 0.0, 1.0, 2.0], 0.5), - ([0.0], 1), - ([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1), - ([[1, 2, 3], [4, 5, 6]], 0.5), - ([[-3, -2, -1], [1, 2, 3]], 1), - ] -) -def test_parametric_relu(x, slope): - def parametic_relu(x, slope): - return np.where(x < 0, slope * x, x) - - x, slope = np.array(x).astype(np.float32), np.array(slope).astype(np.float32) - expected_output = parametic_relu(x, slope) - node = onnx.helper.make_node("PRelu", inputs=["x", "slope"], outputs=["y"]) - output = run_node(node, [x, slope]).pop() - assert np.allclose(output, expected_output) - - -@pytest.mark.xfail(condition=platform.system() == 'Darwin' and 
platform.machine() == 'arm64', - reason='Ticket - 122712') -def test_selu(): - # f(x) = gamma * (alpha * exp(x) - alpha) for x <= 0, y = gamma * x for x > 0 - def selu(x, alpha=1.67326319217681884765625, gamma=1.05070102214813232421875): - return np.where(x <= 0, gamma * (alpha * np.exp(x) - alpha), gamma * x) - - assert_onnx_import_equals_callable("Selu", selu, [-2, -1.0, 0.0, 1.0, 2.0]) - assert_onnx_import_equals_callable("Selu", selu, [0.0]) - assert_onnx_import_equals_callable("Selu", selu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1]) - assert_onnx_import_equals_callable("Selu", selu, [[1, 2, 3], [4, 5, 6]]) - assert_onnx_import_equals_callable("Selu", selu, [-2, -1.0, 0.0, 1.0, 2.0], gamma=0.5, alpha=0.5) - - -@pytest.mark.parametrize( - "data, alpha_value", - [ - pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 1.0), - pytest.param([0.0], 1.0), - pytest.param([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1.0), - pytest.param([[1, 2, 3], [4, 5, 6]], 1.0), - pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 0.5) - ] -) -def test_elu(data, alpha_value): - # f(x) = alpha * (exp(x) - 1) for x < 0, f(x) = x for x >= 0 - def elu(x, alpha): - return np.where(x < 0, alpha * (np.exp(x) - 1), x) - - assert_onnx_import_equals_callable("Elu", elu, data, alpha=alpha_value) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_reduction.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_reduction.py deleted file mode 100644 index 4122d1a3158..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_reduction.py +++ /dev/null @@ -1,376 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import pytest - -from tests_compatibility import xfail_issue_99962 -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils import ( - run_node, - import_onnx_model, -) - -reduce_data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) -reduce_axis_parameters = [ - (0,), - (1,), - (2,), - (0, 1), - (0, 2), - (1, 2), - (0, 1, 2) -] - -reduce_operation_parameters_as_attr = [ - ("ReduceMax", np.max), - ("ReduceMin", np.min), - ("ReduceMean", np.mean), - ("ReduceProd", np.prod) -] - -reduce_operation_parameters_as_const = [ - ("ReduceSum", np.sum), -] - - -def import_and_compute(op_type, input_data, **node_attrs): - data_inputs = [np.array(input_data)] - node = onnx.helper.make_node(op_type, inputs=["x"], outputs=["y"], **node_attrs) - return run_node(node, data_inputs).pop() - - -def import_and_compute_with_axes_as_const(op_type, data, axes, **node_attrs): - data_input = np.array(data) - axes_input = np.array(axes, dtype=int) - axes_const_node = onnx.helper.make_node( - "Constant", - inputs=[], - outputs=["const_axes"], - value=onnx.helper.make_tensor( - name="const_axes", - data_type=onnx.TensorProto.INT64, - dims=axes_input.shape, - vals=axes_input.flatten(), - ), - ) - node = onnx.helper.make_node( - op_type, inputs=["x", "const_axes"], outputs=["y"], **node_attrs - ) - graph = onnx.helper.make_graph( - [axes_const_node, node], - "test_graph", - [onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, data_input.shape)], - [onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, ())], - ) - - model = onnx.helper.make_model(graph, producer_name="ngraph ONNX Importer") - model.opset_import[0].version = 13 - ng_model_function = import_onnx_model(model) - runtime = get_runtime() - computation = 
runtime.computation(ng_model_function) - return computation(data_input)[0] - - -@pytest.mark.parametrize("operation, ref_operation", - reduce_operation_parameters_as_attr + reduce_operation_parameters_as_const) -def test_reduce_operation_keepdims_none_axes(operation, ref_operation): - assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=True), - ref_operation(reduce_data, keepdims=True)) - - -@xfail_issue_99962 -@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_attr) -@pytest.mark.parametrize("axes", reduce_axis_parameters) -def test_reduce_operation_keepdims_with_axes_as_attr(operation, ref_operation, axes): - assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=True), - ref_operation(reduce_data, keepdims=True, axis=axes)) - - -@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_const) -@pytest.mark.parametrize("axes", reduce_axis_parameters) -def test_reduce_operation_keepdims_with_axes_as_const(operation, ref_operation, axes): - assert np.array_equal(import_and_compute_with_axes_as_const(operation, reduce_data, axes, keepdims=True), - ref_operation(reduce_data, keepdims=True, axis=axes)) - - -@xfail_issue_99962 -@pytest.mark.parametrize("axes", [ - None, - (0,), - (1,), - (2,), - (0, 1), - (0, 2), - (1, 2), - (0, 1, 2)]) -@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_attr) -def test_reduce_operation_no_keepdims_axes_as_attr(operation, ref_operation, axes): - if axes: - assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=False), - ref_operation(reduce_data, keepdims=False, axis=axes)) - else: - assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=False), - ref_operation(reduce_data, keepdims=False)) - - -@pytest.mark.parametrize("axes", [ - None, - (0,), - (1,), - (2,), - (0, 1), - (0, 2), - (1, 2), - (0, 1, 2)]) -@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_const) -def test_reduce_operation_no_keepdims_axes_as_const(operation, ref_operation, axes): - if axes: - assert np.array_equal(import_and_compute_with_axes_as_const(operation, - reduce_data, - axes, - keepdims=False), - ref_operation(reduce_data, keepdims=False, axis=axes)) - else: - assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=False), - ref_operation(reduce_data, keepdims=False)) - - -@xfail_issue_99962 -@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) -def test_reduce_l1(reduction_axes): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(-100, 100, shape).astype(np.float32) - - expected = np.sum(np.abs(input_data), keepdims=True, axis=reduction_axes) - node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], axes=reduction_axes) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.sum(np.abs(input_data), keepdims=False, axis=reduction_axes) - node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -def test_reduce_l1_default_axes(): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(-100, 100, shape).astype(np.float32) - - expected = 
np.sum(np.abs(input_data), keepdims=True) - node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"]) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.array(np.sum(np.abs(input_data), keepdims=False)) - node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -@xfail_issue_99962 -@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) -def test_reduce_l2(reduction_axes): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(-100, 100, shape).astype(np.float32) - - expected = np.sqrt(np.sum(np.square(input_data), keepdims=True, axis=reduction_axes)) - node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], axes=reduction_axes) - raw_result = run_node(node, [input_data]) - ng_result = np.array(raw_result.pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.sqrt(np.sum(np.square(input_data), keepdims=False, axis=reduction_axes)) - node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -def test_reduce_l2_default_axes(): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(-100, 100, shape).astype(np.float32) - - expected = np.sqrt(np.sum(np.square(input_data), keepdims=True)) - node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"]) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.array(np.sqrt(np.sum(np.square(input_data), keepdims=False))) - node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -@xfail_issue_99962 -@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) -def test_reduce_log_sum(reduction_axes): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(0, 1, shape).astype(np.float32) - - expected = np.log(np.sum(input_data, keepdims=True, axis=reduction_axes)) - node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], axes=reduction_axes) - ng_result = run_node(node, [input_data]).pop() - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.log(np.sum(input_data, keepdims=False, axis=reduction_axes)) - node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes) - ng_result = run_node(node, [input_data]).pop() - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -def test_reduce_log_sum_default_axes(): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(0, 1, shape).astype(np.float32) - - expected = np.log(np.sum(input_data, keepdims=True)) - node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"]) - ng_result = 
np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.log(np.sum(input_data, keepdims=False)) - node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -@xfail_issue_99962 -def test_reduce_log_sum_exp(): - def logsumexp(data, axis=None, keepdims=True): - return np.log(np.sum(np.exp(data), axis=axis, keepdims=keepdims)) - - data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) - - assert np.array_equal(import_and_compute("ReduceLogSumExp", data), logsumexp(data, keepdims=True)) - assert np.array_equal( - import_and_compute("ReduceLogSumExp", data, keepdims=0), logsumexp(data, keepdims=False) - ) - - assert np.array_equal( - import_and_compute("ReduceLogSumExp", data, axes=(1,)), logsumexp(data, keepdims=True, axis=(1,)) - ) - assert np.array_equal( - import_and_compute("ReduceLogSumExp", data, axes=(1,), keepdims=0), - logsumexp(data, keepdims=False, axis=(1,)), - ) - - assert np.array_equal( - import_and_compute("ReduceLogSumExp", data, axes=(0, 2)), logsumexp(data, keepdims=True, axis=(0, 2)) - ) - assert np.array_equal( - import_and_compute("ReduceLogSumExp", data, axes=(0, 2), keepdims=0), - logsumexp(data, keepdims=False, axis=(0, 2)), - ) - - assert np.array_equal( - import_and_compute("ReduceLogSumExp", data, axes=(0, 1, 2)), - logsumexp(data, keepdims=True, axis=(0, 1, 2)), - ) - assert np.array_equal( - import_and_compute("ReduceLogSumExp", data, axes=(0, 1, 2), keepdims=0), - logsumexp(data, keepdims=False, axis=(0, 1, 2)), - ) - - -@xfail_issue_99962 -@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) -def test_reduce_sum_square(reduction_axes): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(-100, 100, shape).astype(np.float32) - - expected = np.sum(np.square(input_data), keepdims=True, axis=reduction_axes) - node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], axes=reduction_axes) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.sum(np.square(input_data), keepdims=False, axis=reduction_axes) - node = onnx.helper.make_node( - "ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes - ) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -def test_reduce_sum_square_default_axes(): - shape = [2, 4, 3, 2] - np.random.seed(133391) - input_data = np.random.uniform(-100, 100, shape).astype(np.float32) - - expected = np.sum(np.square(input_data), keepdims=True) - node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"]) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - expected = np.sum(np.square(input_data), keepdims=False) - node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0) - ng_result = np.array(run_node(node, [input_data]).pop()) - assert np.array_equal(expected.shape, ng_result.shape) - assert np.allclose(expected, ng_result) - - -def 
test_reduce_argmin(): - def argmin(ndarray, axis, keepdims=False): - res = np.argmin(ndarray, axis=axis) - if keepdims: - res = np.expand_dims(res, axis=axis) - return res - - data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) - - assert np.array_equal(import_and_compute("ArgMin", data, axis=0), argmin(data, keepdims=True, axis=0)) - assert np.array_equal( - import_and_compute("ArgMin", data, axis=0, keepdims=0), argmin(data, keepdims=False, axis=0) - ) - assert np.array_equal(import_and_compute("ArgMin", data, axis=1), argmin(data, keepdims=True, axis=1)) - assert np.array_equal( - import_and_compute("ArgMin", data, axis=1, keepdims=0), argmin(data, keepdims=False, axis=1) - ) - assert np.array_equal(import_and_compute("ArgMin", data, axis=2), argmin(data, keepdims=True, axis=2)) - assert np.array_equal( - import_and_compute("ArgMin", data, axis=2, keepdims=0), argmin(data, keepdims=False, axis=2) - ) - - -def test_reduce_argmax(): - def argmax(ndarray, axis, keepdims=False): - res = np.argmax(ndarray, axis=axis) - if keepdims: - res = np.expand_dims(res, axis=axis) - return res - - data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) - - assert np.array_equal(import_and_compute("ArgMax", data, axis=0), argmax(data, keepdims=True, axis=0)) - assert np.array_equal( - import_and_compute("ArgMax", data, axis=0, keepdims=0), argmax(data, keepdims=False, axis=0) - ) - assert np.array_equal(import_and_compute("ArgMax", data, axis=1), argmax(data, keepdims=True, axis=1)) - assert np.array_equal( - import_and_compute("ArgMax", data, axis=1, keepdims=0), argmax(data, keepdims=False, axis=1) - ) - assert np.array_equal(import_and_compute("ArgMax", data, axis=2), argmax(data, keepdims=True, axis=2)) - assert np.array_equal( - import_and_compute("ArgMax", data, axis=2, keepdims=0), argmax(data, keepdims=False, axis=2) - ) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_reshape.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_reshape.py deleted file mode 100644 index 6e75b4d2fd4..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_reshape.py +++ /dev/null @@ -1,350 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import pytest -from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info - -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils import ( - all_arrays_equal, - get_node_model, - import_onnx_model, - run_model, - run_node, -) -from tests_compatibility import (xfail_issue_35927, - xfail_issue_44858, - skip_dynamic_model) - - -def test_reshape(): - input_data = np.arange(2560, dtype=np.int32).reshape([16, 4, 4, 10]) - reshape_node = onnx.helper.make_node( - "Reshape", inputs=["x"], outputs=["y"], shape=(256, 10) - ) - expected_output = input_data.reshape([256, 10]) - - ng_results = run_node(reshape_node, [input_data], opset_version=4) - assert np.array_equal(ng_results, [expected_output]) - - -def test_reshape_opset5(): - original_shape = [2, 3, 4] - test_cases = { - "reordered_dims": np.array([4, 2, 3], dtype=np.int64), - "reduced_dims": np.array([3, 8], dtype=np.int64), - "extended_dims": np.array([3, 2, 2, 2], dtype=np.int64), - "one_dim": np.array([24], dtype=np.int64), - "negative_dim": np.array([6, -1, 2], dtype=np.int64), - } - input_data = np.random.random_sample(original_shape).astype(np.float32) - 
- for _, shape in test_cases.items(): - const_node = make_node( - "Constant", - inputs=[], - outputs=["const_shape"], - value=onnx.helper.make_tensor( - name="const_tensor", - data_type=onnx.TensorProto.INT64, - dims=shape.shape, - vals=shape.flatten(), - ), - ) - reshape_node = onnx.helper.make_node( - "Reshape", inputs=["data", "const_shape"], outputs=["reshaped"] - ) - - graph = make_graph( - [const_node, reshape_node], - "test_graph", - [make_tensor_value_info("data", onnx.TensorProto.FLOAT, input_data.shape)], - [make_tensor_value_info("reshaped", onnx.TensorProto.FLOAT, ())], - ) - - model = make_model(graph, producer_name="ngraph ONNX Importer") - model.opset_import[0].version = 5 - ng_model_function = import_onnx_model(model) - runtime = get_runtime() - computation = runtime.computation(ng_model_function) - ng_results = computation(input_data) - expected_output = np.reshape(input_data, shape) - assert np.array_equal(ng_results[0], expected_output) - - -@pytest.mark.xfail(reason="RuntimeError: Reshape z has dynamic second input!") -def test_reshape_opset5_param_err(): - original_shape = [2, 3, 4] - output_shape = np.array([4, 2, 3], dtype=np.int32) - input_data = np.random.random_sample(original_shape).astype(np.float32) - reshape_node = onnx.helper.make_node("Reshape", inputs=["x", "y"], outputs=["z"]) - ng_result = run_node(reshape_node, [input_data, output_shape], opset_version=5) - assert ng_result[0].shape == output_shape - - -@pytest.mark.parametrize( - "axis,expected_output", - [ - (0, np.arange(120).reshape(1, 120)), - (1, np.arange(120).reshape(2, 60)), - (2, np.arange(120).reshape(6, 20)), - (3, np.arange(120).reshape(24, 5)), - (4, np.arange(120).reshape(120, 1)), - ], -) -def test_flatten(axis, expected_output): - data = np.arange(120, dtype=np.int32).reshape([2, 3, 4, 5]) - node = onnx.helper.make_node("Flatten", inputs=["x"], outputs=["y"], axis=axis) - ng_results = run_node(node, [data]) - assert np.array_equal(ng_results, [expected_output]) - - -def test_flatten_exception(): - data = np.arange(120).reshape([2, 3, 4, 5]) - node = onnx.helper.make_node("Flatten", inputs=["x"], outputs=["y"], axis=5) - - with pytest.raises(RuntimeError): - run_node(node, [data]) - - -def test_transpose(): - data = np.arange(120, dtype=np.int32).reshape([2, 3, 4, 5]) - - node = onnx.helper.make_node("Transpose", inputs=["x"], outputs=["y"]) - expected_output = data.T - ng_results = run_node(node, [data]) - assert np.array_equal(ng_results, [expected_output]) - - node = onnx.helper.make_node( - "Transpose", inputs=["x"], outputs=["y"], perm=(3, 1, 0, 2) - ) - expected_output = np.transpose(data, axes=(3, 1, 0, 2)) - ng_results = run_node(node, [data]) - assert np.array_equal(ng_results, [expected_output]) - - -@xfail_issue_35927 -def test_slice_opset1(): - data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) - - expected_output = np.array([[5, 6, 7]]) - model = get_node_model("Slice", data, axes=[0, 1], starts=[1, 0], ends=[2, 3]) - ng_results = run_model(model, [data]) - assert np.array_equal(ng_results, [expected_output]) - - expected_output = np.array([[2, 3, 4]]) - model = get_node_model("Slice", data, starts=[0, 1], ends=[-1, 1000]) - ng_results = run_model(model, [data]) - assert np.array_equal(ng_results, [expected_output]) - - data = np.random.randn(20, 10, 5).astype(np.float32) - expected_output = data[0:3, 0:10] - model = get_node_model("Slice", data, axes=[0, 1], starts=[0, 0], ends=[3, 10]) - ng_results = run_model(model, [data]) - assert np.array_equal(ng_results, [expected_output]) 
- - # default axes - data = np.random.randn(20, 10, 5).astype(np.float32) - expected_output = data[:, :, 3:4] - model = get_node_model("Slice", data, starts=[0, 0, 3], ends=[20, 10, 4]) - ng_results = run_model(model, [data]) - assert np.array_equal(ng_results, [expected_output]) - - # end out of bounds - data = np.random.randn(20, 10, 5).astype(np.float32) - expected_output = data[:, 1:1000] - model = get_node_model("Slice", data, axes=[1], starts=[1], ends=[1000]) - ng_results = run_model(model, [data]) - assert np.array_equal(ng_results, [expected_output]) - - # negative value - data = np.random.randn(20, 10, 5).astype(np.float32) - expected_output = data[:, 0:-1] - model = get_node_model("Slice", data, axes=[1], starts=[0], ends=[-1]) - ng_results = run_model(model, [data]) - assert np.array_equal(ng_results, [expected_output]) - - # start ouf of bounds - data = np.random.randn(20, 10, 5).astype(np.float32) - expected_output = data[:, 1000:1000] - model = get_node_model("Slice", data, axes=[1], starts=[1000], ends=[1000]) - ng_results = run_model(model, [data]) - assert np.array_equal(ng_results, [expected_output]) - - -def test_concat(): - a = np.array([[1, 2], [3, 4]], dtype=np.int32) - b = np.array([[5, 6]], dtype=np.int32) - - node = onnx.helper.make_node("Concat", inputs=["x"], outputs=["z"], axis=0) - ng_results = run_node(node, [a]) - assert np.array_equal(ng_results, [a]) - - expected_output = np.concatenate((a, b), axis=0) - node = onnx.helper.make_node("Concat", inputs=["x", "y"], outputs=["z"], axis=0) - ng_results = run_node(node, [a, b]) - assert np.array_equal(ng_results, [expected_output]) - - a = np.array([[1, 2], [3, 4]], dtype=np.int32) - b = np.array([[5, 6]], dtype=np.int32).T - expected_output = np.concatenate((a, b), axis=1) - node = onnx.helper.make_node("Concat", inputs=["x", "y"], outputs=["z"], axis=1) - ng_results = run_node(node, [a, b]) - assert np.array_equal(ng_results, [expected_output]) - - test_cases = { - "1d": ([1, 2], [3, 4]), - "2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]), - "3d": ( - [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], - [[[9, 10], [11, 12]], [[13, 14], [15, 16]]], - ), - } - - for _, values in test_cases.items(): - values = [np.asarray(v) for v in values] - for i in range(len(values[0].shape)): - in_args = ["value" + str(k) for k in range(len(values))] - node = onnx.helper.make_node( - "Concat", - inputs=list(in_args), - outputs=["output"], - axis=i, - ) - expected_output = np.concatenate(values, i) - ng_results = run_node(node, np.array(values, dtype=np.int32)) - assert np.array_equal(ng_results, [expected_output]) - - -@skip_dynamic_model -def test_squeeze(): - data = np.arange(6, dtype=np.int32).reshape([1, 2, 3, 1]) - expected_output = data.reshape([2, 3]) - - axes = np.array([0, 3]).astype(np.int64) - node = onnx.helper.make_node("Squeeze", inputs=["x", "axes"], outputs=["y"]) - ng_results = run_node(node, [data, axes]) - assert np.array_equal(ng_results, [expected_output]) - - data = np.random.randn(1, 3, 4, 5).astype(np.float32) - expected_output = np.squeeze(data, axis=0) - axes = np.array([0]).astype(np.int64) - node = onnx.helper.make_node("Squeeze", inputs=["x", "axes"], outputs=["y"]) - ng_results = run_node(node, [data, axes]) - assert np.array_equal(ng_results, [expected_output]) - - -@xfail_issue_44858 -def test_unsqueeze(): - data = np.random.randn(3, 4, 5).astype(np.float32) - expected_output = np.expand_dims(data, axis=0) - axes = np.array([0]).astype(np.int64) - node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], 
outputs=["y"]) - ng_results = run_node(node, [data, axes]) - assert np.array_equal(ng_results, [expected_output]) - - expected_output = np.reshape(data, [1, 3, 4, 5, 1]) - axes = np.array([0, 4]).astype(np.int64) - node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"]) - ng_results = run_node(node, [data, axes]) - assert np.array_equal(ng_results, [expected_output]) - - expected_output = np.reshape(data, [1, 3, 1, 4, 5]) - axes = np.array([0, 2]).astype(np.int64) - node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"]) - ng_results = run_node(node, [data, axes]) - assert np.array_equal(ng_results, [expected_output]) - - -@pytest.mark.parametrize( - "node, expected_output", - [ - # Split into 2 equal parts along axis=0 - ( - onnx.helper.make_node("Split", inputs=["x"], outputs=["y", "z"], axis=0), - [ - np.array([[0, 1, 2, 3]], dtype=np.int32), - np.array([[4, 5, 6, 7]], dtype=np.int32), - ], - ), - # Default, split along axis=0 into 2 equal parts - ( - onnx.helper.make_node("Split", inputs=["x"], outputs=["y", "z"]), - [ - np.array([[0, 1, 2, 3]], dtype=np.int32), - np.array([[4, 5, 6, 7]], dtype=np.int32), - ], - ), - # Split into 2 equal parts along axis=1 - ( - onnx.helper.make_node("Split", inputs=["x"], outputs=["a", "b"], axis=1), - [ - np.array([[0, 1], [4, 5]], dtype=np.int32), - np.array([[2, 3], [6, 7]], dtype=np.int32), - ], - ), - # Split into 4 equal parts along axis=1 - ( - onnx.helper.make_node( - "Split", inputs=["x"], outputs=["a", "b", "c", "d"], axis=1 - ), - [ - np.array([[0], [4]], dtype=np.int32), - np.array([[1], [5]], dtype=np.int32), - np.array([[2], [6]], dtype=np.int32), - np.array([[3], [7]], dtype=np.int32), - ], - ), - ], -) -def test_split_2d(node, expected_output): - data = np.arange(8, dtype=np.int32).reshape(2, 4) - ng_results = run_node(node, [data]) - assert all_arrays_equal(ng_results, expected_output) - - -def test_depth_to_space(): - b, c, h, w = shape = (2, 8, 3, 3) - blocksize = 2 - data = np.random.random_sample(shape).astype(np.float32) - tmp = np.reshape(data, [b, blocksize, blocksize, c // (blocksize ** 2), h, w]) - tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) - expected_output = np.reshape( - tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize] - ) - - node = onnx.helper.make_node( - "DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blocksize - ) - ng_results = run_node(node, [data]) - assert np.array_equal(ng_results, [expected_output]) - - # (1, 4, 2, 3) input tensor - data = np.array( - [ - [ - [[0, 1, 2], [3, 4, 5]], - [[6, 7, 8], [9, 10, 11]], - [[12, 13, 14], [15, 16, 17]], - [[18, 19, 20], [21, 22, 23]], - ] - ] - ).astype(np.float32) - # (1, 1, 4, 6) output tensor - expected_output = np.array( - [ - [ - [ - [0, 6, 1, 7, 2, 8], - [12, 18, 13, 19, 14, 20], - [3, 9, 4, 10, 5, 11], - [15, 21, 16, 22, 17, 23], - ] - ] - ] - ).astype(np.float32) - - ng_results = run_node(node, [data]) - assert np.array_equal(ng_results, [expected_output]) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py deleted file mode 100644 index ad7b8e8ffba..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py +++ /dev/null @@ -1,489 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import platform - -import numpy as np -import onnx -import onnx.mapping -import pytest -from onnx.helper import make_graph, make_model, make_node, 
make_tensor_value_info - -from ngraph.exceptions import NgraphTypeError -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([-4, 0, 5, -10], dtype=np.float32), - np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]], dtype=np.float32), - np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]], dtype=np.float32), - ], -) -def test_abs(input_data): - expected_output = np.abs(input_data) - node = onnx.helper.make_node("Abs", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.array_equal(ng_results, [expected_output]) - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([4, 0, 5, 10]), - np.array([[4, 0, 5, 10], [4, 0, 5, 10]]), - np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]]), - ], -) -def test_sqrt(input_data): - input_data = input_data.astype(np.float32) - expected_output = np.sqrt(input_data) - node = onnx.helper.make_node("Sqrt", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.allclose(ng_results, [expected_output]) - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([4, 0, 5, 10]), - np.array([[4, 0, 5, 10], [4, 0, 5, 10]]), - np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]]), - ], -) -def test_exp(input_data): - input_data = input_data.astype(np.float32) - expected_output = np.exp(input_data) - node = onnx.helper.make_node("Exp", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.allclose(ng_results, [expected_output]) - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([4, 2, 5, 10]), - np.array([[4, 1, 5, 10], [4, 2, 5, 10]]), - np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]]), - ], -) -def test_log(input_data): - input_data = input_data.astype(np.float32) - expected_output = np.log(input_data) - node = onnx.helper.make_node("Log", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.allclose(ng_results, [expected_output]) - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([-4, 0, 5, -10], dtype=np.float32), - np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]], dtype=np.float32), - np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]], dtype=np.float32), - ], -) -def test_neg(input_data): - expected_output = np.negative(input_data) - node = onnx.helper.make_node("Neg", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.array_equal(ng_results, [expected_output]) - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([-4.2, 0.43, 5.99, -10.01]), - np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), - np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]) / 6, - ], -) -def test_floor(input_data): - input_data = input_data.astype(np.float32) - expected_output = np.floor(input_data) - node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.array_equal(ng_results, [expected_output]) - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([-4.2, 0, 5.99, -10.01]), - np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), - np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]) / 6, - ], -) -def test_ceil(input_data): - input_data = input_data.astype(np.float32) - expected_output = np.ceil(input_data) - node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.array_equal(ng_results, [expected_output]) - - 
-@pytest.mark.parametrize( - "min_value, max_value", - [(np.finfo(np.float32).min, np.finfo(np.float32).max), (-0.5, 0.5), (0.0, np.finfo(np.float32).max)], -) -def test_clip(min_value, max_value): - np.random.seed(133391) - input_data = np.float32(-100.0) + np.random.randn(3, 4, 5).astype(np.float32) * np.float32(200.0) - model = get_node_model("Clip", input_data, opset=10, min=float(min_value), max=float(max_value)) - result = run_model(model, [input_data]) - expected = np.clip(input_data, min_value, max_value) - assert np.allclose(result, [expected]) - - -def test_clip_default(): - np.random.seed(133391) - input_data = -100.0 + np.random.randn(3, 4, 5).astype(np.float32) * 200.0 - - model = get_node_model("Clip", input_data, opset=10, min=0.0) - result = run_model(model, [input_data]) - expected = np.clip(input_data, np.float32(0.0), np.finfo(np.float32).max) - assert np.allclose(result, [expected]) - - model = get_node_model("Clip", input_data, opset=10, max=0.0) - result = run_model(model, [input_data]) - expected = np.clip(input_data, np.finfo(np.float32).min, np.float32(0.0)) - assert np.allclose(result, [expected]) - - -@pytest.mark.parametrize( - "input_data", - [ - np.array([-4.2, 1, 5.99, -10.01]), - np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), - np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]) / 6, - ], -) -def test_reciprocal(input_data): - input_data = input_data.astype(np.float32) - expected_output = np.reciprocal(input_data) - node = onnx.helper.make_node("Reciprocal", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [input_data]) - assert np.allclose(ng_results, [expected_output]) - - -@pytest.mark.parametrize("axis, dim1, dim2", [(0, 1, 60), (1, 3, 20), (2, 12, 5)]) -def test_hardmax(axis, dim1, dim2): - def hardmax_2d(data): - return np.eye(data.shape[1], dtype=data.dtype)[np.argmax(data, axis=1)] - - np.random.seed(133391) - data = np.random.rand(3, 4, 5).astype(np.float32) - expected = hardmax_2d(data.reshape(dim1, dim2)).reshape(3, 4, 5) - node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=axis) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - -def test_hardmax_special_cases(): - def hardmax_2d(data): - return np.eye(data.shape[1], dtype=data.dtype)[np.argmax(data, axis=1)] - - np.random.seed(133391) - data = np.random.rand(3, 4, 5).astype(np.float32) - - # default axis=1 - expected = hardmax_2d(data.reshape(3, 20)).reshape(3, 4, 5) - node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - expected = hardmax_2d(data.reshape(12, 5)).reshape(3, 4, 5) - node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=-1) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - with pytest.raises(RuntimeError): - node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=3) - ng_results = run_node(node, [data], opset_version=12) - - # For multiple occurrences of the maximal values, the first occurrence is selected - # for one-hot output - data = np.array([[3, 3, 3, 1]]).astype(np.float32) - expected = np.array([[1, 0, 0, 0]]).astype(np.float32) - node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - -@pytest.mark.xfail(condition=platform.system() == 'Darwin' 
and platform.machine() == 'arm64', - reason='Ticket - 122712') -def test_hardsigmoid(): - def hardsigmoid(data, alpha=0.2, beta=0.5): - return np.clip(alpha * data + beta, 0, 1) - - np.random.seed(133391) - alpha = np.random.rand() - beta = np.random.rand() - data = np.random.rand(3, 4, 5).astype(np.float32) - - expected = hardsigmoid(data, alpha, beta) - node = onnx.helper.make_node("HardSigmoid", inputs=["x"], outputs=["y"], alpha=alpha, beta=beta) - ng_results = run_node(node, [data]) - assert np.allclose(ng_results, [expected]) - - expected = hardsigmoid(data) - node = onnx.helper.make_node("HardSigmoid", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [data]) - assert np.allclose(ng_results, [expected]) - - -def test_logsoftmax(): - def logsoftmax_2d(x): - max_x = np.max(x, axis=1).reshape((-1, 1)) - exp_x = np.exp(x - max_x) - return x - max_x - np.log(np.sum(exp_x, axis=1).reshape((-1, 1))) - - np.random.seed(133391) - data = np.random.randn(3, 4, 5).astype(np.float32) - - node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=0) - expected = logsoftmax_2d(data.reshape(1, 60)).reshape(3, 4, 5) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=1) - expected = logsoftmax_2d(data.reshape(3, 20)).reshape(3, 4, 5) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - # default axis is 1 - node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"]) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=2) - expected = logsoftmax_2d(data.reshape(12, 5)).reshape(3, 4, 5) - ng_results = run_node(node, [data], opset_version=12) - assert np.allclose(ng_results, [expected]) - - with pytest.raises(RuntimeError): - node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=3) - ng_results = run_node(node, [data], opset_version=12) - - -def test_softplus(): - def softplus(x): - return np.where(x < 20, np.log(np.exp(x) + 1), x) - - np.random.seed(133391) - data = np.random.randn(3, 4, 5).astype(np.float32) - - node = onnx.helper.make_node("Softplus", inputs=["x"], outputs=["y"]) - expected = softplus(data) - ng_results = run_node(node, [data]) - assert np.allclose(ng_results, [expected]) - - -def test_softsign(): - def softsign(x): - return x / (1 + np.abs(x)) - - np.random.seed(133391) - data = np.random.randn(3, 4, 5).astype(np.float32) - - node = onnx.helper.make_node("Softsign", inputs=["x"], outputs=["y"]) - expected = softsign(data) - ng_results = run_node(node, [data]) - assert np.allclose(ng_results, [expected]) - - -def test_identity(): - np.random.seed(133391) - shape = [2, 4] - input_data = np.random.randn(*shape).astype(np.float32) - - identity_node = make_node("Identity", inputs=["x"], outputs=["y"]) - ng_results = run_node(identity_node, [input_data]) - assert np.array_equal(ng_results, [input_data]) - - node1 = make_node("Add", inputs=["A", "B"], outputs=["add1"], name="add_node1") - node2 = make_node("Identity", inputs=["add1"], outputs=["identity1"], name="identity_node1") - node3 = make_node("Abs", inputs=["identity1"], outputs=["Y"], name="abs_node1") - - graph = make_graph( - [node1, node2, node3], - "test_graph", - [ - make_tensor_value_info("A", onnx.TensorProto.FLOAT, shape), - make_tensor_value_info("B", 
onnx.TensorProto.FLOAT, shape), - ], - [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, shape)], - ) - model = make_model(graph, producer_name="ngraph ONNX Importer") - ng_model_function = import_onnx_model(model) - runtime = get_runtime() - computation = runtime.computation(ng_model_function) - ng_results = computation(input_data, input_data) - expected_result = np.abs(input_data + input_data) - - assert np.array_equal(ng_results[0], expected_result) - - -@pytest.mark.parametrize("val_type, input_data", [(np.dtype(bool), np.zeros((2, 2), dtype=int))]) -def test_cast_to_bool(val_type, input_data): - expected = np.array(input_data, dtype=val_type) - - model = get_node_model("Cast", input_data, opset=6, to=onnx.helper.np_dtype_to_tensor_dtype(val_type)) - result = run_model(model, [input_data]) - assert np.allclose(result, expected) - - -@pytest.mark.parametrize( - "val_type, range_start, range_end, in_dtype", - [ - (np.dtype(np.float32), -8, 8, np.dtype(np.int32)), - (np.dtype(np.float64), -16383, 16383, np.dtype(np.int64)), - ], -) -def test_cast_to_float(val_type, range_start, range_end, in_dtype): - np.random.seed(133391) - input_data = np.random.randint(range_start, range_end, size=(2, 2), dtype=in_dtype) - expected = np.array(input_data, dtype=val_type) - - model = get_node_model("Cast", input_data, opset=6, to=onnx.helper.np_dtype_to_tensor_dtype(val_type)) - result = run_model(model, [input_data]) - assert np.allclose(result, expected) - - -@pytest.mark.parametrize( - "val_type", [np.dtype(np.int8), - np.dtype(np.int16), - np.dtype(np.int32), - np.dtype(np.int64)] -) -def test_cast_to_int(val_type): - np.random.seed(133391) - input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16) - expected = np.array(input_data, dtype=val_type) - - model = get_node_model("Cast", input_data, opset=6, to=onnx.helper.np_dtype_to_tensor_dtype(val_type)) - result = run_model(model, [input_data]) - assert np.allclose(result, expected) - - -@pytest.mark.parametrize( - "val_type", [np.dtype(np.uint8), np.dtype(np.uint16), np.dtype(np.uint32), np.dtype(np.uint64)] -) -def test_cast_to_uint(val_type): - np.random.seed(133391) - input_data = np.ceil(np.random.rand(2, 3, 4) * 16) - expected = np.array(input_data, dtype=val_type) - - model = get_node_model("Cast", input_data, opset=6, to=onnx.helper.np_dtype_to_tensor_dtype(val_type)) - result = run_model(model, [input_data]) - assert np.allclose(result, expected) - - -def test_cast_errors(): - from onnx.onnx_cpp2py_export.checker import ValidationError - - np.random.seed(133391) - input_data = np.ceil(np.random.rand(2, 3, 4) * 16) - - # missing 'to' attribute - node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"]) - input_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) - for name, value in zip(node.input, [input_data]) - ] - output_tensors = [ - make_tensor_value_info(node.output[0], onnx.TensorProto.FLOAT16, input_data.shape) - ] # type: ignore - - graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") - with pytest.raises(ValidationError): - import_onnx_model(model) - - # unsupported data type representation - node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"], to=1.2345) - input_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) - for name, value in zip(node.input, [input_data]) - ] - output_tensors = [ - make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape) - ] # 
type: ignore - - graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") - with pytest.raises(ValidationError): - import_onnx_model(model) - - # unsupported input tensor data type: - node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"], to=onnx.TensorProto.INT32) - input_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.COMPLEX64, value.shape) - for name, value in zip(node.input, [input_data]) - ] - output_tensors = [ - make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape) - ] # type: ignore - - graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") - with pytest.raises((RuntimeError, NgraphTypeError)): - import_onnx_model(model) - - # unsupported output tensor data type: - node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"], to=onnx.TensorProto.COMPLEX128) - input_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) - for name, value in zip(node.input, [input_data]) - ] - output_tensors = [ - make_tensor_value_info(node.output[0], onnx.TensorProto.COMPLEX128, input_data.shape) - ] # type: ignore - - graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") - with pytest.raises(RuntimeError): - import_onnx_model(model) - - -@pytest.mark.parametrize("value_type", - [pytest.param(np.float64), - pytest.param(np.float32)]) -@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', - reason='Ticket - 122712') -def test_constant(value_type): - values = np.random.randn(5, 5).astype(value_type) - node = onnx.helper.make_node( - "Constant", - inputs=[], - outputs=["values"], - value=onnx.helper.make_tensor( - name="const_tensor", - data_type=onnx.helper.np_dtype_to_tensor_dtype(np.dtype(value_type)), - dims=values.shape, - vals=values.flatten(), - ), - ) - - ng_results = run_node(node, []) - assert np.allclose(ng_results, [values]) - - -def test_constant_err(): - values = np.random.randn(5, 5).astype(np.float16) - node = onnx.helper.make_node( - "Constant", - inputs=[], - outputs=["values"], - value=onnx.helper.make_tensor( - name="const_tensor", - data_type=onnx.helper.np_dtype_to_tensor_dtype(np.dtype(np.float16)), - dims=values.shape, - vals=values.flatten(), - ), - ) - - ng_results = run_node(node, []) - assert np.allclose(ng_results, [values]) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_variadic.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_variadic.py deleted file mode 100644 index b86a8fbf768..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_variadic.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from functools import reduce - -import numpy as np -import onnx -import pytest - -from tests_compatibility.test_onnx.utils import run_node - - -@pytest.mark.parametrize( - "onnx_op,numpy_func", [("Sum", np.add), ("Min", np.minimum), ("Max", np.maximum)] -) -def test_variadic(onnx_op, numpy_func): - data = [ - np.array([1, 2, 3], dtype=np.int32), - np.array([4, 5, 6], dtype=np.int32), - np.array([7, 8, 9], dtype=np.int32), - ] - node = onnx.helper.make_node( - onnx_op, inputs=["data_0", "data_1", "data_2"], outputs=["y"] - ) - expected_output = reduce(numpy_func, data) - - ng_results = run_node(node, 
data) - assert np.array_equal(ng_results, [expected_output]) - - -def test_mean(): - data = [ - np.array([1, 2, 3], dtype=np.int32), - np.array([4, 5, 6], dtype=np.int32), - np.array([7, 8, 9], dtype=np.int32), - ] - node = onnx.helper.make_node( - "Mean", inputs=["data_0", "data_1", "data_2"], outputs=["y"] - ) - expected_output = reduce(np.add, data) / len(data) - - ng_results = run_node(node, data) - assert np.array_equal(ng_results, [expected_output]) diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py b/src/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py deleted file mode 100644 index 49dff95a054..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tests_compatibility -from operator import itemgetter -from pathlib import Path -from typing import Sequence, Any -import numpy as np - -from tests_compatibility.test_onnx.utils import OpenVinoOnnxBackend -from tests_compatibility.test_onnx.utils.model_importer import ModelImportRunner - -from tests_compatibility import ( - xfail_issue_38701, - xfail_issue_37957, - xfail_issue_38084, - xfail_issue_39669, - xfail_issue_38726, - xfail_issue_37973, - xfail_issue_47430, - xfail_issue_47495, - xfail_issue_48145, - xfail_issue_48190, - xfail_issue_78843, - xfail_issue_onnx_models_140) - -MODELS_ROOT_DIR = tests_compatibility.MODEL_ZOO_DIR - -def yolov3_post_processing(outputs : Sequence[Any]) -> Sequence[Any]: - concat_out_index = 2 - # remove all elements with value -1 from yolonms_layer_1/concat_2:0 output - concat_out = outputs[concat_out_index][outputs[concat_out_index] != -1] - concat_out = np.expand_dims(concat_out, axis=0) - outputs[concat_out_index] = concat_out - return outputs - -def tinyyolov3_post_processing(outputs : Sequence[Any]) -> Sequence[Any]: - concat_out_index = 2 - # remove all elements with value -1 from yolonms_layer_1:1 output - concat_out = outputs[concat_out_index][outputs[concat_out_index] != -1] - concat_out = concat_out.reshape((outputs[concat_out_index].shape[0], -1, 3)) - outputs[concat_out_index] = concat_out - return outputs - -post_processing = { - "yolov3" : {"post_processing" : yolov3_post_processing}, - "tinyyolov3" : {"post_processing" : tinyyolov3_post_processing}, - "tiny-yolov3-11": {"post_processing": tinyyolov3_post_processing}, -} - -tolerance_map = { - "arcface_lresnet100e_opset8": {"atol": 0.001, "rtol": 0.001}, - "fp16_inception_v1": {"atol": 0.001, "rtol": 0.001}, - "mobilenet_opset7": {"atol": 0.001, "rtol": 0.001}, - "resnet50_v2_opset7": {"atol": 0.001, "rtol": 0.001}, - "test_mobilenetv2-1.0": {"atol": 0.001, "rtol": 0.001}, - "test_resnet101v2": {"atol": 0.001, "rtol": 0.001}, - "test_resnet18v2": {"atol": 0.001, "rtol": 0.001}, - "test_resnet34v2": {"atol": 0.001, "rtol": 0.001}, - "test_resnet50v2": {"atol": 0.001, "rtol": 0.001}, - "mosaic": {"atol": 0.001, "rtol": 0.001}, - "pointilism": {"atol": 0.001, "rtol": 0.001}, - "rain_princess": {"atol": 0.001, "rtol": 0.001}, - "udnie": {"atol": 0.001, "rtol": 0.001}, - "candy": {"atol": 0.003, "rtol": 0.003}, - "densenet-3": {"atol": 1e-7, "rtol": 0.0011}, - "arcfaceresnet100-8": {"atol": 0.001, "rtol": 0.001}, - "mobilenetv2-7": {"atol": 0.001, "rtol": 0.001}, - "resnet101-v1-7": {"atol": 0.001, "rtol": 0.001}, - "resnet101-v2-7": {"atol": 0.001, "rtol": 0.001}, - "resnet152-v1-7": {"atol": 1e-7, "rtol": 0.003}, - "resnet152-v2-7": {"atol": 
0.001, "rtol": 0.001}, - "resnet18-v1-7": {"atol": 0.001, "rtol": 0.001}, - "resnet18-v2-7": {"atol": 0.001, "rtol": 0.001}, - "resnet34-v2-7": {"atol": 0.001, "rtol": 0.001}, - "vgg16-7": {"atol": 0.001, "rtol": 0.001}, - "vgg19-bn-7": {"atol": 0.001, "rtol": 0.001}, - "tinyyolov2-7": {"atol": 0.001, "rtol": 0.001}, - "tinyyolov2-8": {"atol": 0.001, "rtol": 0.001}, - "candy-8": {"atol": 0.001, "rtol": 0.001}, - "candy-9": {"atol": 0.007, "rtol": 0.001}, - "mosaic-8": {"atol": 0.003, "rtol": 0.001}, - "mosaic-9": {"atol": 0.001, "rtol": 0.001}, - "pointilism-8": {"atol": 0.001, "rtol": 0.001}, - "pointilism-9": {"atol": 0.001, "rtol": 0.001}, - "rain-princess-8": {"atol": 0.001, "rtol": 0.001}, - "rain-princess-9": {"atol": 0.001, "rtol": 0.001}, - "udnie-8": {"atol": 0.001, "rtol": 0.001}, - "udnie-9": {"atol": 0.001, "rtol": 0.001}, - "mxnet_arcface": {"atol": 1.5e-5, "rtol": 0.001}, - "resnet100": {"atol": 1.5e-5, "rtol": 0.001}, - "densenet121": {"atol": 1e-7, "rtol": 0.0011}, - "resnet152v1": {"atol": 1e-7, "rtol": 0.003}, - "test_shufflenetv2": {"atol": 1e-05, "rtol": 0.001}, - "tiny_yolov2": {"atol": 1e-05, "rtol": 0.001}, - "mobilenetv2-1": {"atol": 1e-04, "rtol": 0.001}, - "resnet101v1": {"atol": 1e-04, "rtol": 0.001}, - "resnet101v2": {"atol": 1e-06, "rtol": 0.001}, - "resnet152v2": {"atol": 1e-05, "rtol": 0.001}, - "resnet18v2": {"atol": 1e-05, "rtol": 0.001}, - "resnet34v2": {"atol": 1e-05, "rtol": 0.001}, - "vgg16": {"atol": 1e-05, "rtol": 0.001}, - "vgg19-bn": {"atol": 1e-05, "rtol": 0.001}, - "test_tiny_yolov2": {"atol": 1e-05, "rtol": 0.001}, - "test_resnet152v2": {"atol": 1e-04, "rtol": 0.001}, - "test_mobilenetv2-1": {"atol": 1e-04, "rtol": 0.001}, - "yolov3": {"atol": 0.001, "rtol": 0.001}, - "yolov4": {"atol": 1e-04, "rtol": 0.001}, - "tinyyolov3": {"atol": 1e-04, "rtol": 0.001}, - "tiny-yolov3-11": {"atol": 1e-04, "rtol": 0.001}, - "GPT2": {"atol": 5e-06, "rtol": 0.01}, - "GPT-2-LM-HEAD": {"atol": 4e-06}, - "test_retinanet_resnet101": {"atol": 1.3e-06}, -} - -zoo_models = [] -# rglob doesn't work for symlinks, so models have to be physically somwhere inside "MODELS_ROOT_DIR" -for path in Path(MODELS_ROOT_DIR).rglob("*.onnx"): - mdir = path.parent - file_name = path.name - if path.is_file() and not file_name.startswith("."): - model = {"model_name": path, "model_file": file_name, "dir": mdir} - basedir = mdir.stem - if basedir in tolerance_map: - # updated model looks now: - # {"model_name": path, "model_file": file, "dir": mdir, "atol": ..., "rtol": ...} - model.update(tolerance_map[basedir]) - if basedir in post_processing: - model.update(post_processing[basedir]) - zoo_models.append(model) - -if len(zoo_models) > 0: - zoo_models = sorted(zoo_models, key=itemgetter("model_name")) - - # Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones. 
- OpenVinoOnnxBackend.backend_name = tests_compatibility.BACKEND_NAME - - # import all test cases at global scope to make them visible to pytest - backend_test = ModelImportRunner(OpenVinoOnnxBackend, zoo_models, __name__, MODELS_ROOT_DIR) - test_cases = backend_test.test_cases["OnnxBackendModelImportTest"] - # flake8: noqa: E501 - if tests_compatibility.MODEL_ZOO_XFAIL: - import_xfail_list = [ - # ONNX Model Zoo - (xfail_issue_38701, "test_onnx_model_zoo_text_machine_comprehension_bidirectional_attention_flow_model_bidaf_9_bidaf_bidaf_cpu"), - (xfail_issue_38726, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_decoder_with_lm_head_12_t5_decoder_with_lm_head_cpu"), - - # Model MSFT - (xfail_issue_37957, "test_MSFT_opset10_mask_rcnn_keras_mask_rcnn_keras_cpu"), - ] - for test_case in import_xfail_list: - xfail, test_name = test_case - xfail(getattr(test_cases, test_name)) - - del test_cases - - test_cases = backend_test.test_cases["OnnxBackendModelExecutionTest"] - if tests_compatibility.MODEL_ZOO_XFAIL: - execution_xfail_list = [ - # ONNX Model Zoo - (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_encoder_12_t5_encoder_cpu"), - (xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_mask_rcnn_model_MaskRCNN_10_mask_rcnn_R_50_FPN_1x_cpu"), - (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet50_11_fcn_resnet50_11_model_cpu"), - (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet101_11_fcn_resnet101_11_model_cpu"), - (xfail_issue_48145, "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_8_download_sample_8_bertsquad8_cpu"), - (xfail_issue_48190, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"), - (xfail_issue_onnx_models_140, "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_ResNet101_DUC_HDC_cpu"), - (xfail_issue_78843, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"), - - # Model MSFT - (xfail_issue_37973, "test_MSFT_opset7_tf_inception_v2_model_cpu"), - (xfail_issue_37973, "test_MSFT_opset8_tf_inception_v2_model_cpu"), - (xfail_issue_37973, "test_MSFT_opset9_tf_inception_v2_model_cpu"), - (xfail_issue_37973, "test_MSFT_opset11_tf_inception_v2_model_cpu"), - (xfail_issue_37973, "test_MSFT_opset10_tf_inception_v2_model_cpu"), - - (xfail_issue_58676, "test_MSFT_opset7_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"), - (xfail_issue_58676, "test_MSFT_opset8_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"), - - (xfail_issue_38084, "test_MSFT_opset10_mask_rcnn_mask_rcnn_R_50_FPN_1x_cpu"), - - (xfail_issue_39669, "test_MSFT_opset9_cgan_cgan_cpu"), - (xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"), - (xfail_issue_78843, "test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"), - ] - for test_case in import_xfail_list + execution_xfail_list: - xfail, test_name = test_case - xfail(getattr(test_cases, test_name)) - - del test_cases - - globals().update(backend_test.enable_report().test_cases) diff --git a/src/bindings/python/tests_compatibility/test_onnx/utils/__init__.py b/src/bindings/python/tests_compatibility/test_onnx/utils/__init__.py deleted file mode 100644 index 500f055a3af..00000000000 --- 
a/src/bindings/python/tests_compatibility/test_onnx/utils/__init__.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from string import ascii_uppercase -from typing import Any, Dict, Iterable, List, Optional, Text - -import numpy as np -import onnx -from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info - -import tests_compatibility -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils.onnx_backend import OpenVinoOnnxBackend -from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model - - -def run_node(onnx_node, data_inputs, **kwargs): - # type: (onnx.NodeProto, List[np.ndarray], Dict[Text, Any]) -> List[np.ndarray] - """ - Convert ONNX node to ngraph node and perform computation on input data. - - :param onnx_node: ONNX NodeProto describing a computation node - :param data_inputs: list of numpy ndarrays with input data - :return: list of numpy ndarrays with computed output - """ - OpenVinoOnnxBackend.backend_name = tests_compatibility.BACKEND_NAME - return OpenVinoOnnxBackend.run_node(onnx_node, data_inputs, **kwargs) - - -def run_model(onnx_model, data_inputs): - # type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray] - """ - Convert ONNX model to an ngraph model and perform computation on input data. - - :param onnx_model: ONNX ModelProto describing an ONNX model - :param data_inputs: list of numpy ndarrays with input data - :return: list of numpy ndarrays with computed output - """ - ng_model_function = import_onnx_model(onnx_model) - runtime = get_runtime() - computation = runtime.computation(ng_model_function) - return computation(*data_inputs) - - -def get_node_model(op_type, *input_data, opset=1, num_outputs=1, **node_attributes): - # type: (str, *Any, Optional[int], Optional[int], **Any) -> onnx.ModelProto - """Generate model with single requested node. - - Input and output Tensor data type is the same. - - :param op_type: The ONNX node operation. - :param input_data: Optional list of input arguments for node. - :param opset: The ONNX operation set version to use. Default to 4. - :param num_outputs: The number of node outputs. - :param node_attributes: Optional dictionary of node attributes. - :return: Generated model with single node for requested ONNX operation. - """ - node_inputs = [np.array(data) for data in input_data] - num_inputs = len(node_inputs) - node_input_names = [ascii_uppercase[idx] for idx in range(num_inputs)] - node_output_names = [ascii_uppercase[num_inputs + idx] for idx in range(num_outputs)] - onnx_node = make_node(op_type, node_input_names, node_output_names, **node_attributes) - - input_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) - for name, value in zip(onnx_node.input, node_inputs) - ] - output_tensors = [ - make_tensor_value_info(name, onnx.TensorProto.FLOAT, ()) for name in onnx_node.output - ] # type: ignore - - graph = make_graph([onnx_node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="Ngraph ONNX Importer") - model.opset_import[0].version = opset - return model - - -def all_arrays_equal(first_list, second_list): - # type: (Iterable[np.ndarray], Iterable[np.ndarray]) -> bool - """ - Check that all numpy ndarrays in `first_list` are equal to all numpy ndarrays in `second_list`. 
- - :param first_list: iterable containing numpy ndarray objects - :param second_list: another iterable containing numpy ndarray objects - :return: True if all ndarrays are equal, otherwise False - """ - return all(map(lambda pair: np.array_equal(*pair), zip(first_list, second_list))) diff --git a/src/bindings/python/tests_compatibility/test_onnx/utils/model_importer.py b/src/bindings/python/tests_compatibility/test_onnx/utils/model_importer.py deleted file mode 100644 index c7434cec990..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/utils/model_importer.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import onnx.backend.test -import unittest -import dataclasses - -from collections import defaultdict, namedtuple -from onnx import numpy_helper, NodeProto, ModelProto -from onnx.backend.base import Backend, BackendRep -from onnx.backend.test.case.test_case import TestCase as OnnxTestCase -from onnx.backend.test.runner import TestItem -from pathlib import Path -from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model -from typing import Any, Dict, List, Optional, Pattern, Set, Text, Type, Union, Callable, Sequence - - -# add post-processing function as part of test data -OnnxTestCase_fields = [field.name for field in dataclasses.fields(OnnxTestCase)] -ExtOnnxTestCase = dataclasses.make_dataclass(cls_name="TestCaseExt", - fields=[*OnnxTestCase_fields, "post_processing"]) - - -class ModelImportRunner(onnx.backend.test.BackendTest): - def __init__( - self, - backend: Type[Backend], - models: List[Dict[str, Path]], - parent_module: Optional[str] = None, - data_root: Optional[Path] = "", - ) -> None: - self.backend = backend - self._parent_module = parent_module - self._include_patterns = set() # type: Set[Pattern[Text]] - self._exclude_patterns = set() # type: Set[Pattern[Text]] - self._test_items = defaultdict(dict) # type: Dict[Text, Dict[Text, TestItem]] - self._xfail_patterns = set() # type: Set[Pattern[Text]] - - for model in models: - test_name = "test{}".format(model["model_name"]) \ - .replace(str(data_root), "") \ - .replace(".onnx", "") \ - .replace("/", "_") \ - .replace("\\", "_") \ - .replace("-", "_") - - test_case = ExtOnnxTestCase( - name=test_name, - url=None, - model_name=model["model_name"], - model_dir=model["dir"], - model=model["model_file"], - data_sets=None, - kind="OnnxBackendRealModelTest", - rtol=model.get("rtol", 0.001), - atol=model.get("atol", 1e-07), - __test__=True, - post_processing=model.get("post_processing", None) - ) - self._add_model_import_test(test_case) - self._add_model_execution_test(test_case) - - @staticmethod - def _load_onnx_model(model_dir: Path, filename: Path) -> ModelProto: - if model_dir is None: - raise unittest.SkipTest("Model directory not provided") - - return onnx.load(model_dir / filename) - - def _add_model_import_test(self, model_test: ExtOnnxTestCase) -> None: - # model is loaded at runtime, note sometimes it could even - # never loaded if the test skipped - model_marker = [None] # type: List[Optional[Union[ModelProto, NodeProto]]] - - def run_import(test_self: Any, device: Text) -> None: - model = ModelImportRunner._load_onnx_model(model_test.model_dir, model_test.model) - model_marker[0] = model - assert import_onnx_model(model) - - self._add_test("ModelImport", model_test.name, run_import, model_marker) - - @classmethod - def _execute_npz_data( - cls, model_dir: str, prepared_model: 
BackendRep, result_rtol: float, result_atol: float, - post_processing: Callable[[Sequence[Any]], Sequence[Any]] = None - ) -> int: - executed_tests = 0 - for test_data_npz in model_dir.glob("test_data_*.npz"): - test_data = np.load(test_data_npz, encoding="bytes") - inputs = list(test_data["inputs"]) - outputs = list(prepared_model.run(inputs)) - ref_outputs = test_data["outputs"] - if post_processing is not None: - outputs = post_processing(outputs) - cls.assert_similar_outputs(ref_outputs, outputs, result_rtol, result_atol) - executed_tests = executed_tests + 1 - return executed_tests - - @classmethod - def _execute_pb_data( - cls, model_dir: str, prepared_model: BackendRep, result_rtol: float, result_atol: float, - post_processing: Callable[[Sequence[Any]], Sequence[Any]] = None - ) -> int: - executed_tests = 0 - for test_data_dir in model_dir.glob("test_data_set*"): - inputs = [] - inputs_num = len(list(test_data_dir.glob("input_*.pb"))) - for i in range(inputs_num): - input_file = Path(test_data_dir) / "input_{}.pb".format(i) - tensor = onnx.TensorProto() - with open(input_file, "rb") as f: - tensor.ParseFromString(f.read()) - inputs.append(numpy_helper.to_array(tensor)) - ref_outputs = [] - ref_outputs_num = len(list(test_data_dir.glob("output_*.pb"))) - for i in range(ref_outputs_num): - output_file = Path(test_data_dir) / "output_{}.pb".format(i) - tensor = onnx.TensorProto() - with open(output_file, "rb") as f: - tensor.ParseFromString(f.read()) - ref_outputs.append(numpy_helper.to_array(tensor)) - if(len(inputs) == 0): - continue - outputs = list(prepared_model.run(inputs)) - if post_processing is not None: - outputs = post_processing(outputs) - cls.assert_similar_outputs(ref_outputs, outputs, result_rtol, result_atol) - executed_tests = executed_tests + 1 - return executed_tests - - def _add_model_execution_test(self, model_test: ExtOnnxTestCase) -> None: - # model is loaded at runtime, note sometimes it could even - # never loaded if the test skipped - model_marker = [None] # type: List[Optional[Union[ModelProto, NodeProto]]] - - def run_execution(test_self: Any, device: Text) -> None: - model = ModelImportRunner._load_onnx_model(model_test.model_dir, model_test.model) - model_marker[0] = model - prepared_model = self.backend.prepare(model, device) - assert prepared_model is not None - executed_tests = ModelImportRunner._execute_npz_data( - model_test.model_dir, prepared_model, model_test.rtol, model_test.atol, - model_test.post_processing - ) - - executed_tests = executed_tests + ModelImportRunner._execute_pb_data( - model_test.model_dir, prepared_model, model_test.rtol, model_test.atol, - model_test.post_processing - ) - assert executed_tests > 0, "This model has no test data" - self._add_test("ModelExecution", model_test.name, run_execution, model_marker) diff --git a/src/bindings/python/tests_compatibility/test_onnx/utils/onnx_backend.py b/src/bindings/python/tests_compatibility/test_onnx/utils/onnx_backend.py deleted file mode 100644 index a3eea6a2ac9..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/utils/onnx_backend.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -ONNX Backend implementation. 
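(Editor's note for reviewers: the `ModelImportRunner` deleted just above validates zoo models against ONNX-style `input_*.pb` / `output_*.pb` reference data. For anyone porting those checks to the maintained `tests/` suite, a hedged sketch of that protobuf-loading step follows; `load_pb_tensors` and the directory paths are illustrative names, not part of this patch.)
```
# Minimal sketch of the protobuf branch of the removed _execute_pb_data helper:
# read all TensorProto files with a given prefix from one test data directory.
from pathlib import Path

import onnx
from onnx import numpy_helper


def load_pb_tensors(test_data_dir, prefix):
    """Return numpy arrays for every <prefix>_<i>.pb file in test_data_dir."""
    tensors = []
    for pb_file in sorted(Path(test_data_dir).glob(f"{prefix}_*.pb")):
        proto = onnx.TensorProto()
        proto.ParseFromString(pb_file.read_bytes())  # binary protobuf payload
        tensors.append(numpy_helper.to_array(proto))
    return tensors


# Example (layout follows the ONNX model zoo convention; paths are made up):
# inputs = load_pb_tensors("model_dir/test_data_set_0", "input")
# references = load_pb_tensors("model_dir/test_data_set_0", "output")
```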
- -See ONNX documentation for details: -https://github.com/onnx/onnx/blob/master/docs/Implementing%20an%20ONNX%20backend.md -""" - -from typing import Any, Dict, List, Optional, Sequence, Text, Tuple - -import numpy -import onnx -from onnx.backend.base import Backend, BackendRep -from onnx.helper import make_graph, make_model, make_tensor_value_info, np_dtype_to_tensor_dtype - -from ngraph.impl import Function -from tests_compatibility.runtime import get_runtime -from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model - - -class OpenVinoOnnxBackendRep(BackendRep): - def __init__(self, ng_model_function, device="CPU"): # type: (List[Function], str) -> None - super().__init__() - self.device = device - self.ng_model_function = ng_model_function - self.runtime = get_runtime() - self.computation = self.runtime.computation(ng_model_function) - - def run(self, inputs, **kwargs): # type: (Any, **Any) -> Tuple[Any, ...] - """Run computation on model.""" - return self.computation(*inputs) - - -class OpenVinoOnnxBackend(Backend): - @classmethod - def is_compatible( - cls, - model, # type: onnx.ModelProto - device="CPU", # type: Text - **kwargs # type: Any - ): # type: (...) -> bool - # Return whether the model is compatible with the backend. - try: - import_onnx_model(model) - return True - except Exception: - return False - - @classmethod - def prepare( - cls, - onnx_model, # type: onnx.ModelProto - device="CPU", # type: Text - **kwargs # type: Any - ): # type: (...) -> OpenVinoOnnxBackendRep - super().prepare(onnx_model, device, **kwargs) - ng_model_function = import_onnx_model(onnx_model) - return OpenVinoOnnxBackendRep(ng_model_function, device) - - @classmethod - def run_model( - cls, - model, # type: onnx.ModelProto - inputs, # type: Any - device="CPU", # type: Text - **kwargs # type: Any - ): # type: (...) -> Tuple[Any, ...] - return cls.prepare(model, device, **kwargs).run(inputs) - - @classmethod - def run_node( - cls, - node, # type: onnx.NodeProto - inputs, # type: Any - device="CPU", # type: Text - outputs_info=None, # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]] - **kwargs # type: Dict[Text, Any] - ): # type: (...) -> Optional[Tuple[Any, ...]] - """Prepare and run a computation on an ONNX node.""" - # default values for input/output tensors - input_tensor_types = [np_dtype_to_tensor_dtype(node_input.dtype) for node_input in inputs] - output_tensor_types = [onnx.TensorProto.FLOAT for _ in range(len(node.output))] - output_tensor_shapes = [()] # type: List[Tuple[int, ...]] - - if outputs_info is not None: - output_tensor_types = [ - np_dtype_to_tensor_dtype(dtype) for (dtype, _) in outputs_info - ] - output_tensor_shapes = [shape for (_, shape) in outputs_info] - - input_tensors = [ - make_tensor_value_info(name, tensor_type, value.shape) - for name, value, tensor_type in zip(node.input, inputs, input_tensor_types) - ] - output_tensors = [ - make_tensor_value_info(name, tensor_type, shape) - for name, shape, tensor_type in zip( - node.output, output_tensor_shapes, output_tensor_types - ) - ] - - graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="OpenVinoOnnxBackend") - if "opset_version" in kwargs: - model.opset_import[0].version = kwargs["opset_version"] - return cls.prepare(model, device).run(inputs) - - @classmethod - def supports_device(cls, device): # type: (Text) -> bool - """Check whether the backend is compiled with particular device support. 
- - In particular it's used in the testing suite. - """ - return device != "CUDA" and device != "NVIDIA" - - -class OpenVinoTestBackend(OpenVinoOnnxBackend): - @classmethod - def is_compatible( - cls, - model, # type: onnx.ModelProto - device="CPU", # type: Text - **kwargs # type: Any - ): # type: (...) -> bool - # Return whether the model is compatible with the backend. - import_onnx_model(model) - return True - - -prepare = OpenVinoOnnxBackend.prepare -run_model = OpenVinoOnnxBackend.run_model -run_node = OpenVinoOnnxBackend.run_node -supports_device = OpenVinoOnnxBackend.supports_device diff --git a/src/bindings/python/tests_compatibility/test_onnx/utils/onnx_helpers.py b/src/bindings/python/tests_compatibility/test_onnx/utils/onnx_helpers.py deleted file mode 100644 index c5e774da297..00000000000 --- a/src/bindings/python/tests_compatibility/test_onnx/utils/onnx_helpers.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import onnx -from openvino.inference_engine import IECore - -import ngraph as ng -from ngraph.impl import Function - - -def import_onnx_model(model: onnx.ModelProto) -> Function: - onnx.checker.check_model(model) - model_byte_string = model.SerializeToString() - - ie = IECore() - ie_network = ie.read_network(model=model_byte_string, weights=b"", init_from_buffer=True) - - ng_function = ng.function_from_cnn(ie_network) - return ng_function diff --git a/src/bindings/python/tests_compatibility/test_utils/test_utils.py b/src/bindings/python/tests_compatibility/test_utils/test_utils.py deleted file mode 100644 index aa03aced982..00000000000 --- a/src/bindings/python/tests_compatibility/test_utils/test_utils.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import openvino -import ngraph as ng -from openvino.inference_engine import IECore, IENetwork -from ngraph.impl import Function, Shape, Type -from ngraph.impl.op import Parameter - -from typing import Tuple, Union, List -import numpy as np - - -def get_test_function(): - element_type = Type.f32 - param = Parameter(element_type, Shape([1, 3, 22, 22])) - relu = ng.relu(param) - func = Function([relu], [param], "test") - assert func is not None - return func - - -def generate_image(shape: Tuple = (1, 3, 32, 32), dtype: Union[str, np.dtype] = "float32") -> np.array: - np.random.seed(42) - return np.random.rand(*shape).astype(dtype) - - -def generate_relu_model(input_shape: List[int]) -> openvino.inference_engine.IENetwork: - param = ng.parameter(input_shape, np.float32, name="parameter") - relu = ng.relu(param, name="relu") - func = Function([relu], [param], "test") - func.get_ordered_ops()[2].friendly_name = "friendly" - caps = Function.to_capsule(func) - cnnNetwork = IENetwork(caps) - return cnnNetwork - - -def generate_relu_compiled_model(input_shape: List[int], device = "CPU") -> openvino.inference_engine.ExecutableNetwork: - core = IECore() - cnnNetwork = generate_relu_model(input_shape) - return core.load_network(cnnNetwork, device, {}) diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/plugins.xml b/src/bindings/python/tests_compatibility/test_utils/utils/plugins.xml deleted file mode 100644 index 1b0c122f05b..00000000000 --- a/src/bindings/python/tests_compatibility/test_utils/utils/plugins.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/plugins_apple.xml 
b/src/bindings/python/tests_compatibility/test_utils/utils/plugins_apple.xml deleted file mode 100644 index 1b0c122f05b..00000000000 --- a/src/bindings/python/tests_compatibility/test_utils/utils/plugins_apple.xml +++ /dev/null @@ -1,11 +0,0 @@ [11-line plugins_apple.xml deleted; XML markup was lost during text extraction]
diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/plugins_win.xml b/src/bindings/python/tests_compatibility/test_utils/utils/plugins_win.xml deleted file mode 100644 index 51f871a3a8c..00000000000 --- a/src/bindings/python/tests_compatibility/test_utils/utils/plugins_win.xml +++ /dev/null @@ -1,11 +0,0 @@ [11-line plugins_win.xml deleted; XML markup was lost during text extraction]
diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/test_model.onnx b/src/bindings/python/tests_compatibility/test_utils/utils/test_model.onnx deleted file mode 100644 index 333053b81fe..00000000000 Binary files a/src/bindings/python/tests_compatibility/test_utils/utils/test_model.onnx and /dev/null differ
diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp16.bin b/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp16.bin deleted file mode 100644 index 8b86761726a..00000000000 Binary files a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp16.bin and /dev/null differ
diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp16.xml b/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp16.xml deleted file mode 100644 index d80918c2ecd..00000000000 --- a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp16.xml +++ /dev/null @@ -1,467 +0,0 @@ [467-line FP16 IR description of the test model deleted; only tensor shapes survived text extraction, XML markup was lost]
diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp32.bin b/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp32.bin deleted file mode 100644 index b5f85e41083..00000000000 Binary files a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp32.bin and /dev/null differ
diff --git a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp32.xml b/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp32.xml deleted file mode 100644 index dcf0cd74b06..00000000000 --- a/src/bindings/python/tests_compatibility/test_utils/utils/test_model_fp32.xml +++ /dev/null @@ -1,467 +0,0 @@ [467-line FP32 IR description of the test model deleted; only tensor shapes survived text extraction, XML markup was lost]
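(Editor's note for reviewers: with the compatibility layer removed, the helpers deleted in this patch, such as `import_onnx_model` built on `IECore.read_network` and the ngraph-based `get_test_function` / `generate_relu_model`, correspond to plain API 2.0 calls in the maintained `tests/` suite. The sketch below shows that replacement flow under stated assumptions: `get_test_model`, `run_onnx_model` and the `test_model.onnx` path are illustrative names, not definitions introduced by this PR.)
```
# Hedged sketch of API 2.0 equivalents for the removed compatibility helpers.
import numpy as np
from openvino.runtime import Core, Model, opset8 as ops


def get_test_model() -> Model:
    # rough counterpart of the removed ngraph-based get_test_function()
    param = ops.parameter([1, 3, 22, 22], np.float32)
    relu = ops.relu(param)
    return Model([relu], [param], "test")


def run_onnx_model(path, input_data, device="CPU"):
    # rough counterpart of import_onnx_model() + runtime.computation()
    core = Core()
    model = core.read_model(path)  # the ONNX frontend reads the file directly
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()
    return request.infer({0: input_data})


# Example (file name is illustrative):
# results = run_onnx_model("test_model.onnx",
#                          np.random.rand(1, 3, 32, 32).astype(np.float32))
```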