From 78358bacb043fea23e87592af773a1d47681a1cf Mon Sep 17 00:00:00 2001 From: Jan Iwaszkiewicz Date: Mon, 18 Oct 2021 21:36:48 +0200 Subject: [PATCH] [PYTHON API] Expose ngraph bindings as part of openvino API (#8027) --- runtime/bindings/python/setup.py | 33 +- .../src/compatibility/pyngraph/axis_set.cpp | 2 +- .../compatibility/pyngraph/axis_vector.cpp | 4 +- .../src/compatibility/pyngraph/coordinate.cpp | 2 +- .../pyngraph/coordinate_diff.cpp | 4 +- .../src/compatibility/pyngraph/dimension.cpp | 2 +- .../pyngraph/frontend/frontend.cpp | 3 +- .../pyngraph/frontend/frontend_manager.cpp | 3 +- .../pyngraph/frontend/inputmodel.cpp | 3 +- .../compatibility/pyngraph/frontend/place.cpp | 5 +- .../src/compatibility/pyngraph/function.cpp | 2 +- .../src/compatibility/pyngraph/node.cpp | 5 +- .../compatibility/pyngraph/node_factory.cpp | 2 +- .../src/compatibility/pyngraph/node_input.cpp | 3 +- .../compatibility/pyngraph/node_output.cpp | 3 +- .../compatibility/pyngraph/ops/constant.cpp | 3 +- .../compatibility/pyngraph/ops/parameter.cpp | 5 +- .../src/compatibility/pyngraph/ops/result.cpp | 4 +- .../ops/util/arithmetic_reduction.cpp | 2 +- .../util/binary_elementwise_arithmetic.cpp | 2 +- .../util/binary_elementwise_comparison.cpp | 2 +- .../ops/util/binary_elementwise_logical.cpp | 2 +- .../pyngraph/ops/util/index_reduction.cpp | 3 +- .../pyngraph/ops/util/op_annotations.cpp | 3 +- .../ops/util/unary_elementwise_arithmetic.cpp | 2 +- .../compatibility/pyngraph/partial_shape.cpp | 4 +- .../compatibility/pyngraph/passes/manager.cpp | 2 +- .../src/compatibility/pyngraph/shape.cpp | 2 +- .../src/compatibility/pyngraph/strides.cpp | 2 +- .../pyngraph/types/element_type.cpp | 2 +- .../src/compatibility/pyngraph/variant.cpp | 2 +- .../src/compatibility/pyngraph/variant.hpp | 2 +- .../bindings/python/src/openvino/__init__.py | 54 +- .../python/src/openvino/exceptions.py | 16 + .../python/src/openvino/impl/__init__.py | 52 + .../python/src/openvino/impl/op/__init__.py | 23 + 
.../src/openvino/impl/op/util/__init__.py | 16 + .../src/openvino/impl/passes/__init__.py | 6 + .../python/src/openvino/opset1/__init__.py | 111 + .../python/src/openvino/opset1/ops.py | 2876 +++++++++++++++++ .../python/src/openvino/opset2/__init__.py | 117 + .../python/src/openvino/opset2/ops.py | 179 + .../python/src/openvino/opset3/__init__.py | 133 + .../python/src/openvino/opset3/ops.py | 634 ++++ .../python/src/openvino/opset4/__init__.py | 143 + .../python/src/openvino/opset4/ops.py | 409 +++ .../python/src/openvino/opset5/__init__.py | 150 + .../python/src/openvino/opset5/ops.py | 427 +++ .../python/src/openvino/opset6/__init__.py | 152 + .../python/src/openvino/opset6/ops.py | 163 + .../python/src/openvino/opset7/__init__.py | 156 + .../python/src/openvino/opset7/ops.py | 166 + .../python/src/openvino/opset8/__init__.py | 161 + .../python/src/openvino/opset8/ops.py | 369 +++ .../python/src/openvino/opset_utils.py | 21 + .../python/src/openvino/utils/__init__.py | 4 + .../python/src/openvino/utils/broadcasting.py | 36 + .../python/src/openvino/utils/decorators.py | 52 + .../src/openvino/utils/input_validation.py | 136 + .../python/src/openvino/utils/node_factory.py | 167 + .../python/src/openvino/utils/reduction.py | 23 + .../openvino/utils/tensor_iterator_types.py | 154 + .../python/src/openvino/utils/types.py | 146 + .../python/src/pyopenvino/core/containers.cpp | 9 + .../python/src/pyopenvino/core/containers.hpp | 3 + .../python/src/pyopenvino/core/ie_network.cpp | 17 +- .../python/src/pyopenvino/graph/axis_set.cpp | 43 + .../python/src/pyopenvino/graph/axis_set.hpp | 11 + .../src/pyopenvino/graph/axis_vector.cpp | 20 + .../src/pyopenvino/graph/axis_vector.hpp | 11 + .../src/pyopenvino/graph/coordinate.cpp | 21 + .../src/pyopenvino/graph/coordinate.hpp | 11 + .../src/pyopenvino/graph/coordinate_diff.cpp | 37 + .../src/pyopenvino/graph/coordinate_diff.hpp | 11 + .../graph/dict_attribute_visitor.cpp | 343 ++ .../graph/dict_attribute_visitor.hpp | 131 + 
.../python/src/pyopenvino/graph/dimension.cpp | 209 ++ .../python/src/pyopenvino/graph/dimension.hpp | 11 + .../python/src/pyopenvino/graph/function.cpp | 314 ++ .../python/src/pyopenvino/graph/function.hpp | 11 + .../python/src/pyopenvino/graph/node.cpp | 306 ++ .../python/src/pyopenvino/graph/node.hpp | 11 + .../src/pyopenvino/graph/node_factory.cpp | 119 + .../src/pyopenvino/graph/node_factory.hpp | 11 + .../src/pyopenvino/graph/node_input.cpp | 78 + .../src/pyopenvino/graph/node_input.hpp | 11 + .../src/pyopenvino/graph/node_output.cpp | 78 + .../src/pyopenvino/graph/node_output.hpp | 11 + .../src/pyopenvino/graph/ops/constant.cpp | 142 + .../src/pyopenvino/graph/ops/constant.hpp | 11 + .../src/pyopenvino/graph/ops/parameter.cpp | 38 + .../src/pyopenvino/graph/ops/parameter.hpp | 11 + .../src/pyopenvino/graph/ops/result.cpp | 21 + .../src/pyopenvino/graph/ops/result.hpp | 11 + .../graph/ops/util/arithmetic_reduction.cpp | 27 + .../graph/ops/util/arithmetic_reduction.hpp | 11 + .../util/binary_elementwise_arithmetic.cpp | 17 + .../util/binary_elementwise_arithmetic.hpp | 11 + .../util/binary_elementwise_comparison.cpp | 17 + .../util/binary_elementwise_comparison.hpp | 11 + .../ops/util/binary_elementwise_logical.cpp | 17 + .../ops/util/binary_elementwise_logical.hpp | 11 + .../graph/ops/util/index_reduction.cpp | 31 + .../graph/ops/util/index_reduction.hpp | 11 + .../ops/util/regmodule_graph_op_util.cpp | 22 + .../ops/util/regmodule_graph_op_util.hpp | 17 + .../ops/util/unary_elementwise_arithmetic.cpp | 17 + .../ops/util/unary_elementwise_arithmetic.hpp | 11 + .../src/pyopenvino/graph/partial_shape.cpp | 218 ++ .../src/pyopenvino/graph/partial_shape.hpp | 11 + .../src/pyopenvino/graph/passes/manager.cpp | 43 + .../src/pyopenvino/graph/passes/manager.hpp | 11 + .../graph/passes/regmodule_graph_passes.cpp | 14 + .../graph/passes/regmodule_graph_passes.hpp | 12 + .../python/src/pyopenvino/graph/rt_map.cpp | 48 + .../python/src/pyopenvino/graph/rt_map.hpp | 11 + 
.../python/src/pyopenvino/graph/shape.cpp | 47 + .../python/src/pyopenvino/graph/shape.hpp | 11 + .../python/src/pyopenvino/graph/strides.cpp | 37 + .../python/src/pyopenvino/graph/strides.hpp | 11 + .../pyopenvino/graph/types/element_type.cpp | 51 + .../pyopenvino/graph/types/element_type.hpp | 23 + .../graph/types/regmodule_graph_types.cpp | 13 + .../graph/types/regmodule_graph_types.hpp | 13 + .../python/src/pyopenvino/graph/util.cpp | 39 + .../python/src/pyopenvino/graph/util.hpp | 11 + .../python/src/pyopenvino/graph/util.py | 8 + .../python/src/pyopenvino/graph/variant.cpp | 19 + .../python/src/pyopenvino/graph/variant.hpp | 76 + .../python/src/pyopenvino/pyopenvino.cpp | 83 +- runtime/bindings/python/tests/__init__.py | 2 + runtime/bindings/python/tests/runtime.py | 32 +- .../tests/test_inference_engine/test_core.py | 90 +- .../test_inference_engine/test_tensor.py | 120 +- .../python/tests/test_ngraph/__init__.py | 2 +- .../tests/test_ngraph/test_adaptive_pool.py | 17 +- .../python/tests/test_ngraph/test_basic.py | 111 +- .../tests/test_ngraph/test_convolution.py | 20 +- .../python/tests/test_ngraph/test_core.py | 16 +- .../tests/test_ngraph/test_create_op.py | 592 ++-- .../python/tests/test_ngraph/test_ctc_loss.py | 16 +- .../tests/test_ngraph/test_data_movement.py | 39 +- .../python/tests/test_ngraph/test_dft.py | 55 +- .../tests/test_ngraph/test_dyn_attributes.py | 46 +- .../python/tests/test_ngraph/test_einsum.py | 8 +- .../python/tests/test_ngraph/test_gather.py | 12 +- .../python/tests/test_ngraph/test_idft.py | 52 +- .../test_ngraph/test_input_validation.py | 4 +- .../tests/test_ngraph/test_log_softmax.py | 8 +- .../python/tests/test_ngraph/test_manager.py | 10 +- .../tests/test_ngraph/test_node_factory.py | 24 +- .../tests/test_ngraph/test_normalization.py | 14 +- .../python/tests/test_ngraph/test_ops.py | 103 +- .../tests/test_ngraph/test_ops_binary.py | 82 +- .../tests/test_ngraph/test_ops_fused.py | 118 +- .../tests/test_ngraph/test_ops_matmul.py 
| 4 +- .../tests/test_ngraph/test_ops_multioutput.py | 16 +- .../tests/test_ngraph/test_ops_reshape.py | 42 +- .../tests/test_ngraph/test_ops_scatter.py | 20 +- .../tests/test_ngraph/test_ops_unary.py | 129 +- .../python/tests/test_ngraph/test_pooling.py | 44 +- .../python/tests/test_ngraph/test_proposal.py | 12 +- .../tests/test_ngraph/test_random_uniform.py | 10 +- .../tests/test_ngraph/test_reduction.py | 75 +- .../python/tests/test_ngraph/test_roll.py | 10 +- .../test_ngraph/test_sequence_processing.py | 10 +- .../python/tests/test_ngraph/test_swish.py | 14 +- .../python/tests/test_ngraph/test_utils.py | 22 +- .../bindings/python/tests/test_ngraph/util.py | 8 +- .../python/tests/test_onnx/test_backend.py | 41 + .../test_onnx/test_onnx_external_data.py | 9 +- .../tests/test_onnx/test_onnx_import.py | 9 +- .../python/tests/test_onnx/test_ops_binary.py | 1 + .../tests/test_onnx/test_ops_logical.py | 9 +- .../python/tests/test_onnx/test_ops_matmul.py | 13 +- .../python/tests/test_onnx/test_ops_unary.py | 5 +- .../python/tests/test_onnx/test_zoo_models.py | 7 + .../tests/test_onnx/utils/onnx_backend.py | 2 +- .../tests/test_onnx/utils/onnx_helpers.py | 11 +- .../python/tests_compatibility/__init__.py | 152 + .../python/tests_compatibility/conftest.py | 114 + .../mock_py_ngraph_frontend/CMakeLists.txt | 24 + .../mock_py_frontend.cpp | 25 + .../mock_py_frontend.hpp | 665 ++++ .../mock/pyngraph_fe_mock_api/CMakeLists.txt | 20 + .../pyngraph_mock_frontend_api.cpp | 127 + .../python/tests_compatibility/runtime.py | 193 ++ .../test_frontend/test_frontend_onnx.py | 2 +- .../test_frontend_onnx_editor.py | 0 .../test_frontend/test_frontendmanager.py | 0 .../test_ngraph/__init__.py | 6 + .../test_ngraph/test_adaptive_pool.py | 63 + .../test_ngraph/test_basic.py | 472 +++ .../test_ngraph/test_convolution.py | 219 ++ .../test_ngraph/test_core.py | 262 ++ .../test_ngraph/test_create_op.py | 1925 +++++++++++ .../test_ngraph/test_ctc_loss.py | 27 + 
.../test_ngraph/test_data_movement.py | 218 ++ .../test_ngraph/test_dft.py | 117 + .../test_ngraph/test_dyn_attributes.py | 227 ++ .../test_ngraph/test_einsum.py | 98 + .../test_ngraph/test_gather.py | 89 + .../test_ngraph/test_idft.py | 117 + .../test_ngraph/test_input_validation.py | 157 + .../test_ngraph/test_log_softmax.py | 17 + .../test_ngraph/test_manager.py | 37 + .../test_ngraph/test_node_factory.py | 94 + .../test_ngraph/test_normalization.py | 142 + .../test_ngraph/test_ops.py | 856 +++++ .../test_ngraph/test_ops_binary.py | 209 ++ .../test_ngraph/test_ops_fused.py | 766 +++++ .../test_ngraph/test_ops_matmul.py | 40 + .../test_ngraph/test_ops_multioutput.py | 36 + .../test_ngraph/test_ops_reshape.py | 201 ++ .../test_ngraph/test_ops_scatter.py | 35 + .../test_ngraph/test_ops_unary.py | 233 ++ .../test_ngraph/test_pooling.py | 430 +++ .../test_ngraph/test_proposal.py | 36 + .../test_ngraph/test_random_uniform.py | 27 + .../test_ngraph/test_reduction.py | 213 ++ .../test_ngraph/test_roll.py | 18 + .../test_ngraph/test_sequence_processing.py | 45 + .../test_ngraph/test_swish.py | 29 + .../test_ngraph/test_utils.py | 28 + .../tests_compatibility/test_ngraph/util.py | 79 + .../tests_compatibility/test_onnx/__init__.py | 2 + .../test_onnx/model_zoo_preprocess.sh | 170 + .../test_onnx/models/add_abc.onnx | 24 + .../test_onnx/models/data/tensor.data | Bin 0 -> 12 bytes .../test_onnx/models/external_data.onnx | 22 + .../test_onnx/test_backend.py | 528 +++ .../test_onnx/test_onnx_external_data.py | 28 + .../test_onnx/test_onnx_import.py | 54 + .../test_onnx/test_ops_batchnorm.py | 84 + .../test_onnx/test_ops_binary.py | 136 + .../test_onnx/test_ops_convpool.py | 402 +++ .../test_onnx/test_ops_logical.py | 44 + .../test_onnx/test_ops_matmul.py | 155 + .../test_onnx/test_ops_nonlinear.py | 109 + .../test_onnx/test_ops_reduction.py | 368 +++ .../test_onnx/test_ops_reshape.py | 415 +++ .../test_onnx/test_ops_unary.py | 485 +++ .../test_onnx/test_ops_variadic.py | 43 + 
.../test_onnx/test_zoo_models.py | 197 ++ .../test_onnx/utils/__init__.py | 87 + .../test_onnx/utils/model_importer.py | 149 + .../test_onnx/utils/onnx_backend.py | 135 + .../test_onnx/utils/onnx_helpers.py | 30 + runtime/bindings/python/tox.ini | 7 +- scripts/CMakeLists.txt | 1 + 250 files changed, 24080 insertions(+), 1086 deletions(-) create mode 100644 runtime/bindings/python/src/openvino/exceptions.py create mode 100644 runtime/bindings/python/src/openvino/impl/__init__.py create mode 100644 runtime/bindings/python/src/openvino/impl/op/__init__.py create mode 100644 runtime/bindings/python/src/openvino/impl/op/util/__init__.py create mode 100644 runtime/bindings/python/src/openvino/impl/passes/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset1/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset1/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset2/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset2/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset3/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset3/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset4/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset4/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset5/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset5/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset6/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset6/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset7/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset7/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset8/__init__.py create mode 100644 runtime/bindings/python/src/openvino/opset8/ops.py create mode 100644 runtime/bindings/python/src/openvino/opset_utils.py create mode 100644 
runtime/bindings/python/src/openvino/utils/__init__.py create mode 100644 runtime/bindings/python/src/openvino/utils/broadcasting.py create mode 100644 runtime/bindings/python/src/openvino/utils/decorators.py create mode 100644 runtime/bindings/python/src/openvino/utils/input_validation.py create mode 100644 runtime/bindings/python/src/openvino/utils/node_factory.py create mode 100644 runtime/bindings/python/src/openvino/utils/reduction.py create mode 100644 runtime/bindings/python/src/openvino/utils/tensor_iterator_types.py create mode 100644 runtime/bindings/python/src/openvino/utils/types.py create mode 100644 runtime/bindings/python/src/pyopenvino/graph/axis_set.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/axis_set.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/axis_vector.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/axis_vector.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/coordinate.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/coordinate.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/dimension.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/dimension.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/function.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/function.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/node.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/node.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/node_factory.cpp create mode 100644 
runtime/bindings/python/src/pyopenvino/graph/node_factory.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/node_input.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/node_input.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/node_output.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/node_output.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/constant.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/constant.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/parameter.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/parameter.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/result.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/result.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp create mode 
100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/partial_shape.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/partial_shape.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/passes/manager.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/passes/manager.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/rt_map.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/rt_map.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/shape.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/shape.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/strides.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/strides.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/types/element_type.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/types/element_type.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/util.cpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/util.hpp create mode 100644 runtime/bindings/python/src/pyopenvino/graph/util.py create mode 100644 runtime/bindings/python/src/pyopenvino/graph/variant.cpp create mode 100644 
runtime/bindings/python/src/pyopenvino/graph/variant.hpp create mode 100644 runtime/bindings/python/tests_compatibility/__init__.py create mode 100644 runtime/bindings/python/tests_compatibility/conftest.py create mode 100644 runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/CMakeLists.txt create mode 100644 runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp create mode 100644 runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp create mode 100644 runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/CMakeLists.txt create mode 100644 runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp create mode 100644 runtime/bindings/python/tests_compatibility/runtime.py rename runtime/bindings/python/{tests => tests_compatibility}/test_frontend/test_frontend_onnx.py (98%) rename runtime/bindings/python/{tests => tests_compatibility}/test_frontend/test_frontend_onnx_editor.py (100%) rename runtime/bindings/python/{tests => tests_compatibility}/test_frontend/test_frontendmanager.py (100%) create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/__init__.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_basic.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_convolution.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_core.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ctc_loss.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_data_movement.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_dft.py create mode 
100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_dyn_attributes.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_einsum.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_gather.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_idft.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_input_validation.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_log_softmax.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_manager.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_normalization.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_binary.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_fused.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_matmul.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_reshape.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_unary.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_proposal.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_reduction.py create mode 100644 
runtime/bindings/python/tests_compatibility/test_ngraph/test_roll.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_sequence_processing.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_swish.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/test_utils.py create mode 100644 runtime/bindings/python/tests_compatibility/test_ngraph/util.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/__init__.py create mode 100755 runtime/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/models/add_abc.onnx create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/models/data/tensor.data create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/models/external_data.onnx create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_import.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_batchnorm.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_binary.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_convpool.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_logical.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_matmul.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reduction.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reshape.py create mode 100644 
runtime/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_ops_variadic.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/utils/__init__.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/utils/model_importer.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_backend.py create mode 100644 runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_helpers.py diff --git a/runtime/bindings/python/setup.py b/runtime/bindings/python/setup.py index 17af4b09475..6a4dd6268b3 100644 --- a/runtime/bindings/python/setup.py +++ b/runtime/bindings/python/setup.py @@ -17,10 +17,10 @@ from setuptools.command.develop import develop as _develop from distutils.command.build import build as _build __version__ = os.environ.get("NGRAPH_VERSION", "0.0.0.dev0") -PYNGRAPH_ROOT_DIR = os.path.abspath(os.path.dirname(__file__)) -OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYNGRAPH_ROOT_DIR, "../../..")) +PYTHON_API_ROOT_DIR = os.path.abspath(os.path.dirname(__file__)) +OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYTHON_API_ROOT_DIR, "../../..")) # Change current working directory to runtime/bindings/python -os.chdir(PYNGRAPH_ROOT_DIR) +os.chdir(PYTHON_API_ROOT_DIR) NGRAPH_LIBS = ["ngraph", "onnx_ngraph_frontend", "openvino"] @@ -40,13 +40,26 @@ packages = [ "ngraph.impl.op.util", "ngraph.impl.passes", "ngraph.frontend", - "openvino" + "openvino", + "openvino.opset1", + "openvino.opset2", + "openvino.opset3", + "openvino.opset4", + "openvino.opset5", + "openvino.opset6", + "openvino.opset7", + "openvino.opset8", + "openvino.utils", + "openvino.impl", + "openvino.impl.op", + "openvino.impl.op.util", + "openvino.impl.passes", ] data_files = [] -with open(os.path.join(PYNGRAPH_ROOT_DIR, "requirements.txt")) as 
req: +with open(os.path.join(PYTHON_API_ROOT_DIR, "requirements.txt")) as req: requirements = req.read().splitlines() cmdclass = {} @@ -143,6 +156,8 @@ class BuildCMakeExt(build_ext): build_dir = pathlib.Path(self.build_temp) extension_path = pathlib.Path(self.get_ext_fullpath(extension.name)) + if extension.name == "pyopenvino": + extension_path = pathlib.Path(os.path.join(extension_path.parent.absolute(), "openvino")) os.makedirs(build_dir, exist_ok=True) os.makedirs(extension_path.parent.absolute(), exist_ok=True) @@ -152,7 +167,7 @@ class BuildCMakeExt(build_ext): root_dir = OPENVINO_ROOT_DIR bin_dir = os.path.join(OPENVINO_ROOT_DIR, "bin") if os.environ.get("OpenVINO_DIR") is not None: - root_dir = PYNGRAPH_ROOT_DIR + root_dir = PYTHON_API_ROOT_DIR bin_dir = build_dir self.announce("Configuring cmake project", level=3) @@ -185,7 +200,7 @@ class InstallCMakeLibs(install_lib): root_dir = os.path.join(OPENVINO_ROOT_DIR, "bin") if os.environ.get("OpenVINO_DIR") is not None: - root_dir = pathlib.Path(PYNGRAPH_ROOT_DIR) + root_dir = pathlib.Path(PYTHON_API_ROOT_DIR) lib_ext = "" if "linux" in sys.platform: @@ -214,8 +229,8 @@ cmdclass["build_ext"] = BuildCMakeExt cmdclass["install_lib"] = InstallCMakeLibs setup( - name="ngraph-core", - description="nGraph - Intel's graph compiler and runtime for Neural Networks", + name="openvino", + description="OpenVINO - deploying pre-trained deep learning models", version=__version__, author="Intel Corporation", url="https://github.com/openvinotoolkit/openvino", diff --git a/runtime/bindings/python/src/compatibility/pyngraph/axis_set.cpp b/runtime/bindings/python/src/compatibility/pyngraph/axis_set.cpp index baf4e70c6b5..aff720e32b7 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/axis_set.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/axis_set.cpp @@ -16,7 +16,7 @@ namespace py = pybind11; void regclass_pyngraph_AxisSet(py::module m) { - py::class_> axis_set(m, "AxisSet"); + py::class_> axis_set(m, 
"AxisSet", py::module_local()); axis_set.doc() = "ngraph.impl.AxisSet wraps ngraph::AxisSet"; axis_set.def(py::init&>(), py::arg("axes")); axis_set.def(py::init&>(), py::arg("axes")); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/axis_vector.cpp b/runtime/bindings/python/src/compatibility/pyngraph/axis_vector.cpp index de768e073df..e1de150eedd 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/axis_vector.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/axis_vector.cpp @@ -12,7 +12,9 @@ namespace py = pybind11; void regclass_pyngraph_AxisVector(py::module m) { - py::class_> axis_vector(m, "AxisVector"); + py::class_> axis_vector(m, + "AxisVector", + py::module_local()); axis_vector.doc() = "ngraph.impl.AxisVector wraps ngraph::AxisVector"; axis_vector.def(py::init&>(), py::arg("axes")); axis_vector.def(py::init&>(), py::arg("axes")); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/coordinate.cpp b/runtime/bindings/python/src/compatibility/pyngraph/coordinate.cpp index 2f6d6de0def..798f099f256 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/coordinate.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/coordinate.cpp @@ -12,7 +12,7 @@ namespace py = pybind11; void regclass_pyngraph_Coordinate(py::module m) { - py::class_> coordinate(m, "Coordinate"); + py::class_> coordinate(m, "Coordinate", py::module_local()); coordinate.doc() = "ngraph.impl.Coordinate wraps ngraph::Coordinate"; coordinate.def(py::init&>()); coordinate.def(py::init()); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp b/runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp index 7a9e0cadb9d..7a825e2f531 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/coordinate_diff.cpp @@ -16,7 +16,9 @@ namespace py = pybind11; void regclass_pyngraph_CoordinateDiff(py::module m) { - 
py::class_> coordinate_diff(m, "CoordinateDiff"); + py::class_> coordinate_diff(m, + "CoordinateDiff", + py::module_local()); coordinate_diff.doc() = "ngraph.impl.CoordinateDiff wraps ngraph::CoordinateDiff"; coordinate_diff.def(py::init&>()); coordinate_diff.def(py::init&>()); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/dimension.cpp b/runtime/bindings/python/src/compatibility/pyngraph/dimension.cpp index 718413ef927..e3ce676c303 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/dimension.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/dimension.cpp @@ -18,7 +18,7 @@ namespace py = pybind11; void regclass_pyngraph_Dimension(py::module m) { using value_type = ngraph::Dimension::value_type; - py::class_> dim(m, "Dimension"); + py::class_> dim(m, "Dimension", py::module_local()); dim.doc() = "ngraph.impl.Dimension wraps ngraph::Dimension"; dim.def(py::init<>()); dim.def(py::init(), diff --git a/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.cpp index fe34853f204..db3dccfc44b 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend.cpp @@ -16,7 +16,8 @@ namespace py = pybind11; void regclass_pyngraph_FrontEnd(py::module m) { py::class_> fem(m, "FrontEnd", - py::dynamic_attr()); + py::dynamic_attr(), + py::module_local()); fem.doc() = "ngraph.impl.FrontEnd wraps ngraph::frontend::FrontEnd"; fem.def( diff --git a/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.cpp index 902bbcb3226..250e80e69e5 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/frontend/frontend_manager.cpp @@ -17,7 +17,8 @@ void 
regclass_pyngraph_FrontEndManager(py::module m) { py::class_> fem( m, "FrontEndManager", - py::dynamic_attr()); + py::dynamic_attr(), + py::module_local()); fem.doc() = "ngraph.impl.FrontEndManager wraps ngraph::frontend::FrontEndManager"; fem.def(py::init<>()); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.cpp index 4c640531c43..a8cb5f30557 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/frontend/inputmodel.cpp @@ -14,7 +14,8 @@ namespace py = pybind11; void regclass_pyngraph_InputModel(py::module m) { py::class_> im(m, "InputModel", - py::dynamic_attr()); + py::dynamic_attr(), + py::module_local()); im.doc() = "ngraph.impl.InputModel wraps ngraph::frontend::InputModel"; im.def("get_place_by_tensor_name", diff --git a/runtime/bindings/python/src/compatibility/pyngraph/frontend/place.cpp b/runtime/bindings/python/src/compatibility/pyngraph/frontend/place.cpp index 60bf74528ab..5414d74529f 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/frontend/place.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/frontend/place.cpp @@ -14,7 +14,10 @@ namespace py = pybind11; void regclass_pyngraph_Place(py::module m) { - py::class_> place(m, "Place", py::dynamic_attr()); + py::class_> place(m, + "Place", + py::dynamic_attr(), + py::module_local()); place.doc() = "ngraph.impl.Place wraps ngraph::frontend::Place"; place.def("is_input", diff --git a/runtime/bindings/python/src/compatibility/pyngraph/function.cpp b/runtime/bindings/python/src/compatibility/pyngraph/function.cpp index 44d083111f9..09881204597 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/function.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/function.cpp @@ -16,7 +16,7 @@ namespace py = pybind11; static const char* CAPSULE_NAME = "ngraph_function"; 
void regclass_pyngraph_Function(py::module m) { - py::class_> function(m, "Function"); + py::class_> function(m, "Function", py::module_local()); function.doc() = "ngraph.impl.Function wraps ngraph::Function"; function.def(py::init([](const ngraph::ResultVector& res, diff --git a/runtime/bindings/python/src/compatibility/pyngraph/node.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node.cpp index c12c3ac6413..956297a2357 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/node.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/node.cpp @@ -36,7 +36,10 @@ using PyRTMap = std::map>; PYBIND11_MAKE_OPAQUE(PyRTMap); void regclass_pyngraph_Node(py::module m) { - py::class_, PyNode> node(m, "Node", py::dynamic_attr()); + py::class_, PyNode> node(m, + "Node", + py::dynamic_attr(), + py::module_local()); node.doc() = "ngraph.impl.Node wraps ngraph::Node"; node.def( "__add__", diff --git a/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp index d4ad3d7bacb..e93e3ce30bb 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/node_factory.cpp @@ -98,7 +98,7 @@ private: } // namespace void regclass_pyngraph_NodeFactory(py::module m) { - py::class_ node_factory(m, "NodeFactory"); + py::class_ node_factory(m, "NodeFactory", py::module_local()); node_factory.doc() = "NodeFactory creates nGraph nodes"; node_factory.def(py::init()); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/node_input.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node_input.cpp index 34efa4d1780..383e5edb841 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/node_input.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/node_input.cpp @@ -14,7 +14,8 @@ namespace py = pybind11; void regclass_pyngraph_Input(py::module m) { py::class_, std::shared_ptr>> input(m, 
"Input", - py::dynamic_attr()); + py::dynamic_attr(), + py::module_local()); input.doc() = "ngraph.impl.Input wraps ngraph::Input"; input.def("get_node", diff --git a/runtime/bindings/python/src/compatibility/pyngraph/node_output.cpp b/runtime/bindings/python/src/compatibility/pyngraph/node_output.cpp index bc13fc5cc74..3e35327fb85 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/node_output.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/node_output.cpp @@ -14,7 +14,8 @@ namespace py = pybind11; void regclass_pyngraph_Output(py::module m) { py::class_, std::shared_ptr>> output(m, "Output", - py::dynamic_attr()); + py::dynamic_attr(), + py::module_local()); output.doc() = "ngraph.impl.Output wraps ngraph::Output"; output.def("get_node", diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/constant.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/constant.cpp index bfcfe4aad44..281c15725a3 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/constant.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/constant.cpp @@ -61,7 +61,8 @@ void regclass_pyngraph_op_Constant(py::module m) { py::class_, ngraph::Node> constant( m, "Constant", - py::buffer_protocol()); + py::buffer_protocol(), + py::module_local()); constant.doc() = "ngraph.impl.op.Constant wraps ngraph::op::Constant"; constant.def(py::init&>()); constant.def(py::init&>()); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp index 60765c78faa..604d6f53d91 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/parameter.cpp @@ -16,7 +16,10 @@ namespace py = pybind11; void regclass_pyngraph_op_Parameter(py::module m) { - py::class_, ngraph::Node> parameter(m, "Parameter"); + py::class_, ngraph::Node> parameter( + m, + "Parameter", + 
py::module_local()); parameter.doc() = "ngraph.impl.op.Parameter wraps ngraph::op::Parameter"; parameter.def("__repr__", [](const ngraph::Node& self) { std::string class_name = py::cast(self).get_type().attr("__name__").cast(); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/result.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/result.cpp index 3ec95de8253..5f66df05d93 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/result.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/result.cpp @@ -15,6 +15,8 @@ namespace py = pybind11; void regclass_pyngraph_op_Result(py::module m) { - py::class_, ngraph::Node> result(m, "Result"); + py::class_, ngraph::Node> result(m, + "Result", + py::module_local()); result.doc() = "ngraph.impl.op.Result wraps ngraph::op::Result"; } diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp index 46c046d0b76..d77d20558cf 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/arithmetic_reduction.cpp @@ -14,7 +14,7 @@ namespace py = pybind11; void regclass_pyngraph_op_util_ArithmeticReduction(py::module m) { py::class_> - arithmeticReduction(m, "ArithmeticReduction"); + arithmeticReduction(m, "ArithmeticReduction", py::module_local()); // arithmeticReduction.def(py::init&, // const ngraph::AxisSet& >()); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp index 55f0cd9574b..8a51f2bc916 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_arithmetic.cpp @@ 
-14,5 +14,5 @@ namespace py = pybind11; void regclass_pyngraph_op_util_BinaryElementwiseArithmetic(py::module m) { py::class_> - binaryElementwiseArithmetic(m, "BinaryElementwiseArithmetic"); + binaryElementwiseArithmetic(m, "BinaryElementwiseArithmetic", py::module_local()); } diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp index af4aa5ffcc0..d916b64932a 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_comparison.cpp @@ -14,5 +14,5 @@ namespace py = pybind11; void regclass_pyngraph_op_util_BinaryElementwiseComparison(py::module m) { py::class_> - binaryElementwiseComparison(m, "BinaryElementwiseComparison"); + binaryElementwiseComparison(m, "BinaryElementwiseComparison", py::module_local()); } diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp index b5ab8b1b813..2f1acd30cbe 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/binary_elementwise_logical.cpp @@ -13,5 +13,5 @@ namespace py = pybind11; void regclass_pyngraph_op_util_BinaryElementwiseLogical(py::module m) { py::class_> - binaryElementwiseLogical(m, "BinaryElementwiseLogical"); + binaryElementwiseLogical(m, "BinaryElementwiseLogical", py::module_local()); } diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp index 35101945512..26734aae7d4 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp 
+++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/index_reduction.cpp @@ -15,7 +15,8 @@ namespace py = pybind11; void regclass_pyngraph_op_util_IndexReduction(py::module m) { py::class_> indexReduction( m, - "IndexRedection"); + "IndexRedection", + py::module_local()); indexReduction.def("get_reduction_axis", &ngraph::op::util::IndexReduction::get_reduction_axis); indexReduction.def("set_reduction_axis", &ngraph::op::util::IndexReduction::set_reduction_axis); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp index f1430ae8f76..c066433b4a4 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/op_annotations.cpp @@ -14,6 +14,7 @@ namespace py = pybind11; void regclass_pyngraph_op_util_OpAnnotations(py::module m) { py::class_> opAnnotations( m, - "OpAnnotations"); + "OpAnnotations", + py::module_local()); opAnnotations.def(py::init<>()); } diff --git a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp index b05caa4e261..e9edf7df240 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/ops/util/unary_elementwise_arithmetic.cpp @@ -14,5 +14,5 @@ namespace py = pybind11; void regclass_pyngraph_op_util_UnaryElementwiseArithmetic(py::module m) { py::class_> - unaryElementwiseArithmetic(m, "UnaryElementwiseArithmetic"); + unaryElementwiseArithmetic(m, "UnaryElementwiseArithmetic", py::module_local()); } diff --git a/runtime/bindings/python/src/compatibility/pyngraph/partial_shape.cpp b/runtime/bindings/python/src/compatibility/pyngraph/partial_shape.cpp index 80004760dc0..1bebdfe255d 100644 
--- a/runtime/bindings/python/src/compatibility/pyngraph/partial_shape.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/partial_shape.cpp @@ -20,7 +20,9 @@ namespace py = pybind11; static const char* CAPSULE_NAME = "ngraph_partial_shape"; void regclass_pyngraph_PartialShape(py::module m) { - py::class_> shape(m, "PartialShape"); + py::class_> shape(m, + "PartialShape", + py::module_local()); shape.doc() = "ngraph.impl.PartialShape wraps ngraph::PartialShape"; shape.def(py::init([](const std::vector& dimensions) { diff --git a/runtime/bindings/python/src/compatibility/pyngraph/passes/manager.cpp b/runtime/bindings/python/src/compatibility/pyngraph/passes/manager.cpp index bc309b3bc46..dd3ae2b6ad6 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/passes/manager.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/passes/manager.cpp @@ -32,7 +32,7 @@ public: } // namespace void regclass_pyngraph_passes_Manager(py::module m) { - py::class_ manager(m, "Manager"); + py::class_ manager(m, "Manager", py::module_local()); manager.doc() = "ngraph.impl.passes.Manager wraps ngraph::pass::Manager using ManagerWrapper"; manager.def(py::init<>()); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/shape.cpp b/runtime/bindings/python/src/compatibility/pyngraph/shape.cpp index 424d0abddca..fa3feab68b6 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/shape.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/shape.cpp @@ -16,7 +16,7 @@ namespace py = pybind11; void regclass_pyngraph_Shape(py::module m) { - py::class_> shape(m, "Shape"); + py::class_> shape(m, "Shape", py::module_local()); shape.doc() = "ngraph.impl.Shape wraps ngraph::Shape"; shape.def(py::init&>(), py::arg("axis_lengths")); shape.def(py::init&>(), py::arg("axis_lengths")); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/strides.cpp b/runtime/bindings/python/src/compatibility/pyngraph/strides.cpp index 1e39b194b8a..a806fb9f848 
100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/strides.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/strides.cpp @@ -16,7 +16,7 @@ namespace py = pybind11; void regclass_pyngraph_Strides(py::module m) { - py::class_> strides(m, "Strides"); + py::class_> strides(m, "Strides", py::module_local()); strides.doc() = "ngraph.impl.Strides wraps ngraph::Strides"; strides.def(py::init&>(), py::arg("axis_strides")); strides.def(py::init&>(), py::arg("axis_strides")); diff --git a/runtime/bindings/python/src/compatibility/pyngraph/types/element_type.cpp b/runtime/bindings/python/src/compatibility/pyngraph/types/element_type.cpp index 1b1fb74b068..6a057c48bfa 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/types/element_type.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/types/element_type.cpp @@ -13,7 +13,7 @@ namespace py = pybind11; void regclass_pyngraph_Type(py::module m) { - py::class_> type(m, "Type"); + py::class_> type(m, "Type", py::module_local()); type.doc() = "ngraph.impl.Type wraps ngraph::element::Type"; type.attr("boolean") = ngraph::element::boolean; type.attr("f16") = ngraph::element::f16; diff --git a/runtime/bindings/python/src/compatibility/pyngraph/variant.cpp b/runtime/bindings/python/src/compatibility/pyngraph/variant.cpp index bb8577352d3..602954fee14 100644 --- a/runtime/bindings/python/src/compatibility/pyngraph/variant.cpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/variant.cpp @@ -11,7 +11,7 @@ namespace py = pybind11; void regclass_pyngraph_Variant(py::module m) { - py::class_> variant_base(m, "Variant"); + py::class_> variant_base(m, "Variant", py::module_local()); variant_base.doc() = "ngraph.impl.Variant wraps ngraph::Variant"; } diff --git a/runtime/bindings/python/src/compatibility/pyngraph/variant.hpp b/runtime/bindings/python/src/compatibility/pyngraph/variant.hpp index 5f107d9fa70..7041f5cd0ce 100644 --- 
a/runtime/bindings/python/src/compatibility/pyngraph/variant.hpp +++ b/runtime/bindings/python/src/compatibility/pyngraph/variant.hpp @@ -23,7 +23,7 @@ extern void regclass_pyngraph_VariantWrapper(py::module m, std::string typestrin py::class_, std::shared_ptr>, ngraph::Variant> - variant_wrapper(m, pyclass_name); + variant_wrapper(m, pyclass_name, py::module_local()); variant_wrapper.doc() = "ngraph.impl.Variant[typestring] wraps ngraph::VariantWrapper"; diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py index 2ce7bb4cd49..6919158fa31 100644 --- a/runtime/bindings/python/src/openvino/__init__.py +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -1,6 +1,29 @@ # Copyright (C) 2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +"""openvino module namespace, exposing factory functions for all ops and other classes.""" +# noqa: F401 + +from pkg_resources import get_distribution, DistributionNotFound + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore # mypy issue #1422 + +try: + __version__ = get_distribution("openvino-core").version +except DistributionNotFound: + __version__ = "0.0.0.dev0" + +from openvino.ie_api import BlobWrapper +from openvino.ie_api import infer +from openvino.ie_api import async_infer +from openvino.ie_api import get_result +from openvino.ie_api import blob_from_file + +from openvino.impl import Dimension +from openvino.impl import Function +from openvino.impl import Node +from openvino.impl import PartialShape + from openvino.pyopenvino import Core from openvino.pyopenvino import IENetwork from openvino.pyopenvino import ExecutableNetwork @@ -22,11 +45,32 @@ from openvino.pyopenvino import ColorFormat from openvino.pyopenvino import PreProcessChannel from openvino.pyopenvino import Tensor -from openvino.ie_api import BlobWrapper -from openvino.ie_api import infer -from openvino.ie_api import async_infer -from openvino.ie_api import 
get_result -from openvino.ie_api import blob_from_file +from openvino import opset1 +from openvino import opset2 +from openvino import opset3 +from openvino import opset4 +from openvino import opset5 +from openvino import opset6 +from openvino import opset7 +from openvino import opset8 + +# Extend Node class to support binary operators +Node.__add__ = opset8.add +Node.__sub__ = opset8.subtract +Node.__mul__ = opset8.multiply +Node.__div__ = opset8.divide +Node.__truediv__ = opset8.divide +Node.__radd__ = lambda left, right: opset8.add(right, left) +Node.__rsub__ = lambda left, right: opset8.subtract(right, left) +Node.__rmul__ = lambda left, right: opset8.multiply(right, left) +Node.__rdiv__ = lambda left, right: opset8.divide(right, left) +Node.__rtruediv__ = lambda left, right: opset8.divide(right, left) +Node.__eq__ = opset8.equal +Node.__ne__ = opset8.not_equal +Node.__lt__ = opset8.less +Node.__le__ = opset8.less_equal +Node.__gt__ = opset8.greater +Node.__ge__ = opset8.greater_equal # Patching for Blob class # flake8: noqa: F811 diff --git a/runtime/bindings/python/src/openvino/exceptions.py b/runtime/bindings/python/src/openvino/exceptions.py new file mode 100644 index 00000000000..ebeac25d643 --- /dev/null +++ b/runtime/bindings/python/src/openvino/exceptions.py @@ -0,0 +1,16 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""openvino exceptions hierarchy. 
All exceptions are descendants of NgraphError.""" + + +class NgraphError(Exception): + """Base class for Ngraph exceptions.""" + + +class UserInputError(NgraphError): + """User provided unexpected input.""" + + +class NgraphTypeError(NgraphError, TypeError): + """Type mismatch error.""" diff --git a/runtime/bindings/python/src/openvino/impl/__init__.py b/runtime/bindings/python/src/openvino/impl/__init__.py new file mode 100644 index 00000000000..4a33b52aff7 --- /dev/null +++ b/runtime/bindings/python/src/openvino/impl/__init__.py @@ -0,0 +1,52 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Package: openvino.impl +Low level wrappers for the c++ api. +""" + +# flake8: noqa + +import os +import sys + +if sys.platform == "win32": + # Installer, yum, pip installs openvino dlls to the different directories + # and those paths need to be visible to the openvino modules + # + # If you're using a custom installation of openvino, + # add the location of openvino dlls to your system PATH. + # + # looking for the libs in the pip installation path by default. + openvino_libs = [os.path.join(os.path.dirname(__file__), '..', '..', '..'), + os.path.join(os.path.dirname(__file__), '..', '..', 'openvino', 'libs')] + # setupvars.bat script set all libs paths to OPENVINO_LIB_PATHS environment variable. + openvino_libs_installer = os.getenv('OPENVINO_LIB_PATHS') + if openvino_libs_installer: + openvino_libs.extend(openvino_libs_installer.split(';')) + for lib in openvino_libs: + lib_path = os.path.join(os.path.dirname(__file__), lib) + if os.path.isdir(lib_path): + # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH. 
+ if (3, 8) <= sys.version_info: + os.add_dll_directory(os.path.abspath(lib_path)) + else: + os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"] + +from openvino.pyopenvino import Dimension +from openvino.pyopenvino import Function +from openvino.pyopenvino import Input +from openvino.pyopenvino import Output +from openvino.pyopenvino import Node +from openvino.pyopenvino import Type +from openvino.pyopenvino import PartialShape +from openvino.pyopenvino import Shape +from openvino.pyopenvino import Strides +from openvino.pyopenvino import CoordinateDiff +from openvino.pyopenvino import AxisSet +from openvino.pyopenvino import AxisVector +from openvino.pyopenvino import Coordinate +from openvino.pyopenvino import Output + +from openvino.pyopenvino import util diff --git a/runtime/bindings/python/src/openvino/impl/op/__init__.py b/runtime/bindings/python/src/openvino/impl/op/__init__.py new file mode 100644 index 00000000000..b2a869948a2 --- /dev/null +++ b/runtime/bindings/python/src/openvino/impl/op/__init__.py @@ -0,0 +1,23 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Package: openvino.op +Low level wrappers for the c++ api in ov::op. +""" + +# flake8: noqa + +import numpy as np + +from openvino.pyopenvino.op import Constant + +"""Retrieve Constant inner data. + + Internally uses PyBind11 Numpy's buffer protocol. + + :return Numpy array containing internally stored constant data. 
+""" +Constant.get_data = lambda self: np.array(self, copy=True) + +from openvino.pyopenvino.op import Parameter diff --git a/runtime/bindings/python/src/openvino/impl/op/util/__init__.py b/runtime/bindings/python/src/openvino/impl/op/util/__init__.py new file mode 100644 index 00000000000..26c7dce579c --- /dev/null +++ b/runtime/bindings/python/src/openvino/impl/op/util/__init__.py @@ -0,0 +1,16 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Package: openvino.op.util +Low level wrappers for the c++ api in ov::op::util. +""" +# flake8: noqa + +from openvino.pyopenvino.op.util import UnaryElementwiseArithmetic +from openvino.pyopenvino.op.util import BinaryElementwiseComparison +from openvino.pyopenvino.op.util import BinaryElementwiseArithmetic +from openvino.pyopenvino.op.util import BinaryElementwiseLogical +from openvino.pyopenvino.op.util import OpAnnotations +from openvino.pyopenvino.op.util import ArithmeticReduction +from openvino.pyopenvino.op.util import IndexReduction diff --git a/runtime/bindings/python/src/openvino/impl/passes/__init__.py b/runtime/bindings/python/src/openvino/impl/passes/__init__.py new file mode 100644 index 00000000000..0787724f35b --- /dev/null +++ b/runtime/bindings/python/src/openvino/impl/passes/__init__.py @@ -0,0 +1,6 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# flake8: noqa + +from openvino.pyopenvino.passes import Manager diff --git a/runtime/bindings/python/src/openvino/opset1/__init__.py b/runtime/bindings/python/src/openvino/opset1/__init__.py new file mode 100644 index 00000000000..e8d7a9539c7 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset1/__init__.py @@ -0,0 +1,111 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset1.ops import 
add +from openvino.opset1.ops import asin +from openvino.opset1.ops import atan +from openvino.opset1.ops import avg_pool +from openvino.opset1.ops import batch_norm_inference +from openvino.opset1.ops import binary_convolution +from openvino.opset1.ops import broadcast +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset1.ops import deformable_convolution +from openvino.opset1.ops import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset1.ops import divide +from openvino.opset1.ops import elu +from openvino.opset1.ops import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset1.ops import gather +from openvino.opset1.ops import gather_tree +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset1.ops import hard_sigmoid +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import 
logical_or +from openvino.opset1.ops import logical_xor +from openvino.opset1.ops import lrn +from openvino.opset1.ops import lstm_cell +from openvino.opset1.ops import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset1.ops import max_pool +from openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset1.ops import mod +from openvino.opset1.ops import multiply +from openvino.opset1.ops import negative +from openvino.opset1.ops import non_max_suppression +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops import parameter +from openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset1.ops import proposal +from openvino.opset1.ops import range +from openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result +from openvino.opset1.ops import reverse_sequence +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset1.ops import shape_of +from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops 
import sqrt +from openvino.opset1.ops import squared_difference +from openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset1.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset1/ops.py b/runtime/bindings/python/src/openvino/opset1/ops.py new file mode 100644 index 00000000000..84b795ccd41 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset1/ops.py @@ -0,0 +1,2876 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all openvino ops.""" +from typing import Callable, Iterable, List, Optional, Set, Union + +import numpy as np +from functools import partial + +from openvino.impl import Node, PartialShape, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + + +_get_node_factory_opset1 = partial(_get_node_factory, "opset1") + +# 
-------------------------------------------- ops ------------------------------------------------ + + +@unary_op +def absolute(node: NodeInput, name: Optional[str] = None) -> Node: + """Return node which applies f(x) = abs(x) to the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with Abs operation applied on it. + """ + return _get_node_factory_opset1().create("Abs", [node]) + + +@unary_op +def acos(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply inverse cosine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arccos operation applied on it. + """ + return _get_node_factory_opset1().create("Acos", [node]) + + +@binary_op +def add( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which applies f(x) = A+B to the input nodes element-wise.""" + return _get_node_factory_opset1().create( + "Add", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@unary_op +def asin(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply inverse sine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arcsin operation applied on it. + """ + return _get_node_factory_opset1().create("Asin", [node]) + + +@unary_op +def atan(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply inverse tangent function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arctan operation applied on it. 
+ """ + return _get_node_factory_opset1().create("Atan", [node]) + + +@nameable_op +def avg_pool( + data_batch: NodeInput, + strides: List[int], + pads_begin: TensorShape, + pads_end: TensorShape, + kernel_shape: TensorShape, + exclude_pad: bool, + rounding_type: str = "floor", + auto_pad: Optional[str] = None, + name: Optional[str] = None, +) -> Node: + """Return average pooling node. + + @param data_batch: The input node providing data. + @param strides: The window movement strides. + @param pads_begin: The input data optional padding below filled with zeros. + @param pads_end: The input data optional padding below filled with zeros. + @param kernel_shape: The pooling window shape. + @param exclude_pad: Whether or not to include zero padding in average computations. + @param rounding_type: Determines used rounding schema when computing output shape. Acceptable + values are: ['floor', 'ceil'] + @param auto_pad: Determines how the padding is calculated. Acceptable values: + [None, 'same_upper', 'same_lower', 'valid'] + @param name: Optional name for the new output node. + + @return New node with AvgPool operation applied on its data. + """ + if auto_pad is None: + auto_pad = "explicit" + return _get_node_factory_opset1().create( + "AvgPool", + [as_node(data_batch)], + { + "strides": strides, + "pads_begin": pads_begin, + "pads_end": pads_end, + "kernel": kernel_shape, + "exclude-pad": exclude_pad, + "rounding_type": rounding_type.upper(), + "auto_pad": auto_pad.upper(), + }, + ) + + +@nameable_op +def batch_norm_inference( + data: NodeInput, + gamma: NodeInput, + beta: NodeInput, + mean: NodeInput, + variance: NodeInput, + epsilon: float, + name: Optional[str] = None, +) -> Node: + """Perform layer normalizes a input tensor by mean and variance with appling scale and offset. + + @param data: The input tensor with data for normalization. + @param gamma: The scalar scaling for normalized value. + @param beta: The bias added to the scaled normalized value. 
+ @param mean: The value for mean normalization. + @param variance: The value for variance normalization. + @param epsilon: The number to be added to the variance to avoid division + by zero when normalizing a value. + @param name: The optional name of the output node. + @return The new node which performs BatchNormInference. + """ + inputs = as_nodes(gamma, beta, data, mean, variance) + return _get_node_factory_opset1().create("BatchNormInference", inputs, {"epsilon": epsilon}) + + +@nameable_op +def binary_convolution( + data: NodeInput, + filters: NodeInput, + strides: List[int], + pads_begin: List[int], + pads_end: List[int], + dilations: List[int], + mode: str, + pad_value: float, + auto_pad: str = "EXPLICIT", + name: Optional[str] = None, +) -> Node: + """Create node performing convolution with binary weights, binary input and integer output. + + @param data: The node providing data batch tensor. + @param filter: The node providing filters tensor. + @param strides: The kernel window movement strides. + @param pads_begin: The number of pixels to add to the beginning along each axis. + @param pads_end: The number of pixels to add to the end along each axis. + @param dilations: The distance in width and height between elements (weights) in the filter. + @param mode: Defines how input tensor 0/1 values and weights 0/1 are interpreted. + @param pad_value: Floating-point value used to fill pad area. + @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. + @param name: The optional new name for output node. + @return New node performing binary convolution operation. 
+ """ + return _get_node_factory_opset1().create( + "BinaryConvolution", + as_nodes(data, filters), + { + "strides": strides, + "pads_begin": pads_begin, + "pads_end": pads_end, + "dilations": dilations, + "mode": mode, + "pad_value": pad_value, + "auto_pad": auto_pad, + }, + ) + + +@nameable_op +def broadcast( + data: NodeInput, + target_shape: NodeInput, + axes_mapping: Optional[NodeInput] = None, + mode: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Create a node which broadcasts the input node's values along specified axes to a desired shape. + + @param data: The node with input tensor data. + @param target_shape: The node with a new shape we want to broadcast tensor to. + @param axes_mapping: The node with a axis positions (0-based) in the result + that are being broadcast. + @param mode: The type of broadcasting that specifies mapping of input tensor axes + to output shape axes. Range of values: NUMPY, EXPLICIT. + @param name: Optional new name for output node. + @return New node with broadcast shape. + """ + inputs = as_nodes(data, target_shape) + if mode.upper() == "EXPLICIT": + inputs.append(as_node(axes_mapping)) + return _get_node_factory_opset1().create( + "Broadcast", inputs, {"mode": mode.upper()} + ) + + +@nameable_op +def ctc_greedy_decoder( + data: NodeInput, + sequence_mask: NodeInput, + merge_repeated: bool = True, + name: Optional[str] = None, +) -> Node: + """Perform greedy decoding on the logits given in input (best path). + + @param data: Logits on which greedy decoding is performed. + @param sequence_mask: The tensor with sequence masks for each sequence in the batch. + @param merge_repeated: The flag for merging repeated labels during the CTC calculation. + @param name: Optional name for output node. + @return The new node performing an CTCGreedyDecoder operation on input tensor. 
+ """ + node_inputs = as_nodes(data, sequence_mask) + return _get_node_factory_opset1().create( + "CTCGreedyDecoder", node_inputs, {"ctc_merge_repeated": merge_repeated} + ) + + +@unary_op +def ceiling(node: NodeInput, name: Optional[str] = None) -> Node: + """Return node which applies ceiling to the input node element-wise. + + @param node: The node providing data to ceiling operation. + @param name: Optional name for output node. + @return The node performing element-wise ceiling. + """ + return _get_node_factory_opset1().create("Ceiling", [node]) + + +@nameable_op +def clamp( + data: NodeInput, min_value: ScalarData, max_value: ScalarData, name: Optional[str] = None +) -> Node: + """Perform clamp element-wise on data from input node. + + @param data: Input tensor. One of: input node, array or scalar. + @param min_value: The lower bound of the range. Scalar value. + @param max_value: The upper bound of the range. Scalar value. + @param name: Optional output node name. + @return The new node performing a clamp operation on its input data element-wise. + + Performs a clipping operation on an input value between a pair of boundary values. + + For each element in `data`, if the element's value is lower than `min_value`, + it will be replaced with `min_value`. If the value is higher than `max_value`, + it will be replaced by `max_value`. + Intermediate values of `data` are returned without change. + + Clamp uses the following logic: + + @code{.py} + if data < min_value: + data=min_value + elif data > max_value: + data=max_value + @endcode + """ + return _get_node_factory_opset1().create( + "Clamp", [as_node(data)], {"min": min_value, "max": max_value} + ) + + +@nameable_op +def concat(nodes: List[NodeInput], axis: int, name: Optional[str] = None) -> Node: + """Concatenate input nodes into single new node along specified axis. + + @param nodes: The nodes we want concatenate into single new node. + @param axis: The axis along which we want to concatenate input nodes. 
+ @param name: The optional new name for output node. + @return Return new node that is a concatenation of input nodes. + """ + return _get_node_factory_opset1().create("Concat", as_nodes(*nodes), {"axis": axis}) + + +@nameable_op +def constant(value: NumericData, dtype: NumericType = None, name: Optional[str] = None) -> Constant: + """Create a Constant node from provided value. + + @param value: One of: array of values or scalar to initialize node with. + @param dtype: The data type of provided data. + @param name: Optional name for output node. + @return The Constant node initialized with provided data. + """ + return make_constant_node(value, dtype) + + +@nameable_op +def convert( + data: NodeInput, destination_type: Union[str, NumericType], name: Optional[str] = None +) -> Node: + """Return node which casts input node values to specified type. + + @param data: Node which produces the input tensor. + @param destination_type: Provides the target type for the conversion. + @param name: Optional name for the output node. + @return New node performing the conversion operation. + """ + if not isinstance(destination_type, str): + destination_type = get_element_type_str(destination_type) + return _get_node_factory_opset1().create( + "Convert", [as_node(data)], {"destination_type": destination_type.lower()} + ) + + +@binary_op +def convert_like(data: NodeInput, like: NodeInput, name: Optional[str] = None) -> Node: + """Return node which casts data node values to the type of another node. + + @param data: Node which produces the input tensor + @param like: Node which provides the target type information for the conversion + @param name: Optional name for the output node. + @return New node performing the conversion operation. 
+ """ + return _get_node_factory_opset1().create("ConvertLike", [data, like]) + + +@nameable_op +def convolution( + data: NodeInput, + filters: NodeInput, + strides: List[int], + pads_begin: List[int], + pads_end: List[int], + dilations: List[int], + auto_pad: str = "EXPLICIT", + name: Optional[str] = None, +) -> Node: + """Return node performing batched convolution operation. + + @param data: The node providing data batch tensor. + @param filter: The node providing filters tensor. + @param strides: The kernel window movement strides. + @param pads_begin: The number of zero padding elements to add on each axis below 0 coordinate. + @param pads_end: The number of zero padding elements to add on each axis above max coordinate + @param dilations: The data batch dilation strides. + @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. + @param name: The optional new name for output node. + @return New node performing batched convolution operation. + """ + return _get_node_factory_opset1().create( + "Convolution", + as_nodes(data, filters), + { + "strides": strides, + "pads_begin": pads_begin, + "pads_end": pads_end, + "dilations": dilations, + "auto_pad": auto_pad, + }, + ) + + +@nameable_op +def convolution_backprop_data( + data: NodeInput, + filters: NodeInput, + strides: List[int], + output_shape: Optional[NodeInput] = None, + pads_begin: Optional[List[int]] = None, + pads_end: Optional[List[int]] = None, + dilations: Optional[List[int]] = None, + auto_pad: Optional[str] = None, + output_padding: Optional[List[int]] = None, + name: Optional[str] = None, +) -> Node: + """Create node performing a batched-convolution backprop data operation. + + @param data: The node producing data from forward-prop + @param filters: The node producing the filters from forward-prop. + @param output_shape: The node producing output delta. + @param strides: The distance (in pixels) to slide the filter on the feature map + over the axes. 
+ @param pads_begin: The number of pixels to add to the beginning along each axis. + @param pads_end: The number of pixels to add to the end along each axis. + @param dilations: The distance in width and height between elements (weights) + in the filter. + @param name: The node name. + + @return The node object representing ConvolutionBackpropData operation. + """ + spatial_dim_count = len(strides) + if pads_begin is None: + pads_begin = [0] * spatial_dim_count + if pads_end is None: + pads_end = [0] * spatial_dim_count + if dilations is None: + dilations = [1] * spatial_dim_count + if auto_pad is None: + auto_pad = "explicit" + if output_padding is None: + output_padding = [0] * spatial_dim_count + args = as_nodes(data, filters) + if output_shape is not None: + args.append(as_node(output_shape)) + + return _get_node_factory_opset1().create( + "ConvolutionBackpropData", + args, + { + "strides": strides, + "pads_begin": pads_begin, + "pads_end": pads_end, + "dilations": dilations, + "auto_pad": auto_pad.upper(), + "output_padding": output_padding, + }, + ) + + +@unary_op +def cos(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply cosine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with cos operation applied on it. + """ + return _get_node_factory_opset1().create("Cos", [node]) + + +@unary_op +def cosh(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply hyperbolic cosine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with cosh operation applied on it. 
+ """ + return _get_node_factory_opset1().create("Cosh", [node]) + + +@nameable_op +def deformable_convolution( + data: NodeInput, + deformable_values: NodeInput, + filters: NodeInput, + strides: List[int], + pads_begin: List[int], + pads_end: List[int], + dilations: List[int], + auto_pad: str = "EXPLICIT", + group: int = 1, + deformable_group: int = 1, + name: Optional[str] = None, +) -> Node: + """Create node performing deformable convolution. + + @param data: The node providing data batch tensor. + @param filter: The node providing filters tensor. + @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. + @param pads_begin: The number of pixels to add to the beginning along each axis. + @param pads_end: The number of pixels to add to the end along each axis. + @param dilations: The distance in width and height between elements (weights) in the filter. + @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. + @param group: The number of groups which both output and input should be split into. + @param deformable_group: The number of groups which deformable values and output should be split + into along the channel axis. + @param name: The optional new name for output node. + @return New node performing deformable convolution operation. 
+ """ + return _get_node_factory_opset1().create( + "DeformableConvolution", + as_nodes(data, deformable_values, filters), + { + "strides": strides, + "pads_begin": pads_begin, + "pads_end": pads_end, + "dilations": dilations, + "auto_pad": auto_pad, + "group": group, + "deformable_group": deformable_group, + }, + ) + + +@nameable_op +def deformable_psroi_pooling( + feature_maps: NodeInput, + coords: NodeInput, + output_dim: int, + spatial_scale: float, + group_size: int = 1, + mode: str = "bilinear_deformable", + spatial_bins_x: int = 1, + spatial_bins_y: int = 1, + trans_std: float = 1.0, + part_size: int = 1, + offsets: Optional[NodeInput] = None, + name: Optional[str] = None, +) -> Node: + """Return node performing DeformablePSROIPooling operation. + + DeformablePSROIPooling computes position-sensitive pooling + on regions of interest specified by input. + + @param feature_maps: 4D tensor with feature maps. + @param coords: 2D tensor describing box consisting of tuples: [batch_id, x_1, y_1, x_2, y_2]. + @param output_dim: A pooled output channel number. + @param spatial_scale: A multiplicative spatial scale factor to translate ROI. + @param group_size: The number of groups to encode position-sensitive score. + @param mode: Specifies mode for pooling. Range of values: ['bilinear_deformable']. + @param spatial_bins_x: Specifies numbers of bins to divide the input feature maps over width. + @param spatial_bins_y: Specifies numbers of bins to divide the input feature maps over height. + @param trans_std: The value that all transformation (offset) values are multiplied with. + @param part_size: The number of parts the output tensor spatial dimensions are divided into. + @param offsets: Optional node. 4D input blob with transformation values (offsets). + @param name: The optional new name for output node. + @return New node performing DeformablePSROIPooling operation. 
+ """ + node_inputs = as_nodes(feature_maps, coords) + if offsets is not None: + node_inputs.append(as_node(offsets)) + + return _get_node_factory_opset1().create( + "DeformablePSROIPooling", + node_inputs, + { + "output_dim": output_dim, + "spatial_scale": spatial_scale, + "group_size": group_size, + "mode": mode, + "spatial_bins_x": spatial_bins_x, + "spatial_bins_y": spatial_bins_y, + "trans_std": trans_std, + "part_size": part_size, + }, + ) + + +@nameable_op +def depth_to_space(node: Node, mode: str, block_size: int = 1, name: str = None) -> Node: + """Rearranges input tensor from depth into blocks of spatial data. + + Values from the height and width dimensions are moved to the depth dimension. + + Input tensor has shape [N,C,H,W], where N is the batch axis, C is the channel or depth, + H is the height and W is the width. + + Output node produces a tensor with shape: + + [N, C * `block_size` * `block_size`, H / `block_size`, W / `block_size`] + + @param node: The node with input tensor data. + @param mode: Specifies how the input depth dimension is split to block coordinates + + blocks_first: The input is divided to [block_size, ..., block_size, new_depth] + depth_first: The input is divided to [new_depth, block_size, ..., block_size] + + @param block_size: The size of the spatial block of values describing + how the tensor's data is to be rearranged. + @param name: Optional output node name. + @return The new node performing an DepthToSpace operation on its input tensor. + """ + return _get_node_factory_opset1().create( + "DepthToSpace", [node], {"mode": mode, "block_size": block_size}, + ) + + +@nameable_op +def detection_output( + box_logits: Node, + class_preds: Node, + proposals: Node, + attrs: dict, + aux_class_preds: NodeInput = None, + aux_box_preds: NodeInput = None, + name: Optional[str] = None, +) -> Node: + """Generate the detection output using information on location and confidence predictions. 
+ + @param box_logits: The 2D input tensor with box logits. + @param class_preds: The 2D input tensor with class predictions. + @param proposals: The 3D input tensor with proposals. + @param attrs: The dictionary containing key, value pairs for attributes. + @param aux_class_preds: The 2D input tensor with additional class predictions information. + @param aux_box_preds: The 2D input tensor with additional box predictions information. + @param name: Optional name for the output node. + @return Node representing DetectionOutput operation. + + Available attributes are: + + * num_classes The number of classes to be predicted. + Range of values: positive integer number + Default value: None + Required: yes + + * background_label_id The background label id. + Range of values: integer value + Default value: 0 + Required: no + + * top_k Maximum number of results to be kept per batch after NMS step. + Range of values: integer value + Default value: -1 + Required: no + + * variance_encoded_in_target The flag that denotes if variance is encoded in target. + Range of values: {False, True} + Default value: False + Required: no + + * keep_top_k Maximum number of bounding boxes per batch to be kept after NMS step. + Range of values: integer values + Default value: None + Required: yes + + * code_type The type of coding method for bounding boxes. + Range of values: {'caffe.PriorBoxParameter.CENTER_SIZE', + 'caffe.PriorBoxParameter.CORNER'} + Default value: 'caffe.PriorBoxParameter.CORNER' + Required: no + + * share_location The flag that denotes if bounding boxes are shared among different + classes. + Range of values: {True, False} + Default value: True + Required: no + + * nms_threshold The threshold to be used in the NMS stage. + Range of values: floating point value + Default value: None + Required: yes + + * confidence_threshold Specifies the minimum confidence threshold for detection boxes to be + considered. 
+ Range of values: floating point value + Default value: 0 + Required: no + + * clip_after_nms The flag that denotes whether to perform clip bounding boxes after + non-maximum suppression or not. + Range of values: {True, False} + Default value: False + Required: no + + * clip_before_nms The flag that denotes whether to perform clip bounding boxes before + non-maximum suppression or not. + Range of values: {True, False} + Default value: False + Required: no + + * decrease_label_id The flag that denotes how to perform NMS. + Range of values: False - perform NMS like in Caffe*. + True - perform NMS like in MxNet*. + + Default value: False + Required: no + + * normalized The flag that denotes whether input tensors with boxes are normalized. + Range of values: {True, False} + Default value: False + Required: no + + * input_height The input image height. + Range of values: positive integer number + Default value: 1 + Required: no + + * input_width The input image width. + Range of values: positive integer number + Default value: 1 + Required: no + + * objectness_score The threshold to sort out confidence predictions. + Range of values: non-negative float number + Default value: 0 + Required: no + + Example of attribute dictionary: + @code{.py} + # just required ones + attrs = { + 'num_classes': 85, + 'keep_top_k': [1, 2, 3], + 'nms_threshold': 0.645, + } + + attrs = { + 'num_classes': 85, + 'keep_top_k': [1, 2, 3], + 'nms_threshold': 0.645, + 'normalized': True, + 'clip_before_nms': True, + 'input_height': [32], + 'input_width': [32], + } + @endcode + + Optional attributes which are absent from dictionary will be set with corresponding default. 
+ """ + requirements = [ + ("num_classes", True, np.integer, is_positive_value), + ("background_label_id", False, np.integer, None), + ("top_k", False, np.integer, None), + ("variance_encoded_in_target", False, np.bool_, None), + ("keep_top_k", True, np.integer, None), + ("code_type", False, np.str_, None), + ("share_location", False, np.bool_, None), + ("nms_threshold", True, np.floating, None), + ("confidence_threshold", False, np.floating, None), + ("clip_after_nms", False, np.bool_, None), + ("clip_before_nms", False, np.bool_, None), + ("decrease_label_id", False, np.bool_, None), + ("normalized", False, np.bool_, None), + ("input_height", False, np.integer, is_positive_value), + ("input_width", False, np.integer, is_positive_value), + ("objectness_score", False, np.floating, is_non_negative_value), + ] + + check_valid_attributes("DetectionOutput", attrs, requirements) + + inputs = [box_logits, class_preds, proposals] + if aux_class_preds is not None: + inputs.append(aux_class_preds) + if aux_box_preds is not None: + inputs.append(aux_box_preds) + + return _get_node_factory_opset1().create("DetectionOutput", inputs, attrs) + + +@binary_op +def divide( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which applies f(x) = A/B to the input nodes element-wise. + + @param left_node: The node providing dividend data. + @param right_node: The node providing divisor data. + @param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. + @param name: Optional name for output node. + @return The node performing element-wise division. + """ + return _get_node_factory_opset1().create( + "Divide", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@nameable_op +def elu(data: NodeInput, alpha: NumericType, name: Optional[str] = None) -> Node: + """Perform Exponential Linear Unit operation element-wise on data from input node. 
Computes exponential linear: alpha * (exp(data) - 1) if data < 0, data otherwise.
+ @return The new node performing natural exponential operation. + """ + return _get_node_factory_opset1().create("Exp", [node]) + + +@nameable_op +def fake_quantize( + data: NodeInput, + input_low: NodeInput, + input_high: NodeInput, + output_low: NodeInput, + output_high: NodeInput, + levels: int, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + r"""Perform an element-wise linear quantization on input data. + + @param data: The node with data tensor. + @param input_low: The node with the minimum for input values. + @param input_high: The node with the maximum for input values. + @param output_low: The node with the minimum quantized value. + @param output_high: The node with the maximum quantized value. + @param levels: The number of quantization levels. Integer value. + @param auto_broadcast: The type of broadcasting specifies rules used for + auto-broadcasting of input tensors. + @return New node with quantized value. + + Input floating point values are quantized into a discrete set of floating point values. + + @code{.py} + if x <= input_low: + output = output_low + if x > input_high: + output = output_high + else: + output = fake_quantize(output) + @endcode + + Fake quantize uses the following logic: + + \f[ output = + \dfrac{round( \dfrac{data - input\_low}{(input\_high - input\_low)\cdot (levels-1)})} + {(levels-1)\cdot (output\_high - output\_low)} + output\_low + \f] + """ + return _get_node_factory_opset1().create( + "FakeQuantize", + as_nodes(data, input_low, input_high, output_low, output_high), + {"levels": levels, "auto_broadcast": auto_broadcast.upper()}, + ) + + +@unary_op +def floor(node: NodeInput, name: Optional[str] = None) -> Node: + """Return node which applies floor to the input node element-wise. + + @param node: The input node providing data. + @param name: The optional name for new output node. + @return The node performing element-wise floor operation. 
+ """ + return _get_node_factory_opset1().create("Floor", [node]) + + +@binary_op +def floor_mod( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node performing element-wise FloorMod (division reminder) with two given tensors. + + @param left_node: The first input node for FloorMod operation. + @param right_node: The second input node for FloorMod operation. + @param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors. + @param name: Optional name for output node. + @return The node performing element-wise FloorMod operation. + """ + return _get_node_factory_opset1().create( + "FloorMod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@nameable_op +def gather( + data: NodeInput, indices: NodeInput, axis: NodeInput, name: Optional[str] = None +) -> Node: + """Return Gather node which takes slices from axis of data according to indices. + + @param data: The tensor from which slices are gathered. + @param indices: Tensor with indexes to gather. + @param axis: The dimension index to gather data from. + @param name: Optional name for output node. + @return The new node performing a Gather operation on the data input tensor. + """ + node_inputs = as_nodes(data, indices, axis) + return _get_node_factory_opset1().create("Gather", node_inputs) + + +@nameable_op +def gather_tree( + step_ids: NodeInput, + parent_idx: NodeInput, + max_seq_len: NodeInput, + end_token: NodeInput, + name: Optional[str] = None, +) -> Node: + """Perform GatherTree operation. + + @param step_ids: The tensor with indices from per each step. + @param parent_idx: The tensor with with parent beam indices. + @param max_seq_len: The tensor with maximum lengths for each sequence in the batch. + @param end_token: The scalar tensor with value of the end marker in a sequence. + @param name: Optional name for output node. 
+ @return The new node performing a GatherTree operation. + + The GatherTree node generates the complete beams from the indices per each step + and the parent beam indices. + GatherTree uses the following logic: + + @code{.py} + for batch in range(BATCH_SIZE): + for beam in range(BEAM_WIDTH): + max_sequence_in_beam = min(MAX_TIME, max_seq_len[batch]) + + parent = parent_idx[max_sequence_in_beam - 1, batch, beam] + + for level in reversed(range(max_sequence_in_beam - 1)): + final_idx[level, batch, beam] = step_idx[level, batch, parent] + + parent = parent_idx[level, batch, parent] + @endcode + """ + node_inputs = as_nodes(step_ids, parent_idx, max_seq_len, end_token) + return _get_node_factory_opset1().create("GatherTree", node_inputs) + + +@binary_op +def greater( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which checks if left input node is greater than the right node element-wise. + + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting specifies rules used for + auto-broadcasting of input tensors. + @param name: The optional new name for output node. + @return The node performing element-wise check whether left_node is greater than right_node. + """ + return _get_node_factory_opset1().create( + "Greater", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@binary_op +def greater_equal( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which checks if left node is greater or equal to the right node element-wise. + + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting specifies rules used for + auto-broadcasting of input tensors. 
+ @param name: The optional new name for output node.
+ @return The node performing element-wise check whether left_node is greater than or equal
+ right_node.
+ """
+ return _get_node_factory_opset1().create(
+ "GreaterEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
+ )
+
+
+def grn(data: Node, bias: float, name: Optional[str] = None) -> Node:
+ r"""Perform Global Response Normalization with L2 norm (across channels only).
+
+ Computes GRN operation on channels for input tensor:
+
+ \f[ output_i = \dfrac{input_i}{\sqrt{\sum_{i}^{C} input_i^{2} + bias}} \f]
+
+ @param data: The node with data tensor.
+ @param bias: The bias added to the variance. Scalar value.
+ @param name: Optional output node name.
+ @return The new node performing a GRN operation on tensor's channels.
+ """
+ return _get_node_factory_opset1().create("GRN", [data], {"bias": bias})
+
+
+@nameable_op
+def group_convolution(
+ data: NodeInput,
+ filters: NodeInput,
+ strides: List[int],
+ pads_begin: List[int],
+ pads_end: List[int],
+ dilations: List[int],
+ auto_pad: str = "EXPLICIT",
+ name: Optional[str] = None,
+) -> Node:
+ """Perform Group Convolution operation on data from input node.
+
+ @param data: The node producing input data.
+ @param filters: The node producing filters data.
+ @param strides: The distance (in pixels) to slide the filter on the feature map
+ over the axes.
+ @param pads_begin: The number of pixels to add at the beginning along each axis.
+ @param pads_end: The number of pixels to add at the end along each axis.
+ @param dilations: The distance in width and height between elements (weights) in the filter.
+ @param auto_pad: Describes how to perform padding.
Possible values:
+ EXPLICIT: Pad dimensions are explicitly specified
+ SAME_LOWER: Pad dimensions computed to match input shape
+ Ceil(num_dims/2) at the beginning and
+ Floor(num_dims/2) at the end
+ SAME_UPPER: Pad dimensions computed to match input shape
+ Floor(num_dims/2) at the beginning and
+ Ceil(num_dims/2) at the end
+ VALID: No padding
+ @param name: Optional output node name.
+ @return The new node performing a Group Convolution operation on tensor from input node.
+ """
+ return _get_node_factory_opset1().create(
+ "GroupConvolution",
+ as_nodes(data, filters),
+ {
+ "strides": strides,
+ "pads_begin": pads_begin,
+ "pads_end": pads_end,
+ "dilations": dilations,
+ "auto_pad": auto_pad.upper(),
+ },
+ )
+
+
+@nameable_op
+def group_convolution_backprop_data(
+ data: NodeInput,
+ filters: NodeInput,
+ strides: List[int],
+ output_shape: Optional[NodeInput] = None,
+ pads_begin: Optional[List[int]] = None,
+ pads_end: Optional[List[int]] = None,
+ dilations: Optional[List[int]] = None,
+ auto_pad: str = "EXPLICIT",
+ output_padding: Optional[List[int]] = None,
+ name: Optional[str] = None,
+) -> Node:
+ """Perform Group Convolution Backprop Data operation on data from input node.
+
+ @param data: The node producing input data.
+ @param filters: The node producing filter data.
+ @param strides: The distance (in pixels) to slide the filter on the feature map
+ over the axes.
+ @param output_shape: The node that specifies spatial shape of the output.
+ @param pads_begin: The number of pixels to add at the beginning along each axis.
+ @param pads_end: The number of pixels to add at the end along each axis.
+ @param dilations: The distance in width and height between elements (weights)
+ in the filter.
+ @param auto_pad: Describes how to perform padding.
Possible values:
+ EXPLICIT: Pad dimensions are explicitly specified
+ SAME_LOWER: Pad dimensions computed to match input shape
+ Ceil(num_dims/2) at the beginning and
+ Floor(num_dims/2) at the end
+ SAME_UPPER: Pad dimensions computed to match input shape
+ Floor(num_dims/2) at the beginning and
+ Ceil(num_dims/2) at the end
+ VALID: No padding
+ @param output_padding: The additional amount of paddings added per each spatial axis
+ in the output tensor.
+ @param name: Optional output node name.
+ @return The new node performing a Group Convolution Backprop Data operation on tensor from input node.
+ """
+ spatial_dim_count = len(strides)
+ if dilations is None:
+ dilations = [1] * spatial_dim_count
+ if output_padding is None:
+ output_padding = [0] * spatial_dim_count
+
+ attributes = {
+ "strides": strides,
+ "dilations": dilations,
+ "auto_pad": auto_pad.upper(),
+ "output_padding": output_padding,
+ }
+ args = as_nodes(data, filters)
+
+ if output_shape is not None:
+ args.append(as_node(output_shape))
+ else:
+ if pads_begin is None:
+ pads_begin = [0] * spatial_dim_count
+ if pads_end is None:
+ pads_end = [0] * spatial_dim_count
+ attributes["pads_begin"] = pads_begin
+ attributes["pads_end"] = pads_end
+
+ return _get_node_factory_opset1().create("GroupConvolutionBackpropData", args, attributes)
+
+
+@nameable_op
+def hard_sigmoid(data: Node, alpha: NodeInput, beta: NodeInput, name: Optional[str] = None) -> Node:
+ """Perform Hard Sigmoid operation element-wise on data from input node.
+
+ @param data: The node with data tensor.
+ @param alpha: A node producing the alpha parameter.
+ @param beta: A node producing the beta parameter.
+ @param name: Optional output node name.
+ @return The new node performing a Hard Sigmoid element-wise on input tensor.
+ + Hard Sigmoid uses the following logic: + + @code{.py} + y = max(0, min(1, alpha * data + beta)) + @endcode + """ + return _get_node_factory_opset1().create("HardSigmoid", [data, as_node(alpha), as_node(beta)]) + + +@nameable_op +def interpolate( + image: Node, output_shape: NodeInput, attrs: dict, name: Optional[str] = None +) -> Node: + """Perform interpolation of independent slices in input tensor. + + @param image: The node providing input tensor with data for interpolation. + @param output_shape: 1D tensor describing output shape for spatial axes. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. + @return Node representing interpolation operation. + + Available attributes are: + + * axes Specify spatial dimension indices where interpolation is applied. + Type: List of non-negative integer numbers. + Required: yes. + + * mode Specifies type of interpolation. + Range of values: one of {nearest, linear, cubic, area} + Type: string + Required: yes + + * align_corners A flag that specifies whether to align corners or not. True means the + alignment is applied, False means the alignment isn't applied. + Range of values: True or False. Default: True. + Required: no + + * antialias A flag that specifies whether to perform anti-aliasing. + Range of values: False - do not perform anti-aliasing + True - perform anti-aliasing + Default value: False + Required: no + + * pads_begin Specify the number of pixels to add to the beginning of the image being + interpolated. A scalar that specifies padding for each spatial dimension. + Range of values: list of non-negative integer numbers. Default value: 0 + Required: no + + * pads_end Specify the number of pixels to add to the beginning of the image being + interpolated. A scalar that specifies padding for each spatial dimension. + Range of values: list of non-negative integer numbers. 
Default value: 0 + Required: no + + Example of attribute dictionary: + @code{.py} + # just required ones + attrs = { + 'axes': [2, 3], + 'mode': 'cubic', + } + + attrs = { + 'axes': [2, 3], + 'mode': 'cubic', + 'antialias': True, + 'pads_begin': [2, 2, 2], + } + @endcode + Optional attributes which are absent from dictionary will be set with corresponding default. + """ + requirements = [ + ("axes", True, np.integer, is_non_negative_value), + ("mode", True, np.str_, None), + ("align_corners", False, np.bool_, None), + ("antialias", False, np.bool_, None), + ("pads_begin", False, np.integer, is_non_negative_value), + ("pads_end", False, np.integer, is_non_negative_value), + ] + + check_valid_attributes("Interpolate", attrs, requirements) + + return _get_node_factory_opset1().create("Interpolate", [image, as_node(output_shape)], attrs) + + +@binary_op +def less( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which checks if left input node is less than the right node element-wise. + + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting specifies rules used for + auto-broadcasting of input tensors. + @param name: The optional new name for output node. + @return The node performing element-wise check whether left_node is less than the right_node. + """ + return _get_node_factory_opset1().create( + "Less", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@binary_op +def less_equal( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which checks if left input node is less or equal the right node element-wise. + + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. 
+ @param auto_broadcast: The type of broadcasting specifies rules used for + auto-broadcasting of input tensors. + @param name: The optional new name for output node. + @return The node performing element-wise check whether left_node is less than or equal the + right_node. + """ + return _get_node_factory_opset1().create( + "LessEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@unary_op +def log(node: NodeInput, name: Optional[str] = None) -> Node: + """Return node which applies natural logarithm to the input node element-wise. + + @param node: The input node providing data for operation. + @param name: The optional new name for output node. + @return The new node performing log operation element-wise. + """ + return _get_node_factory_opset1().create("Log", [node]) + + +@binary_op +def logical_and( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which perform logical and operation on input nodes element-wise. + + @param left_node: The first input node providing data. + @param right_node: The second input node providing data. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + to output shape axes. Range of values: numpy, explicit. + @param name: The optional new name for output node. + @return The node performing logical and operation on input nodes corresponding elements. + """ + return _get_node_factory_opset1().create( + "LogicalAnd", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@unary_op +def logical_not(node: NodeInput, name: Optional[str] = None) -> Node: + """Return node which applies element-wise logical negation to the input node. + + @param node: The input node providing data. + @param name: The optional new name for output node. + @return The node performing element-wise logical NOT operation with given tensor. 
+ """
+ return _get_node_factory_opset1().create("LogicalNot", [node])
+
+
+@binary_op
+def logical_or(
+ left_node: NodeInput,
+ right_node: NodeInput,
+ auto_broadcast: str = "NUMPY",
+ name: Optional[str] = None,
+) -> Node:
+ """Return node which performs logical OR operation on input nodes element-wise.
+
+ @param left_node: The first input node providing data.
+ @param right_node: The second input node providing data.
+ @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
+ to output shape axes. Range of values: numpy, explicit.
+ @param name: The optional new name for output node.
+ @return The node performing logical or operation on input nodes corresponding elements.
+ """
+ return _get_node_factory_opset1().create(
+ "LogicalOr", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
+ )
+
+
+@binary_op
+def logical_xor(
+ left_node: NodeInput,
+ right_node: NodeInput,
+ auto_broadcast: str = "NUMPY",
+ name: Optional[str] = None,
+) -> Node:
+ """Return node which performs logical XOR operation on input nodes element-wise.
+
+ @param left_node: The first input node providing data.
+ @param right_node: The second input node providing data.
+ @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes
+ to output shape axes. Range of values: numpy, explicit.
+ @param name: The optional new name for output node.
+ @return The node performing logical xor operation on input nodes corresponding elements.
+ """
+ return _get_node_factory_opset1().create(
+ "LogicalXor", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
+ )
+
+
+@nameable_op
+def lrn(
+ data: NodeInput,
+ axes: NodeInput,
+ alpha: float = 1,
+ beta: float = 0.5,
+ bias: float = 1,
+ size: int = 5,
+ name: Optional[str] = None,
+) -> Node:
+ """Return a node which performs element-wise Local Response Normalization (LRN) operation.
+
+ @param data: Input data.
+ @param alpha: A scale factor (usually positive). + @param beta: An exponent. + @param bias: An offset (usually positive) to avoid dividing by 0. + @param size: Width of the 1-D normalization window. + @param name: An optional name of the output node. + @return The new node which performs LRN. + """ + attributes = {"alpha": alpha, "beta": beta, "bias": bias, "size": size} + return _get_node_factory_opset1().create("LRN", as_nodes(data, axes), attributes) + + +@nameable_op +def lstm_cell( + X: NodeInput, + initial_hidden_state: NodeInput, + initial_cell_state: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + activations: List[str] = None, + activations_alpha: List[float] = None, + activations_beta: List[float] = None, + clip: float = 0.0, + name: Optional[str] = None, +) -> Node: + """Return a node which performs LSTMCell operation. + + @param X: The input tensor with shape: [batch_size, input_size]. + @param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. + @param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. + @param W: The weight tensor with shape: [4*hidden_size, input_size]. + @param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. + @param B: The bias tensor for gates with shape: [4*hidden_size]. + @param hidden_size: Specifies hidden state size. + @param activations: The list of three activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. + + @return The new node represents LSTMCell. Node outputs count: 2. 
+ """ + if activations is None: + activations = ["sigmoid", "tanh", "tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B) + + # P - nGraph additional input, no such input in the OV spec + peepholes_count = 3 # nGraph default + peepholes_shape = [peepholes_count * hidden_size] + peepholes_array = np.zeros(peepholes_shape) # nGraph default + data_dtype = get_dtype(node_inputs[0].get_output_element_type(0)) + default_P = make_constant_node(peepholes_array, dtype=data_dtype) + node_inputs.append(default_P) + + weights_format = "fico" # IE LSTMWeightsFormat, no such attribute in the OV spec + input_forget = False # nGraph default, no such attribute in the OV spec + + attributes = { + "hidden_size": hidden_size, + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "clip": clip, + "weights_format": weights_format, + "input_forget": input_forget, + } + return _get_node_factory_opset1().create("LSTMCell", node_inputs, attributes) + + +@nameable_op +def lstm_sequence( + X: NodeInput, + initial_hidden_state: NodeInput, + initial_cell_state: NodeInput, + sequence_lengths: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + direction: str, + activations: List[str] = None, + activations_alpha: List[float] = None, + activations_beta: List[float] = None, + clip: float = 0.0, + name: Optional[str] = None, +) -> Node: + """Return a node which performs LSTMSequence operation. + + @param X: The input tensor. Shape: [batch_size, seq_length, input_size]. + @param initial_hidden_state: The hidden state tensor. + Shape: [batch_size, num_directions, hidden_size]. + @param initial_cell_state: The cell state tensor. + Shape: [batch_size, num_directions, hidden_size]. + @param sequence_lengths: Specifies real sequence lengths for each batch element. 
+ Shape: [batch_size]. Integer type. + @param W: Tensor with weights for matrix multiplication operation with input portion of data. + Shape: [num_directions, 4*hidden_size, input_size]. + @param R: The tensor with weights for matrix multiplication operation with hidden state. + Shape: [num_directions, 4*hidden_size, hidden_size]. + @param B: The tensor with biases. + Shape: [num_directions, 4*hidden_size]. + @param hidden_size: Specifies hidden state size. + @param direction: Specifies if the RNN is forward, reverse, or bidirectional. + @param activations: The list of three activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. + + @return The new node represents LSTMSequence. Node outputs count: 3. + """ + if activations is None: + activations = ["sigmoid", "tanh", "tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B) + + # P - nGraph additional input, no such input in the OV spec + peepholes_count = 3 # nGraph default + if direction.lower() == "bidirectional": + num_directions = 2 + else: + num_directions = 1 + peepholes_shape = [num_directions, peepholes_count * hidden_size] + peepholes_array = np.zeros(peepholes_shape) # nGraph default + data_dtype = get_dtype(node_inputs[0].get_output_element_type(0)) + default_P = make_constant_node(peepholes_array, dtype=data_dtype) + node_inputs.append(default_P) + + weights_format = "fico" # IE LSTMWeightsFormat, no such attribute in the OV spec + input_forget = False # nGraph default, no such attribute in the OV spec + + attributes = { + "hidden_size": hidden_size, + 
"direction": direction.lower(), + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "clip": clip, + "weights_format": weights_format, + "input_forget": input_forget, + } + return _get_node_factory_opset1().create("LSTMSequence", node_inputs, attributes) + + +@nameable_op +def matmul( + data_a: NodeInput, + data_b: NodeInput, + transpose_a: bool, + transpose_b: bool, + name: Optional[str] = None, +) -> Node: + """Return the Matrix Multiplication operation. + + @param data_a: left-hand side matrix + @param data_b: right-hand side matrix + @param transpose_a: should the first matrix be transposed before operation + @param transpose_b: should the second matrix be transposed + @return MatMul operation node + """ + print("transpose_a", transpose_a, "transpose_b", transpose_b) + return _get_node_factory_opset1().create( + "MatMul", as_nodes(data_a, data_b), {"transpose_a": transpose_a, "transpose_b": transpose_b} + ) + + +@nameable_op +def max_pool( + data: NodeInput, + strides: List[int], + pads_begin: List[int], + pads_end: List[int], + kernel_shape: TensorShape, + rounding_type: str = "floor", + auto_pad: Optional[str] = None, + name: Optional[str] = None, +) -> Node: + """Perform max pooling operation with given parameters on provided data. + + @param data: The node providing input data. + @param strides: The distance (in pixels) to slide the filter on the feature map + over the axes. + @param pads_begin: The number of pixels to add at the beginning along each axis. + @param pads_end: The number of pixels to add at the end along each axis. + @param kernel_shape: The pooling operation kernel shape. + @param rounding_type: Determines used rounding schema when computing output shape. Acceptable + values are: ['floor', 'ceil'] + @param auto_pad: Determines how the padding is calculated. Acceptable values: + [None, 'same_upper', 'same_lower', 'valid'] + @param name: The optional name for the created output node. 
+
+ @return The new node performing max pooling operation.
+ """
+ if auto_pad is None:
+ auto_pad = "explicit"
+ return _get_node_factory_opset1().create(
+ "MaxPool",
+ [as_node(data)],
+ {
+ "strides": strides,
+ "pads_begin": pads_begin,
+ "pads_end": pads_end,
+ "kernel": kernel_shape,
+ "rounding_type": rounding_type.upper(),
+ "auto_pad": auto_pad.upper(),
+ },
+ )
+
+
+@binary_op
+def maximum(
+ left_node: NodeInput,
+ right_node: NodeInput,
+ auto_broadcast: str = "NUMPY",
+ name: Optional[str] = None,
+) -> Node:
+ """Return node which applies the maximum operation to input nodes elementwise."""
+ return _get_node_factory_opset1().create(
+ "Maximum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
+ )
+
+
+@binary_op
+def minimum(
+ left_node: NodeInput,
+ right_node: NodeInput,
+ auto_broadcast: str = "NUMPY",
+ name: Optional[str] = None,
+) -> Node:
+ """Return node which applies the minimum operation to input nodes elementwise."""
+ return _get_node_factory_opset1().create(
+ "Minimum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
+ )
+
+
+@binary_op
+def mod(
+ left_node: NodeInput,
+ right_node: NodeInput,
+ auto_broadcast: str = "NUMPY",
+ name: Optional[str] = None,
+) -> Node:
+ """Return node performing element-wise division remainder with two given tensors.
+
+ @param left_node: The first input node for mod operation.
+ @param right_node: The second input node for mod operation.
+ @param auto_broadcast: Specifies rules used for auto-broadcasting of input tensors.
+ @param name: Optional name for output node.
+ @return The node performing element-wise Mod operation.
+ """
+ return _get_node_factory_opset1().create(
+ "Mod", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
+ )
+
+
+@binary_op
+def multiply(
+ left_node: NodeInput,
+ right_node: NodeInput,
+ auto_broadcast: str = "NUMPY",
+ name: Optional[str] = None,
+) -> Node:
+ """Return node which applies f(x) = A*B to the input nodes elementwise."""
+ return _get_node_factory_opset1().create(
+ "Multiply", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
+ )
+
+
+@unary_op
+def negative(node: NodeInput, name: Optional[str] = None) -> Node:
+ """Return node which applies f(x) = -x to the input node elementwise."""
+ return _get_node_factory_opset1().create("Negative", [node])
+
+
+@nameable_op
+def non_max_suppression(
+ boxes: NodeInput,
+ scores: NodeInput,
+ max_output_boxes_per_class: Optional[NodeInput] = None,
+ iou_threshold: Optional[NodeInput] = None,
+ score_threshold: Optional[NodeInput] = None,
+ box_encoding: str = "corner",
+ sort_result_descending: bool = True,
+ name: Optional[str] = None,
+) -> Node:
+ """Return a node which performs NonMaxSuppression.
+
+ @param boxes: Tensor with box coordinates.
+ @param scores: Tensor with box scores.
+ @param max_output_boxes_per_class: Tensor specifying maximum number of boxes
+ to be selected per class.
+ @param iou_threshold: Tensor specifying intersection over union threshold
+ @param score_threshold: Tensor specifying minimum score to consider box for the processing.
+ @param box_encoding: Format of boxes data encoding. Range of values: corner or center.
+ @param sort_result_descending: Flag that specifies whether it is necessary to sort selected
+ boxes across batches or not.
+
+ @return The new node which performs NonMaxSuppression
+ """
+ if max_output_boxes_per_class is None:
+ max_output_boxes_per_class = make_constant_node(0, np.int64)
+ if iou_threshold is None:
+ iou_threshold = make_constant_node(0, np.float32)
+ if score_threshold is None:
+ score_threshold = make_constant_node(0, np.float32)
+
+ inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
+ attributes = {
+ "box_encoding": box_encoding,
+ "sort_result_descending": sort_result_descending,
+ }
+
+ return _get_node_factory_opset1().create("NonMaxSuppression", inputs, attributes)
+
+
+@nameable_op
+def normalize_l2(
+ data: NodeInput, axes: NodeInput, eps: float, eps_mode: str, name: Optional[str] = None
+) -> Node:
+ """Construct a NormalizeL2 operation.
+
+ @param data: Node producing the input tensor
+ @param axes: Node indicating axes along which L2 reduction is calculated
+ @param eps: The epsilon added to L2 norm
+ @param eps_mode: how eps is combined with L2 value (`add` or `max`)
+ @return New node which performs the L2 normalization.
+ """
+ return _get_node_factory_opset1().create(
+ "NormalizeL2", as_nodes(data, axes), {"eps": eps, "mode": eps_mode}
+ )
+
+
+@binary_op
+def not_equal(
+ left_node: NodeInput,
+ right_node: NodeInput,
+ auto_broadcast: str = "NUMPY",
+ name: Optional[str] = None,
+) -> Node:
+ """Return node which checks if input nodes are unequal element-wise.
+
+ @param left_node: The first input node for not-equal operation.
+ @param right_node: The second input node for not-equal operation.
+ @param auto_broadcast: The type of broadcasting specifies rules used for
+ auto-broadcasting of input tensors.
+ @param name: The optional name for new output node.
+ @return The node performing element-wise inequality check.
+ """ + return _get_node_factory_opset1().create( + "NotEqual", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@nameable_op +def one_hot( + indices: NodeInput, + depth: NodeInput, + on_value: NodeInput, + off_value: NodeInput, + axis: int, + name: Optional[str] = None, +) -> Node: + """Create node performing one-hot encoding on input data. + + @param indices: Input tensor of rank N with indices of any supported integer data type. + @param depth: Scalar of any supported integer type that specifies number of classes and + the size of one-hot dimension. + @param on_value: Scalar of any type that is the value that the locations + in output tensor represented by indices in input take. + @param off_value: Scalar of any type that is the value that the locations not represented + by indices in input take. + + @param name: The optional name for new output node. + @return New node performing one-hot operation. + """ + return _get_node_factory_opset1().create( + "OneHot", as_nodes(indices, depth, on_value, off_value), {"axis": axis} + ) + + +@nameable_op +def pad( + arg: NodeInput, + pads_begin: NodeInput, + pads_end: NodeInput, + pad_mode: str, + arg_pad_value: Optional[NodeInput] = None, + name: Optional[str] = None, +) -> Node: + """Return a generic padding operation. + + @param arg: The node producing input tensor to be padded. + @param pads_begin: number of padding elements to be added before position 0 + on each axis of arg. + @param pads_end: number of padding elements to be added after the last element. + @param pad_mode: "constant", "edge", "reflect" or "symmetric" + @param arg_pad_value: value used for padding if pad_mode is "constant" + @return Pad operation node. 
+ """ + input_nodes = as_nodes(arg, pads_begin, pads_end) + if arg_pad_value: + input_nodes.append(as_node(arg_pad_value)) + + pad_mode = pad_mode.upper() + return _get_node_factory_opset1().create("Pad", input_nodes, {"pad_mode": pad_mode}) + + +@nameable_op +def parameter( + shape: TensorShape, dtype: NumericType = np.float32, name: Optional[str] = None +) -> Parameter: + """Return an openvino Parameter object.""" + element_type = get_element_type(dtype) + return Parameter(element_type, PartialShape(shape)) + + +@binary_op +def power( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which perform element-wise exponentiation operation. + + @param left_node: The node providing the base of operation. + @param right_node: The node providing the exponent of operation. + @param name: The optional name for the new output node. + @param auto_broadcast: The type of broadcasting specifies rules used for + auto-broadcasting of input tensors. + @return The new node performing element-wise exponentiation operation on input nodes. + """ + return _get_node_factory_opset1().create( + "Power", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@nameable_op +def prelu(data: NodeInput, slope: NodeInput, name: Optional[str] = None) -> Node: + """Perform Parametrized Relu operation element-wise on data from input node. + + @param data: The node with data tensor. + @param slope: The node with the multipliers for negative values. + @param name: Optional output node name. + @return The new node performing a PRelu operation on tensor's channels. 
+ + PRelu uses the following logic: + + @code{.py} + if data < 0: + data = data * slope + elif data >= 0: + data = data + @endcode + """ + return _get_node_factory_opset1().create("PRelu", as_nodes(data, slope)) + + +@nameable_op +def prior_box_clustered( + output_size: Node, image_size: NodeInput, attrs: dict, name: Optional[str] = None +) -> Node: + """Generate prior boxes of specified sizes normalized to the input image size. + + @param output_size: 1D tensor with two integer elements [height, width]. Specifies the + spatial size of generated grid with boxes. + @param image_size: 1D tensor with two integer elements [image_height, image_width] that + specifies shape of the image for which boxes are generated. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. + @return Node representing PriorBoxClustered operation. + + Available attributes are: + + * widths Specifies desired boxes widths in pixels. + Range of values: floating point positive numbers. + Default value: 1.0 + Required: no + + * heights Specifies desired boxes heights in pixels. + Range of values: floating point positive numbers. + Default value: 1.0 + Required: no + + * clip The flag that denotes if each value in the output tensor should be clipped + within [0,1]. + Range of values: {True, False} + Default value: True + Required: no + + * step_widths The distance between box centers. + Range of values: floating point positive number + Default value: 0.0 + Required: no + + * step_heights The distance between box centers. + Range of values: floating point positive number + Default value: 0.0 + Required: no + + * offset The shift of box respectively to the top left corner. + Range of values: floating point positive number + Default value: None + Required: yes + + * variance Denotes a variance of adjusting bounding boxes. 
+ Range of values: floating point positive numbers + Default value: [] + Required: no + + Example of attribute dictionary: + @code{.py} + # just required ones + attrs = { + 'offset': 85, + } + + attrs = { + 'offset': 85, + 'clip': False, + 'step_widths': [1.5, 2.0, 2.5] + } + @endcode + + Optional attributes which are absent from dictionary will be set with corresponding default. + """ + requirements = [ + ("widths", False, np.floating, is_positive_value), + ("heights", False, np.floating, is_positive_value), + ("clip", False, np.bool_, None), + ("step_widths", False, np.floating, is_positive_value), + ("step_heights", False, np.floating, is_positive_value), + ("offset", True, np.floating, is_positive_value), + ("variance", False, np.floating, is_positive_value), + ] + + check_valid_attributes("PriorBoxClustered", attrs, requirements) + + return _get_node_factory_opset1().create( + "PriorBoxClustered", [output_size, as_node(image_size)], attrs + ) + + +@nameable_op +def prior_box( + layer_shape: Node, image_shape: NodeInput, attrs: dict, name: Optional[str] = None +) -> Node: + """Generate prior boxes of specified sizes and aspect ratios across all dimensions. + + @param layer_shape: Shape of layer for which prior boxes are computed. + @param image_shape: Shape of image to which prior boxes are scaled. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. + @return Node representing prior box operation. + + Available attributes are: + + * min_size The minimum box size (in pixels). + Range of values: positive floating point numbers + Default value: [] + Required: no + + * max_size The maximum box size (in pixels). + Range of values: positive floating point numbers + Default value: [] + Required: no + + * aspect_ratio Aspect ratios of prior boxes. 
+ Range of values: set of positive floating point numbers + Default value: [] + Required: no + + * flip The flag that denotes that each aspect_ratio is duplicated and flipped. + Range of values: {True, False} + Default value: False + Required: no + + * clip The flag that denotes if each value in the output tensor should be clipped + to [0,1] interval. + Range of values: {True, False} + Default value: False + Required: no + + * step The distance between box centers. + Range of values: floating point non-negative number + Default value: 0 + Required: no + + * offset This is a shift of box respectively to top left corner. + Range of values: floating point non-negative number + Default value: None + Required: yes + + * variance The variance denotes a variance of adjusting bounding boxes. The attribute + could contain 0, 1 or 4 elements. + Range of values: floating point positive numbers + Default value: [] + Required: no + + * scale_all_sizes The flag that denotes type of inference. + Range of values: False - max_size is ignored + True - max_size is used + Default value: True + Required: no + + * fixed_ratio This is an aspect ratio of a box. + Range of values: a list of positive floating-point numbers + Default value: None + Required: no + + * fixed_size This is an initial box size (in pixels). + Range of values: a list of positive floating-point numbers + Default value: None + Required: no + + * density This is the square root of the number of boxes of each type. + Range of values: a list of positive floating-point numbers + Default value: None + Required: no + + Example of attribute dictionary: + @code{.py} + # just required ones + attrs = { + 'offset': 85, + } + + attrs = { + 'offset': 85, + 'flip': True, + 'clip': True, + 'fixed_size': [32, 64, 128] + } + @endcode + + Optional attributes which are absent from dictionary will be set with corresponding default. 
+ """ + requirements = [ + ("offset", True, np.floating, is_non_negative_value), + ("min_size", False, np.floating, is_positive_value), + ("max_size", False, np.floating, is_positive_value), + ("aspect_ratio", False, np.floating, is_positive_value), + ("flip", False, np.bool_, None), + ("clip", False, np.bool_, None), + ("step", False, np.floating, is_non_negative_value), + ("variance", False, np.floating, is_positive_value), + ("scale_all_sizes", False, np.bool_, None), + ("fixed_ratio", False, np.floating, is_positive_value), + ("fixed_size", False, np.floating, is_positive_value), + ("density", False, np.floating, is_positive_value), + ] + + check_valid_attributes("PriorBox", attrs, requirements) + + return _get_node_factory_opset1().create("PriorBox", [layer_shape, as_node(image_shape)], attrs) + + +@nameable_op +def proposal( + class_probs: Node, + bbox_deltas: Node, + image_shape: NodeInput, + attrs: dict, + name: Optional[str] = None, +) -> Node: + """Filter bounding boxes and outputs only those with the highest prediction confidence. + + @param class_probs: 4D input floating point tensor with class prediction scores. + @param bbox_deltas: 4D input floating point tensor with box logits. + @param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. + @return Node representing Proposal operation. + + * base_size The size of the anchor to which scale and ratio attributes are applied. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + + * pre_nms_topn The number of bounding boxes before the NMS operation. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + + * post_nms_topn The number of bounding boxes after the NMS operation. 
+ Range of values: a positive unsigned integer number + Default value: None + Required: yes + + * nms_thresh The minimum value of the proposal to be taken into consideration. + Range of values: a positive floating-point number + Default value: None + Required: yes + + * feat_stride The step size to slide over boxes (in pixels). + Range of values: a positive unsigned integer + Default value: None + Required: yes + + * min_size The minimum size of box to be taken into consideration. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + + * ratio The ratios for anchor generation. + Range of values: a list of floating-point numbers + Default value: None + Required: yes + + * scale The scales for anchor generation. + Range of values: a list of floating-point numbers + Default value: None + Required: yes + + * clip_before_nms The flag that specifies whether to perform clip bounding boxes before + non-maximum suppression or not. + Range of values: True or False + Default value: True + Required: no + + * clip_after_nms The flag that specifies whether to perform clip bounding boxes after + non-maximum suppression or not. + Range of values: True or False + Default value: False + Required: no + + * normalize The flag that specifies whether to perform normalization of output boxes to + [0,1] interval or not. + Range of values: True or False + Default value: False + Required: no + + * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding. + Range of values: a positive floating-point number + Default value: 1.0 + Required: no + + * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates + before decoding. + Range of values: a positive floating-point number + Default value: 1.0 + Required: no + + * framework Specifies how the box coordinates are calculated. 
+ Range of values: "" (empty string) - calculate box coordinates like in Caffe* + tensorflow - calculate box coordinates like in the TensorFlow* + Object Detection API models + Default value: "" (empty string) + Required: no + + Example of attribute dictionary: + + @code{.py} + # just required ones + attrs = { + 'base_size': 85, + 'pre_nms_topn': 10, + 'post_nms_topn': 20, + 'nms_thresh': 0.34, + 'feat_stride': 16, + 'min_size': 32, + 'ratio': [0.1, 1.5, 2.0, 2.5], + 'scale': [2, 3, 3, 4], + } + @endcode + + Optional attributes which are absent from dictionary will be set with corresponding default. + """ + requirements = [ + ("base_size", True, np.unsignedinteger, is_positive_value), + ("pre_nms_topn", True, np.unsignedinteger, is_positive_value), + ("post_nms_topn", True, np.unsignedinteger, is_positive_value), + ("nms_thresh", True, np.floating, is_positive_value), + ("feat_stride", True, np.unsignedinteger, is_positive_value), + ("min_size", True, np.unsignedinteger, is_positive_value), + ("ratio", True, np.floating, None), + ("scale", True, np.floating, None), + ("clip_before_nms", False, np.bool_, None), + ("clip_after_nms", False, np.bool_, None), + ("normalize", False, np.bool_, None), + ("box_size_scale", False, np.floating, is_positive_value), + ("box_coordinate_scale", False, np.floating, is_positive_value), + ("framework", False, np.str_, None), + ] + + check_valid_attributes("Proposal", attrs, requirements) + + return _get_node_factory_opset1().create( + "Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs + ) + + +@nameable_op +def psroi_pooling( + input: NodeInput, + coords: NodeInput, + output_dim: int, + group_size: int, + spatial_scale: float, + spatial_bins_x: int, + spatial_bins_y: int, + mode: str, + name: Optional[str] = None, +) -> Node: + """Return a node which produces a PSROIPooling operation. 
+ + @param input: Input feature map {N, C, ...} + @param coords: Coordinates of bounding boxes + @param output_dim: Output channel number + @param group_size: Number of groups to encode position-sensitive scores + @param spatial_scale: Ratio of input feature map over input image size + @param spatial_bins_x: Numbers of bins to divide the input feature maps over + @param spatial_bins_y: Numbers of bins to divide the input feature maps over + @param mode: Mode of pooling - "avg" or "bilinear" + @return PSROIPooling node + """ + mode = mode.lower() + return _get_node_factory_opset1().create( + "PSROIPooling", + as_nodes(input, coords), + { + "output_dim": output_dim, + "group_size": group_size, + "spatial_scale": spatial_scale, + "spatial_bins_x": spatial_bins_x, + "spatial_bins_y": spatial_bins_y, + "mode": mode, + }, + ) + + +@nameable_op +def range(start: Node, stop: NodeInput, step: NodeInput, name: Optional[str] = None) -> Node: + """Return a node which produces the Range operation. + + @param start: The start value of the generated range + @param stop: The stop value of the generated range + @param step: The step value for the generated range + @param name: Optional name for output node. + @return Range node + """ + return _get_node_factory_opset1().create("Range", as_nodes(start, stop, step)) + + +@unary_op +def relu(node: NodeInput, name: Optional[str] = None) -> Node: + """Perform rectified linear unit operation on input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: The optional output node name. + @return The new node performing relu operation on its input element-wise. + """ + return _get_node_factory_opset1().create("Relu", [node]) + + +@nameable_op +def reduce_logical_and( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """Logical AND reduction operation on input tensor, eliminating the specified reduction axes. 
+ + @param node: The tensor we want to reduce. + @param reduction_axes: The axes to eliminate through AND operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing reduction operation. + """ + return _get_node_factory_opset1().create( + "ReduceLogicalAnd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def reduce_logical_or( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """Logical OR reduction operation on input tensor, eliminating the specified reduction axes. + + @param node: The tensor we want to reduce. + @param reduction_axes: The axes to eliminate through OR operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing reduction operation. + """ + return _get_node_factory_opset1().create( + "ReduceLogicalOr", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def reduce_max( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """Max-reduction operation on input tensor, eliminating the specified reduction axes. + + @param node: The tensor we want to max-reduce. + @param reduction_axes: The axes to eliminate through max operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + """ + return _get_node_factory_opset1().create( + "ReduceMax", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def reduce_mean( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """Mean-reduction operation on input tensor, eliminating the specified reduction axes. + + @param node: The tensor we want to mean-reduce. 
+ @param reduction_axes: The axes to eliminate through mean operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing mean-reduction operation. + """ + return _get_node_factory_opset1().create( + "ReduceMean", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def reduce_min( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """Min-reduction operation on input tensor, eliminating the specified reduction axes. + + @param node: The tensor we want to min-reduce. + @param reduction_axes: The axes to eliminate through min operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + """ + return _get_node_factory_opset1().create( + "ReduceMin", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def reduce_prod( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """Product-reduction operation on input tensor, eliminating the specified reduction axes. + + @param node: The tensor we want to product-reduce. + @param reduction_axes: The axes to eliminate through product operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing product-reduction operation. + """ + return _get_node_factory_opset1().create( + "ReduceProd", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def reduce_sum( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """Perform element-wise sums of the input tensor, eliminating the specified reduction axes. + + @param node: The node providing data for operation. 
+ @param reduction_axes: The axes to eliminate through summation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: The optional new name for output node. + @return The new node performing summation along `reduction_axes` element-wise. + """ + return _get_node_factory_opset1().create( + "ReduceSum", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def region_yolo( + input: Node, + coords: int, + classes: int, + num: int, + do_softmax: bool, + mask: List[int], + axis: int, + end_axis: int, + anchors: List[float] = None, + name: Optional[str] = None, +) -> Node: + """Return a node which produces the RegionYolo operation. + + @param input: Input data + @param coords: Number of coordinates for each region + @param classes: Number of classes for each region + @param num: Number of regions + @param do_softmax: Compute softmax + @param mask: Mask + @param axis: Axis to begin softmax on + @param end_axis: Axis to end softmax on + @param anchors: A flattened list of pairs `[width, height]` that describes prior box sizes + @param name: Optional name for output node. + @return RegionYolo node + """ + if anchors is None: + anchors = [] + + return _get_node_factory_opset1().create( + "RegionYolo", + [input], + { + "coords": coords, + "classes": classes, + "num": num, + "do_softmax": do_softmax, + "mask": mask, + "axis": axis, + "end_axis": end_axis, + "anchors": anchors, + }, + ) + + +@nameable_op +def reshape( + node: NodeInput, output_shape: NodeInput, special_zero: bool, name: Optional[str] = None +) -> Node: + """Return reshaped node according to provided parameters. + + @param node: The tensor we want to reshape. + @param output_shape: The node with a new shape for input tensor. + @param special_zero: The boolean variable that controls how zero values in shape are + interpreted. 
If special_zero is false, then 0 is interpreted as-is + which means that output shape will contain a zero dimension at the + specified location. Input and output tensors are empty in this case. + If special_zero is true, then all zeros in shape implies the copying + of corresponding dimensions from data.shape into the output shape. + Range of values: False or True + """ + return _get_node_factory_opset1().create( + "Reshape", as_nodes(node, output_shape), {"special_zero": special_zero} + ) + + +@unary_op +def result(data: NodeInput, name: Optional[str] = None) -> Node: + """Return a node which represents an output of a graph (Function). + + @param data: The tensor containing the input data + @return Result node + """ + return _get_node_factory_opset1().create("Result", [data]) + + +@nameable_op +def reverse_sequence( + input: NodeInput, + seq_lengths: NodeInput, + batch_axis: NumericData, + seq_axis: NumericData, + name: Optional[str] = None, +) -> Node: + """Return a node which produces a ReverseSequence operation. + + @param input: tensor with input data to reverse + @param seq_lengths: 1D tensor of integers with sequence lengths in the input tensor. + @param batch_axis: index of the batch dimension. + @param seq_axis: index of the sequence dimension. + @return ReverseSequence node + """ + return _get_node_factory_opset1().create( + "ReverseSequence", + as_nodes(input, seq_lengths), + {"batch_axis": batch_axis, "seq_axis": seq_axis}, + ) + + +@nameable_op +def select( + cond: NodeInput, + then_node: NodeInput, + else_node: NodeInput, + auto_broadcast: str = "numpy", + name: Optional[str] = None, +) -> Node: + """Perform an element-wise selection operation on input tensors. + + @param cond: Tensor with selection mask of type `boolean`. + @param then_node: Tensor providing data to be selected if respective `cond` + item value is `True`. + @param else_node: Tensor providing data to be selected if respective `cond` + item value is `False`. 
+ @param auto_broadcast: Mode specifies rules used for auto-broadcasting of input tensors. + @param name: The optional new name for output node. + @return The new node with values selected according to provided arguments. + """ + inputs = as_nodes(cond, then_node, else_node) + return _get_node_factory_opset1().create( + "Select", + inputs, + {"auto_broadcast": auto_broadcast.upper()} + ) + + +@nameable_op +def selu( + data: NodeInput, alpha: NodeInput, lambda_value: NodeInput, name: Optional[str] = None +) -> Node: + """Perform a Scaled Exponential Linear Unit (SELU) operation on input node element-wise. + + @param data: input node, array or scalar. + @param alpha: Alpha coefficient of SELU operation + @param lambda_value: Lambda coefficient of SELU operation + @param name: The optional output node name. + @return The new node performing relu operation on its input element-wise. + """ + return _get_node_factory_opset1().create("Selu", as_nodes(data, alpha, lambda_value)) + + +@nameable_op +def shape_of(data: NodeInput, name: Optional[str] = None) -> Node: + """Return a node which produces a tensor containing the shape of its input data. + + @param data: The tensor containing the input data. + @return ShapeOf node + """ + return _get_node_factory_opset1().create("ShapeOf", [as_node(data)]) + + +@unary_op +def sigmoid(data: NodeInput, name: Optional[str] = None) -> Node: + """Return a node which applies the sigmoid function element-wise. + + @param data: The tensor containing the input data + @return Sigmoid node + """ + return _get_node_factory_opset1().create("Sigmoid", [data]) + + +@unary_op +def sign(node: NodeInput, name: Optional[str] = None) -> Node: + """Perform element-wise sign operation. + + @param node: One of: input node, array or scalar. + @param name: The optional new name for output node. + @return The node with mapped elements of the input tensor to -1 (if it is negative), + 0 (if it is zero), or 1 (if it is positive). 
+ """ + return _get_node_factory_opset1().create("Sign", [node]) + + +@unary_op +def sin(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply sine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with sin operation applied on it. + """ + return _get_node_factory_opset1().create("Sin", [node]) + + +@unary_op +def sinh(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply hyperbolic sine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with sin operation applied on it. + """ + return _get_node_factory_opset1().create("Sinh", [node]) + + +@nameable_op +def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node: + """Apply softmax operation on each element of input tensor. + + @param data: The tensor providing input data. + @param axis: An axis along which Softmax should be calculated + @return The new node with softmax operation applied on each element. + """ + return _get_node_factory_opset1().create("Softmax", [as_node(data)], {"axis": axis}) + + +@nameable_op +def space_to_depth(data: Node, mode: str, block_size: int = 1, name: str = None) -> Node: + """Perform SpaceToDepth operation on the input tensor. + + SpaceToDepth rearranges blocks of spatial data into depth. + The operator returns a copy of the input tensor where values from the height + and width dimensions are moved to the depth dimension. + + @param data: The node with data tensor. + @param mode: Specifies how the output depth dimension is gathered from block coordinates. + + blocks_first: The output depth is gathered from [block_size, ..., block_size, C] + depth_first: The output depth is gathered from [C, block_size, ..., block_size] + + @param block_size: The size of the block of values to be moved. Scalar value. 
+ @param name: Optional output node name. + @return The new node performing a SpaceToDepth operation on input tensor. + """ + return _get_node_factory_opset1().create( + "SpaceToDepth", [data], {"mode": mode, "block_size": block_size}, + ) + + +@nameable_op +def split(data: NodeInput, axis: NodeInput, num_splits: int, name: Optional[str] = None) -> Node: + """Return a node which splits the input tensor into same-length slices. + + @param data: The input tensor to be split + @param axis: Axis along which the input data will be split + @param num_splits: Number of the output tensors that should be produced + @return Split node + """ + return _get_node_factory_opset1().create( + "Split", + as_nodes(data, axis), + {"num_splits": num_splits} + ) + + +@unary_op +def sqrt(node: NodeInput, name: Optional[str] = None) -> Node: + """Return node which applies square root to the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return The new node with sqrt operation applied element-wise. + """ + return _get_node_factory_opset1().create("Sqrt", [node]) + + +@binary_op +def squared_difference( + x1: NodeInput, x2: NodeInput, auto_broadcast: str = "NUMPY", name: Optional[str] = None +) -> Node: + r"""Perform an element-wise squared difference between two tensors. + + \f[ y[i] = (x_1[i] - x_2[i])^2 \f] + + @param x1: The node with first input tensor. + @param x2: The node with second input tensor. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + to output shape axes. Range of values: numpy, explicit. + @param name: Optional new name for output node. + @return The new node performing a squared difference between two tensors. 
+ """ + return _get_node_factory_opset1().create( + "SquaredDifference", [x1, x2], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@nameable_op +def squeeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node: + """Perform squeeze operation on input tensor. + + @param data: The node with data tensor. + @param axes: List of non-negative integers, indicate the dimensions to squeeze. + One of: input node or array. + @param name: Optional new name for output node. + @return The new node performing a squeeze operation on input tensor. + + Remove single-dimensional entries from the shape of a tensor. + Takes a parameter `axes` with a list of axes to squeeze. + If `axes` is not provided, all the single dimensions will be removed from the shape. + If an `axis` is selected with shape entry not equal to one, an error is raised. + + + For example: + + Inputs: tensor with shape [1, 2, 1, 3, 1, 1], axes=[2, 4] + + Result: tensor with shape [1, 2, 3, 1] + """ + return _get_node_factory_opset1().create("Squeeze", as_nodes(data, axes)) + + +@nameable_op +def strided_slice( + data: NodeInput, + begin: NodeInput, + end: NodeInput, + strides: NodeInput, + begin_mask: List[int], + end_mask: List[int], + new_axis_mask: Optional[List[int]] = None, + shrink_axis_mask: Optional[List[int]] = None, + ellipsis_mask: Optional[List[int]] = None, + name: Optional[str] = None, +) -> Node: + """Return a node which dynamically repeats(replicates) the input data tensor. 
+ + @param data: The tensor to be sliced + @param begin: 1D tensor with begin indexes for input blob slicing + @param end: 1D tensor with end indexes for input blob slicing + @param strides: The slicing strides + @param begin_mask: A mask applied to the 'begin' input indicating which elements + shoud be ignored + @param end_mask: A mask applied to the 'end' input indicating which elements + shoud be ignored + @param new_axis_mask: A mask indicating dimensions where '1' should be inserted + @param shrink_axis_mask: A mask indicating which dimensions should be deleted + @param ellipsis_mask: Indicates positions where missing dimensions should be inserted + @return StridedSlice node + """ + if new_axis_mask is None: + new_axis_mask = [] + if shrink_axis_mask is None: + shrink_axis_mask = [] + if ellipsis_mask is None: + ellipsis_mask = [] + attributes = { + "begin_mask": begin_mask, + "end_mask": end_mask, + "new_axis_mask": new_axis_mask, + "shrink_axis_mask": shrink_axis_mask, + "ellipsis_mask": ellipsis_mask, + } + + return _get_node_factory_opset1().create( + "StridedSlice", as_nodes(data, begin, end, strides), attributes + ) + + +@binary_op +def subtract( + left_node: NodeInput, + right_node: NodeInput, + auto_broadcast: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Return node which applies f(x) = A-B to the input nodes element-wise. + + @param left_node: The node providing data for left hand side of operator. + @param right_node: The node providing data for right hand side of operator. + @param auto_broadcast: The type of broadcasting that specifies mapping of input tensor axes + to output shape axes. Range of values: numpy, explicit. + @param name: The optional name for output node. + @return The new output node performing subtraction operation on both tensors element-wise. 
+ """ + return _get_node_factory_opset1().create( + "Subtract", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()} + ) + + +@unary_op +def tan(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply tangent function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with tan operation applied on it. + """ + return _get_node_factory_opset1().create("Tan", [node]) + + +@unary_op +def tanh(node: NodeInput, name: Optional[str] = None) -> Node: + """Return node which applies hyperbolic tangent to the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with tanh operation applied on it. + """ + return _get_node_factory_opset1().create("Tanh", [node]) + + +@nameable_op +def tensor_iterator( + inputs: List[Node], + graph_body: GraphBody, + slice_input_desc: List[TensorIteratorSliceInputDesc], + merged_input_desc: List[TensorIteratorMergedInputDesc], + invariant_input_desc: List[TensorIteratorInvariantInputDesc], + body_output_desc: List[TensorIteratorBodyOutputDesc], + concat_output_desc: List[TensorIteratorConcatOutputDesc], + name: Optional[str] = None, +) -> Node: + """Perform recurrent execution of the network described in the body, iterating through the data. + + @param inputs: The provided to TensorIterator operator. + @param graph_body: The graph representing the body we execute. + @param slice_input_desc: The descriptors describing sliced inputs, that is nodes + representing tensors we iterate through, processing single + data slice in one iteration. + @param merged_input_desc: The descriptors describing merged inputs, that is nodes + representing variables with initial value at first iteration, + which may be changing through iterations. 
+ @param invariant_input_desc: The descriptors describing invariant inputs, that is nodes + representing variable with persistent value through all + iterations. + @param body_output_desc: The descriptors describing body outputs from specified + iteration. + @param concat_output_desc: The descriptors describing specified output values through + all the iterations concatenated into one node. + @param name: The optional name for output node. + @return Node representing TensorIterator operation. + """ + attributes = { + "body": graph_body.serialize(), + "input_descriptions": {"slice_input_desc": [desc.serialize() for desc in slice_input_desc], + "merged_input_desc": [desc.serialize() for desc in merged_input_desc], + "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc]}, + "output_descriptions": {"body_output_desc": [desc.serialize() for desc in body_output_desc], + "concat_output_desc": [desc.serialize() for desc in concat_output_desc]} + } + + return _get_node_factory_opset1().create("TensorIterator", as_nodes(*inputs), attributes) + + +@nameable_op +def tile(data: NodeInput, repeats: NodeInput, name: Optional[str] = None) -> Node: + """Return a node which dynamically repeats(replicates) the input data tensor. + + @param data: The input tensor to be tiled + @param repeats: Per-dimension replication factors + @return Tile node + """ + return _get_node_factory_opset1().create("Tile", as_nodes(data, repeats)) + + +@nameable_op +def topk( + data: NodeInput, + k: NodeInput, + axis: int, + mode: str, + sort: str, + name: Optional[str] = None, +) -> Node: + """Return a node which performs TopK. + + @param data: Input data. + @param k: K. + @param axis: TopK Axis. 
+ @param mode: Compute TopK largest ('max') or smallest ('min') + @param sort: Order of output elements (sort by: 'none', 'index' or 'value') + @return The new node which performs TopK (both indices and values) + """ + return _get_node_factory_opset1().create( + "TopK", + as_nodes(data, k), + {"axis": axis, "mode": mode, "sort": sort}, + ) + + +@nameable_op +def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node: + """Return a node which transposes the data in the input tensor. + + @param data: The input tensor to be transposed + @param input_order: Permutation of axes to be applied to the input tensor + @return Transpose node + """ + return _get_node_factory_opset1().create("Transpose", as_nodes(data, input_order)) + + +def unsqueeze(data: NodeInput, axes: NodeInput, name: Optional[str] = None) -> Node: + """Perform unsqueeze operation on input tensor. + + Insert single-dimensional entries to the shape of a tensor. Takes one required argument axes, + a list of dimensions that will be inserted. + Dimension indices in axes are as seen in the output tensor. + + For example: Inputs: tensor with shape [3, 4, 5], axes=[0, 4] + Result: tensor with shape [1, 3, 4, 5, 1] + + @param data: The node with data tensor. + @param axes: List of non-negative integers, indicate the dimensions to be inserted. + One of: input node or array. + @return The new node performing an unsqueeze operation on input tensor. + """ + return _get_node_factory_opset1().create("Unsqueeze", as_nodes(data, axes)) + + +@nameable_op +def variadic_split( + data: NodeInput, axis: NodeInput, split_lengths: NodeInput, name: Optional[str] = None +) -> Node: + """Return a node which splits the input tensor into variadic length slices. 
+ + @param data: The input tensor to be split + @param axis: Axis along which the input data will be split + @param split_lengths: Sizes of the output tensors along the split axis + @return VariadicSplit node + """ + return _get_node_factory_opset1().create("VariadicSplit", as_nodes(data, axis, split_lengths)) diff --git a/runtime/bindings/python/src/openvino/opset2/__init__.py b/runtime/bindings/python/src/openvino/opset2/__init__.py new file mode 100644 index 00000000000..8b833ae2ca8 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset2/__init__.py @@ -0,0 +1,117 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset1.ops import add +from openvino.opset1.ops import asin +from openvino.opset1.ops import atan +from openvino.opset1.ops import avg_pool +from openvino.opset1.ops import batch_norm_inference +from openvino.opset2.ops import batch_to_space +from openvino.opset1.ops import binary_convolution +from openvino.opset1.ops import broadcast +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset1.ops import deformable_convolution +from openvino.opset1.ops import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset1.ops import divide +from openvino.opset1.ops import elu +from openvino.opset1.ops 
import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset1.ops import gather +from openvino.opset1.ops import gather_tree +from openvino.opset2.ops import gelu +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset1.ops import hard_sigmoid +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import logical_or +from openvino.opset1.ops import logical_xor +from openvino.opset1.ops import lrn +from openvino.opset1.ops import lstm_cell +from openvino.opset1.ops import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset1.ops import max_pool +from openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset1.ops import mod +from openvino.opset1.ops import multiply +from openvino.opset2.ops import mvn +from openvino.opset1.ops import negative +from openvino.opset1.ops import non_max_suppression +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops import parameter +from openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset1.ops import proposal +from openvino.opset1.ops import range +from 
openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset2.ops import reorg_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result +from openvino.opset1.ops import reverse_sequence +from openvino.opset2.ops import roi_pooling +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset1.ops import shape_of +from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset2.ops import space_to_batch +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops import sqrt +from openvino.opset1.ops import squared_difference +from openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset1.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset2/ops.py b/runtime/bindings/python/src/openvino/opset2/ops.py new file mode 100644 index 00000000000..a5ca471b7ac --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset2/ops.py @@ -0,0 +1,179 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all 
openvino ops.""" +from typing import Callable, Iterable, List, Optional, Set, Union + +import numpy as np +from functools import partial + +from openvino.impl import Node, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + assert_list_of_ints, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + +_get_node_factory_opset2 = partial(_get_node_factory, "opset2") + +# -------------------------------------------- ops ------------------------------------------------ + + +@nameable_op +def batch_to_space( + data: NodeInput, + block_shape: NodeInput, + crops_begin: NodeInput, + crops_end: NodeInput, + name: Optional[str] = None, +) -> Node: + """Perform BatchToSpace operation on the input tensor. + + BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions. + + @param data: Node producing the data tensor. + @param block_shape: The sizes of the block of values to be moved. + @param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`. + @param crops_end: Specifies the amount to crop from the end along each axis of `data`. + @param name: Optional output node name. + @return The new node performing a BatchToSpace operation. 
+ """ + return _get_node_factory_opset2().create( + "BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end) + ) + + +@unary_op +def gelu(node: NodeInput, name: Optional[str] = None) -> Node: + r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node. + + Computes GELU function: + + \f[ f(x) = 0.5\cdot x\cdot(1 + erf( \dfrac{x}{\sqrt{2}}) \f] + + For more information refer to [Gaussian Error Linear Unit (GELU)](https://arxiv.org/pdf/1606.08415.pdf>) + + @param node: Input tensor. One of: input node, array or scalar. + @param name: Optional output node name. + @return The new node performing a GELU operation on its input data element-wise. + """ + return _get_node_factory_opset2().create("Gelu", [node]) + + +@nameable_op +def mvn( + data: Node, + across_channels: bool = False, + normalize_variance: bool = False, + eps: float = 1e-9, + name: str = None, +) -> Node: + r"""Perform Mean Variance Normalization operation on data from input node. + + Computes MVN on the input tensor `data` (called `X`) using formula: + + \f[ Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}} \f] + + @param data: The node with data tensor. + @param across_channels: Denotes if mean values are shared across channels. + @param normalize_variance: Denotes whether to perform variance normalization. + @param eps: The number added to the variance to avoid division by zero + when normalizing the value. Scalar value. + @param name: Optional output node name. + @return The new node performing a MVN operation on input tensor. + """ + return _get_node_factory_opset2().create( + "MVN", + [data], + {"across_channels": across_channels, "normalize_variance": normalize_variance, "eps": eps}, + ) + + +@nameable_op +def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node: + """Return a node which produces the ReorgYolo operation. + + @param input: Input data + @param stride: Stride to reorganize input by + @param name: Optional name for output node. 
+ @return ReorgYolo node + """ + return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride}) + + +@nameable_op +def roi_pooling( + input: NodeInput, + coords: NodeInput, + output_size: TensorShape, + spatial_scale: NumericData, + method: str, + name: Optional[str] = None, +) -> Node: + """Return a node which produces an ROIPooling operation. + + @param input: Input feature map {N, C, ...} + @param coords: Coordinates of bounding boxes + @param output_size: Height/Width of ROI output features (shape) + @param spatial_scale: Ratio of input feature map over input image size (float) + @param method: Method of pooling - string: "max" or "bilinear" + @return ROIPooling node + """ + method = method.lower() + return _get_node_factory_opset2().create( + "ROIPooling", + as_nodes(input, coords), + {"output_size": Shape(output_size), "spatial_scale": spatial_scale, "method": method}, + ) + + +@nameable_op +def space_to_batch( + data: NodeInput, + block_shape: NodeInput, + pads_begin: NodeInput, + pads_end: NodeInput, + name: Optional[str] = None, +) -> Node: + """Perform SpaceToBatch operation on the input tensor. + + SpaceToBatch permutes data tensor blocks of spatial data into batch dimension. + The operator returns a copy of the input tensor where values from spatial blocks dimensions + are moved in the batch dimension + + @param data: Node producing the data tensor. + @param block_shape: The sizes of the block of values to be moved. + @param pads_begin: Specifies the padding for the beginning along each axis of `data`. + @param pads_end: Specifies the padding for the ending along each axis of `data`. + @param name: Optional output node name. + @return The new node performing a SpaceToBatch operation. 
+ """ + return _get_node_factory_opset2().create( + "SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end) + ) diff --git a/runtime/bindings/python/src/openvino/opset3/__init__.py b/runtime/bindings/python/src/openvino/opset3/__init__.py new file mode 100644 index 00000000000..ba16ddcbd02 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset3/__init__.py @@ -0,0 +1,133 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset1.ops import add +from openvino.opset1.ops import asin +from openvino.opset3.ops import assign +from openvino.opset1.ops import atan +from openvino.opset1.ops import avg_pool +from openvino.opset1.ops import batch_norm_inference +from openvino.opset2.ops import batch_to_space +from openvino.opset1.ops import binary_convolution +from openvino.opset3.ops import broadcast +from openvino.opset3.ops import bucketize +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset3.ops import cum_sum +from openvino.opset3.ops import cum_sum as cumsum +from openvino.opset1.ops import deformable_convolution +from openvino.opset1.ops import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset1.ops import divide +from openvino.opset1.ops import elu +from openvino.opset3.ops import 
embedding_bag_offsets_sum +from openvino.opset3.ops import embedding_bag_packed_sum +from openvino.opset3.ops import embedding_segments_sum +from openvino.opset3.ops import extract_image_patches +from openvino.opset1.ops import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset1.ops import gather +from openvino.opset1.ops import gather_tree +from openvino.opset2.ops import gelu +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset3.ops import gru_cell +from openvino.opset1.ops import hard_sigmoid +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import logical_or +from openvino.opset1.ops import logical_xor +from openvino.opset1.ops import lrn +from openvino.opset1.ops import lstm_cell +from openvino.opset1.ops import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset1.ops import max_pool +from openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset1.ops import mod +from openvino.opset1.ops import multiply +from openvino.opset2.ops import mvn +from openvino.opset1.ops import negative +from openvino.opset3.ops import non_max_suppression +from openvino.opset3.ops import non_zero +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops import parameter +from 
openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset1.ops import proposal +from openvino.opset1.ops import range +from openvino.opset3.ops import read_value +from openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset2.ops import reorg_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result +from openvino.opset1.ops import reverse_sequence +from openvino.opset3.ops import rnn_cell +from openvino.opset3.ops import roi_align +from openvino.opset2.ops import roi_pooling +from openvino.opset3.ops import scatter_elements_update +from openvino.opset3.ops import scatter_update +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset3.ops import shape_of +from openvino.opset3.ops import shuffle_channels +from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset2.ops import space_to_batch +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops import sqrt +from openvino.opset1.ops import squared_difference +from openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import 
tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset3.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset3/ops.py b/runtime/bindings/python/src/openvino/opset3/ops.py new file mode 100644 index 00000000000..c9039898b5c --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset3/ops.py @@ -0,0 +1,634 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all openvino ops.""" +from typing import Callable, Iterable, List, Optional, Set, Union + +import numpy as np +from functools import partial + +from openvino.impl import Node, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + assert_list_of_ints, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + +_get_node_factory_opset3 = partial(_get_node_factory, "opset3") + +# -------------------------------------------- ops ------------------------------------------------ + + +@nameable_op +def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: + """Return a node which produces the Assign operation. 
+ + @param new_value: Node producing a value to be assigned to a variable. + @param variable_id: Id of a variable to be updated. + @param name: Optional name for output node. + @return Assign node + """ + return _get_node_factory_opset3().create( + "Assign", + [as_node(new_value)], + {"variable_id": variable_id} + ) + + +@nameable_op +def broadcast( + data: NodeInput, + target_shape: NodeInput, + axes_mapping: Optional[NodeInput] = None, + broadcast_spec: str = "NUMPY", + name: Optional[str] = None, +) -> Node: + """Create a node which broadcasts the input node's values along specified axes to a desired shape. + + @param data: The node with input tensor data. + @param target_shape: The node with a new shape we want to broadcast tensor to. + @param axes_mapping: The node with a axis positions (0-based) in the result + that are being broadcast. + @param broadcast_spec: The type of broadcasting that specifies mapping of input tensor axes + to output shape axes. Range of values: NUMPY, EXPLICIT, BIDIRECTIONAL. + @param name: Optional new name for output node. + @return New node with broadcast shape. + """ + inputs = as_nodes(data, target_shape) + if broadcast_spec.upper() == "EXPLICIT": + inputs.append(as_node(axes_mapping)) + return _get_node_factory_opset3().create( + "Broadcast", inputs, {"mode": broadcast_spec.upper()} + ) + + +@nameable_op +def bucketize( + data: Node, + buckets: NodeInput, + output_type: str = "i64", + with_right_bound: bool = True, + name: Optional[str] = None, +) -> Node: + """Return a node which produces the Bucketize operation. + + @param data: Input data to bucketize + @param buckets: 1-D of sorted unique boundaries for buckets + @param output_type: Output tensor type, "i64" or "i32", defaults to i64 + @param with_right_bound: indicates whether bucket includes the right or left + edge of interval. default true = includes right edge + @param name: Optional name for output node. 
+ @return Bucketize node + """ + return _get_node_factory_opset3().create( + "Bucketize", + [data, as_node(buckets)], + {"output_type": output_type, "with_right_bound": with_right_bound}, + ) + + +@nameable_op +def cum_sum( + arg: NodeInput, + axis: NodeInput, + exclusive: bool = False, + reverse: bool = False, + name: Optional[str] = None, +) -> Node: + """Construct a cumulative summation operation. + + @param arg: The tensor to be summed. + @param axis: zero dimension tensor specifying axis position along which sum will be performed. + @param exclusive: if set to true, the top element is not included + @param reverse: if set to true, will perform the sums in reverse direction + @return New node performing the operation + """ + return _get_node_factory_opset3().create( + "CumSum", as_nodes(arg, axis), {"exclusive": exclusive, "reverse": reverse} + ) + + +@nameable_op +def embedding_bag_offsets_sum( + emb_table: Node, + indices: NodeInput, + offsets: NodeInput, + default_index: Optional[NodeInput] = None, + per_sample_weights: Optional[NodeInput] = None, + name: Optional[str] = None, +) -> Node: + """Return a node which performs sums of bags of embeddings without the intermediate embeddings. + + @param emb_table: Tensor containing the embedding lookup table. + @param indices: Tensor with indices. + @param offsets: Tensor containing the starting index positions of each bag in indices. + @param per_sample_weights: Tensor with weights for each sample. + @param default_index: Scalar containing default index in embedding table to fill empty bags. + @param name: Optional name for output node. 
+ @return The new node which performs EmbeddingBagOffsetsSum + """ + inputs = [emb_table, as_node(indices), as_node(offsets)] + if per_sample_weights is not None: + inputs.append(default_index) + inputs.append(per_sample_weights) + elif default_index is not None: + inputs.append(default_index) + + return _get_node_factory_opset3().create("EmbeddingBagOffsetsSum", inputs, {}) + + +@nameable_op +def embedding_bag_packed_sum( + emb_table: NodeInput, + indices: NodeInput, + per_sample_weights: Optional[NodeInput] = None, + name: Optional[str] = None, +) -> Node: + """Return an EmbeddingBagPackedSum node. + + EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given + input tensor with a row (from the weights matrix) at that index + + @param emb_table: Tensor containing the embedding lookup table. + @param indices: Tensor with indices. + @param per_sample_weights: Weights to be multiplied with embedding table. + @param name: Optional name for output node. + @return EmbeddingBagPackedSum node + """ + inputs = [as_node(emb_table), as_node(indices)] + if per_sample_weights is not None: + inputs.append(as_node(per_sample_weights)) + + return _get_node_factory_opset3().create("EmbeddingBagPackedSum", inputs, {}) + + +@nameable_op +def embedding_segments_sum( + emb_table: Node, + indices: NodeInput, + segment_ids: NodeInput, + num_segments: Optional[NodeInput] = None, + default_index: Optional[NodeInput] = None, + per_sample_weights: Optional[NodeInput] = None, + name: Optional[str] = None, +) -> Node: + """Return an EmbeddingSegmentsSum node. + + EmbeddingSegmentsSum constructs an output tensor by replacing every index in a given + input tensor with a row (from the weights matrix) at that index + + @param emb_table: Tensor containing the embedding lookup table. + @param indices: Tensor with indices. + @param segment_ids: Tensor with indices into the output Tensor + @param num_segments: Tensor with number of segments. 
+ @param default_index: Scalar containing default index in embedding table to fill empty bags. + @param per_sample_weights: Weights to be multiplied with embedding table. + @param name: Optional name for output node. + @return EmbeddingSegmentsSum node + """ + inputs = [as_node(emb_table), as_node(indices), as_node(segment_ids)] + if per_sample_weights is not None: + inputs.append(as_node(num_segments)) + inputs.append(as_node(default_index)) + inputs.append(as_node(per_sample_weights)) + elif default_index is not None: + inputs.append(as_node(num_segments)) + inputs.append(as_node(default_index)) + elif num_segments is not None: + inputs.append(as_node(num_segments)) + + return _get_node_factory_opset3().create("EmbeddingSegmentsSum", inputs, {}) + + +@nameable_op +def extract_image_patches( + image: NodeInput, + sizes: TensorShape, + strides: List[int], + rates: TensorShape, + auto_pad: str, + name: Optional[str] = None, +) -> Node: + """Return a node which produces the ExtractImagePatches operation. + + @param image: 4-D Input data to extract image patches. + @param sizes: Patch size in the format of [size_rows, size_cols]. + @param strides: Patch movement stride in the format of [stride_rows, stride_cols] + @param rates: Element seleciton rate for creating a patch. + @param auto_pad: Padding type. + @param name: Optional name for output node. 
+ @return ExtractImagePatches node + """ + return _get_node_factory_opset3().create( + "ExtractImagePatches", + [as_node(image)], + {"sizes": sizes, "strides": strides, "rates": rates, "auto_pad": auto_pad}, + ) + + +@nameable_op +def gru_cell( + X: NodeInput, + initial_hidden_state: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + activations: List[str] = None, + activations_alpha: List[float] = None, + activations_beta: List[float] = None, + clip: float = 0.0, + linear_before_reset: bool = False, + name: Optional[str] = None, +) -> Node: + """Perform GRUCell operation on the tensor from input node. + + GRUCell represents a single GRU Cell that computes the output + using the formula described in the paper: https://arxiv.org/abs/1406.1078 + + Note this class represents only single *cell* and not whole *layer*. + + @param X: The input tensor with shape: [batch_size, input_size]. + @param initial_hidden_state: The hidden state tensor at current time step with shape: + [batch_size, hidden_size]. + @param W: The weights for matrix multiplication, gate order: zrh. + Shape: [3*hidden_size, input_size]. + @param R: The recurrence weights for matrix multiplication. + Shape: [3*hidden_size, hidden_size]. + @param B: The sum of biases (weight and recurrence). + For linear_before_reset set True the shape is [4*hidden_size]. + Otherwise the shape is [3*hidden_size]. + @param hidden_size: The number of hidden units for recurrent cell. + Specifies hidden state size. + @param activations: The vector of activation functions used inside recurrent cell. + @param activation_alpha: The vector of alpha parameters for activation functions in + order respective to activation list. + @param activation_beta: The vector of beta parameters for activation functions in order + respective to activation list. + @param clip: The value defining clipping range [-clip, clip] on input of + activation functions. 
+ @param linear_before_reset: Flag denotes if the layer behaves according to the modification + of GRUCell described in the formula in the ONNX documentation. + @param name: Optional output node name. + @return The new node performing a GRUCell operation on tensor from input node. + """ + if activations is None: + activations = ["sigmoid", "tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + input_nodes = as_nodes(X, initial_hidden_state, W, R, B) + attributes = { + "hidden_size": hidden_size, + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "linear_before_reset": linear_before_reset, + "clip": clip, + } + return _get_node_factory_opset3().create("GRUCell", input_nodes, attributes) + + +@nameable_op +def non_max_suppression( + boxes: NodeInput, + scores: NodeInput, + max_output_boxes_per_class: Optional[NodeInput] = None, + iou_threshold: Optional[NodeInput] = None, + score_threshold: Optional[NodeInput] = None, + box_encoding: str = "corner", + sort_result_descending: bool = True, + output_type: str = "i64", + name: Optional[str] = None, +) -> Node: + """Return a node which performs NonMaxSuppression. + + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. + @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes + to be selected per class. + @param iou_threshold: Tensor specifying intersection over union threshold + @param score_threshold: Tensor specifying minimum score to consider box for the processing. + @param box_encoding: Format of boxes data encoding. + @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected + boxes across batches or not. + @param output_type: Output element type. 
+ @return The new node which performs NonMaxSuppression + """ + if max_output_boxes_per_class is None: + max_output_boxes_per_class = make_constant_node(0, np.int64) + if iou_threshold is None: + iou_threshold = make_constant_node(0, np.float32) + if score_threshold is None: + score_threshold = make_constant_node(0, np.float32) + + inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) + attributes = { + "box_encoding": box_encoding, + "sort_result_descending": sort_result_descending, + "output_type": output_type, + } + + return _get_node_factory_opset3().create("NonMaxSuppression", inputs, attributes) + + +@nameable_op +def non_zero(data: NodeInput, output_type: str = "i64", name: Optional[str] = None,) -> Node: + """Return the indices of the elements that are non-zero. + + @param data: Input data. + @param output_type: Output tensor type. + + @return The new node which performs NonZero + """ + return _get_node_factory_opset3().create( + "NonZero", + [as_node(data)], + {"output_type": output_type} + ) + + +@nameable_op +def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: + """Return a node which produces the Assign operation. + + @param init_value: Node producing a value to be returned instead of an unassigned variable. + @param variable_id: Id of a variable to be read. + @param name: Optional name for output node. + @return ReadValue node + """ + return _get_node_factory_opset3().create( + "ReadValue", + [as_node(init_value)], + {"variable_id": variable_id} + ) + + +@nameable_op +def rnn_cell( + X: NodeInput, + initial_hidden_state: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + activations: List[str], + activations_alpha: List[float], + activations_beta: List[float], + clip: float = 0.0, + name: Optional[str] = None, +) -> Node: + """Perform RNNCell operation on tensor from input node. 
+ + It follows notation and equations defined as in ONNX standard: + https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN + + Note this class represents only single *cell* and not whole RNN *layer*. + + @param X: The input tensor with shape: [batch_size, input_size]. + @param initial_hidden_state: The hidden state tensor at current time step with shape: + [batch_size, hidden_size]. + @param W: The weight tensor with shape: [hidden_size, input_size]. + @param R: The recurrence weight tensor with shape: [hidden_size, + hidden_size]. + @param B: The sum of biases (weight and recurrence) with shape: [hidden_size]. + @param hidden_size: The number of hidden units for recurrent cell. + Specifies hidden state size. + @param activations: The vector of activation functions used inside recurrent cell. + @param activation_alpha: The vector of alpha parameters for activation functions in + order respective to activation list. + @param activation_beta: The vector of beta parameters for activation functions in order + respective to activation list. + @param clip: The value defining clipping range [-clip, clip] on input of + activation functions. + @param name: Optional output node name. + @return The new node performing a RNNCell operation on tensor from input node. 
+ """ + if activations is None: + activations = ["tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + input_nodes = as_nodes(X, initial_hidden_state, W, R, B) + attributes = { + "hidden_size": hidden_size, + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "clip": clip, + } + return _get_node_factory_opset3().create("RNNCell", input_nodes, attributes) + + +@nameable_op +def roi_align( + data: NodeInput, + rois: NodeInput, + batch_indices: NodeInput, + pooled_h: int, + pooled_w: int, + sampling_ratio: int, + spatial_scale: float, + mode: str, + name: Optional[str] = None, +) -> Node: + """Return a node which performs ROIAlign. + + @param data: Input data. + @param rois: RoIs (Regions of Interest) to pool over. + @param batch_indices: Tensor with each element denoting the index of + the corresponding image in the batch. + @param pooled_h: Height of the ROI output feature map. + @param pooled_w: Width of the ROI output feature map. + @param sampling_ratio: Number of bins over height and width to use to calculate + each output feature map element. + @param spatial_scale: Multiplicative spatial scale factor to translate ROI coordinates. + @param mode: Method to perform pooling to produce output feature map elements. + + @return The new node which performs ROIAlign + """ + inputs = as_nodes(data, rois, batch_indices) + attributes = { + "pooled_h": pooled_h, + "pooled_w": pooled_w, + "sampling_ratio": sampling_ratio, + "spatial_scale": spatial_scale, + "mode": mode, + } + return _get_node_factory_opset3().create("ROIAlign", inputs, attributes) + + +@nameable_op +def scatter_elements_update( + data: NodeInput, + indices: NodeInput, + updates: NodeInput, + axis: NodeInput, + name: Optional[str] = None, +) -> Node: + """Return a node which produces a ScatterElementsUpdate operation. + + @param data: The input tensor to be updated. 
+ @param indices: The tensor with indexes which will be updated. + @param updates: The tensor with update values. + @param axis: The axis for scatter. + @return ScatterElementsUpdate node + + ScatterElementsUpdate creates a copy of the first input tensor with updated elements + specified with second and third input tensors. + + For each entry in `updates`, the target index in `data` is obtained by combining + the corresponding entry in `indices` with the index of the entry itself: the + index-value for dimension equal to `axis` is obtained from the value of the + corresponding entry in `indices` and the index-value for dimension not equal + to `axis` is obtained from the index of the entry itself. + + """ + return _get_node_factory_opset3().create( + "ScatterElementsUpdate", as_nodes(data, indices, updates, axis) + ) + + +@nameable_op +def scatter_update( + data: Node, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None +) -> Node: + """Return a node which produces a ScatterUpdate operation. + + ScatterUpdate sets new values to slices from data addressed by indices. + + @param data: The input tensor to be updated. + @param indices: The tensor with indexes which will be updated. + @param updates: The tensor with update values. + @param axis: The axis at which elements will be updated. + @return ScatterUpdate node + """ + return _get_node_factory_opset3().create( + "ScatterUpdate", + as_nodes(data, indices, updates, axis) + ) + + +@nameable_op +def shape_of(data: NodeInput, output_type: str = "i64", name: Optional[str] = None) -> Node: + """Return a node which produces a tensor containing the shape of its input data. + + @param data: The tensor containing the input data. + @param output_type: Output element type. 
+ @return ShapeOf node + """ + return _get_node_factory_opset3().create( + "ShapeOf", + [as_node(data)], + {"output_type": output_type} + ) + + +@nameable_op +def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = None) -> Node: + """Perform permutation on data in the channel dimension of the input tensor. + + @param data: The node with input tensor. + @param axis: Channel dimension index in the data tensor. + A negative value means that the index should be calculated + from the back of the input data shape. + @param group: The channel dimension specified by the axis parameter + should be split into this number of groups. + @param name: Optional output node name. + @return The new node performing a permutation on data in the channel dimension + of the input tensor. + + The operation is the equivalent with the following transformation of the input tensor + `data` of shape [N, C, H, W]: + + `data_reshaped` = reshape(`data`, [N, group, C / group, H * W]) + + `data_transposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) + + `output` = reshape(`data_transposed`, [N, C, H, W]) + + For example: + + @code{.py} + Inputs: tensor of shape [1, 6, 2, 2] + + data = [[[[ 0., 1.], [ 2., 3.]], + [[ 4., 5.], [ 6., 7.]], + [[ 8., 9.], [10., 11.]], + [[12., 13.], [14., 15.]], + [[16., 17.], [18., 19.]], + [[20., 21.], [22., 23.]]]] + + axis = 1 + groups = 3 + + Output: tensor of shape [1, 6, 2, 2] + + output = [[[[ 0., 1.], [ 2., 3.]], + [[ 8., 9.], [10., 11.]], + [[16., 17.], [18., 19.]], + [[ 4., 5.], [ 6., 7.]], + [[12., 13.], [14., 15.]], + [[20., 21.], [22., 23.]]]] + @endcode + """ + return _get_node_factory_opset3().create( + "ShuffleChannels", [as_node(data)], {"axis": axis, "group": group} + ) + + +@nameable_op +def topk( + data: NodeInput, + k: NodeInput, + axis: int, + mode: str, + sort: str, + index_element_type: str = "i32", + name: Optional[str] = None, +) -> Node: + """Return a node which performs TopK. + + @param data: Input data. + @param k: K. 
+ @param axis: TopK Axis. + @param mode: Compute TopK largest ('max') or smallest ('min') + @param sort: Order of output elements (sort by: 'none', 'index' or 'value') + @param index_element_type: Type of output tensor with indices. + @return The new node which performs TopK (both indices and values) + """ + return _get_node_factory_opset3().create( + "TopK", + as_nodes(data, k), + {"axis": axis, "mode": mode, "sort": sort, "index_element_type": index_element_type}, + ) diff --git a/runtime/bindings/python/src/openvino/opset4/__init__.py b/runtime/bindings/python/src/openvino/opset4/__init__.py new file mode 100644 index 00000000000..6d7004656ce --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset4/__init__.py @@ -0,0 +1,143 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset4.ops import acosh +from openvino.opset1.ops import add +from openvino.opset1.ops import asin +from openvino.opset4.ops import asinh +from openvino.opset3.ops import assign +from openvino.opset1.ops import atan +from openvino.opset4.ops import atanh +from openvino.opset1.ops import avg_pool +from openvino.opset1.ops import batch_norm_inference +from openvino.opset2.ops import batch_to_space +from openvino.opset1.ops import binary_convolution +from openvino.opset3.ops import broadcast +from openvino.opset3.ops import bucketize +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from 
openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset4.ops import ctc_loss +from openvino.opset3.ops import cum_sum +from openvino.opset3.ops import cum_sum as cumsum +from openvino.opset1.ops import deformable_convolution +from openvino.opset1.ops import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset1.ops import divide +from openvino.opset1.ops import elu +from openvino.opset3.ops import embedding_bag_offsets_sum +from openvino.opset3.ops import embedding_bag_packed_sum +from openvino.opset3.ops import embedding_segments_sum +from openvino.opset3.ops import extract_image_patches +from openvino.opset1.ops import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset1.ops import gather +from openvino.opset1.ops import gather_tree +from openvino.opset2.ops import gelu +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset3.ops import gru_cell +from openvino.opset1.ops import hard_sigmoid +from openvino.opset4.ops import hswish +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import logical_or +from openvino.opset1.ops import logical_xor +from openvino.opset1.ops import lrn +from openvino.opset4.ops import lstm_cell +from openvino.opset1.ops import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset1.ops import max_pool +from 
openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset4.ops import mish +from openvino.opset1.ops import mod +from openvino.opset1.ops import multiply +from openvino.opset2.ops import mvn +from openvino.opset1.ops import negative +from openvino.opset4.ops import non_max_suppression +from openvino.opset3.ops import non_zero +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops import parameter +from openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset4.ops import proposal +from openvino.opset1.ops import range +from openvino.opset3.ops import read_value +from openvino.opset4.ops import reduce_l1 +from openvino.opset4.ops import reduce_l2 +from openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset2.ops import reorg_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result +from openvino.opset1.ops import reverse_sequence +from openvino.opset3.ops import rnn_cell +from openvino.opset3.ops import roi_align +from openvino.opset2.ops import roi_pooling +from openvino.opset3.ops import scatter_elements_update +from openvino.opset3.ops import scatter_update +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset3.ops import shape_of +from openvino.opset3.ops import shuffle_channels 
+from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset4.ops import softplus +from openvino.opset2.ops import space_to_batch +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops import sqrt +from openvino.opset1.ops import squared_difference +from openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset4.ops import swish +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset3.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset4/ops.py b/runtime/bindings/python/src/openvino/opset4/ops.py new file mode 100644 index 00000000000..0ece908265d --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset4/ops.py @@ -0,0 +1,409 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all openvino ops.""" +from typing import Callable, Iterable, List, Optional, Set, Union + +import numpy as np +from functools import partial + +from openvino.impl import Node, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + assert_list_of_ints, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + 
TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + +_get_node_factory_opset4 = partial(_get_node_factory, "opset4") + +# -------------------------------------------- ops ------------------------------------------------ + + +@nameable_op +def ctc_loss( + logits: NodeInput, + logit_length: NodeInput, + labels: NodeInput, + label_length: NodeInput, + blank_index: Optional[NodeInput] = None, + preprocess_collapse_repeated: bool = False, + ctc_merge_repeated: bool = True, + unique: bool = False, + name: Optional[str] = None, +) -> Node: + """Return a node which performs CTCLoss. + + @param logits: 3-D tensor of logits. + @param logit_length: 1-D tensor of lengths for each object from a batch. + @param labels: 2-D tensor of labels for which likelihood is estimated using logits. + @param label_length: 1-D tensor of length for each label sequence. + @param blank_index: Scalar used to mark a blank index. + @param preprocess_collapse_repeated: Flag for preprocessing labels before loss calculation. + @param ctc_merge_repeated: Flag for merging repeated characters in a potential alignment. + @param unique: Flag to find unique elements in a target. 
+ @return The new node which performs CTCLoss + """ + if blank_index is not None: + inputs = as_nodes(logits, logit_length, labels, label_length, blank_index) + else: + inputs = as_nodes(logits, logit_length, labels, label_length) + + attributes = { + "preprocess_collapse_repeated": preprocess_collapse_repeated, + "ctc_merge_repeated": ctc_merge_repeated, + "unique": unique, + } + + return _get_node_factory_opset4().create("CTCLoss", inputs, attributes) + + +@nameable_op +def non_max_suppression( + boxes: NodeInput, + scores: NodeInput, + max_output_boxes_per_class: Optional[NodeInput] = None, + iou_threshold: Optional[NodeInput] = None, + score_threshold: Optional[NodeInput] = None, + box_encoding: str = "corner", + sort_result_descending: bool = True, + output_type: str = "i64", + name: Optional[str] = None, +) -> Node: + """Return a node which performs NonMaxSuppression. + + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. + @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes + to be selected per class. + @param iou_threshold: Tensor specifying intersection over union threshold + @param score_threshold: Tensor specifying minimum score to consider box for the processing. + @param box_encoding: Format of boxes data encoding. + @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected + boxes across batches or not. + @param output_type: Output element type. 
+ @return The new node which performs NonMaxSuppression + """ + if max_output_boxes_per_class is None: + max_output_boxes_per_class = make_constant_node(0, np.int64) + if iou_threshold is None: + iou_threshold = make_constant_node(0, np.float32) + if score_threshold is None: + score_threshold = make_constant_node(0, np.float32) + + inputs = as_nodes(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) + attributes = { + "box_encoding": box_encoding, + "sort_result_descending": sort_result_descending, + "output_type": output_type, + } + + return _get_node_factory_opset4().create("NonMaxSuppression", inputs, attributes) + + +@nameable_op +def softplus(data: NodeInput, name: Optional[str] = None) -> Node: + """Apply SoftPlus operation on each element of input tensor. + + @param data: The tensor providing input data. + @return The new node with SoftPlus operation applied on each element. + """ + return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {}) + + +@nameable_op +def mish(data: NodeInput, name: Optional[str] = None,) -> Node: + """Return a node which performs Mish. + + @param data: Tensor with input data floating point type. + @return The new node which performs Mish + """ + return _get_node_factory_opset4().create("Mish", as_nodes(data), {}) + + +@nameable_op +def hswish(data: NodeInput, name: Optional[str] = None,) -> Node: + """Return a node which performs HSwish (hard version of Swish). + + @param data: Tensor with input data floating point type. + @return The new node which performs HSwish + """ + return _get_node_factory_opset4().create("HSwish", as_nodes(data), {}) + + +@nameable_op +def swish( + data: NodeInput, + beta: Optional[NodeInput] = None, + name: Optional[str] = None, +) -> Node: + """Return a node which performing Swish activation function Swish(x, beta=1.0) = x * sigmoid(x * beta)). + + @param data: Tensor with input data floating point type. 
+ @return The new node which performs Swish + """ + if beta is None: + beta = make_constant_node(1.0, np.float32) + return _get_node_factory_opset4().create("Swish", as_nodes(data, beta), {}) + + +@nameable_op +def acosh(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply hyperbolic inverse cosine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arccosh operation applied on it. + """ + return _get_node_factory_opset4().create("Acosh", [node]) + + +@nameable_op +def asinh(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply hyperbolic inverse sine function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arcsinh operation applied on it. + """ + return _get_node_factory_opset4().create("Asinh", [node]) + + +@nameable_op +def atanh(node: NodeInput, name: Optional[str] = None) -> Node: + """Apply hyperbolic inverse tangent function on the input node element-wise. + + @param node: One of: input node, array or scalar. + @param name: Optional new name for output node. + @return New node with arctanh operation applied on it. + """ + return _get_node_factory_opset4().create("Atanh", [node]) + + +@nameable_op +def proposal( + class_probs: Node, + bbox_deltas: Node, + image_shape: NodeInput, + attrs: dict, + name: Optional[str] = None, +) -> Node: + """Filter bounding boxes and outputs only those with the highest prediction confidence. + + @param class_probs: 4D input floating point tensor with class prediction scores. + @param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes + @param image_shape: The 1D input tensor with 3 or 4 elements describing image shape. + @param attrs: The dictionary containing key, value pairs for attributes. + @param name: Optional name for the output node. 
+ * base_size The size of the anchor to which scale and ratio attributes are applied. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + * pre_nms_topn The number of bounding boxes before the NMS operation. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + * post_nms_topn The number of bounding boxes after the NMS operation. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + * nms_thresh The minimum value of the proposal to be taken into consideration. + Range of values: a positive floating-point number + Default value: None + Required: yes + * feat_stride The step size to slide over boxes (in pixels). + Range of values: a positive unsigned integer + Default value: None + Required: yes + * min_size The minimum size of box to be taken into consideration. + Range of values: a positive unsigned integer number + Default value: None + Required: yes + * ratio The ratios for anchor generation. + Range of values: a list of floating-point numbers + Default value: None + Required: yes + * scale The scales for anchor generation. + Range of values: a list of floating-point numbers + Default value: None + Required: yes + * clip_before_nms The flag that specifies whether to perform clip bounding boxes before + non-maximum suppression or not. + Range of values: True or False + Default value: True + Required: no + * clip_after_nms The flag that specifies whether to perform clip bounding boxes after + non-maximum suppression or not. + Range of values: True or False + Default value: False + Required: no + * normalize The flag that specifies whether to perform normalization of output boxes to + [0,1] interval or not. + Range of values: True or False + Default value: False + Required: no + * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding. 
+ Range of values: a positive floating-point number + Default value: 1.0 + Required: no + * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates + before decoding. + Range of values: a positive floating-point number + Default value: 1.0 + Required: no + * framework Specifies how the box coordinates are calculated. + Range of values: "" (empty string) - calculate box coordinates like in Caffe* + tensorflow - calculate box coordinates like in the TensorFlow* + Object Detection API models + Default value: "" (empty string) + Required: no + + Example of attribute dictionary: + @code{.py} + # just required ones + attrs = { + 'base_size': 85, + 'pre_nms_topn': 10, + 'post_nms_topn': 20, + 'nms_thresh': 0.34, + 'feat_stride': 16, + 'min_size': 32, + 'ratio': [0.1, 1.5, 2.0, 2.5], + 'scale': [2, 3, 3, 4], + } + @endcode + Optional attributes which are absent from dictionary will be set with corresponding default. + @return Node representing Proposal operation. + """ + requirements = [ + ("base_size", True, np.unsignedinteger, is_positive_value), + ("pre_nms_topn", True, np.unsignedinteger, is_positive_value), + ("post_nms_topn", True, np.unsignedinteger, is_positive_value), + ("nms_thresh", True, np.floating, is_positive_value), + ("feat_stride", True, np.unsignedinteger, is_positive_value), + ("min_size", True, np.unsignedinteger, is_positive_value), + ("ratio", True, np.floating, None), + ("scale", True, np.floating, None), + ("clip_before_nms", False, np.bool_, None), + ("clip_after_nms", False, np.bool_, None), + ("normalize", False, np.bool_, None), + ("box_size_scale", False, np.floating, is_positive_value), + ("box_coordinate_scale", False, np.floating, is_positive_value), + ("framework", False, np.str_, None), + ] + + check_valid_attributes("Proposal", attrs, requirements) + + return _get_node_factory_opset4().create( + "Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs + ) + + +@nameable_op +def reduce_l1( + node: 
 NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """L1-reduction operation on input tensor, eliminating the specified reduction axes. + + @param node: The tensor we want to L1-reduce. + @param reduction_axes: The axes to eliminate through L1 operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing L1-reduction operation. + """ + return _get_node_factory_opset4().create( + "ReduceL1", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def reduce_l2( + node: NodeInput, reduction_axes: NodeInput, keep_dims: bool = False, name: Optional[str] = None +) -> Node: + """L2-reduction operation on input tensor, eliminating the specified reduction axes. + + @param node: The tensor we want to L2-reduce. + @param reduction_axes: The axes to eliminate through L2 operation. + @param keep_dims: If set to True it holds axes that are used for reduction + @param name: Optional name for output node. + @return The new node performing L2-reduction operation. + """ + return _get_node_factory_opset4().create( + "ReduceL2", as_nodes(node, reduction_axes), {"keep_dims": keep_dims} + ) + + +@nameable_op +def lstm_cell( + X: NodeInput, + initial_hidden_state: NodeInput, + initial_cell_state: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + activations: List[str] = None, + activations_alpha: List[float] = None, + activations_beta: List[float] = None, + clip: float = 0.0, + name: Optional[str] = None, +) -> Node: + """Return a node which performs LSTMCell operation. + + @param X: The input tensor with shape: [batch_size, input_size]. + @param initial_hidden_state: The hidden state tensor with shape: [batch_size, hidden_size]. + @param initial_cell_state: The cell state tensor with shape: [batch_size, hidden_size]. 
+ @param W: The weight tensor with shape: [4*hidden_size, input_size]. + @param R: The recurrence weight tensor with shape: [4*hidden_size, hidden_size]. + @param B: The bias tensor for gates with shape: [4*hidden_size]. + @param hidden_size: Specifies hidden state size. + @param activations: The list of three activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. + + @return The new node represents LSTMCell. Node outputs count: 2. + """ + if activations is None: + activations = ["sigmoid", "tanh", "tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, W, R, B) + + attributes = { + "hidden_size": hidden_size, + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "clip": clip, + } + return _get_node_factory_opset4().create("LSTMCell", node_inputs, attributes) diff --git a/runtime/bindings/python/src/openvino/opset5/__init__.py b/runtime/bindings/python/src/openvino/opset5/__init__.py new file mode 100644 index 00000000000..4ae03c592ab --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset5/__init__.py @@ -0,0 +1,150 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset4.ops import acosh +from openvino.opset1.ops import add +from openvino.opset1.ops import asin +from openvino.opset4.ops import asinh +from openvino.opset3.ops import assign +from openvino.opset1.ops import atan +from openvino.opset4.ops 
import atanh +from openvino.opset1.ops import avg_pool +from openvino.opset5.ops import batch_norm_inference +from openvino.opset2.ops import batch_to_space +from openvino.opset1.ops import binary_convolution +from openvino.opset3.ops import broadcast +from openvino.opset3.ops import bucketize +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset4.ops import ctc_loss +from openvino.opset3.ops import cum_sum +from openvino.opset3.ops import cum_sum as cumsum +from openvino.opset1.ops import deformable_convolution +from openvino.opset1.ops import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset1.ops import divide +from openvino.opset1.ops import elu +from openvino.opset3.ops import embedding_bag_offsets_sum +from openvino.opset3.ops import embedding_bag_packed_sum +from openvino.opset3.ops import embedding_segments_sum +from openvino.opset3.ops import extract_image_patches +from openvino.opset1.ops import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset1.ops import gather +from openvino.opset5.ops import gather_nd +from openvino.opset1.ops import gather_tree +from openvino.opset2.ops import gelu +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops 
import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset3.ops import gru_cell +from openvino.opset5.ops import gru_sequence +from openvino.opset1.ops import hard_sigmoid +from openvino.opset5.ops import hsigmoid +from openvino.opset4.ops import hswish +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import logical_or +from openvino.opset1.ops import logical_xor +from openvino.opset5.ops import log_softmax +from openvino.opset5.ops import loop +from openvino.opset1.ops import lrn +from openvino.opset4.ops import lstm_cell +from openvino.opset1.ops import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset1.ops import max_pool +from openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset4.ops import mish +from openvino.opset1.ops import mod +from openvino.opset1.ops import multiply +from openvino.opset2.ops import mvn +from openvino.opset1.ops import negative +from openvino.opset5.ops import non_max_suppression +from openvino.opset3.ops import non_zero +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops import parameter +from openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset4.ops import proposal +from openvino.opset1.ops import range +from openvino.opset3.ops import read_value +from openvino.opset4.ops import reduce_l1 +from openvino.opset4.ops import reduce_l2 +from 
openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset2.ops import reorg_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result +from openvino.opset1.ops import reverse_sequence +from openvino.opset3.ops import rnn_cell +from openvino.opset5.ops import rnn_sequence +from openvino.opset3.ops import roi_align +from openvino.opset2.ops import roi_pooling +from openvino.opset5.ops import round +from openvino.opset3.ops import scatter_elements_update +from openvino.opset3.ops import scatter_update +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset3.ops import shape_of +from openvino.opset3.ops import shuffle_channels +from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset4.ops import softplus +from openvino.opset2.ops import space_to_batch +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops import sqrt +from openvino.opset1.ops import squared_difference +from openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset4.ops import swish +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset3.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from 
openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset5/ops.py b/runtime/bindings/python/src/openvino/opset5/ops.py new file mode 100644 index 00000000000..16700cf2bf9 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset5/ops.py @@ -0,0 +1,427 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all openvino ops.""" +from typing import Callable, Iterable, List, Optional, Set, Union + +import numpy as np +from functools import partial + +from openvino.impl import Node, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + assert_list_of_ints, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + +_get_node_factory_opset5 = partial(_get_node_factory, "opset5") + +# -------------------------------------------- ops ------------------------------------------------ + + +@nameable_op +def batch_norm_inference( + data: NodeInput, + gamma: NodeInput, + beta: NodeInput, + mean: NodeInput, + variance: NodeInput, + epsilon: float, + name: Optional[str] = None, +) -> Node: + """Perform layer normalization on an input tensor by mean and variance, applying scale and offset. + + @param data: The input tensor with data for normalization.
+ @param gamma: The scalar scaling for normalized value. + @param beta: The bias added to the scaled normalized value. + @param mean: The value for mean normalization. + @param variance: The value for variance normalization. + @param epsilon: The number to be added to the variance to avoid division + by zero when normalizing a value. + @param name: The optional name of the output node. + @return: The new node which performs BatchNormInference. + """ + inputs = as_nodes(data, gamma, beta, mean, variance) + return _get_node_factory_opset5().create("BatchNormInference", inputs, {"epsilon": epsilon}) + + +@nameable_op +def gather_nd( + data: NodeInput, + indices: NodeInput, + batch_dims: Optional[int] = 0, + name: Optional[str] = None, +) -> Node: + """Return a node which performs GatherND. + + @param data: N-D tensor with data for gathering + @param indices: K-D tensor of tuples with indices by which data is gathered + @param batch_dims: Scalar value of batch dimensions + @return: The new node which performs GatherND + """ + inputs = as_nodes(data, indices) + + attributes = { + "batch_dims": batch_dims + } + + return _get_node_factory_opset5().create("GatherND", inputs, attributes) + + +@nameable_op +def log_softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node: + """Apply LogSoftmax operation on each element of input tensor. + + @param data: The tensor providing input data. + @param axis: An axis along which LogSoftmax should be calculated + @return: The new node with LogSoftmax operation applied on each element. 
+ """ + return _get_node_factory_opset5().create("LogSoftmax", [as_node(data)], {"axis": axis}) + + +@nameable_op +def non_max_suppression( + boxes: NodeInput, + scores: NodeInput, + max_output_boxes_per_class: Optional[NodeInput] = None, + iou_threshold: Optional[NodeInput] = None, + score_threshold: Optional[NodeInput] = None, + soft_nms_sigma: Optional[NodeInput] = None, + box_encoding: str = "corner", + sort_result_descending: bool = True, + output_type: str = "i64", + name: Optional[str] = None, +) -> Node: + """Return a node which performs NonMaxSuppression. + + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. + @param max_output_boxes_per_class: Tensor Specifying maximum number of boxes + to be selected per class. + @param iou_threshold: Tensor specifying intersection over union threshold + @param score_threshold: Tensor specifying minimum score to consider box for the processing. + @param soft_nms_sigma: Tensor specifying the sigma parameter for Soft-NMS. + @param box_encoding: Format of boxes data encoding. + @param sort_result_descending: Flag that specifies whenever it is necessary to sort selected + boxes across batches or not. + @param output_type: Output element type. 
+ @return: The new node which performs NonMaxSuppression + """ + if max_output_boxes_per_class is None: + max_output_boxes_per_class = make_constant_node(0, np.int64) + if iou_threshold is None: + iou_threshold = make_constant_node(0, np.float32) + if score_threshold is None: + score_threshold = make_constant_node(0, np.float32) + if soft_nms_sigma is None: + inputs = as_nodes( + boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold + ) + else: + inputs = as_nodes( + boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, soft_nms_sigma + ) + + attributes = { + "box_encoding": box_encoding, + "sort_result_descending": sort_result_descending, + "output_type": output_type, + } + + return _get_node_factory_opset5().create("NonMaxSuppression", inputs, attributes) + + +@nameable_op +def round(data: NodeInput, mode: str = "half_to_even", name: Optional[str] = None) -> Node: + """Apply Round operation on each element of input tensor. + + @param data: The tensor providing input data. + @param mode: Rule to round halfway cases. If set to 'half_to_even' then halves round to the nearest even + integer or rounding in such a way that the result heads away from zero if `mode` attribute is + 'half_away_from_zero'. + @param name: An optional name of the output node. + @return: The new node with Round operation applied on each element. + """ + return _get_node_factory_opset5().create("Round", as_nodes(data), {"mode": mode.upper()}) + + +@nameable_op +def lstm_sequence( + X: NodeInput, + initial_hidden_state: NodeInput, + initial_cell_state: NodeInput, + sequence_lengths: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + direction: str, + activations: List[str] = None, + activations_alpha: List[float] = None, + activations_beta: List[float] = None, + clip: float = 0.0, + name: Optional[str] = None, +) -> Node: + """Return a node which performs LSTMSequence operation. + + @param X: The input tensor.
Shape: [batch_size, seq_length, input_size]. + @param initial_hidden_state: The hidden state tensor. + Shape: [batch_size, num_directions, hidden_size]. + @param initial_cell_state: The cell state tensor. + Shape: [batch_size, num_directions, hidden_size]. + @param sequence_lengths: Specifies real sequence lengths for each batch element. + Shape: [batch_size]. Integer type. + @param W: Tensor with weights for matrix multiplication operation with input portion of data. + Expected format: fico + Shape: [num_directions, 4*hidden_size, input_size]. + @param R: The tensor with weights for matrix multiplication operation with hidden state. + Expected format: fico + Shape: [num_directions, 4*hidden_size, hidden_size]. + @param B: The sum of biases (weight and recurrence). Expected format: fico + Shape: [num_directions, 4*hidden_size]. + @param hidden_size: Specifies hidden state size. + @param direction: Specifies if the RNN is forward, reverse, or bidirectional. + @param activations: The list of three activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. + + @return: The new node represents LSTMSequence. Node outputs count: 3. 
+ """ + if activations is None: + activations = ["sigmoid", "tanh", "tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + node_inputs = as_nodes(X, initial_hidden_state, initial_cell_state, sequence_lengths, W, R, B) + + attributes = { + "hidden_size": hidden_size, + "direction": direction.lower(), + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "clip": clip, + } + return _get_node_factory_opset5().create("LSTMSequence", node_inputs, attributes) + + +def hsigmoid(data: NodeInput, name: Optional[str] = None,) -> Node: + """Return a node which performs HSigmoid. + + @param data: Tensor with input data floating point type. + @return: The new node which performs HSigmoid + """ + return _get_node_factory_opset5().create("HSigmoid", as_nodes(data), {}) + + +@nameable_op +def gru_sequence( + X: NodeInput, + initial_hidden_state: NodeInput, + sequence_lengths: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + direction: str, + activations: List[str] = None, + activations_alpha: List[float] = None, + activations_beta: List[float] = None, + clip: float = 0.0, + linear_before_reset: bool = False, + name: Optional[str] = None, +) -> Node: + """Return a node which performs GRUSequence operation. + + @param X: The input tensor. Shape: [batch_size, seq_length, input_size]. + @param initial_hidden_state: The hidden state tensor. + Shape: [batch_size, num_directions, hidden_size]. + @param sequence_lengths: Specifies real sequence lengths for each batch element. + Shape: [batch_size]. Integer type. + @param W: Tensor with weights for matrix multiplication operation with input portion of data. + Shape: [num_directions, 3*hidden_size, input_size]. + @param R: The tensor with weights for matrix multiplication operation with hidden state. + Shape: [num_directions, 3*hidden_size, hidden_size]. 
+ @param B: The sum of biases (weight and recurrence). + For linear_before_reset set True the shape is [num_directions, 4*hidden_size]. + Otherwise the shape is [num_directions, 3*hidden_size]. + @param hidden_size: Specifies hidden state size. + @param direction: Specifies if the RNN is forward, reverse, or bidirectional. + @param activations: The list of two activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param linear_before_reset: Flag denotes if the layer behaves according to the modification + of GRU described in the formula in the ONNX documentation. + @param name: An optional name of the output node. + + @return: The new node represents GRUSequence. Node outputs count: 2. + """ + if activations is None: + activations = ["sigmoid", "tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + node_inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B) + + attributes = { + "hidden_size": hidden_size, + "direction": direction.lower(), + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "linear_before_reset": linear_before_reset, + "clip": clip, + } + return _get_node_factory_opset5().create("GRUSequence", node_inputs, attributes) + + +@nameable_op +def rnn_sequence( + X: NodeInput, + initial_hidden_state: NodeInput, + sequence_lengths: NodeInput, + W: NodeInput, + R: NodeInput, + B: NodeInput, + hidden_size: int, + direction: str, + activations: List[str] = None, + activations_alpha: List[float] = None, + activations_beta: List[float] = None, + clip: float = 0.0, + name: Optional[str] = None, +) -> Node: + """Return a node which performs RNNSequence operation.
+ + @param X: The input tensor. Shape: [batch_size, seq_length, input_size]. + @param initial_hidden_state: The hidden state tensor. + Shape: [batch_size, num_directions, hidden_size]. + @param sequence_lengths: Specifies real sequence lengths for each batch element. + Shape: [batch_size]. Integer type. + @param W: Tensor with weights for matrix multiplication operation with input portion of data. + Shape: [num_directions, hidden_size, input_size]. + @param R: The tensor with weights for matrix multiplication operation with hidden state. + Shape: [num_directions, hidden_size, hidden_size]. + @param B: The sum of biases (weight and recurrence). + Shape: [num_directions, hidden_size]. + @param hidden_size: Specifies hidden state size. + @param direction: Specifies if the RNN is forward, reverse, or bidirectional. + @param activations: The list of activation functions for gates. + @param activations_alpha: The list of alpha parameters for activation functions. + @param activations_beta: The list of beta parameters for activation functions. + @param clip: Specifies bound values [-C, C] for tensor clipping performed before activations. + @param name: An optional name of the output node. + + @return: The new node represents RNNSequence. Node outputs count: 2.
+ """ + if activations is None: + activations = ["tanh"] + if activations_alpha is None: + activations_alpha = [] + if activations_beta is None: + activations_beta = [] + + inputs = as_nodes(X, initial_hidden_state, sequence_lengths, W, R, B) + + attributes = { + "hidden_size": hidden_size, + "direction": direction.lower(), + "activations": activations, + "activations_alpha": activations_alpha, + "activations_beta": activations_beta, + "clip": clip, + } + + return _get_node_factory_opset5().create("RNNSequence", inputs, attributes) + + +@nameable_op +def loop( + trip_count: NodeInput, + execution_condition: NodeInput, + inputs: List[Node], + graph_body: GraphBody, + slice_input_desc: List[TensorIteratorSliceInputDesc], + merged_input_desc: List[TensorIteratorMergedInputDesc], + invariant_input_desc: List[TensorIteratorInvariantInputDesc], + body_output_desc: List[TensorIteratorBodyOutputDesc], + concat_output_desc: List[TensorIteratorConcatOutputDesc], + body_condition_output_idx: int, + current_iteration_input_idx: int = -1, + name: Optional[str] = None, +) -> Node: + """Perform recurrent execution of the network described in the body, iterating through the data. + + @param trip_count: A scalar or 1D tensor with 1 element specifying + maximum number of iterations. + @param execution_condition: A scalar or 1D tensor with 1 element + specifying whether to execute the first iteration or not. + @param inputs: The provided to TensorIterator operator. + @param graph_body: The graph representing the body we execute. + @param slice_input_desc: The descriptors describing sliced inputs, that is nodes + representing tensors we iterate through, processing single + data slice in one iteration. + @param merged_input_desc: The descriptors describing merged inputs, that is nodes + representing variables with initial value at first iteration, + which may be changing through iterations. 
+ @param invariant_input_desc: The descriptors describing invariant inputs, that is nodes + representing variable with persistent value through all + iterations. + @param body_output_desc: The descriptors describing body outputs from specified + iteration. + @param concat_output_desc: The descriptors describing specified output values through + all the iterations concatenated into one node. + @param body_condition_output_idx: Determines the purpose of the corresponding result in + the graph_body. This result will determine the dynamic + exit condition. If the value of this result is False, + then iterations stop. + @param current_iteration_input_idx: Determines the purpose of the corresponding parameter + in the graph_body. This parameter will be used as + an iteration counter. Optional. + @return: The new node which performs Loop. + """ + attributes = { + "body": graph_body.serialize(), + "input_descriptions": {"slice_input_desc": [desc.serialize() for desc in slice_input_desc], + "merged_input_desc": [desc.serialize() for desc in merged_input_desc], + "invariant_input_desc": [desc.serialize() for desc in invariant_input_desc]}, + "output_descriptions": {"body_output_desc": [desc.serialize() for desc in body_output_desc], + "concat_output_desc": [desc.serialize() for desc in concat_output_desc]}, + "special_body_ports": {"body_condition_output_idx": body_condition_output_idx, + "current_iteration_input_idx": current_iteration_input_idx} + } + return _get_node_factory_opset5().create("Loop", as_nodes(trip_count, execution_condition, *inputs), + attributes) diff --git a/runtime/bindings/python/src/openvino/opset6/__init__.py b/runtime/bindings/python/src/openvino/opset6/__init__.py new file mode 100644 index 00000000000..a496edd1fbf --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset6/__init__.py @@ -0,0 +1,152 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from 
openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset4.ops import acosh +from openvino.opset1.ops import add +from openvino.opset1.ops import asin +from openvino.opset4.ops import asinh +from openvino.opset6.ops import assign +from openvino.opset1.ops import atan +from openvino.opset4.ops import atanh +from openvino.opset1.ops import avg_pool +from openvino.opset5.ops import batch_norm_inference +from openvino.opset2.ops import batch_to_space +from openvino.opset1.ops import binary_convolution +from openvino.opset3.ops import broadcast +from openvino.opset3.ops import bucketize +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset6.ops import ctc_greedy_decoder_seq_len +from openvino.opset4.ops import ctc_loss +from openvino.opset3.ops import cum_sum +from openvino.opset3.ops import cum_sum as cumsum +from openvino.opset1.ops import deformable_convolution +from openvino.opset1.ops import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset1.ops import divide +from openvino.opset1.ops import elu +from openvino.opset3.ops import embedding_bag_offsets_sum +from openvino.opset3.ops import embedding_bag_packed_sum +from openvino.opset3.ops import embedding_segments_sum +from openvino.opset3.ops import extract_image_patches +from openvino.opset1.ops import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from 
openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset1.ops import gather +from openvino.opset6.ops import gather_elements +from openvino.opset5.ops import gather_nd +from openvino.opset1.ops import gather_tree +from openvino.opset2.ops import gelu +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset3.ops import gru_cell +from openvino.opset5.ops import gru_sequence +from openvino.opset1.ops import hard_sigmoid +from openvino.opset5.ops import hsigmoid +from openvino.opset4.ops import hswish +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import logical_or +from openvino.opset1.ops import logical_xor +from openvino.opset5.ops import log_softmax +from openvino.opset5.ops import loop +from openvino.opset1.ops import lrn +from openvino.opset4.ops import lstm_cell +from openvino.opset1.ops import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset1.ops import max_pool +from openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset4.ops import mish +from openvino.opset1.ops import mod +from openvino.opset1.ops import multiply +from openvino.opset6.ops import mvn +from openvino.opset1.ops import negative +from openvino.opset5.ops import non_max_suppression +from openvino.opset3.ops import non_zero +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops 
import parameter +from openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset4.ops import proposal +from openvino.opset1.ops import range +from openvino.opset6.ops import read_value +from openvino.opset4.ops import reduce_l1 +from openvino.opset4.ops import reduce_l2 +from openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset2.ops import reorg_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result +from openvino.opset1.ops import reverse_sequence +from openvino.opset3.ops import rnn_cell +from openvino.opset5.ops import rnn_sequence +from openvino.opset3.ops import roi_align +from openvino.opset2.ops import roi_pooling +from openvino.opset5.ops import round +from openvino.opset3.ops import scatter_elements_update +from openvino.opset3.ops import scatter_update +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset3.ops import shape_of +from openvino.opset3.ops import shuffle_channels +from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset4.ops import softplus +from openvino.opset2.ops import space_to_batch +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops import sqrt +from openvino.opset1.ops import squared_difference +from 
openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset4.ops import swish +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset3.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset6/ops.py b/runtime/bindings/python/src/openvino/opset6/ops.py new file mode 100644 index 00000000000..14b0ae1b72a --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset6/ops.py @@ -0,0 +1,163 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all openvino ops.""" +from typing import Callable, Iterable, List, Optional, Set, Union + +import numpy as np +from functools import partial + +from openvino.impl import Node, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + assert_list_of_ints, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + +_get_node_factory_opset6 = partial(_get_node_factory, "opset6") + +# -------------------------------------------- ops 
------------------------------------------------ + + +@nameable_op +def ctc_greedy_decoder_seq_len( + data: NodeInput, + sequence_length: NodeInput, + blank_index: Optional[NodeInput] = None, + merge_repeated: bool = True, + classes_index_type: str = "i32", + sequence_length_type: str = "i32", + name: Optional[str] = None, +) -> Node: + """Return a node which performs CTCGreedyDecoderSeqLen. + + @param data: The input 3D tensor. Shape: [batch_size, seq_length, num_classes] + @param sequence_length: Input 1D tensor with sequence length. Shape: [batch_size] + @param blank_index: Scalar or 1D tensor that specifies the class index to use for the blank class. + Optional parameter. Default value is num_classes-1. + @return: The new node which performs CTCGreedyDecoderSeqLen. + """ + if blank_index is not None: + inputs = as_nodes(data, sequence_length, blank_index) + else: + inputs = as_nodes(data, sequence_length) + + attributes = { + "merge_repeated": merge_repeated, + "classes_index_type": classes_index_type, + "sequence_length_type": sequence_length_type + } + + return _get_node_factory_opset6().create("CTCGreedyDecoderSeqLen", inputs, attributes) + + +@nameable_op +def gather_elements( + data: NodeInput, + indices: NodeInput, + axis: Optional[int] = 0, + name: Optional[str] = None, +) -> Node: + """Return a node which performs GatherElements. + + @param data: N-D tensor with data for gathering + @param indices: N-D tensor with indices by which data is gathered + @param axis: axis along which elements are gathered + @return: The new node which performs GatherElements + """ + inputs = as_nodes(data, indices) + + attributes = { + "axis": axis + } + + return _get_node_factory_opset6().create("GatherElements", inputs, attributes) + + +@nameable_op +def mvn( + data: Node, + axes: Node, + normalize_variance: bool, + eps: float, + eps_mode: str, + name: Optional[str] = None, +) -> Node: + """Return a node which performs MeanVarianceNormalization (MVN).
+ + @param data: The node with data tensor. + @param axes: The node with axes to reduce on. + @param normalize_variance: Denotes whether to perform variance normalization. + @param eps: The number added to the variance to avoid division by zero + when normalizing the value. Scalar value. + @param eps_mode: how eps is applied (`inside_sqrt` or `outside_sqrt`) + @param name: Optional output node name. + @return The new node performing a MVN operation on input tensor. + """ + inputs = as_nodes(data, axes) + + attributes = { + "normalize_variance": normalize_variance, + "eps": eps, + "eps_mode": eps_mode + } + + return _get_node_factory_opset6().create("MVN", inputs, attributes) + + +@nameable_op +def assign(new_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: + """Return a node which produces the Assign operation. + + @param new_value: Node producing a value to be assigned to a variable. + @param variable_id: Id of a variable to be updated. + @param name: Optional name for output node. + @return Assign node + """ + return _get_node_factory_opset6().create( + "Assign", + [as_node(new_value)], + {"variable_id": variable_id} + ) + + +@nameable_op +def read_value(init_value: NodeInput, variable_id: str, name: Optional[str] = None) -> Node: + """Return a node which produces the ReadValue operation. + + @param init_value: Node producing a value to be returned instead of an unassigned variable. + @param variable_id: Id of a variable to be read. + @param name: Optional name for output node.
+ @return ReadValue node + """ + return _get_node_factory_opset6().create( + "ReadValue", + [as_node(init_value)], + {"variable_id": variable_id} + ) diff --git a/runtime/bindings/python/src/openvino/opset7/__init__.py b/runtime/bindings/python/src/openvino/opset7/__init__.py new file mode 100644 index 00000000000..f38670e313c --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset7/__init__.py @@ -0,0 +1,156 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset4.ops import acosh +from openvino.opset1.ops import add +from openvino.opset1.ops import asin +from openvino.opset4.ops import asinh +from openvino.opset3.ops import assign +from openvino.opset1.ops import atan +from openvino.opset4.ops import atanh +from openvino.opset1.ops import avg_pool +from openvino.opset5.ops import batch_norm_inference +from openvino.opset2.ops import batch_to_space +from openvino.opset1.ops import binary_convolution +from openvino.opset3.ops import broadcast +from openvino.opset3.ops import bucketize +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset6.ops import ctc_greedy_decoder_seq_len +from openvino.opset4.ops import ctc_loss +from openvino.opset3.ops import cum_sum +from openvino.opset3.ops import cum_sum as cumsum +from openvino.opset1.ops import deformable_convolution +from openvino.opset1.ops 
import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset7.ops import dft +from openvino.opset1.ops import divide +from openvino.opset7.ops import einsum +from openvino.opset1.ops import elu +from openvino.opset3.ops import embedding_bag_offsets_sum +from openvino.opset3.ops import embedding_bag_packed_sum +from openvino.opset3.ops import embedding_segments_sum +from openvino.opset3.ops import extract_image_patches +from openvino.opset1.ops import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset7.ops import gather +from openvino.opset6.ops import gather_elements +from openvino.opset5.ops import gather_nd +from openvino.opset1.ops import gather_tree +from openvino.opset7.ops import gelu +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset3.ops import gru_cell +from openvino.opset5.ops import gru_sequence +from openvino.opset1.ops import hard_sigmoid +from openvino.opset5.ops import hsigmoid +from openvino.opset4.ops import hswish +from openvino.opset7.ops import idft +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import logical_or +from openvino.opset1.ops import logical_xor +from openvino.opset5.ops import log_softmax +from openvino.opset5.ops import loop +from openvino.opset1.ops import lrn +from openvino.opset4.ops import lstm_cell +from openvino.opset1.ops 
import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset1.ops import max_pool +from openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset4.ops import mish +from openvino.opset1.ops import mod +from openvino.opset1.ops import multiply +from openvino.opset6.ops import mvn +from openvino.opset1.ops import negative +from openvino.opset5.ops import non_max_suppression +from openvino.opset3.ops import non_zero +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops import parameter +from openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset4.ops import proposal +from openvino.opset1.ops import range +from openvino.opset3.ops import read_value +from openvino.opset4.ops import reduce_l1 +from openvino.opset4.ops import reduce_l2 +from openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset2.ops import reorg_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result +from openvino.opset1.ops import reverse_sequence +from openvino.opset3.ops import rnn_cell +from openvino.opset5.ops import rnn_sequence +from openvino.opset3.ops import roi_align +from openvino.opset2.ops import roi_pooling +from openvino.opset7.ops import roll +from openvino.opset5.ops import round +from openvino.opset3.ops import 
scatter_elements_update +from openvino.opset3.ops import scatter_update +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset3.ops import shape_of +from openvino.opset3.ops import shuffle_channels +from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset4.ops import softplus +from openvino.opset2.ops import space_to_batch +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops import sqrt +from openvino.opset1.ops import squared_difference +from openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset4.ops import swish +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset3.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset7/ops.py b/runtime/bindings/python/src/openvino/opset7/ops.py new file mode 100644 index 00000000000..8d7259daa8e --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset7/ops.py @@ -0,0 +1,166 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all openvino ops.""" +from functools import partial +from typing import Callable, Iterable, List, Optional, Set, Union + +import numpy as np +from openvino.impl import Node, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + 
assert_list_of_ints, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + +_get_node_factory_opset7 = partial(_get_node_factory, "opset7") + + +# -------------------------------------------- ops ------------------------------------------------ + + +@nameable_op +def einsum( + inputs: List[Node], + equation: str +) -> Node: + """Return a node which performs Einsum operation. + + @param inputs: The list of input nodes + @param equation: Einsum equation + @return: The new node performing Einsum operation on the inputs + """ + attributes = { + "equation": equation + } + + return _get_node_factory_opset7().create("Einsum", as_nodes(*inputs), attributes) + + +@nameable_op +def gelu( + data: Node, + approximation_mode: str, + name: Optional[str] = None, +) -> Node: + """Return a node which performs Gelu activation function. + + @param data: The node with data tensor. + @param approximation_mode: defines which approximation to use ('tanh' or 'erf') + @param name: Optional output node name. + @return The new node performing a Gelu activation with the input tensor. + """ + inputs = as_nodes(data) + + attributes = { + "approximation_mode": approximation_mode + } + + return _get_node_factory_opset7().create("Gelu", inputs, attributes) + + +@nameable_op +def roll( + data: NodeInput, + shift: NodeInput, + axes: NodeInput, +) -> Node: + """Return a node which performs Roll operation. + + @param data: The node with data tensor. 
+ @param shift: The node with the tensor with numbers of places by which elements are shifted. + @param axes: The node with the tensor with axes along which elements are shifted. + @return The new node performing a Roll operation on the input tensor. + """ + inputs = as_nodes(data, shift, axes) + + return _get_node_factory_opset7().create("Roll", inputs) + + +@nameable_op +def gather( + data: NodeInput, + indices: NodeInput, + axis: NodeInput, + batch_dims: Optional[int] = 0, +) -> Node: + """Return a node which performs Gather. + + @param data: N-D tensor with data for gathering + @param indices: N-D tensor with indices by which data is gathered + @param axis: axis along which elements are gathered + @param batch_dims: number of batch dimensions + @return: The new node which performs Gather + """ + inputs = as_nodes(data, indices, axis) + attributes = { + "batch_dims": batch_dims + } + return _get_node_factory_opset7().create("Gather", inputs, attributes) + + +def dft( + data: NodeInput, + axes: NodeInput, + signal_size: Optional[NodeInput] = None, +) -> Node: + """Return a node which performs DFT operation. + + @param data: Tensor with transformed data. + @param axes: Tensor with axes to transform. + @param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'. + @return: The new node which performs DFT operation on the input data tensor. + """ + if signal_size is None: + inputs = as_nodes(data, axes) + else: + inputs = as_nodes(data, axes, signal_size) + + return _get_node_factory_opset7().create("DFT", inputs) + + +@nameable_op +def idft( + data: NodeInput, + axes: NodeInput, + signal_size: Optional[NodeInput] = None, +) -> Node: + """Return a node which performs IDFT operation. + + @param data: Tensor with transformed data. + @param axes: Tensor with axes to transform. + @param signal_size: Tensor specifying signal size with respect to axes from the input 'axes'. 
+ @return: The new node which performs IDFT operation on the input data tensor. + """ + if signal_size is None: + inputs = as_nodes(data, axes) + else: + inputs = as_nodes(data, axes, signal_size) + + return _get_node_factory_opset7().create("IDFT", inputs) diff --git a/runtime/bindings/python/src/openvino/opset8/__init__.py b/runtime/bindings/python/src/openvino/opset8/__init__.py new file mode 100644 index 00000000000..e280228ae99 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset8/__init__.py @@ -0,0 +1,161 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.opset1.ops import absolute +from openvino.opset1.ops import absolute as abs +from openvino.opset1.ops import acos +from openvino.opset4.ops import acosh +from openvino.opset8.ops import adaptive_avg_pool +from openvino.opset8.ops import adaptive_max_pool +from openvino.opset1.ops import add +from openvino.opset1.ops import asin +from openvino.opset4.ops import asinh +from openvino.opset3.ops import assign +from openvino.opset1.ops import atan +from openvino.opset4.ops import atanh +from openvino.opset1.ops import avg_pool +from openvino.opset5.ops import batch_norm_inference +from openvino.opset2.ops import batch_to_space +from openvino.opset1.ops import binary_convolution +from openvino.opset3.ops import broadcast +from openvino.opset3.ops import bucketize +from openvino.opset1.ops import ceiling +from openvino.opset1.ops import ceiling as ceil +from openvino.opset1.ops import clamp +from openvino.opset1.ops import concat +from openvino.opset1.ops import constant +from openvino.opset1.ops import convert +from openvino.opset1.ops import convert_like +from openvino.opset1.ops import convolution +from openvino.opset1.ops import convolution_backprop_data +from openvino.opset1.ops import cos +from openvino.opset1.ops import cosh +from openvino.opset1.ops import ctc_greedy_decoder +from openvino.opset6.ops import ctc_greedy_decoder_seq_len +from 
openvino.opset4.ops import ctc_loss +from openvino.opset3.ops import cum_sum +from openvino.opset3.ops import cum_sum as cumsum +from openvino.opset8.ops import deformable_convolution +from openvino.opset1.ops import deformable_psroi_pooling +from openvino.opset1.ops import depth_to_space +from openvino.opset1.ops import detection_output +from openvino.opset7.ops import dft +from openvino.opset1.ops import divide +from openvino.opset7.ops import einsum +from openvino.opset1.ops import elu +from openvino.opset3.ops import embedding_bag_offsets_sum +from openvino.opset3.ops import embedding_bag_packed_sum +from openvino.opset3.ops import embedding_segments_sum +from openvino.opset3.ops import extract_image_patches +from openvino.opset1.ops import equal +from openvino.opset1.ops import erf +from openvino.opset1.ops import exp +from openvino.opset1.ops import fake_quantize +from openvino.opset1.ops import floor +from openvino.opset1.ops import floor_mod +from openvino.opset8.ops import gather +from openvino.opset6.ops import gather_elements +from openvino.opset5.ops import gather_nd +from openvino.opset1.ops import gather_tree +from openvino.opset7.ops import gelu +from openvino.opset1.ops import greater +from openvino.opset1.ops import greater_equal +from openvino.opset1.ops import grn +from openvino.opset1.ops import group_convolution +from openvino.opset1.ops import group_convolution_backprop_data +from openvino.opset3.ops import gru_cell +from openvino.opset5.ops import gru_sequence +from openvino.opset1.ops import hard_sigmoid +from openvino.opset5.ops import hsigmoid +from openvino.opset4.ops import hswish +from openvino.opset7.ops import idft +from openvino.opset1.ops import interpolate +from openvino.opset1.ops import less +from openvino.opset1.ops import less_equal +from openvino.opset1.ops import log +from openvino.opset1.ops import logical_and +from openvino.opset1.ops import logical_not +from openvino.opset1.ops import logical_or +from openvino.opset1.ops 
import logical_xor +from openvino.opset5.ops import log_softmax +from openvino.opset5.ops import loop +from openvino.opset1.ops import lrn +from openvino.opset4.ops import lstm_cell +from openvino.opset1.ops import lstm_sequence +from openvino.opset1.ops import matmul +from openvino.opset8.ops import matrix_nms +from openvino.opset8.ops import max_pool +from openvino.opset1.ops import maximum +from openvino.opset1.ops import minimum +from openvino.opset4.ops import mish +from openvino.opset1.ops import mod +from openvino.opset8.ops import multiclass_nms +from openvino.opset1.ops import multiply +from openvino.opset6.ops import mvn +from openvino.opset1.ops import negative +from openvino.opset5.ops import non_max_suppression +from openvino.opset3.ops import non_zero +from openvino.opset1.ops import normalize_l2 +from openvino.opset1.ops import not_equal +from openvino.opset1.ops import one_hot +from openvino.opset1.ops import pad +from openvino.opset1.ops import parameter +from openvino.opset1.ops import power +from openvino.opset1.ops import prelu +from openvino.opset1.ops import prior_box +from openvino.opset1.ops import prior_box_clustered +from openvino.opset1.ops import psroi_pooling +from openvino.opset4.ops import proposal +from openvino.opset1.ops import range +from openvino.opset8.ops import random_uniform +from openvino.opset3.ops import read_value +from openvino.opset4.ops import reduce_l1 +from openvino.opset4.ops import reduce_l2 +from openvino.opset1.ops import reduce_logical_and +from openvino.opset1.ops import reduce_logical_or +from openvino.opset1.ops import reduce_max +from openvino.opset1.ops import reduce_mean +from openvino.opset1.ops import reduce_min +from openvino.opset1.ops import reduce_prod +from openvino.opset1.ops import reduce_sum +from openvino.opset1.ops import region_yolo +from openvino.opset2.ops import reorg_yolo +from openvino.opset1.ops import relu +from openvino.opset1.ops import reshape +from openvino.opset1.ops import result 
+from openvino.opset1.ops import reverse_sequence +from openvino.opset3.ops import rnn_cell +from openvino.opset5.ops import rnn_sequence +from openvino.opset3.ops import roi_align +from openvino.opset2.ops import roi_pooling +from openvino.opset7.ops import roll +from openvino.opset5.ops import round +from openvino.opset3.ops import scatter_elements_update +from openvino.opset3.ops import scatter_update +from openvino.opset1.ops import select +from openvino.opset1.ops import selu +from openvino.opset3.ops import shape_of +from openvino.opset3.ops import shuffle_channels +from openvino.opset1.ops import sigmoid +from openvino.opset1.ops import sign +from openvino.opset1.ops import sin +from openvino.opset1.ops import sinh +from openvino.opset1.ops import softmax +from openvino.opset4.ops import softplus +from openvino.opset2.ops import space_to_batch +from openvino.opset1.ops import space_to_depth +from openvino.opset1.ops import split +from openvino.opset1.ops import sqrt +from openvino.opset1.ops import squared_difference +from openvino.opset1.ops import squeeze +from openvino.opset1.ops import strided_slice +from openvino.opset1.ops import subtract +from openvino.opset4.ops import swish +from openvino.opset1.ops import tan +from openvino.opset1.ops import tanh +from openvino.opset1.ops import tensor_iterator +from openvino.opset1.ops import tile +from openvino.opset3.ops import topk +from openvino.opset1.ops import transpose +from openvino.opset1.ops import unsqueeze +from openvino.opset1.ops import variadic_split diff --git a/runtime/bindings/python/src/openvino/opset8/ops.py b/runtime/bindings/python/src/openvino/opset8/ops.py new file mode 100644 index 00000000000..eff68108ea7 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset8/ops.py @@ -0,0 +1,369 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for all openvino ops.""" +from functools import partial +from typing import Callable, 
Iterable, List, Optional, Set, Union + +import numpy as np +from openvino.impl import Node, Shape +from openvino.impl.op import Constant, Parameter +from openvino.opset_utils import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( + assert_list_of_ints, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) +from openvino.utils.node_factory import NodeFactory +from openvino.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, +) +from openvino.utils.types import ( + NodeInput, + NumericData, + NumericType, + ScalarData, + TensorShape, + as_node, + as_nodes, + get_dtype, + get_element_type, + get_element_type_str, + make_constant_node, +) + +_get_node_factory_opset8 = partial(_get_node_factory, "opset8") + + +# -------------------------------------------- ops ------------------------------------------------ + + +@nameable_op +def deformable_convolution( + data: NodeInput, + offsets: NodeInput, + filters: NodeInput, + strides: List[int], + pads_begin: List[int], + pads_end: List[int], + dilations: List[int], + mask: Optional[NodeInput] = None, + auto_pad: str = "EXPLICIT", + group: int = 1, + deformable_group: int = 1, + bilinear_interpolation_pad: bool = False, + name: Optional[str] = None, +) -> Node: + """Return a node which performs deformable convolution operation. + + @param data: The node providing data batch tensor. + @param offsets: The node providing offset tensor. + @param filters: The node providing filters tensor. + @param strides: The distance (in pixels) to slide the filter on the feature map over the axes. + @param pads_begin: The number of pixels to add to the beginning along each axis. + @param pads_end: The number of pixels to add to the end along each axis. 
+ @param dilations: The distance in width and height between elements (weights) in the filter. + @param mask: The node providing modulation scalar (mask) tensor. + @param auto_pad: The type of padding. Range of values: explicit, same_upper, same_lower, valid. + @param group: The number of groups which both output and input should be split into. + @param deformable_group: The number of groups which deformable values and output should be split + into along the channel axis. + @param bilinear_interpolation_pad: The flag that determines the mode of bilinear interpolation + execution. + @param name: The optional new name for output node. + @return New node performing deformable convolution operation. + """ + if mask is None: + inputs = as_nodes(data, offsets, filters) + else: + inputs = as_nodes(data, offsets, filters, mask) + + return _get_node_factory_opset8().create( + "DeformableConvolution", + inputs, + { + "strides": strides, + "pads_begin": pads_begin, + "pads_end": pads_end, + "dilations": dilations, + "auto_pad": auto_pad, + "group": group, + "deformable_group": deformable_group, + "bilinear_interpolation_pad": bilinear_interpolation_pad + }, + ) + + +@nameable_op
+def adaptive_avg_pool( + data: NodeInput, + output_shape: NodeInput +) -> Node: + """Return a node which performs AdaptiveAvgPool operation. + + @param data: The node providing input data. + @param output_shape: the shape of spatial dimensions after operation + @return: The new node performing AdaptiveAvgPool operation on the data + """ + inputs = as_nodes(data, output_shape) + return _get_node_factory_opset8().create("AdaptiveAvgPool", inputs) + + +@nameable_op +def adaptive_max_pool( + data: NodeInput, + output_shape: NodeInput, + index_element_type: str = "i64" +) -> Node: + """Return a node which performs AdaptiveMaxPool operation. + + @param data: The node providing input data. + @param output_shape: the shape of spatial dimensions after operation + @param index_element_type: Type of indices output. 
+ @return: The new node performing AdaptiveMaxPool operation on the data + """ + inputs = as_nodes(data, output_shape) + + attributes = { + "index_element_type": index_element_type, + } + + return _get_node_factory_opset8().create("AdaptiveMaxPool", inputs, attributes) + + +@nameable_op +def multiclass_nms( + boxes: NodeInput, + scores: NodeInput, + sort_result_type: str = "none", + sort_result_across_batch: bool = False, + output_type: str = "i64", + iou_threshold: float = 0.0, + score_threshold: float = 0.0, + nms_top_k: int = -1, + keep_top_k: int = -1, + background_class: int = -1, + nms_eta: float = 1.0, + normalized: bool = True +) -> Node: + """Return a node which performs MulticlassNms. + + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. + @param sort_result_type: Specifies order of output elements, possible values: + 'class': sort selected boxes by class id (ascending) + 'score': sort selected boxes by score (descending) + 'none': do not guarantee the order. 
+ @param sort_result_across_batch: Specifies whenever it is necessary to sort selected boxes + across batches or not + @param output_type: Specifies the output tensor type, possible values: + 'i64', 'i32' + @param iou_threshold: Specifies intersection over union threshold + @param score_threshold: Specifies minimum score to consider box for the processing + @param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning + to keep all boxes + @param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1 + meaning to keep all boxes + @param background_class: Specifies the background class id, -1 meaning to keep all classes + @param nms_eta: Specifies eta parameter for adpative NMS, in close range [0, 1.0] + @param normalized: Specifies whether boxes are normalized or not + @return: The new node which performs MuticlassNms + """ + inputs = as_nodes(boxes, scores) + + attributes = { + "sort_result_type": sort_result_type, + "sort_result_across_batch": sort_result_across_batch, + "output_type": output_type, + "iou_threshold": iou_threshold, + "score_threshold": score_threshold, + "nms_top_k": nms_top_k, + "keep_top_k": keep_top_k, + "background_class": background_class, + "nms_eta": nms_eta, + "normalized": normalized + } + + return _get_node_factory_opset8().create("MulticlassNms", inputs, attributes) + + +@nameable_op +def matrix_nms( + boxes: NodeInput, + scores: NodeInput, + sort_result_type: str = "none", + sort_result_across_batch: bool = False, + output_type: str = "i64", + score_threshold: float = 0.0, + nms_top_k: int = -1, + keep_top_k: int = -1, + background_class: int = -1, + decay_function: str = "linear", + gaussian_sigma: float = 2.0, + post_threshold: float = 0.0, + normalized: bool = True +) -> Node: + """Return a node which performs MatrixNms. + + @param boxes: Tensor with box coordinates. + @param scores: Tensor with box scores. 
+ @param sort_result_type: Specifies order of output elements, possible values: + 'class': sort selected boxes by class id (ascending) + 'score': sort selected boxes by score (descending) + 'none': do not guarantee the order. + @param sort_result_across_batch: Specifies whenever it is necessary to sort selected boxes + across batches or not + @param output_type: Specifies the output tensor type, possible values: + 'i64', 'i32' + @param score_threshold: Specifies minimum score to consider box for the processing + @param nms_top_k: Specifies maximum number of boxes to be selected per class, -1 meaning + to keep all boxes + @param keep_top_k: Specifies maximum number of boxes to be selected per batch element, -1 + meaning to keep all boxes + @param background_class: Specifies the background class id, -1 meaning to keep all classes + @param decay_function: Specifies decay function used to decay scores, possible values: + 'gaussian', 'linear' + @param gaussian_sigma: Specifies gaussian_sigma parameter for gaussian decay_function + @param post_threshold: Specifies threshold to filter out boxes with low confidence score + after decaying + @param normalized: Specifies whether boxes are normalized or not + @return: The new node which performs MatrixNms + """ + inputs = as_nodes(boxes, scores) + + attributes = { + "sort_result_type": sort_result_type, + "sort_result_across_batch": sort_result_across_batch, + "output_type": output_type, + "score_threshold": score_threshold, + "nms_top_k": nms_top_k, + "keep_top_k": keep_top_k, + "background_class": background_class, + "decay_function": decay_function, + "gaussian_sigma": gaussian_sigma, + "post_threshold": post_threshold, + "normalized": normalized + } + + return _get_node_factory_opset8().create("MatrixNms", inputs, attributes) + + +@nameable_op +def gather( + data: NodeInput, + indices: NodeInput, + axis: NodeInput, + batch_dims: Optional[int] = 0, +) -> Node: + """Return a node which performs Gather with support of 
negative indices. + + @param data: N-D tensor with data for gathering + @param indices: N-D tensor with indices by which data is gathered. Negative indices + indicate reverse indexing from the end + @param axis: axis along which elements are gathered + @param batch_dims: number of batch dimensions + @return: The new node which performs Gather + """ + inputs = as_nodes(data, indices, axis) + attributes = { + "batch_dims": batch_dims + } + return _get_node_factory_opset8().create("Gather", inputs, attributes) + + +@nameable_op +def max_pool( + data: NodeInput, + strides: List[int], + dilations: List[int], + pads_begin: List[int], + pads_end: List[int], + kernel_shape: TensorShape, + rounding_type: str = "floor", + auto_pad: Optional[str] = None, + index_element_type: Optional[str] = "i64", + axis: Optional[int] = 0, + name: Optional[str] = None, +) -> Node: + """Perform max pooling operation and return both values and indices of the selected elements. + + @param data: The node providing input data. + @param strides: The distance (in pixels) to slide the filter on the feature map + over the axes. + @param dilations: The dilation of filter elements(distance between elements). + @param pads_begin: The number of pixels to add at the beginning along each axis. + @param pads_end: The number of pixels to add at the end along each axis. + @param kernel_shape: The pooling operation kernel shape. + @param rounding_type: Determines used rounding schema when computing output shape. + Acceptable values are: ['floor', 'ceil']. Defaults to 'floor'. + @param auto_pad: Determines how the padding is calculated. Acceptable values: + [None, 'same_upper', 'same_lower', 'valid']. Defaults to None. + @param index_element_type: The data type used for the indices output of this operator. + Defaults to i64. + @param axis: The first dimension in the data shape used to determine the maximum + returned index value. The value is the product of all dimensions + starting at the provided axis. 
Defaults to 0. + @param name: The optional name for the created output node. + + @return The new node performing max pooling operation. + """ + if auto_pad is None: + auto_pad = "explicit" + return _get_node_factory_opset8().create( + "MaxPool", + [as_node(data)], + { + "strides": strides, + "dilations": dilations, + "pads_begin": pads_begin, + "pads_end": pads_end, + "kernel": kernel_shape, + "rounding_type": rounding_type.upper(), + "auto_pad": auto_pad.upper(), + "index_element_type": index_element_type, + "axis": axis, + }, + ) + + +@nameable_op +def random_uniform( + output_shape: NodeInput, + min_val: NodeInput, + max_val: NodeInput, + output_type: str, + global_seed: int = 0, + op_seed: int = 0 +) -> Node: + """Return a node which generates sequence of random values from uniform distribution. + + @param output_shape: Tensor with shape of the output tensor. + @param min_val: Tensor with the lower bound on the range of random values to generate. + @param max_val: Tensor with the upper bound on the range of random values to generate. + @param output_type: Specifies the output tensor type, possible values: + 'i64', 'i32', 'f64', 'f32', 'f16', 'bf16'. + @param global_seed: Specifies global seed value. Required to be a positive integer or 0. + @param op_seed: Specifies operational seed value. Required to be a positive integer or 0. + @return The new node which performs generation of random values from uniform distribution. + """ + inputs = as_nodes(output_shape, min_val, max_val) + + if global_seed < 0: + raise RuntimeError("global_seed should be positive or 0. Got: {}".format(global_seed)) + + if op_seed < 0: + raise RuntimeError("op_seed should be positive or 0. 
Got: {}".format(op_seed)) + + attributes = { + "output_type": output_type, + "global_seed": global_seed, + "op_seed": op_seed, + } + return _get_node_factory_opset8().create("RandomUniform", inputs, attributes) diff --git a/runtime/bindings/python/src/openvino/opset_utils.py b/runtime/bindings/python/src/openvino/opset_utils.py new file mode 100644 index 00000000000..12fd13ea1f8 --- /dev/null +++ b/runtime/bindings/python/src/openvino/opset_utils.py @@ -0,0 +1,21 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional +import numpy as np + +from openvino.impl import Node +from openvino.utils.decorators import nameable_op +from openvino.utils.node_factory import NodeFactory +from openvino.utils.types import ( + as_node, + NodeInput, +) + + +def _get_node_factory(opset_version: Optional[str] = None) -> NodeFactory: + """Return NodeFactory configured to create operators from specified opset version.""" + if opset_version: + return NodeFactory(opset_version) + else: + return NodeFactory() diff --git a/runtime/bindings/python/src/openvino/utils/__init__.py b/runtime/bindings/python/src/openvino/utils/__init__.py new file mode 100644 index 00000000000..92b6b085ab8 --- /dev/null +++ b/runtime/bindings/python/src/openvino/utils/__init__.py @@ -0,0 +1,4 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Generic utilities. 
Factor related functions out to separate files.""" diff --git a/runtime/bindings/python/src/openvino/utils/broadcasting.py b/runtime/bindings/python/src/openvino/utils/broadcasting.py new file mode 100644 index 00000000000..155fd5f6615 --- /dev/null +++ b/runtime/bindings/python/src/openvino/utils/broadcasting.py @@ -0,0 +1,36 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging +from typing import List + +import openvino as ng +from openvino.impl import AxisSet, Node +from openvino.utils.types import NodeInput, TensorShape, get_dtype, make_constant_node + +log = logging.getLogger(__name__) + + +def get_broadcast_axes( + output_shape: TensorShape, input_shape: TensorShape, axis: int = None +) -> AxisSet: + """Generate a list of broadcast axes for openvino broadcast. + + Informally, a broadcast "adds" axes to the input tensor, + replicating elements from the input tensor as needed to fill the new dimensions. + Function calculate which of the output axes are added in this way. + + @param output_shape: The new shape for the output tensor. + @param input_shape: The shape of input tensor. + @param axis: The axis along which we want to replicate elements. + @return The indices of added axes. 
+ """ + axes_indexes = list(range(0, len(output_shape))) + if axis is None: + output_begin = len(output_shape) - len(input_shape) + else: + output_begin = axis + right_axes_indexes = list(range(output_begin, output_begin + len(input_shape))) + for index in reversed(right_axes_indexes): + del axes_indexes[index] + return AxisSet(set(axes_indexes)) diff --git a/runtime/bindings/python/src/openvino/utils/decorators.py b/runtime/bindings/python/src/openvino/utils/decorators.py new file mode 100644 index 00000000000..2442c0c6747 --- /dev/null +++ b/runtime/bindings/python/src/openvino/utils/decorators.py @@ -0,0 +1,52 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from functools import wraps +from typing import Any, Callable + +from openvino.impl import Node +from openvino.utils.types import NodeInput, as_node, as_nodes + + +def _set_node_friendly_name(node: Node, **kwargs: Any) -> Node: + if "name" in kwargs: + node.friendly_name = kwargs["name"] + return node + + +def nameable_op(node_factory_function: Callable) -> Callable: + """Set the name to the openvino operator returned by the wrapped function.""" + + @wraps(node_factory_function) + def wrapper(*args: Any, **kwargs: Any) -> Node: + node = node_factory_function(*args, **kwargs) + node = _set_node_friendly_name(node, **kwargs) + return node + + return wrapper + + +def unary_op(node_factory_function: Callable) -> Callable: + """Convert the first input value to a Constant Node if a numeric value is detected.""" + + @wraps(node_factory_function) + def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node: + input_node = as_node(input_value) + node = node_factory_function(input_node, *args, **kwargs) + node = _set_node_friendly_name(node, **kwargs) + return node + + return wrapper + + +def binary_op(node_factory_function: Callable) -> Callable: + """Convert the first two input values to Constant Nodes if numeric values are detected.""" + + 
@wraps(node_factory_function) + def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node: + left, right = as_nodes(left, right) + node = node_factory_function(left, right, *args, **kwargs) + node = _set_node_friendly_name(node, **kwargs) + return node + + return wrapper diff --git a/runtime/bindings/python/src/openvino/utils/input_validation.py b/runtime/bindings/python/src/openvino/utils/input_validation.py new file mode 100644 index 00000000000..80878aa3746 --- /dev/null +++ b/runtime/bindings/python/src/openvino/utils/input_validation.py @@ -0,0 +1,136 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Helper functions for validating user input.""" + +import logging +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type + +import numpy as np + +from openvino.exceptions import UserInputError + +log = logging.getLogger(__name__) + + +def assert_list_of_ints(value_list: Iterable[int], message: str) -> None: + """Verify that the provided value is an iterable of integers.""" + try: + for value in value_list: + if not isinstance(value, int): + raise TypeError + except TypeError: + log.warning(message) + raise UserInputError(message, value_list) + + +def _check_value(op_name, attr_key, value, val_type, cond=None): + # type: (str, str, Any, Type, Optional[Callable[[Any], bool]]) -> bool + """Check whether provided value satisfies specified criteria. + + @param op_name: The operator name which attributes are checked. + @param attr_key: The attribute name. + @param value: The value to check. + @param val_type: Required value type. + @param cond: The optional function running additional checks. + + :raises UserInputError: + @return True if attribute satisfies all criterias. Otherwise False. 
+ """ + if not np.issubdtype(type(value), val_type): + raise UserInputError( + '{} operator attribute "{}" value must by of type {}.'.format( + op_name, attr_key, val_type + ) + ) + if cond is not None and not cond(value): + raise UserInputError( + '{} operator attribute "{}" value does not satisfy provided condition.'.format( + op_name, attr_key + ) + ) + return True + + +def check_valid_attribute(op_name, attr_dict, attr_key, val_type, cond=None, required=False): + # type: (str, dict, str, Type, Optional[Callable[[Any], bool]], Optional[bool]) -> bool + """Check whether specified attribute satisfies given criteria. + + @param op_name: The operator name which attributes are checked. + @param attr_dict: Dictionary containing key-value attributes to check. + @param attr_key: Key value for validated attribute. + @param val_type: Value type for validated attribute. + @param cond: Any callable wich accept attribute value and returns True or False. + @param required: Whether provided attribute key is not required. This mean it may be missing + from provided dictionary. + + :raises UserInputError: + + @return True if attribute satisfies all criterias. Otherwise False. + """ + result = True + + if required and attr_key not in attr_dict: + raise UserInputError( + 'Provided dictionary is missing {} operator required attribute "{}"'.format( + op_name, attr_key + ) + ) + + if attr_key not in attr_dict: + return result + + attr_value = attr_dict[attr_key] + + if np.isscalar(attr_value): + result = result and _check_value(op_name, attr_key, attr_value, val_type, cond) + else: + for v in attr_value: + result = result and _check_value(op_name, attr_key, v, val_type, cond) + + return result + + +def check_valid_attributes( + op_name, # type: str + attributes, # type: Dict[str, Any] + requirements, # type: List[Tuple[str, bool, Type, Optional[Callable]]] +): + # type: (...) -> bool + """Perform attributes validation according to specified type, value criteria. 
+ + @param op_name: The operator name which attributes are checked. + @param attributes: The dictionary with user provided attributes to check. + @param requirements: The list of tuples describing attributes' requirements. The tuple should + contain following values: + (attr_name: str, + is_required: bool, + value_type: Type, + value_condition: Callable) + + :raises UserInputError: + @return True if all attributes satisfies criterias. Otherwise False. + """ + for attr, required, val_type, cond in requirements: + check_valid_attribute(op_name, attributes, attr, val_type, cond, required) + return True + + +def is_positive_value(x): # type: (Any) -> bool + """Determine whether the specified x is positive value. + + @param x: The value to check. + + @return True if the specified x is positive value, False otherwise. + """ + return x > 0 + + +def is_non_negative_value(x): # type: (Any) -> bool + """Determine whether the specified x is non-negative value. + + @param x: The value to check. + + @return True if the specified x is non-negative value, False otherwise. + """ + return x >= 0 diff --git a/runtime/bindings/python/src/openvino/utils/node_factory.py b/runtime/bindings/python/src/openvino/utils/node_factory.py new file mode 100644 index 00000000000..52c0d95c906 --- /dev/null +++ b/runtime/bindings/python/src/openvino/utils/node_factory.py @@ -0,0 +1,167 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging as log + +from functools import partial +from typing import Any, Dict, List, Optional, Union + +from openvino.pyopenvino import NodeFactory as _NodeFactory + +from openvino.impl import Node, Output + +from openvino.exceptions import UserInputError + +DEFAULT_OPSET = "opset8" + + +class NodeFactory(object): + """Factory front-end to create node objects.""" + + def __init__(self, opset_version: str = DEFAULT_OPSET) -> None: + """Create the NodeFactory object. 
class NodeFactory(object):
    """Factory front-end to create node objects."""

    def __init__(self, opset_version: str = DEFAULT_OPSET) -> None:
        """Create the NodeFactory object.

        @param opset_version: The opset version the factory will use to produce ops from.
        """
        self.factory = _NodeFactory(opset_version)

    def create(
        self,
        op_type_name: str,
        arguments: Optional[List[Union[Node, Output]]] = None,
        attributes: Optional[Dict[str, Any]] = None,
    ) -> Node:
        """Create node object from provided description.

        The user does not have to provide all node's attributes, but only required ones.

        @param op_type_name: The operator type name.
        @param arguments: The operator arguments.
        @param attributes: The operator attributes.

        @return Node object representing requested operator with attributes set.
        """
        # No arguments and no attributes: create the bare op (e.g. for ops
        # without inputs) and attach an empty, invalid attribute cache.
        if arguments is None and attributes is None:
            node = self.factory.create(op_type_name)
            node._attr_cache = {}
            node._attr_cache_valid = False
            return node

        # Attributes without arguments is rejected: an op cannot be
        # parameterized before it has inputs to apply the attributes to.
        if arguments is None and attributes is not None:
            raise UserInputError(
                'Error: cannot create "{}" op without arguments.'.format(
                    op_type_name
                )
            )

        if attributes is None:
            attributes = {}

        assert arguments is not None

        arguments = self._arguments_as_outputs(arguments)
        node = self.factory.create(op_type_name, arguments, attributes)

        # Currently we don't support any attribute getters & setters for TensorIterator node.
        if node.get_type_name() == "TensorIterator":
            return node

        # Set getters and setters for each node's attribute.
        #   node.get_attribute_name()
        #   node.set_attribute_name()
        # For compound (with more than one level of nesting) attributes of form ie.:
        # node.class_member_name.some_metric.attr_name:
        #   node.get_some_metric_attr_name()
        #   node.set_some_metric_attr_name()
        # Please see test_dyn_attributes.py for more usage examples.
        all_attributes = node.get_attributes()
        for attr_name in all_attributes.keys():
            setattr(
                node,
                self._normalize_attr_name_getter(attr_name),
                partial(NodeFactory._get_node_attr_value, node, attr_name),
            )
            setattr(
                node,
                self._normalize_attr_name_setter(attr_name),
                partial(NodeFactory._set_node_attr_value, node, attr_name),
            )

        # Setup helper members for caching attribute values.
        # The cache would be lazily populated at first access attempt.
        node._attr_cache = {}
        node._attr_cache_valid = False

        return node

    @staticmethod
    def _arguments_as_outputs(arguments: List[Union[Node, Output]]) -> List[Output]:
        """Flatten Node arguments to their Outputs, warning on the deprecated Node form."""
        outputs = []
        for argument in arguments:
            if issubclass(type(argument), Output):
                outputs.append(argument)
            else:
                log.warning("Op arguments were passed as Node, please avoid passing arguments in "
                            "this manner, and pass Output(s) instead, because accepting Nodes will "
                            "be deprecated in a future release.")
                outputs.extend(argument.outputs())
        return outputs

    @staticmethod
    def _normalize_attr_name(attr_name: str, prefix: str) -> str:
        """Normalize attribute name.

        @param attr_name: The attribute name.
        @param prefix: The prefix to attach to attribute name.

        @return The modified attribute name.
        """
        # Trim first part of the name if there is only one level of attribute hierarchy.
        if attr_name.count(".") == 1:
            attr_name = attr_name[attr_name.find(".") + 1:]
        return prefix + attr_name.replace(".", "_")

    @classmethod
    def _normalize_attr_name_getter(cls, attr_name: str) -> str:
        """Normalize attribute name to be suitable for getter function name.

        @param attr_name: The attribute name to normalize

        @return The appropriate getter function name.
        """
        return cls._normalize_attr_name(attr_name, "get_")

    @classmethod
    def _normalize_attr_name_setter(cls, attr_name: str) -> str:
        """Normalize attribute name to be suitable for setter function name.

        @param attr_name: The attribute name to normalize

        @return The appropriate setter function name.
        """
        return cls._normalize_attr_name(attr_name, "set_")

    @staticmethod
    def _get_node_attr_value(node: Node, attr_name: str) -> Any:
        """Get provided node attribute value.

        @param node: The node we retrieve attribute value from.
        @param attr_name: The attribute name.

        @return The node attribute value.
        """
        # Lazily populate the whole cache on the first read.
        if not node._attr_cache_valid:
            node._attr_cache = node.get_attributes()
            node._attr_cache_valid = True
        return node._attr_cache[attr_name]

    @staticmethod
    def _set_node_attr_value(node: Node, attr_name: str, value: Any) -> None:
        """Set the node attribute value.

        @param node: The node we change attribute value for.
        @param attr_name: The attribute name.
        @param value: The new attribute value.
        """
        # Write through to the node and keep the cache entry in sync.
        node.set_attribute(attr_name, value)
        node._attr_cache[attr_name] = value
+ """ + if reduction_axes is None: + reduction_axes = set(range(len(node.shape))) + + if type(reduction_axes) is not set: + reduction_axes = set(reduction_axes) + return reduction_axes diff --git a/runtime/bindings/python/src/openvino/utils/tensor_iterator_types.py b/runtime/bindings/python/src/openvino/utils/tensor_iterator_types.py new file mode 100644 index 00000000000..bed3ab7287e --- /dev/null +++ b/runtime/bindings/python/src/openvino/utils/tensor_iterator_types.py @@ -0,0 +1,154 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Helper classes for aggregating TensorIterator input/output desciptor attributes.""" + +from typing import List + +from openvino.impl import Node +from openvino.impl.op import Parameter + + +class GraphBody(object): + """Class containing graph parameters and results.""" + + def __init__(self, parameters: List[Parameter], results: List[Node],) -> None: + self.parameters = parameters + self.results = results + + def serialize(self) -> dict: + """Serialize GraphBody as a dictionary.""" + return { + "parameters": self.parameters, + "results": self.results, + } + + +class TensorIteratorInputDesc(object): + """Represents a generic input descriptor for TensorIterator operator.""" + + def __init__(self, input_idx: int, body_parameter_idx: int,) -> None: + self.input_idx = input_idx + self.body_parameter_idx = body_parameter_idx + + def serialize(self) -> dict: + """Serialize TensorIteratorInputDesc as a dictionary.""" + return { + "input_idx": self.input_idx, + "body_parameter_idx": self.body_parameter_idx, + } + + +class TensorIteratorSliceInputDesc(TensorIteratorInputDesc): + """Represents a TensorIterator graph body input formed from slices of TensorIterator input.""" + + def __init__( + self, + input_idx: int, + body_parameter_idx: int, + start: int, + stride: int, + part_size: int, + end: int, + axis: int, + ) -> None: + super().__init__(input_idx, body_parameter_idx) + self.start = start + 
class TensorIteratorOutputDesc(object):
    """Represents a generic output descriptor for TensorIterator operator."""

    def __init__(self, body_value_idx: int, output_idx: int,) -> None:
        self.body_value_idx = body_value_idx
        self.output_idx = output_idx

    def serialize(self) -> dict:
        """Serialize TensorIteratorOutputDesc as a dictionary."""
        fields = ("body_value_idx", "output_idx")
        return {field: getattr(self, field) for field in fields}


class TensorIteratorBodyOutputDesc(TensorIteratorOutputDesc):
    """Represents an output from a specific iteration."""

    def __init__(self, body_value_idx: int, output_idx: int, iteration: int,) -> None:
        super().__init__(body_value_idx, output_idx)
        self.iteration = iteration

    def serialize(self) -> dict:
        """Serialize TensorIteratorBodyOutputDesc as a dictionary."""
        data = dict(super().serialize())
        data["iteration"] = self.iteration
        return data


class TensorIteratorConcatOutputDesc(TensorIteratorOutputDesc):
    """Represents an output produced by concatenation of output from each iteration."""

    def __init__(
        self,
        body_value_idx: int,
        output_idx: int,
        start: int,
        stride: int,
        part_size: int,
        end: int,
        axis: int,
    ) -> None:
        super().__init__(body_value_idx, output_idx)
        self.start = start
        self.stride = stride
        self.part_size = part_size
        self.end = end
        self.axis = axis

    def serialize(self) -> dict:
        """Serialize TensorIteratorConcatOutputDesc as a dictionary."""
        data = dict(super().serialize())
        for field in ("start", "stride", "part_size", "end", "axis"):
            data[field] = getattr(self, field)
        return data
def get_element_type(data_type: NumericType) -> NgraphType:
    """Return an ngraph element type for a Python type or numpy.dtype.

    @param data_type: A Python numeric type or a numpy dtype.

    :raises NgraphTypeError: If the type has no known element-type counterpart.
    @return The matching element type.
    """
    if data_type is int:
        log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
        return NgraphType.i32

    if data_type is float:
        log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.")
        return NgraphType.f32

    ng_type = next(
        (ng_type for (ng_type, np_type) in openvino_to_numpy_types_map if np_type == data_type), None
    )
    if ng_type:
        return ng_type

    # Interpolate the offending type into the message: the previous
    # ("... %s", data_type) form left "%s" literal, since exception
    # constructors do not apply printf-style formatting to their arguments.
    raise NgraphTypeError("Unidentified data type {}".format(data_type))


def get_element_type_str(data_type: NumericType) -> str:
    """Return an ngraph element type string representation for a Python type or numpy dtype.

    @param data_type: A Python numeric type or a numpy dtype.

    :raises NgraphTypeError: If the type has no known element-type counterpart.
    @return The matching element type name, e.g. "f32".
    """
    if data_type is int:
        log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
        return "i32"

    if data_type is float:
        log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.")
        return "f32"

    ng_type = next(
        (ng_type for (ng_type, np_type) in openvino_to_numpy_types_str_map if np_type == data_type),
        None,
    )
    if ng_type:
        return ng_type

    # Same formatting fix as in get_element_type above.
    raise NgraphTypeError("Unidentified data type {}".format(data_type))
type.""" + np_type = next( + (np_type for (ng_type, np_type) in openvino_to_numpy_types_map if ng_type == ngraph_type), + None, + ) + + if np_type: + return np.dtype(np_type) + + raise NgraphTypeError("Unidentified data type %s", ngraph_type) + + +def get_ndarray(data: NumericData) -> np.ndarray: + """Wrap data into a numpy ndarray.""" + if type(data) == np.ndarray: + return data + return np.array(data) + + +def get_shape(data: NumericData) -> TensorShape: + """Return a shape of NumericData.""" + if type(data) == np.ndarray: + return data.shape # type: ignore + elif type(data) == list: + return [len(data)] # type: ignore + return [] + + +def make_constant_node(value: NumericData, dtype: NumericType = None) -> Constant: + """Return an ngraph Constant node with the specified value.""" + ndarray = get_ndarray(value) + if dtype: + element_type = get_element_type(dtype) + else: + element_type = get_element_type(ndarray.dtype) + + return Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist()) + + +def as_node(input_value: NodeInput) -> Node: + """Return input values as nodes. Scalars will be converted to Constant nodes.""" + if issubclass(type(input_value), Node): + return input_value + if issubclass(type(input_value), Output): + return input_value + return make_constant_node(input_value) + + +def as_nodes(*input_values: NodeInput) -> List[Node]: + """Return input values as nodes. 
Scalars will be converted to Constant nodes.""" + return [as_node(input_value) for input_value in input_values] diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.cpp b/runtime/bindings/python/src/pyopenvino/core/containers.cpp index 0c5fa642556..096b6074325 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.cpp @@ -8,6 +8,7 @@ #include #include +PYBIND11_MAKE_OPAQUE(Containers::PyInputsDataMap); PYBIND11_MAKE_OPAQUE(Containers::PyConstInputsDataMap); PYBIND11_MAKE_OPAQUE(Containers::PyOutputsDataMap); PYBIND11_MAKE_OPAQUE(Containers::PyResults); @@ -16,6 +17,14 @@ namespace py = pybind11; namespace Containers { +void regclass_PyInputsDataMap(py::module m) { + auto py_inputs_data_map = py::bind_map(m, "PyInputsDataMap"); + + py_inputs_data_map.def("keys", [](PyInputsDataMap& self) { + return py::make_key_iterator(self.begin(), self.end()); + }); +} + void regclass_PyConstInputsDataMap(py::module m) { auto py_const_inputs_data_map = py::bind_map(m, "PyConstInputsDataMap"); diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.hpp b/runtime/bindings/python/src/pyopenvino/core/containers.hpp index 4be92c0d565..511d9053ea5 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.hpp @@ -13,6 +13,8 @@ namespace py = pybind11; namespace Containers { + using PyInputsDataMap = std::map>; + using PyConstInputsDataMap = std::map>; @@ -22,6 +24,7 @@ namespace Containers { using PyResults = std::map>; + void regclass_PyInputsDataMap(py::module m); void regclass_PyConstInputsDataMap(py::module m); void regclass_PyOutputsDataMap(py::module m); void regclass_PyResults(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp index 1fdaf5bf292..b57765f19cf 100644 --- 
a/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp @@ -10,20 +10,17 @@ #include -#include "ngraph/function.hpp" +#include "openvino/core/function.hpp" +#include "pyopenvino/core/containers.hpp" #include "pyopenvino/core/ie_input_info.hpp" -// using PyInputsDataMap = std::map>; -// -// PYBIND11_MAKE_OPAQUE(PyInputsDataMap); - namespace py = pybind11; void regclass_IENetwork(py::module m) { py::class_> cls(m, "IENetwork"); cls.def(py::init()); - cls.def(py::init([](std::shared_ptr& function) { + cls.def(py::init([](std::shared_ptr& function) { InferenceEngine::CNNNetwork cnnNetwork(function); return std::make_shared(cnnNetwork); })); @@ -82,14 +79,8 @@ void regclass_IENetwork(py::module m) { &InferenceEngine::CNNNetwork::getBatchSize, &InferenceEngine::CNNNetwork::setBatchSize); - // auto py_inputs_data_map = py::bind_map(m, "PyInputsDataMap"); - - // py_inputs_data_map.def("keys", [](PyInputsDataMap& self) { - // return py::make_key_iterator(self.begin(), self.end()); - // }); - cls.def_property_readonly("input_info", [](InferenceEngine::CNNNetwork& self) { - std::map> inputs; + Containers::PyInputsDataMap inputs; const InferenceEngine::InputsDataMap& inputsInfo = self.getInputsInfo(); for (auto& in : inputsInfo) { inputs[in.first] = in.second; diff --git a/runtime/bindings/python/src/pyopenvino/graph/axis_set.cpp b/runtime/bindings/python/src/pyopenvino/graph/axis_set.cpp new file mode 100644 index 00000000000..89e5737d8a5 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/axis_set.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/axis_set.hpp" // ov::AxisSet + +#include +#include + +#include +#include +#include + +#include "pyopenvino/graph/axis_set.hpp" + +namespace py = pybind11; + +void regclass_graph_AxisSet(py::module m) { + py::class_> axis_set(m, "AxisSet"); + axis_set.doc() = 
"openvino.impl.AxisSet wraps ov::AxisSet"; + axis_set.def(py::init&>(), py::arg("axes")); + axis_set.def(py::init&>(), py::arg("axes")); + axis_set.def(py::init&>(), py::arg("axes")); + axis_set.def(py::init(), py::arg("axes")); + + axis_set.def("__len__", [](const ov::AxisSet& v) { + return v.size(); + }); + + axis_set.def( + "__iter__", + [](ov::AxisSet& v) { + return py::make_iterator(v.begin(), v.end()); + }, + py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ + + axis_set.def("__repr__", [](const ov::AxisSet& self) -> std::string { + std::stringstream data_ss; + std::copy(self.begin(), self.end(), std::ostream_iterator(data_ss, ", ")); + std::string data_str = data_ss.str(); + return ""; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/axis_set.hpp b/runtime/bindings/python/src/pyopenvino/graph/axis_set.hpp new file mode 100644 index 00000000000..9ebfc5a1177 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/axis_set.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_AxisSet(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/axis_vector.cpp b/runtime/bindings/python/src/pyopenvino/graph/axis_vector.cpp new file mode 100644 index 00000000000..b35f1ec5f56 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/axis_vector.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/axis_vector.hpp" // ov::AxisVector + +#include +#include + +#include "pyopenvino/graph/axis_vector.hpp" + +namespace py = pybind11; + +void regclass_graph_AxisVector(py::module m) { + py::class_> axis_vector(m, "AxisVector"); + axis_vector.doc() = "openvino.impl.AxisVector wraps ov::AxisVector"; + axis_vector.def(py::init&>(), py::arg("axes")); + axis_vector.def(py::init&>(), 
py::arg("axes")); + axis_vector.def(py::init(), py::arg("axes")); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/axis_vector.hpp b/runtime/bindings/python/src/pyopenvino/graph/axis_vector.hpp new file mode 100644 index 00000000000..a365d36aa0f --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/axis_vector.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_AxisVector(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/coordinate.cpp b/runtime/bindings/python/src/pyopenvino/graph/coordinate.cpp new file mode 100644 index 00000000000..22e2f60cf2e --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/coordinate.cpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/coordinate.hpp" // ov::Coordinate + +#include +#include + +#include "pyopenvino/graph/coordinate.hpp" + +namespace py = pybind11; + +void regclass_graph_Coordinate(py::module m) { + py::class_> coordinate(m, "Coordinate"); + coordinate.doc() = "openvino.impl.Coordinate wraps ov::Coordinate"; + coordinate.def(py::init&>()); + coordinate.def(py::init()); + coordinate.def(py::init&>()); + coordinate.def(py::init()); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/coordinate.hpp b/runtime/bindings/python/src/pyopenvino/graph/coordinate.hpp new file mode 100644 index 00000000000..9cfc26c546d --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/coordinate.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Coordinate(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp b/runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp new 
file mode 100644 index 00000000000..9c3190af192 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/coordinate_diff.hpp" // ov::CoordinateDiff + +#include +#include + +#include +#include +#include + +#include "pyopenvino/graph/coordinate_diff.hpp" + +namespace py = pybind11; + +void regclass_graph_CoordinateDiff(py::module m) { + py::class_> coordinate_diff(m, "CoordinateDiff"); + coordinate_diff.doc() = "openvino.impl.CoordinateDiff wraps ov::CoordinateDiff"; + coordinate_diff.def(py::init&>()); + coordinate_diff.def(py::init&>()); + coordinate_diff.def(py::init()); + + coordinate_diff.def("__str__", [](const ov::CoordinateDiff& self) -> std::string { + std::stringstream stringstream; + std::copy(self.begin(), self.end(), std::ostream_iterator(stringstream, ", ")); + std::string string = stringstream.str(); + return string.substr(0, string.size() - 2); + }); + + coordinate_diff.def("__repr__", [](const ov::CoordinateDiff& self) -> std::string { + std::string class_name = py::cast(self).get_type().attr("__name__").cast(); + std::string shape_str = py::cast(self).attr("__str__")().cast(); + return "<" + class_name + ": (" + shape_str + ")>"; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp b/runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp new file mode 100644 index 00000000000..ac4c1f1d4ff --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/coordinate_diff.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_CoordinateDiff(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp b/runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp new 
file mode 100644 index 00000000000..4615ca64537 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.cpp @@ -0,0 +1,343 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +// These are not used here, but needed in order to not violate ODR, since +// these are included in other translation units, and specialize some types. +// Related: https://github.com/pybind/pybind11/issues/1055 +#include "dict_attribute_visitor.hpp" + +#include +#include + +#include "openvino/op/loop.hpp" +#include "openvino/op/util/sub_graph_base.hpp" + +namespace py = pybind11; + +util::DictAttributeDeserializer::DictAttributeDeserializer( + const py::dict& attributes, + std::unordered_map>& variables) + : m_attributes(attributes), + m_variables(variables) {} + +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + if (const auto& a = ov::as_type< + ov::AttributeAdapter>>>( + &adapter)) { + std::vector> input_descs; + const py::dict& input_desc = m_attributes[name.c_str()].cast(); + const auto& merged_input_desc = input_desc["merged_input_desc"].cast(); + const auto& slice_input_desc = input_desc["slice_input_desc"].cast(); + const auto& invariant_input_desc = input_desc["invariant_input_desc"].cast(); + for (py::handle h : slice_input_desc) { + const py::dict& desc = h.cast(); + auto slice_in = std::make_shared( + desc["input_idx"].cast(), + desc["body_parameter_idx"].cast(), + desc["start"].cast(), + desc["stride"].cast(), + desc["part_size"].cast(), + desc["end"].cast(), + desc["axis"].cast()); + input_descs.push_back(slice_in); + } + + for (py::handle h : merged_input_desc) { + const py::dict& desc = h.cast(); + auto merged_in = std::make_shared( + desc["input_idx"].cast(), + desc["body_parameter_idx"].cast(), + desc["body_value_idx"].cast()); + input_descs.push_back(merged_in); + } + + for (py::handle h : 
invariant_input_desc) { + const py::dict& desc = h.cast(); + auto invariant_in = std::make_shared( + desc["input_idx"].cast(), + desc["body_parameter_idx"].cast()); + input_descs.push_back(invariant_in); + } + a->set(input_descs); + } else if (const auto& a = ov::as_type< + ov::AttributeAdapter>>>( + &adapter)) { + std::vector> output_descs; + const py::dict& output_desc = m_attributes[name.c_str()].cast(); + const auto& body_output_desc = output_desc["body_output_desc"].cast(); + const auto& concat_output_desc = output_desc["concat_output_desc"].cast(); + for (py::handle h : body_output_desc) { + const py::dict& desc = h.cast(); + auto body_output = std::make_shared( + desc["body_value_idx"].cast(), + desc["output_idx"].cast(), + desc["iteration"].cast()); + output_descs.push_back(body_output); + } + + for (py::handle h : concat_output_desc) { + const py::dict& desc = h.cast(); + auto concat_output = std::make_shared( + desc["body_value_idx"].cast(), + desc["output_idx"].cast(), + desc["start"].cast(), + desc["stride"].cast(), + desc["part_size"].cast(), + desc["end"].cast(), + desc["axis"].cast()); + output_descs.push_back(concat_output); + } + a->set(output_descs); + } else if (const auto& a = ov::as_type>(&adapter)) { + ov::op::v5::Loop::SpecialBodyPorts special_body_ports; + const py::dict& special_ports_dict = m_attributes[name.c_str()].cast(); + special_body_ports.body_condition_output_idx = + special_ports_dict["body_condition_output_idx"].cast(); + special_body_ports.current_iteration_input_idx = + special_ports_dict["current_iteration_input_idx"].cast(); + a->set(special_body_ports); + } else if (const auto& a = + ov::as_type>>(&adapter)) { + std::string variable_id = m_attributes[name.c_str()].cast(); + if (!m_variables.count(variable_id)) { + m_variables[variable_id] = std::make_shared( + ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, variable_id}); + } + a->set(m_variables[variable_id]); + } else { + NGRAPH_CHECK(false, 
"No AttributeVisitor support for accessing attribute named: ", name); + } + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if 
(m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + 
ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + adapter.set(m_attributes[name.c_str()].cast>()); + } +} + +void util::DictAttributeDeserializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + if (m_attributes.contains(name)) { + if (name == "body") { + const py::dict& body_attrs = m_attributes[name.c_str()].cast(); + const auto& body_outputs = as_output_vector(body_attrs["results"].cast()); + const auto& body_parameters = body_attrs["parameters"].cast(); + auto body = std::make_shared(body_outputs, body_parameters); + adapter.set(body); + } else { + NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); + } + } +} + +util::DictAttributeSerializer::DictAttributeSerializer(const std::shared_ptr& node) { + node->visit_attributes(*this); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + if (m_attributes.contains(name)) { + NGRAPH_CHECK(false, "No AttributeVisitor support for accessing attribute named: ", name); + } +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void 
util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, ov::ValueAccessor& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void 
util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} +void util::DictAttributeSerializer::on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) { + m_attributes[name.c_str()] = adapter.get(); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp b/runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp new file mode 100644 index 00000000000..c73cfe132d6 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/dict_attribute_visitor.hpp @@ -0,0 +1,131 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/core/attribute_visitor.hpp" +#include "openvino/core/function.hpp" +#include "openvino/core/node.hpp" +#include "openvino/op/util/variable.hpp" + +#include + +namespace py = pybind11; + +namespace util +{ + class DictAttributeDeserializer : public ov::AttributeVisitor + { + public: + 
DictAttributeDeserializer( + const py::dict& attributes, + std::unordered_map>& variables); + + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void 
on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + + protected: + const py::dict& m_attributes; + std::unordered_map>& m_variables; + }; + + class DictAttributeSerializer : public ov::AttributeVisitor + { + public: + explicit DictAttributeSerializer(const std::shared_ptr& node); + + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, ov::ValueAccessor& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + 
ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + void on_adapter(const std::string& name, + ov::ValueAccessor>& adapter) override; + + template + T get_attribute(const std::string& name) + { + NGRAPH_CHECK(m_attributes.contains(name), + "Couldn't find attribute \"", + name, + "\" in serialized node attribute dictionary."); + return m_attributes[name.c_str()].cast(); + } + + py::dict get_attributes() const { return m_attributes; } + + protected: + py::dict m_attributes; + }; +} // namespace util diff --git a/runtime/bindings/python/src/pyopenvino/graph/dimension.cpp b/runtime/bindings/python/src/pyopenvino/graph/dimension.cpp new file mode 100644 index 00000000000..330e97b6cdc --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/dimension.cpp @@ -0,0 +1,209 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/dimension.hpp" // ov::Dimension + +#include +#include + +#include +#include +#include + +#include "pyopenvino/graph/dimension.hpp" + +namespace py = pybind11; + +void regclass_graph_Dimension(py::module m) { + using value_type = ov::Dimension::value_type; + + py::class_> dim(m, "Dimension"); + dim.doc() = "openvino.impl.Dimension wraps ov::Dimension"; + dim.def(py::init<>()); + dim.def(py::init(), + py::arg("dimension"), + R"( + Construct a static dimension. + + Parameters + ---------- + dimension : int + Value of the dimension. + )"); + dim.def(py::init(), + py::arg("min_dimension"), + py::arg("max_dimension"), + R"( + Construct a dynamic dimension with bounded range. + + Parameters + ---------- + min_dimension : int + The lower inclusive limit for the dimension. + + max_dimension : int + The upper inclusive limit for the dimension. 
+ )"); + + dim.def_static("dynamic", &ov::Dimension::dynamic); + + dim.def_property_readonly("is_dynamic", + &ov::Dimension::is_dynamic, + R"( + Check if Dimension is dynamic. + + Returns + ---------- + is_dynamic : bool + True if dynamic, else False. + )"); + dim.def_property_readonly("is_static", + &ov::Dimension::is_static, + R"( + Check if Dimension is static. + + Returns + ---------- + is_static : bool + True if static, else False. + )"); + + dim.def( + "__eq__", + [](const ov::Dimension& a, const ov::Dimension& b) { + return a == b; + }, + py::is_operator()); + dim.def( + "__eq__", + [](const ov::Dimension& a, const int64_t& b) { + return a == b; + }, + py::is_operator()); + + dim.def("__len__", &ov::Dimension::get_length); + dim.def("get_length", + &ov::Dimension::get_length, + R"( + Return this dimension as integer. + This dimension must be static and non-negative. + + Returns + ---------- + get_length : int + Value of the dimension. + )"); + dim.def("get_min_length", + &ov::Dimension::get_min_length, + R"( + Return this dimension's min_dimension as integer. + This dimension must be dynamic and non-negative. + + Returns + ---------- + get_min_length : int + Value of the dimension. + )"); + dim.def("get_max_length", + &ov::Dimension::get_max_length, + R"( + Return this dimension's max_dimension as integer. + This dimension must be dynamic and non-negative. + + Returns + ---------- + get_max_length : int + Value of the dimension. + )"); + + dim.def("same_scheme", + &ov::Dimension::same_scheme, + py::arg("dim"), + R"( + Return this dimension's max_dimension as integer. + This dimension must be dynamic and non-negative. + + Parameters + ---------- + dim : Dimension + The other dimension to compare this dimension to. + + Returns + ---------- + same_scheme : bool + True if this dimension and dim are both dynamic, + or if they are both static and equal, otherwise False. 
+ )"); + dim.def("compatible", + &ov::Dimension::compatible, + py::arg("d"), + R"( + Check whether this dimension is capable of being merged + with the argument dimension. + + Parameters + ---------- + d : Dimension + The dimension to compare this dimension with. + + Returns + ---------- + compatible : bool + True if this dimension is compatible with d, else False. + )"); + dim.def("relaxes", + &ov::Dimension::relaxes, + py::arg("d"), + R"( + Check whether this dimension is a relaxation of the argument. + This dimension relaxes (or is a relaxation of) d if: + + (1) this and d are static and equal + (2) this dimension contains d dimension + + this.relaxes(d) is equivalent to d.refines(this). + + Parameters + ---------- + d : Dimension + The dimension to compare this dimension with. + + Returns + ---------- + relaxes : bool + True if this dimension relaxes d, else False. + )"); + dim.def("refines", + &ov::Dimension::refines, + py::arg("d"), + R"( + Check whether this dimension is a refinement of the argument. + This dimension refines (or is a refinement of) d if: + + (1) this and d are static and equal + (2) d dimension contains this dimension + + this.refines(d) is equivalent to d.relaxes(this). + + Parameters + ---------- + d : Dimension + The dimension to compare this dimension with. + + Returns + ---------- + relaxes : bool + True if this dimension refines d, else False. 
+ )"); + + dim.def("__str__", [](const ov::Dimension& self) -> std::string { + std::stringstream ss; + ss << self; + return ss.str(); + }); + + dim.def("__repr__", [](const ov::Dimension& self) -> std::string { + return "() + ">"; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/dimension.hpp b/runtime/bindings/python/src/pyopenvino/graph/dimension.hpp new file mode 100644 index 00000000000..8a03f59de8e --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/dimension.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Dimension(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/function.cpp b/runtime/bindings/python/src/pyopenvino/graph/function.cpp new file mode 100644 index 00000000000..3496e88d014 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/function.cpp @@ -0,0 +1,314 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/function.hpp" // ov::Function + +#include +#include + +#include "openvino/op/parameter.hpp" // ov::op::v0::Parameter +#include "openvino/op/sink.hpp" +#include "pyopenvino/graph/function.hpp" + +namespace py = pybind11; + +static const char* CAPSULE_NAME = "ngraph_function"; + +void regclass_graph_Function(py::module m) { + py::class_> function(m, "Function", py::module_local()); + function.doc() = "openvino.impl.Function wraps ov::Function"; + + function.def(py::init([](const ov::ResultVector& res, + const std::vector>& nodes, + const ov::ParameterVector& params, + const std::string& name) { + ov::SinkVector sinks; + for (const auto& node : nodes) { + auto sink = std::dynamic_pointer_cast(node); + NGRAPH_CHECK(sink != nullptr, "Node {} is not instance of Sink"); + sinks.push_back(sink); + } + return std::make_shared(res, sinks, params, name); + }), + py::arg("results"), + 
py::arg("sinks"), + py::arg("parameters"), + py::arg("name"), + R"( + Create user-defined Function which is a representation of a model. + + Parameters + ---------- + results : List[op.Result] + List of results. + + sinks : List[Node] + List of Nodes to be used as Sinks (e.g. Assign ops). + + parameters : List[op.Parameter] + List of parameters. + + name : str + String to set as function's friendly name. + )"); + + function.def(py::init>&, + const std::vector>&, + const std::string&>(), + py::arg("results"), + py::arg("parameters"), + py::arg("name"), + R"( + Create user-defined Function which is a representation of a model. + + Parameters + ---------- + results : List[Node] + List of Nodes to be used as results. + + parameters : List[op.Parameter] + List of parameters. + + name : str + String to set as function's friendly name. + )"); + + function.def(py::init&, + const std::vector>&, + const std::string&>(), + py::arg("result"), + py::arg("parameters"), + py::arg("name"), + R"( + Create user-defined Function which is a representation of a model. + + Parameters + ---------- + results : Node + Node to be used as result. + + parameters : List[op.Parameter] + List of parameters. + + name : str + String to set as function's friendly name. + )"); + function.def("get_output_size", + &ov::Function::get_output_size, + R"( + Return the number of outputs for the function. + + Returns + ---------- + get_output_size : int + Number of outputs. + )"); + function.def("get_ops", + &ov::Function::get_ops, + R"( + Return ops used in the function. + + Returns + ---------- + get_ops : List[Node] + List of Nodes representing ops used in function. + )"); + function.def("get_ordered_ops", + &ov::Function::get_ordered_ops, + R"( + Return ops used in the function in topological order. + + Returns + ---------- + get_ordered_ops : List[Node] + List of sorted Nodes representing ops used in function. 
+ )"); + function.def("get_output_op", + &ov::Function::get_output_op, + py::arg("i"), + R"( + Return the op that generates output i + + Parameters + ---------- + i : int + output index + + Returns + ---------- + get_output_op : Node + Node object that generates output i + )"); + function.def("get_output_element_type", + &ov::Function::get_output_element_type, + py::arg("i"), + R"( + Return the element type of output i + + Parameters + ---------- + i : int + output index + + Returns + ---------- + get_output_op : Type + Type object of output i + )"); + function.def("get_output_shape", + &ov::Function::get_output_shape, + py::arg("i"), + R"( + Return the shape of element i + + Parameters + ---------- + i : int + element index + + Returns + ---------- + get_output_shape : Shape + Shape object of element i + )"); + function.def("get_output_partial_shape", + &ov::Function::get_output_partial_shape, + py::arg("i"), + R"( + Return the partial shape of element i + + Parameters + ---------- + i : int + element index + + Returns + ---------- + get_output_partial_shape : PartialShape + PartialShape object of element i + )"); + function.def("get_parameters", + &ov::Function::get_parameters, + R"( + Return the function parameters. + + Returns + ---------- + get_parameters : ParameterVector + ParameterVector containing function parameters. + )"); + function.def("get_results", + &ov::Function::get_results, + R"( + Return a list of function outputs. + + Returns + ---------- + get_results : ResultVector + ResultVector containing function parameters. + )"); + function.def("get_result", + &ov::Function::get_result, + R"( + Return single result. + + Returns + ---------- + get_result : Node + Node object representing result. + )"); + function.def("get_name", + &ov::Function::get_name, + R"( + Get the unique name of the function. + + Returns + ---------- + get_name : str + String with a name of the function. 
+ )"); + function.def("get_friendly_name", + &ov::Function::get_friendly_name, + R"( + Gets the friendly name for a function. If no + friendly name has been set via set_friendly_name + then the function's unique name is returned. + + Returns + ---------- + get_friendly_name : str + String with a friendly name of the function. + )"); + function.def("set_friendly_name", + &ov::Function::set_friendly_name, + py::arg("name"), + R"( + Sets a friendly name for a function. This does + not overwrite the unique name of the function and + is retrieved via get_friendly_name(). Used mainly + for debugging. + + Parameters + ---------- + name : str + String to set as the friendly name. + )"); + function.def("is_dynamic", + &ov::Function::is_dynamic, + R"( + Returns true if any of the op's defined in the function + contains partial shape. + + Returns + ---------- + is_dynamic : bool + )"); + function.def("__repr__", [](const ov::Function& self) { + std::string class_name = py::cast(self).get_type().attr("__name__").cast(); + std::stringstream shapes_ss; + for (size_t i = 0; i < self.get_output_size(); ++i) { + if (i > 0) { + shapes_ss << ", "; + } + shapes_ss << self.get_output_partial_shape(i); + } + return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>"; + }); + function.def_static("from_capsule", [](py::object* capsule) { + // get the underlying PyObject* which is a PyCapsule pointer + auto* pybind_capsule_ptr = capsule->ptr(); + // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME + auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME); + + auto* ngraph_function = static_cast*>(capsule_ptr); + if (ngraph_function && *ngraph_function) { + return *ngraph_function; + } else { + throw std::runtime_error("The provided capsule does not contain an ov::Function"); + } + }); + function.def_static("to_capsule", [](std::shared_ptr& ngraph_function) { + // create a shared pointer on the heap before putting it 
in the capsule + // this secures the lifetime of the object transferred by the capsule + auto* sp_copy = new std::shared_ptr(ngraph_function); + + // a destructor callback that will delete the heap allocated shared_ptr + // when the capsule is destructed + auto sp_deleter = [](PyObject* capsule) { + auto* capsule_ptr = PyCapsule_GetPointer(capsule, CAPSULE_NAME); + auto* function_sp = static_cast*>(capsule_ptr); + if (function_sp) { + delete function_sp; + } + }; + + // put the shared_ptr in a new capsule under the same name as in "from_capsule" + auto pybind_capsule = py::capsule(sp_copy, CAPSULE_NAME, sp_deleter); + + return pybind_capsule; + }); + + function.def_property_readonly("name", &ov::Function::get_name); + function.def_property("friendly_name", &ov::Function::get_friendly_name, &ov::Function::set_friendly_name); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/function.hpp b/runtime/bindings/python/src/pyopenvino/graph/function.hpp new file mode 100644 index 00000000000..aa399c5ac21 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/function.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Function(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/node.cpp b/runtime/bindings/python/src/pyopenvino/graph/node.cpp new file mode 100644 index 00000000000..242fd76c1e0 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node.cpp @@ -0,0 +1,306 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/node.hpp" + +#include +#include +#include + +#include "dict_attribute_visitor.hpp" +#include "openvino/core/variant.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/subtract.hpp" +#include "pyopenvino/graph/node.hpp" 
+#include "pyopenvino/graph/rt_map.hpp" +#include "pyopenvino/graph/variant.hpp" + +class PyNode : public ov::Node { +public: + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& inputs) const override { + PYBIND11_OVERRIDE_PURE(std::shared_ptr, ov::Node, clone_with_new_inputs, inputs); + } + + const type_info_t& get_type_info() const override { + PYBIND11_OVERRIDE_PURE(type_info_t&, ov::Node, get_type_info, ); + } +}; + +namespace py = pybind11; + +using PyRTMap = std::map>; + +PYBIND11_MAKE_OPAQUE(PyRTMap); + +void regclass_graph_Node(py::module m) { + py::class_, PyNode> node(m, "Node", py::dynamic_attr()); + node.doc() = "openvino.impl.Node wraps ov::Node"; + node.def( + "__add__", + [](const std::shared_ptr& a, const std::shared_ptr b) { + return std::make_shared(a, b); + }, + py::is_operator()); + node.def( + "__sub__", + [](const std::shared_ptr& a, const std::shared_ptr b) { + return std::make_shared(a, b); + }, + py::is_operator()); + node.def( + "__mul__", + [](const std::shared_ptr& a, const std::shared_ptr b) { + return std::make_shared(a, b); + }, + py::is_operator()); + node.def( + "__div__", + [](const std::shared_ptr& a, const std::shared_ptr b) { + return std::make_shared(a, b); + }, + py::is_operator()); + node.def( + "__truediv__", + [](const std::shared_ptr& a, const std::shared_ptr b) { + return std::make_shared(a, b); + }, + py::is_operator()); + + node.def("__repr__", [](const ov::Node& self) { + std::string type_name = self.get_type_name(); + std::stringstream shapes_ss; + for (size_t i = 0; i < self.get_output_size(); ++i) { + if (i > 0) { + shapes_ss << ", "; + } + shapes_ss << self.get_output_partial_shape(i); + } + return "<" + type_name + ": '" + self.get_friendly_name() + "' (" + shapes_ss.str() + ")>"; + }); + + node.def("get_element_type", + &ov::Node::get_element_type, + R"( + Checks that there is exactly one output and returns it's element type. + + Returns + ---------- + get_element_type : Type + Type of the output. 
+ )"); + node.def("get_output_size", + &ov::Node::get_output_size, + R"( + Returns the number of outputs from the node. + + Returns + ---------- + get_output_size : int + Number of outputs. + )"); + node.def("get_output_element_type", + &ov::Node::get_output_element_type, + py::arg("i"), + R"( + Returns the element type for output i + + Parameters + ---------- + i : int + Index of the output. + + Returns + ---------- + get_output_element_type : Type + Type of the output i + )"); + node.def("get_output_shape", + &ov::Node::get_output_shape, + py::arg("i"), + R"( + Returns the shape for output i + + Parameters + ---------- + i : int + Index of the output. + + Returns + ---------- + get_output_shape : Shape + Shape of the output i + )"); + node.def("get_output_partial_shape", + &ov::Node::get_output_partial_shape, + py::arg("i"), + R"( + Returns the partial shape for output i + + Parameters + ---------- + i : int + Index of the output. + + Returns + ---------- + get_output_partial_shape : PartialShape + PartialShape of the output i + )"); + node.def("get_type_name", + &ov::Node::get_type_name, + R"( + Returns Type's name from the node. + + Returns + ---------- + get_type_name : str + String representing Type's name. + )"); + node.def("get_name", + &ov::Node::get_name, + R"( + Get the unique name of the node + + Returns + ---------- + get_name : str + Unique name of the node. + )"); + node.def("get_friendly_name", + &ov::Node::get_friendly_name, + R"( + Gets the friendly name for a node. If no friendly name has + been set via set_friendly_name then the node's unique name + is returned. + + Returns + ---------- + get_friendly_name : str + Friendly name of the node. + )"); + node.def("get_type_info", &ov::Node::get_type_info); + node.def("set_friendly_name", + &ov::Node::set_friendly_name, + py::arg("name"), + R"( + Sets a friendly name for a node. This does not overwrite the unique name + of the node and is retrieved via get_friendly_name(). Used mainly for + debugging.
The friendly name may be set exactly once. + + Parameters + ---------- + name : str + Friendly name to set. + )"); + node.def("input", + (ov::Input(ov::Node::*)(size_t)) & ov::Node::input, + py::arg("input_index"), + R"( + A handle to the input_index input of this node. + + Parameters + ---------- + input_index : int + Index of Input. + + Returns + ---------- + input : Input + Input of this node. + )"); + node.def("inputs", + (std::vector>(ov::Node::*)()) & ov::Node::inputs, + R"( + A list containing a handle for each of this node's inputs, in order. + + Returns + ---------- + inputs : List[Input] + List of node's inputs. + )"); + node.def("output", + (ov::Output(ov::Node::*)(size_t)) & ov::Node::output, + py::arg("output_index"), + R"( + A handle to the output_index output of this node. + + Parameters + ---------- + output_index : int + Index of Output. + + Returns + ---------- + output : Output + Output of this node. + )"); + node.def("outputs", + (std::vector>(ov::Node::*)()) & ov::Node::outputs, + R"( + A list containing a handle for each of this node's outputs, in order. + + Returns + ---------- + outputs : List[Output] + List of node's outputs. + )"); + node.def("get_rt_info", + (PyRTMap & (ov::Node::*)()) & ov::Node::get_rt_info, + py::return_value_policy::reference_internal, + R"( + Returns PyRTMap which is a dictionary of user defined runtime info. + + Returns + ---------- + get_rt_info : PyRTMap + A dictionary of user defined data. + )"); + node.def("get_version", + &ov::Node::get_version, + R"( + Returns operation's version of the node. + + Returns + ---------- + get_version : int + Operation version.
+ )"); + + node.def("set_argument", &ov::Node::set_argument); + node.def("set_arguments", [](const std::shared_ptr& self, const ov::NodeVector& args) { + self->set_arguments(args); + }); + node.def("set_arguments", [](const std::shared_ptr& self, const ov::OutputVector& args) { + self->set_arguments(args); + }); + + node.def_property_readonly("shape", &ov::Node::get_shape); + node.def_property_readonly("name", &ov::Node::get_name); + node.def_property_readonly("rt_info", + (PyRTMap & (ov::Node::*)()) & ov::Node::get_rt_info, + py::return_value_policy::reference_internal); + node.def_property_readonly("version", &ov::Node::get_version); + node.def_property_readonly("type_info", &ov::Node::get_type_info); + node.def_property("friendly_name", &ov::Node::get_friendly_name, &ov::Node::set_friendly_name); + + node.def("get_attributes", [](const std::shared_ptr& self) { + util::DictAttributeSerializer dict_serializer(self); + return dict_serializer.get_attributes(); + }); + node.def("set_attribute", [](std::shared_ptr& self, const std::string& atr_name, py::object value) { + py::dict attr_dict; + attr_dict[atr_name.c_str()] = value; + std::unordered_map> variables; + util::DictAttributeDeserializer dict_deserializer(attr_dict, variables); + self->visit_attributes(dict_deserializer); + }); + node.def("set_arguments", [](const std::shared_ptr& self, const ov::OutputVector& arguments) { + return self->set_arguments(arguments); + }); + node.def("validate", [](const std::shared_ptr& self) { + return self->constructor_validate_and_infer_types(); + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/node.hpp b/runtime/bindings/python/src/pyopenvino/graph/node.hpp new file mode 100644 index 00000000000..618569d847d --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void 
regclass_graph_Node(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_factory.cpp b/runtime/bindings/python/src/pyopenvino/graph/node_factory.cpp new file mode 100644 index 00000000000..35c75edd465 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node_factory.cpp @@ -0,0 +1,119 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "node_factory.hpp" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dict_attribute_visitor.hpp" +#include "ngraph/check.hpp" +#include "ngraph/log.hpp" +#include "openvino/core/except.hpp" +#include "openvino/core/node.hpp" +#include "openvino/op/util/op_types.hpp" +#include "openvino/op/util/variable.hpp" +#include "openvino/opsets/opset.hpp" + +namespace py = pybind11; + +namespace { +class NodeFactory { +public: + NodeFactory() {} + NodeFactory(const std::string& opset_name) : m_opset(get_opset(opset_name)) {} + + std::shared_ptr create(const std::string op_type_name, + const ov::OutputVector& arguments, + const py::dict& attributes = py::dict()) { + std::shared_ptr op_node = std::shared_ptr(m_opset.create(op_type_name)); + + NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name); + NGRAPH_CHECK(!ov::op::util::is_constant(op_node), + "Currently NodeFactory doesn't support Constant node: ", + op_type_name); + + util::DictAttributeDeserializer visitor(attributes, m_variables); + + op_node->set_arguments(arguments); + op_node->visit_attributes(visitor); + op_node->constructor_validate_and_infer_types(); + + return op_node; + } + + std::shared_ptr create(const std::string op_type_name) { + std::shared_ptr op_node = std::shared_ptr(m_opset.create(op_type_name)); + + NGRAPH_CHECK(op_node != nullptr, "Couldn't create operator: ", op_type_name); + NGRAPH_CHECK(!ov::op::util::is_constant(op_node), + "Currently NodeFactory doesn't support Constant node: ", + 
op_type_name); + + NGRAPH_WARN << "Empty op created! Please assign inputs and attributes and run validate() before op is used."; + + return op_node; + } + +private: + const ov::OpSet& get_opset(std::string opset_ver) { + std::locale loc; + std::transform(opset_ver.begin(), opset_ver.end(), opset_ver.begin(), [&loc](char c) { + return std::tolower(c, loc); + }); + + using OpsetFunction = std::function; + + static const std::map s_opsets{ + {"opset1", OpsetFunction(ov::get_opset1)}, + {"opset2", OpsetFunction(ov::get_opset2)}, + {"opset3", OpsetFunction(ov::get_opset3)}, + {"opset4", OpsetFunction(ov::get_opset4)}, + {"opset5", OpsetFunction(ov::get_opset5)}, + {"opset6", OpsetFunction(ov::get_opset6)}, + {"opset7", OpsetFunction(ov::get_opset7)}, + {"opset8", OpsetFunction(ov::get_opset8)}, + }; + + auto it = s_opsets.find(opset_ver); + if (it == s_opsets.end()) { + throw ngraph::ngraph_error("Unsupported opset version requested."); + } + return it->second(); + } + + const ov::OpSet& m_opset = ov::get_opset8(); + std::unordered_map> m_variables; +}; +} // namespace + +void regclass_graph_NodeFactory(py::module m) { + py::class_ node_factory(m, "NodeFactory"); + node_factory.doc() = "NodeFactory creates nGraph nodes"; + + node_factory.def(py::init()); + node_factory.def(py::init()); + + node_factory.def("create", [](NodeFactory& self, const std::string name) { + return self.create(name); + }); + node_factory.def( + "create", + [](NodeFactory& self, const std::string name, const ov::OutputVector& arguments, const py::dict& attributes) { + return self.create(name, arguments, attributes); + }); + + node_factory.def("__repr__", [](const NodeFactory& self) { + return ""; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_factory.hpp b/runtime/bindings/python/src/pyopenvino/graph/node_factory.hpp new file mode 100644 index 00000000000..47845718b2b --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node_factory.hpp @@ -0,0 +1,11 @@ +// 
Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_NodeFactory(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_input.cpp b/runtime/bindings/python/src/pyopenvino/graph/node_input.cpp new file mode 100644 index 00000000000..8e6dc32b0ab --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node_input.cpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/node_input.hpp" + +#include + +#include "dict_attribute_visitor.hpp" +#include "pyopenvino/graph/node_input.hpp" + +namespace py = pybind11; + +void regclass_graph_Input(py::module m) { + py::class_, std::shared_ptr>> input(m, "Input", py::dynamic_attr()); + input.doc() = "openvino.impl.Input wraps ov::Input"; + + input.def("get_node", + &ov::Input::get_node, + R"( + Get node referenced by this input handle. + + Returns + ---------- + get_node : Node + Node object referenced by this input handle. + )"); + input.def("get_index", + &ov::Input::get_index, + R"( + The index of the input referred to by this input handle. + + Returns + ---------- + get_index : int + Index value as integer. + )"); + input.def("get_element_type", + &ov::Input::get_element_type, + R"( + The element type of the input referred to by this input handle. + + Returns + ---------- + get_element_type : Type + Type of the input. + )"); + input.def("get_shape", + &ov::Input::get_shape, + R"( + The shape of the input referred to by this input handle. + + Returns + ---------- + get_shape : Shape + Shape of the input. + )"); + input.def("get_partial_shape", + &ov::Input::get_partial_shape, + R"( + The partial shape of the input referred to by this input handle. + + Returns + ---------- + get_partial_shape : PartialShape + PartialShape of the input. 
+ )"); + input.def("get_source_output", + &ov::Input::get_source_output, + R"( + A handle to the output that is connected to this input. + + Returns + ---------- + get_source_output : Output + Output that is connected to the input. + )"); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_input.hpp b/runtime/bindings/python/src/pyopenvino/graph/node_input.hpp new file mode 100644 index 00000000000..38c7b56aa61 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node_input.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Input(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp b/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp new file mode 100644 index 00000000000..56ac60e3ba9 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp @@ -0,0 +1,78 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/node_output.hpp" + +#include + +#include "dict_attribute_visitor.hpp" +#include "pyopenvino/graph/node_output.hpp" + +namespace py = pybind11; + +void regclass_graph_Output(py::module m) { + py::class_, std::shared_ptr>> output(m, "Output", py::dynamic_attr()); + output.doc() = "openvino.impl.Output wraps ov::Output"; + + output.def("get_node", + &ov::Output::get_node, + R"( + Get node referenced by this output handle. + + Returns + ---------- + get_node : Node + Node object referenced by this output handle. + )"); + output.def("get_index", + &ov::Output::get_index, + R"( + The index of the output referred to by this output handle. + + Returns + ---------- + get_index : int + Index value as integer. + )"); + output.def("get_element_type", + &ov::Output::get_element_type, + R"( + The element type of the output referred to by this output handle. 
+ + Returns + ---------- + get_element_type : Type + Type of the output. + )"); + output.def("get_shape", + &ov::Output::get_shape, + R"( + The shape of the output referred to by this output handle. + + Returns + ---------- + get_shape : Shape + Shape of the output. + )"); + output.def("get_partial_shape", + &ov::Output::get_partial_shape, + R"( + The partial shape of the output referred to by this output handle. + + Returns + ---------- + get_partial_shape : PartialShape + PartialShape of the output. + )"); + output.def("get_target_inputs", + &ov::Output::get_target_inputs, + R"( + A set containing handles for all inputs targeted by the output + referenced by this output handle. + Returns + ---------- + get_target_inputs : Set[Input] + Set of Inputs. + )"); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp b/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp new file mode 100644 index 00000000000..9934c628b2e --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Output(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/constant.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/constant.cpp new file mode 100644 index 00000000000..2b1aa8617d7 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/constant.cpp @@ -0,0 +1,142 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/constant.hpp" + +#include +#include +#include +#include + +#include +#include + +#include "openvino/core/shape.hpp" +#include "pyopenvino/graph/ops/constant.hpp" + +namespace py = pybind11; + +template +std::vector _get_byte_strides(const ov::Shape& s) { + std::vector byte_strides; + std::vector element_strides = ov::row_major_strides(s); + 
for (auto v : element_strides) { + byte_strides.push_back(static_cast(v) * sizeof(T)); + } + return byte_strides; +} + +template +py::buffer_info _get_buffer_info(const ov::op::v0::Constant& c) { + ov::Shape shape = c.get_shape(); + return py::buffer_info(const_cast(c.get_data_ptr()), /* Pointer to buffer */ + static_cast(c.get_element_type().size()), /* Size of one scalar */ + py::format_descriptor::format(), /* Python struct-style format descriptor */ + static_cast(shape.size()), /* Number of dimensions */ + std::vector{shape.begin(), shape.end()}, /* Buffer dimensions */ + _get_byte_strides(shape) /* Strides (in bytes) for each index */ + ); +} + +template <> +py::buffer_info _get_buffer_info(const ov::op::v0::Constant& c) { + ov::Shape shape = c.get_shape(); + return py::buffer_info(const_cast(c.get_data_ptr()), /* Pointer to buffer */ + static_cast(c.get_element_type().size()), /* Size of one scalar */ + std::string(1, 'H'), /* Python struct-style format descriptor */ + static_cast(shape.size()), /* Number of dimensions */ + std::vector{shape.begin(), shape.end()}, /* Buffer dimensions */ + _get_byte_strides(shape) /* Strides (in bytes) for each index */ + ); +} + +template +py::array _cast_vector(const ov::op::v0::Constant& self) { + auto vec = self.cast_vector(); + return py::array(vec.size(), vec.data()); +} + +void regclass_graph_op_Constant(py::module m) { + py::class_, ov::Node> constant(m, + "Constant", + py::buffer_protocol()); + constant.doc() = "openvino.impl.op.Constant wraps ov::op::v0::Constant"; + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + constant.def(py::init&>()); + + constant.def("get_value_strings", &ov::op::v0::Constant::get_value_strings); + + 
constant.def("get_vector", [](const ov::op::v0::Constant& self) { + auto element_type = self.get_element_type(); + if (element_type == ov::element::boolean) { + return _cast_vector(self); + } else if (element_type == ov::element::f16) { + return _cast_vector(self); + } else if (element_type == ov::element::f32) { + return _cast_vector(self); + } else if (element_type == ov::element::f64) { + return _cast_vector(self); + } else if (element_type == ov::element::i8) { + return _cast_vector(self); + } else if (element_type == ov::element::i16) { + return _cast_vector(self); + } else if (element_type == ov::element::i32) { + return _cast_vector(self); + } else if (element_type == ov::element::i64) { + return _cast_vector(self); + } else if (element_type == ov::element::u8 || element_type == ov::element::u1) { + return _cast_vector(self); + } else if (element_type == ov::element::u16) { + return _cast_vector(self); + } else if (element_type == ov::element::u32) { + return _cast_vector(self); + } else if (element_type == ov::element::u64) { + return _cast_vector(self); + } else { + throw std::runtime_error("Unsupported data type!"); + } + }); + + // Provide buffer access + constant.def_buffer([](const ov::op::v0::Constant& self) -> py::buffer_info { + auto element_type = self.get_element_type(); + if (element_type == ov::element::boolean) { + return _get_buffer_info(self); + } else if (element_type == ov::element::f16) { + return _get_buffer_info(self); + } else if (element_type == ov::element::f32) { + return _get_buffer_info(self); + } else if (element_type == ov::element::f64) { + return _get_buffer_info(self); + } else if (element_type == ov::element::i8) { + return _get_buffer_info(self); + } else if (element_type == ov::element::i16) { + return _get_buffer_info(self); + } else if (element_type == ov::element::i32) { + return _get_buffer_info(self); + } else if (element_type == ov::element::i64) { + return _get_buffer_info(self); + } else if (element_type == 
ov::element::u8 || element_type == ov::element::u1) { + return _get_buffer_info(self); + } else if (element_type == ov::element::u16) { + return _get_buffer_info(self); + } else if (element_type == ov::element::u32) { + return _get_buffer_info(self); + } else if (element_type == ov::element::u64) { + return _get_buffer_info(self); + } else { + throw std::runtime_error("Unsupported data type!"); + } + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/constant.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/constant.hpp new file mode 100644 index 00000000000..e357ed8bf4e --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/constant.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_Constant(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/parameter.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/parameter.cpp new file mode 100644 index 00000000000..3eb8997f432 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/parameter.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/parameter.hpp" + +#include +#include + +#include + +#include "openvino/core/node.hpp" +#include "openvino/core/partial_shape.hpp" // ov::PartialShape +#include "pyopenvino/graph/ops/parameter.hpp" + +namespace py = pybind11; + +void regclass_graph_op_Parameter(py::module m) { + py::class_, ov::Node> parameter(m, "Parameter"); + parameter.doc() = "openvino.impl.op.Parameter wraps ov::op::v0::Parameter"; + parameter.def("__repr__", [](const ov::Node& self) { + std::string class_name = py::cast(self).get_type().attr("__name__").cast(); + std::string shape = py::cast(self.get_output_partial_shape(0)).attr("__str__")().cast(); + std::string type = self.get_element_type().c_type_string(); + 
return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shape + ", " + type + ")>"; + }); + + parameter.def(py::init()); + parameter.def(py::init()); + // parameter.def_property_readonly("description", &ov::op::v0::Parameter::description); + + parameter.def( + "get_partial_shape", + (const ov::PartialShape& (ov::op::v0::Parameter::*)() const) & ov::op::v0::Parameter::get_partial_shape); + parameter.def("get_partial_shape", + (ov::PartialShape & (ov::op::v0::Parameter::*)()) & ov::op::v0::Parameter::get_partial_shape); + parameter.def("set_partial_shape", &ov::op::v0::Parameter::set_partial_shape); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/parameter.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/parameter.hpp new file mode 100644 index 00000000000..66faafa64ed --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/parameter.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_Parameter(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/result.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/result.cpp new file mode 100644 index 00000000000..fb917d71d79 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/result.cpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/result.hpp" + +#include +#include + +#include + +#include "openvino/core/node.hpp" +#include "pyopenvino/graph/ops/result.hpp" + +namespace py = pybind11; + +void regclass_graph_op_Result(py::module m) { + py::class_, ov::Node> result(m, "Result"); + + result.doc() = "openvino.impl.op.Result wraps ov::op::v0::Result"; +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/result.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/result.hpp new file mode 100644 index 
00000000000..10824fbb42d --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/result.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_Result(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp new file mode 100644 index 00000000000..3026a13c44e --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/arithmetic_reduction.hpp" + +#include +#include + +#include "openvino/op/op.hpp" +#include "pyopenvino/graph/ops/util/arithmetic_reduction.hpp" + +namespace py = pybind11; + +void regclass_graph_op_util_ArithmeticReduction(py::module m) { + py::class_> + arithmeticReduction(m, "ArithmeticReduction"); + // arithmeticReduction.def(py::init&, + // const ov::AxisSet& >()); + arithmeticReduction.def("get_reduction_axes", &ov::op::util::ArithmeticReduction::get_reduction_axes); + arithmeticReduction.def("set_reduction_axes", &ov::op::util::ArithmeticReduction::set_reduction_axes); + + arithmeticReduction.def_property("reduction_axes", + &ov::op::util::ArithmeticReduction::get_reduction_axes, + &ov::op::util::ArithmeticReduction::set_reduction_axes); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp new file mode 100644 index 00000000000..8825b7be11f --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/arithmetic_reduction.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + 
+namespace py = pybind11; + +void regclass_graph_op_util_ArithmeticReduction(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp new file mode 100644 index 00000000000..d778f00acd0 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" + +#include +#include + +#include "pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp" + +namespace py = pybind11; + +void regclass_graph_op_util_BinaryElementwiseArithmetic(py::module m) { + py::class_> + binaryElementwiseArithmetic(m, "BinaryElementwiseArithmetic"); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp new file mode 100644 index 00000000000..e4304695c4a --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_util_BinaryElementwiseArithmetic(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp new file mode 100644 index 00000000000..27566a1358c --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include 
"openvino/op/util/binary_elementwise_comparison.hpp" + +#include +#include + +#include "pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp" + +namespace py = pybind11; + +void regclass_graph_op_util_BinaryElementwiseComparison(py::module m) { + py::class_> + binaryElementwiseComparison(m, "BinaryElementwiseComparison"); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp new file mode 100644 index 00000000000..69727256098 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_util_BinaryElementwiseComparison(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp new file mode 100644 index 00000000000..9058c32c9e0 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/binary_elementwise_logical.hpp" + +#include +#include + +#include "pyopenvino/graph/ops/util/binary_elementwise_logical.hpp" + +namespace py = pybind11; + +void regclass_graph_op_util_BinaryElementwiseLogical(py::module m) { + py::class_> + binaryElementwiseLogical(m, "BinaryElementwiseLogical"); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp new file mode 100644 index 00000000000..1253fa08f99 --- /dev/null +++ 
b/runtime/bindings/python/src/pyopenvino/graph/ops/util/binary_elementwise_logical.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_util_BinaryElementwiseLogical(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp new file mode 100644 index 00000000000..bcbd8c1e668 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/index_reduction.hpp" + +#include +#include + +#include "openvino/op/op.hpp" +#include "pyopenvino/graph/ops/util/index_reduction.hpp" + +namespace py = pybind11; + +void regclass_graph_op_util_IndexReduction(py::module m) { + py::class_> indexReduction( + m, + "IndexReduction"); + + indexReduction.def("get_reduction_axis", &ov::op::util::IndexReduction::get_reduction_axis); + indexReduction.def("set_reduction_axis", &ov::op::util::IndexReduction::set_reduction_axis); + indexReduction.def("get_index_element_type", &ov::op::util::IndexReduction::get_index_element_type); + indexReduction.def("set_index_element_type", &ov::op::util::IndexReduction::set_index_element_type); + + indexReduction.def_property("reduction_axis", + &ov::op::util::IndexReduction::get_reduction_axis, + &ov::op::util::IndexReduction::set_reduction_axis); + indexReduction.def_property("index_element_type", + &ov::op::util::IndexReduction::get_index_element_type, + &ov::op::util::IndexReduction::set_index_element_type); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp new file mode 100644 index 00000000000..c737321226a ---
/dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/index_reduction.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_util_IndexReduction(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp new file mode 100644 index 00000000000..1ae197b06f0 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.cpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "regmodule_graph_op_util.hpp" + +#include + +namespace py = pybind11; + +void regmodule_graph_op_util(py::module m) { + py::module m_util = m.def_submodule("util", "module openvino.op.util"); + // regclass_graph_op_util_RequiresTensorViewArgs(m_util); + regclass_graph_op_util_ArithmeticReduction(m_util); + // regclass_graph_op_util_BinaryElementwise(m_util); + regclass_graph_op_util_BinaryElementwiseArithmetic(m_util); + regclass_graph_op_util_BinaryElementwiseComparison(m_util); + regclass_graph_op_util_BinaryElementwiseLogical(m_util); + // regclass_graph_op_util_UnaryElementwise(m_util); + regclass_graph_op_util_UnaryElementwiseArithmetic(m_util); + regclass_graph_op_util_IndexReduction(m_util); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp new file mode 100644 index 00000000000..fcd016ac64b --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include 
"pyopenvino/graph/ops/util/arithmetic_reduction.hpp" +#include "pyopenvino/graph/ops/util/binary_elementwise_arithmetic.hpp" +#include "pyopenvino/graph/ops/util/binary_elementwise_comparison.hpp" +#include "pyopenvino/graph/ops/util/binary_elementwise_logical.hpp" +#include "pyopenvino/graph/ops/util/index_reduction.hpp" +#include "pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp" + +namespace py = pybind11; + +void regmodule_graph_op_util(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp new file mode 100644 index 00000000000..7ec65547523 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/unary_elementwise_arithmetic.hpp" + +#include +#include + +#include "pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp" + +namespace py = pybind11; + +void regclass_graph_op_util_UnaryElementwiseArithmetic(py::module m) { + py::class_> + unaryElementwiseArithmetic(m, "UnaryElementwiseArithmetic"); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp b/runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp new file mode 100644 index 00000000000..1a226e182bb --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/ops/util/unary_elementwise_arithmetic.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_op_util_UnaryElementwiseArithmetic(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/partial_shape.cpp b/runtime/bindings/python/src/pyopenvino/graph/partial_shape.cpp new file 
mode 100644 index 00000000000..3eea18e334e --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/partial_shape.cpp @@ -0,0 +1,218 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/partial_shape.hpp" // ov::PartialShape + +#include +#include + +#include +#include +#include + +#include "openvino/core/dimension.hpp" // ov::Dimension +#include "openvino/core/shape.hpp" // ov::Shape +#include "pyopenvino/graph/partial_shape.hpp" + +namespace py = pybind11; + +static const char* CAPSULE_NAME = "ngraph_partial_shape"; + +void regclass_graph_PartialShape(py::module m) { + py::class_> shape(m, "PartialShape"); + shape.doc() = "openvino.impl.PartialShape wraps ov::PartialShape"; + + shape.def(py::init([](const std::vector& dimensions) { + return ov::PartialShape(std::vector(dimensions.begin(), dimensions.end())); + })); + shape.def(py::init&>()); + shape.def(py::init&>()); + shape.def(py::init&>()); + shape.def(py::init&>()); + shape.def(py::init()); + shape.def(py::init()); + + shape.def_static("dynamic", &ov::PartialShape::dynamic, py::arg("r") = ov::Dimension()); + + shape.def_property_readonly("is_dynamic", + &ov::PartialShape::is_dynamic, + R"( + False if this shape is static, else True. + A shape is considered static if it has static rank, + and all dimensions of the shape are static. + )"); + shape.def_property_readonly("is_static", + &ov::PartialShape::is_static, + R"( + True if this shape is static, else False. + A shape is considered static if it has static rank, + and all dimensions of the shape are static. + )"); + shape.def_property_readonly("rank", + &ov::PartialShape::rank, + R"( + The rank of the shape. + )"); + shape.def_property_readonly("all_non_negative", + &ov::PartialShape::all_non_negative, + R"( + True if all static dimensions of the tensor are + non-negative, else False. 
+ )"); + + shape.def("compatible", + &ov::PartialShape::compatible, + py::arg("s"), + R"( + Check whether this shape is compatible with the argument, i.e., + whether it is possible to merge them. + + Parameters + ---------- + s : PartialShape + The shape to be checked for compatibility with this shape. + + + Returns + ---------- + compatible : bool + True if this shape is compatible with s, else False. + )"); + shape.def("refines", + &ov::PartialShape::refines, + py::arg("s"), + R"( + Check whether this shape is a refinement of the argument. + + Parameters + ---------- + s : PartialShape + The shape which is being compared against this shape. + + Returns + ---------- + refines : bool + True if this shape refines s, else False. + )"); + shape.def("relaxes", + &ov::PartialShape::relaxes, + py::arg("s"), + R"( + Check whether this shape is a relaxation of the argument. + + Parameters + ---------- + s : PartialShape + The shape which is being compared against this shape. + + Returns + ---------- + relaxes : bool + True if this shape relaxes s, else False. + )"); + shape.def("same_scheme", + &ov::PartialShape::same_scheme, + py::arg("s"), + R"( + Check whether this shape represents the same scheme as the argument. + + Parameters + ---------- + s : PartialShape + The shape which is being compared against this shape. + + Returns + ---------- + same_scheme : bool + True if shape represents the same scheme as s, else False. + )"); + shape.def("get_max_shape", + &ov::PartialShape::get_max_shape, + R"( + Returns + ---------- + get_max_shape : Shape + Get the max bounding shape. + )"); + shape.def("get_min_shape", + &ov::PartialShape::get_min_shape, + R"( + Returns + ---------- + get_min_shape : Shape + Get the min bounding shape. + )"); + shape.def("get_shape", + &ov::PartialShape::get_shape, + R"( + Returns + ---------- + get_shape : Shape + Get the unique shape. 
+ )"); + shape.def("to_shape", + &ov::PartialShape::to_shape, + R"( + Returns + ---------- + to_shape : Shape + Get the unique shape. + )"); + shape.def( + "get_dimension", + [](const ov::PartialShape& self, size_t index) -> ov::Dimension { + return self[index]; + }, + py::arg("index"), + R"( + Get the dimension at specified index of a partial shape. + + Parameters + ---------- + index : int + The index of dimension + + Returns + ---------- + get_dimension : Dimension + Get the particular dimension of a partial shape. + )"); + + shape.def( + "__eq__", + [](const ov::PartialShape& a, const ov::PartialShape& b) { + return a == b; + }, + py::is_operator()); + shape.def( + "__eq__", + [](const ov::PartialShape& a, const ov::Shape& b) { + return a == b; + }, + py::is_operator()); + + shape.def("__str__", [](const ov::PartialShape& self) -> std::string { + std::stringstream ss; + ss << self; + return ss.str(); + }); + + shape.def("__repr__", [](const ov::PartialShape& self) -> std::string { + return "() + ">"; + }); + + shape.def_static("from_capsule", [](py::object* capsule) { + // get the underlying PyObject* which is a PyCapsule pointer + auto* pybind_capsule_ptr = capsule->ptr(); + // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME + auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME); + + auto* ngraph_pShape = static_cast*>(capsule_ptr); + if (ngraph_pShape && *ngraph_pShape) { + return *ngraph_pShape; + } else { + throw std::runtime_error("The provided capsule does not contain an ov::PartialShape"); + } + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/partial_shape.hpp b/runtime/bindings/python/src/pyopenvino/graph/partial_shape.hpp new file mode 100644 index 00000000000..fb0602139ad --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/partial_shape.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + 
+namespace py = pybind11; + +void regclass_graph_PartialShape(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/passes/manager.cpp b/runtime/bindings/python/src/pyopenvino/graph/passes/manager.cpp new file mode 100644 index 00000000000..163d92ceb58 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/passes/manager.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/pass/manager.hpp" + +#include +#include + +#include "openvino/pass/constant_folding.hpp" +#include "openvino/pass/pass.hpp" +#include "openvino/pass/validate.hpp" +#include "pyopenvino/graph/passes/manager.hpp" + +namespace py = pybind11; + +namespace { +class ManagerWrapper : public ov::pass::Manager { +public: + ManagerWrapper() {} + ~ManagerWrapper() {} + void register_pass(std::string pass_name) { + if (pass_name == "ConstantFolding") + push_pass(); + + if (m_per_pass_validation) { + push_pass(); + } + return; + } +}; +} // namespace + +void regclass_graph_passes_Manager(py::module m) { + py::class_ manager(m, "Manager"); + manager.doc() = "openvino.impl.passes.Manager wraps ov::pass::Manager using ManagerWrapper"; + + manager.def(py::init<>()); + + manager.def("set_per_pass_validation", &ManagerWrapper::set_per_pass_validation); + manager.def("run_passes", &ManagerWrapper::run_passes); + manager.def("register_pass", (void (ManagerWrapper::*)(std::string)) & ManagerWrapper::register_pass); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/passes/manager.hpp b/runtime/bindings/python/src/pyopenvino/graph/passes/manager.hpp new file mode 100644 index 00000000000..780fc2df7e3 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/passes/manager.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_passes_Manager(py::module m); diff 
--git a/runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp b/runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp new file mode 100644 index 00000000000..a95987a1ad7 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp @@ -0,0 +1,14 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/graph/passes/regmodule_graph_passes.hpp" + +#include + +namespace py = pybind11; + +void regmodule_graph_passes(py::module m) { + py::module m_passes = m.def_submodule("passes", "Package openvino.impl.passes wraps ov::passes"); + regclass_graph_passes_Manager(m_passes); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp b/runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp new file mode 100644 index 00000000000..e36dd12ceed --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.hpp @@ -0,0 +1,12 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "pyopenvino/graph/passes/manager.hpp" + +namespace py = pybind11; + +void regmodule_graph_passes(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/rt_map.cpp b/runtime/bindings/python/src/pyopenvino/graph/rt_map.cpp new file mode 100644 index 00000000000..ba52c2ca5c5 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/rt_map.cpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/graph/rt_map.hpp" + +#include +#include +#include + +#include "dict_attribute_visitor.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/variant.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/subtract.hpp" 
+#include "pyopenvino/graph/node.hpp" +#include "pyopenvino/graph/variant.hpp" + +namespace py = pybind11; + +using PyRTMap = std::map>; + +PYBIND11_MAKE_OPAQUE(PyRTMap); + +template +void _set_with_variant(PyRTMap& m, const std::string& k, const T v) { + auto new_v = std::make_shared>(ov::VariantWrapper(v)); + auto it = m.find(k); + if (it != m.end()) + it->second = new_v; + else + m.emplace(k, new_v); +} + +void regclass_graph_PyRTMap(py::module m) { + auto py_map = py::bind_map(m, "PyRTMap"); + py_map.doc() = "ngraph.impl.PyRTMap makes bindings for std::map>, which can later be used as ov::Node::RTMap"; + + py_map.def("__setitem__", [](PyRTMap& m, const std::string& k, const std::string v) { + _set_with_variant(m, k, v); + }); + py_map.def("__setitem__", [](PyRTMap& m, const std::string& k, const int64_t v) { + _set_with_variant(m, k, v); + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/rt_map.hpp b/runtime/bindings/python/src/pyopenvino/graph/rt_map.hpp new file mode 100644 index 00000000000..07f177baf9f --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/rt_map.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_PyRTMap(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/shape.cpp b/runtime/bindings/python/src/pyopenvino/graph/shape.cpp new file mode 100644 index 00000000000..02b906e6c82 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/shape.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/shape.hpp" // ov::Shape + +#include +#include + +#include +#include +#include + +#include "pyopenvino/graph/shape.hpp" + +namespace py = pybind11; + +void regclass_graph_Shape(py::module m) { + py::class_> shape(m, "Shape"); + shape.doc() = "openvino.impl.Shape wraps ov::Shape"; + 
shape.def(py::init&>(), py::arg("axis_lengths")); + shape.def(py::init&>(), py::arg("axis_lengths")); + shape.def(py::init(), py::arg("axis_lengths")); + shape.def("__len__", [](const ov::Shape& v) { + return v.size(); + }); + shape.def("__getitem__", [](const ov::Shape& v, int key) { + return v[key]; + }); + + shape.def( + "__iter__", + [](ov::Shape& v) { + return py::make_iterator(v.begin(), v.end()); + }, + py::keep_alive<0, 1>()); /* Keep vector alive while iterator is used */ + + shape.def("__str__", [](const ov::Shape& self) -> std::string { + std::stringstream ss; + ss << self; + return ss.str(); + }); + + shape.def("__repr__", [](const ov::Shape& self) -> std::string { + return "() + ">"; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/shape.hpp b/runtime/bindings/python/src/pyopenvino/graph/shape.hpp new file mode 100644 index 00000000000..f8d99518079 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/shape.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Shape(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/strides.cpp b/runtime/bindings/python/src/pyopenvino/graph/strides.cpp new file mode 100644 index 00000000000..17bbf10de78 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/strides.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/strides.hpp" // ov::Strides + +#include +#include + +#include +#include +#include + +#include "pyopenvino/graph/strides.hpp" + +namespace py = pybind11; + +void regclass_graph_Strides(py::module m) { + py::class_> strides(m, "Strides"); + strides.doc() = "openvino.impl.Strides wraps ov::Strides"; + strides.def(py::init&>(), py::arg("axis_strides")); + strides.def(py::init&>(), py::arg("axis_strides")); + strides.def(py::init(), 
py::arg("axis_strides")); + + strides.def("__str__", [](const ov::Strides& self) -> std::string { + std::stringstream stringstream; + std::copy(self.begin(), self.end(), std::ostream_iterator(stringstream, ", ")); + std::string string = stringstream.str(); + return string.substr(0, string.size() - 2); + }); + + strides.def("__repr__", [](const ov::Strides& self) -> std::string { + std::string class_name = py::cast(self).get_type().attr("__name__").cast(); + std::string shape_str = py::cast(self).attr("__str__")().cast(); + return "<" + class_name + ": (" + shape_str + ")>"; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/strides.hpp b/runtime/bindings/python/src/pyopenvino/graph/strides.hpp new file mode 100644 index 00000000000..a5f589ce0b5 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/strides.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Strides(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/types/element_type.cpp b/runtime/bindings/python/src/pyopenvino/graph/types/element_type.cpp new file mode 100644 index 00000000000..0a0f6fa6ad1 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/types/element_type.cpp @@ -0,0 +1,51 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/type/element_type.hpp" + +#include +#include + +#include "openvino/op/parameter.hpp" +#include "pyopenvino/graph/types/element_type.hpp" + +namespace py = pybind11; + +void regclass_graph_Type(py::module m) { + py::class_> type(m, "Type"); + type.doc() = "openvino.impl.Type wraps ov::element::Type"; + type.attr("boolean") = ov::element::boolean; + type.attr("f16") = ov::element::f16; + type.attr("f32") = ov::element::f32; + type.attr("f64") = ov::element::f64; + type.attr("i8") = ov::element::i8; + 
type.attr("i16") = ov::element::i16; + type.attr("i32") = ov::element::i32; + type.attr("i64") = ov::element::i64; + type.attr("u1") = ov::element::u1; + type.attr("u8") = ov::element::u8; + type.attr("u16") = ov::element::u16; + type.attr("u32") = ov::element::u32; + type.attr("u64") = ov::element::u64; + type.attr("bf16") = ov::element::bf16; + + type.def("__repr__", [](const ov::element::Type& self) { + std::string bitwidth = std::to_string(self.bitwidth()); + if (self.is_signed()) { + return ""; + } + return ""; + }); + + type.def( + "__eq__", + [](const ov::element::Type& a, const ov::element::Type& b) { + return a == b; + }, + py::is_operator()); + + type.def_property_readonly("bitwidth", &ov::element::Type::bitwidth); + type.def_property_readonly("is_real", &ov::element::Type::is_real); + type.def("get_type_name", &ov::element::Type::get_type_name); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/types/element_type.hpp b/runtime/bindings/python/src/pyopenvino/graph/types/element_type.hpp new file mode 100644 index 00000000000..2f1aa532ee8 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/types/element_type.hpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_graph_Type(py::module m); +void regclass_graph_Bool(py::module m); +void regclass_graph_Float32(py::module m); +void regclass_graph_Float64(py::module m); +void regclass_graph_Int8(py::module m); +// void regclass_graph_Int16(py::module m); +void regclass_graph_Int32(py::module m); +void regclass_graph_Int64(py::module m); +void regclass_graph_UInt8(py::module m); +// void regclass_graph_UInt16(py::module m); +void regclass_graph_UInt32(py::module m); +void regclass_graph_UInt64(py::module m); +void regclass_graph_BFloat16(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp 
b/runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp new file mode 100644 index 00000000000..09f3ca406d6 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.cpp @@ -0,0 +1,13 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/graph/types/regmodule_graph_types.hpp" + +#include + +namespace py = pybind11; + +void regmodule_graph_types(py::module m) { + regclass_graph_Type(m); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp b/runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp new file mode 100644 index 00000000000..57b0775edef --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/types/regmodule_graph_types.hpp @@ -0,0 +1,13 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "pyopenvino/graph/types/element_type.hpp" + +namespace py = pybind11; + +void regmodule_graph_types(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/util.cpp b/runtime/bindings/python/src/pyopenvino/graph/util.cpp new file mode 100644 index 00000000000..1dc7d037988 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/util.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/graph/util.hpp" + +#include + +#include "openvino/core/validation_util.hpp" + +namespace py = pybind11; + +void* numpy_to_c(py::array a) { + py::buffer_info info = a.request(); + return info.ptr; +} + +void regmodule_graph_util(py::module m) { + py::module mod = m.def_submodule("util", "ngraph.impl.util"); + mod.def("numpy_to_c", &numpy_to_c); + mod.def("get_constant_from_source", + &ov::get_constant_from_source, + py::arg("output"), + R"( + Runs an estimation of source tensor. 
+ + Parameters + ---------- + output : Output + output node + + Returns + ---------- + get_constant_from_source : Constant or None + If it succeeded to calculate both bounds and + they are the same returns Constant operation + from the resulting bound, otherwise Null. + )"); +} diff --git a/runtime/bindings/python/src/pyopenvino/graph/util.hpp b/runtime/bindings/python/src/pyopenvino/graph/util.hpp new file mode 100644 index 00000000000..391676f87ab --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/util.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regmodule_graph_util(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/graph/util.py b/runtime/bindings/python/src/pyopenvino/graph/util.py new file mode 100644 index 00000000000..00752bc5db0 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/util.py @@ -0,0 +1,8 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# flake8: noqa + +from openvino.pyopenvino import util + +numpy_to_c = util.numpy_to_c diff --git a/runtime/bindings/python/src/pyopenvino/graph/variant.cpp b/runtime/bindings/python/src/pyopenvino/graph/variant.cpp new file mode 100644 index 00000000000..407b3965797 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/variant.cpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/variant.hpp" // ov::Variant + +#include + +#include "pyopenvino/graph/variant.hpp" + +namespace py = pybind11; + +void regclass_graph_Variant(py::module m) { + py::class_> variant_base(m, "Variant"); + variant_base.doc() = "openvino.impl.Variant wraps ov::Variant"; +} + +template void regclass_graph_VariantWrapper(py::module m, std::string typestring); +template void regclass_graph_VariantWrapper(py::module m, std::string 
typestring); diff --git a/runtime/bindings/python/src/pyopenvino/graph/variant.hpp b/runtime/bindings/python/src/pyopenvino/graph/variant.hpp new file mode 100644 index 00000000000..6d4bc06f0f5 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/graph/variant.hpp @@ -0,0 +1,76 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include +#include + +#include "openvino/core/variant.hpp" // ov::Variant + +namespace py = pybind11; + +void regclass_graph_Variant(py::module m); + +template +extern void regclass_graph_VariantWrapper(py::module m, std::string typestring) +{ + auto pyclass_name = py::detail::c_str((std::string("Variant") + typestring)); + py::class_, + std::shared_ptr>, + ov::Variant> + variant_wrapper(m, pyclass_name ); + variant_wrapper.doc() = + "openvino.impl.Variant[typestring] wraps ov::VariantWrapper"; + + variant_wrapper.def(py::init()); + + variant_wrapper.def( + "__eq__", + [](const ov::VariantWrapper& a, const ov::VariantWrapper& b) { + return a.get() == b.get(); + }, + py::is_operator()); + variant_wrapper.def( + "__eq__", + [](const ov::VariantWrapper& a, const std::string& b) { + return a.get() == b; + }, + py::is_operator()); + variant_wrapper.def( + "__eq__", + [](const ov::VariantWrapper& a, const int64_t& b) { return a.get() == b; }, + py::is_operator()); + + variant_wrapper.def("__repr__", [](const ov::VariantWrapper self) { + std::stringstream ret; + ret << self.get(); + return ret.str(); + }); + + variant_wrapper.def("get", + (VT & (ov::VariantWrapper::*)()) & ov::VariantWrapper::get, + R"( + Returns + ---------- + get : Variant + Value of Variant. + )"); + variant_wrapper.def("set", + &ov::VariantWrapper::set, + R"( + Parameters + ---------- + set : str or int + Value to be set in Variant. 
+ )"); + + variant_wrapper.def_property("value", + (VT & (ov::VariantWrapper::*)()) & + ov::VariantWrapper::get, + &ov::VariantWrapper::set); +} diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index e3223a0751b..816c3ebfa09 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -8,20 +8,45 @@ #include #include -#include "core/containers.hpp" -#include "core/ie_blob.hpp" -#include "core/ie_core.hpp" -#include "core/ie_data.hpp" -#include "core/ie_executable_network.hpp" -#include "core/ie_infer_queue.hpp" -#include "core/ie_infer_request.hpp" -#include "core/ie_input_info.hpp" -#include "core/ie_network.hpp" -#include "core/ie_parameter.hpp" -#include "core/ie_preprocess_info.hpp" -#include "core/ie_version.hpp" -#include "core/tensor.hpp" -#include "core/tensor_description.hpp" +#include "pyopenvino/graph/axis_set.hpp" +#include "pyopenvino/graph/axis_vector.hpp" +#include "pyopenvino/graph/coordinate.hpp" +#include "pyopenvino/graph/coordinate_diff.hpp" +#include "pyopenvino/graph/function.hpp" +#include "pyopenvino/graph/node.hpp" +#include "pyopenvino/graph/node_factory.hpp" +#include "pyopenvino/graph/node_input.hpp" +#include "pyopenvino/graph/node_output.hpp" +#if defined(NGRAPH_ONNX_FRONTEND_ENABLE) +# include "pyopenvino/graph/onnx_import/onnx_import.hpp" +#endif +#include "pyopenvino/core/containers.hpp" +#include "pyopenvino/core/ie_blob.hpp" +#include "pyopenvino/core/ie_core.hpp" +#include "pyopenvino/core/ie_data.hpp" +#include "pyopenvino/core/ie_executable_network.hpp" +#include "pyopenvino/core/ie_infer_queue.hpp" +#include "pyopenvino/core/ie_infer_request.hpp" +#include "pyopenvino/core/ie_input_info.hpp" +#include "pyopenvino/core/ie_network.hpp" +#include "pyopenvino/core/ie_parameter.hpp" +#include "pyopenvino/core/ie_preprocess_info.hpp" +#include "pyopenvino/core/ie_version.hpp" +#include 
"pyopenvino/core/tensor.hpp" +#include "pyopenvino/core/tensor_description.hpp" +#include "pyopenvino/graph/dimension.hpp" +#include "pyopenvino/graph/ops/constant.hpp" +#include "pyopenvino/graph/ops/parameter.hpp" +#include "pyopenvino/graph/ops/result.hpp" +#include "pyopenvino/graph/ops/util/regmodule_graph_op_util.hpp" +#include "pyopenvino/graph/partial_shape.hpp" +#include "pyopenvino/graph/passes/regmodule_graph_passes.hpp" +#include "pyopenvino/graph/rt_map.hpp" +#include "pyopenvino/graph/shape.hpp" +#include "pyopenvino/graph/strides.hpp" +#include "pyopenvino/graph/types/regmodule_graph_types.hpp" +#include "pyopenvino/graph/util.hpp" +#include "pyopenvino/graph/variant.hpp" namespace py = pybind11; @@ -57,6 +82,35 @@ PYBIND11_MODULE(pyopenvino, m) { .value("STATUS_ONLY", InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY) .export_values(); + regclass_graph_PyRTMap(m); + regmodule_graph_types(m); + regclass_graph_Dimension(m); // Dimension must be registered before PartialShape + regclass_graph_Shape(m); + regclass_graph_PartialShape(m); + regclass_graph_Node(m); + regclass_graph_Input(m); + regclass_graph_Output(m); + regclass_graph_NodeFactory(m); + regclass_graph_Strides(m); + regclass_graph_CoordinateDiff(m); + regclass_graph_AxisSet(m); + regclass_graph_AxisVector(m); + regclass_graph_Coordinate(m); + py::module m_op = m.def_submodule("op", "Package ngraph.impl.op that wraps ov::op"); // TODO(!) 
+ regclass_graph_op_Constant(m_op); + regclass_graph_op_Parameter(m_op); + regclass_graph_op_Result(m_op); +#if defined(NGRAPH_ONNX_FRONTEND_ENABLE) + regmodule_graph_onnx_import(m); +#endif + regmodule_graph_op_util(m_op); + regclass_graph_Function(m); + regmodule_graph_passes(m); + regmodule_graph_util(m); + regclass_graph_Variant(m); + regclass_graph_VariantWrapper(m, std::string("String")); + regclass_graph_VariantWrapper(m, std::string("Int")); + regclass_Core(m); regclass_IENetwork(m); @@ -81,6 +135,7 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_Tensor(m); // Registering specific types of containers + Containers::regclass_PyInputsDataMap(m); Containers::regclass_PyConstInputsDataMap(m); Containers::regclass_PyOutputsDataMap(m); Containers::regclass_PyResults(m); diff --git a/runtime/bindings/python/tests/__init__.py b/runtime/bindings/python/tests/__init__.py index 82701df69b2..244ce7b4016 100644 --- a/runtime/bindings/python/tests/__init__.py +++ b/runtime/bindings/python/tests/__init__.py @@ -23,6 +23,8 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): skip_segfault = pytest.mark.skip(reason="Segmentation fault error") +skip_issue_67415 = pytest.mark.skip(reason="RuntimeError: Unsupported data type for when filling blob!") +xfail_issue_67415 = xfail_test(reason="RuntimeError: Unsupported data type for when filling blob!") xfail_issue_33488 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " "MaxUnpool") xfail_issue_33538 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " diff --git a/runtime/bindings/python/tests/runtime.py b/runtime/bindings/python/tests/runtime.py index 4a312fccb07..cc5c49620a6 100644 --- a/runtime/bindings/python/tests/runtime.py +++ b/runtime/bindings/python/tests/runtime.py @@ -7,12 +7,12 @@ import logging from typing import Dict, List, Union import numpy as np -from openvino.inference_engine import IECore, IENetwork, Blob, 
DataPtr +from openvino import Core, IENetwork, Blob, DataPtr -from ngraph.exceptions import UserInputError -from ngraph.impl import Function, Node, PartialShape, Type -from ngraph.opset1.ops import result -from ngraph.utils.types import NumericData, get_shape, get_dtype +from openvino.exceptions import UserInputError +from openvino.impl import Function, Node, PartialShape, Type +from openvino.opset1.ops import result +from openvino.utils.types import NumericData, get_shape, get_dtype import tests @@ -36,6 +36,7 @@ def _convert_inputs(cnn_network: IENetwork) -> None: """WA converts unsupported input images formats.""" precision_map = { "FP64": "FP32", + "I64": "I32", "U32": "I32", } @@ -47,6 +48,18 @@ def _convert_inputs(cnn_network: IENetwork) -> None: pass +def _convert_val(val): + """WA converts unsupported input values.""" + if type(val) is np.ndarray: + if val.dtype == np.float64: + return np.array(val, dtype=np.float32) + elif val.dtype == np.int64: + return np.array(val, dtype=np.int32) + return np.array(val) + + return np.array(val, dtype=np.float32) + + def apply_ng_type(output: DataPtr, ng_type: Type): ng_ie_supported_type_map = { Type.boolean.get_type_name(): "BOOL", @@ -65,7 +78,7 @@ class Runtime(object): def __init__(self, backend_name: str) -> None: self.backend_name = backend_name log.debug("Creating Inference Engine for %s" % backend_name) - self.backend = IECore() + self.backend = Core() assert backend_name in self.backend.available_devices, ( 'The requested device "' + backend_name + '" is not supported!' 
) @@ -147,14 +160,13 @@ class Computation(object): # ignore not needed input values input_values = input_values[:len(self.parameters)] - input_values = [np.array(input_value) for input_value in input_values] + input_values = [_convert_val(input_value) for input_value in input_values] input_shapes = [get_shape(input_value) for input_value in input_values] param_names = [param.friendly_name for param in self.parameters] if self.network_cache.get(str(input_shapes)) is None: - capsule = Function.to_capsule(self.function) - cnn_network = IENetwork(capsule) + cnn_network = IENetwork(self.function) if self.function.is_dynamic(): cnn_network.reshape(dict(zip(param_names, input_shapes))) # Convert unsupported inputs of the network @@ -180,7 +192,7 @@ class Computation(object): parameter_shape, ) - request = executable_network.requests[0] + request = executable_network.create_infer_request() request.infer(dict(zip(param_names, input_values))) # Set order of output blobs compatible with nG Function diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index 191baaab0bb..59ed993278c 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -7,10 +7,10 @@ import os from sys import platform from pathlib import Path -import ngraph as ng -import openvino as ov -from ngraph.impl import Function, Shape, Type -from ngraph.impl.op import Parameter +import openvino.opset8 as ov +from openvino import Core, IENetwork, ExecutableNetwork, blob_from_file +from openvino.impl import Function, Shape, Type +from openvino.impl.op import Parameter from openvino import TensorDesc, Blob from ..conftest import model_path, model_onnx_path, plugins_path @@ -42,14 +42,14 @@ def test_blobs(): @pytest.mark.skip(reason="Fix") def test_ie_core_class(): input_shape = [1, 3, 4, 4] - param = ng.parameter(input_shape, 
np.float32, name="parameter") - relu = ng.relu(param, name="relu") + param = ov.parameter(input_shape, np.float32, name="parameter") + relu = ov.relu(param, name="relu") func = Function([relu], [param], "test") func.get_ordered_ops()[2].friendly_name = "friendly" - cnn_network = ov.IENetwork(func) + cnn_network = IENetwork(func) - ie_core = ov.Core() + ie_core = Core() ie_core.set_config({}, device_name="CPU") executable_network = ie_core.load_network(cnn_network, "CPU", {}) @@ -73,33 +73,33 @@ def test_ie_core_class(): def test_load_network(device): - ie = ov.Core() + ie = Core() net = ie.read_network(model=test_net_xml, weights=test_net_bin) exec_net = ie.load_network(net, device) - assert isinstance(exec_net, ov.ExecutableNetwork) + assert isinstance(exec_net, ExecutableNetwork) def test_read_network(): - ie_core = ov.Core() + ie_core = Core() net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) net = ie_core.read_network(model=test_net_xml) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) def test_read_network_from_blob(): - ie_core = ov.Core() + ie_core = Core() model = open(test_net_xml).read() - blob = ov.blob_from_file(test_net_bin) + blob = blob_from_file(test_net_bin) net = ie_core.read_network(model=model, blob=blob) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) def test_read_network_from_blob_valid(): - ie_core = ov.Core() + ie_core = Core() model = open(test_net_xml).read() - blob = ov.blob_from_file(test_net_bin) + blob = blob_from_file(test_net_bin) net = ie_core.read_network(model=model, blob=blob) ref_net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) assert net.name == ref_net.name @@ -113,41 +113,41 @@ def test_read_network_from_blob_valid(): def test_read_network_as_path(): - ie_core = ov.Core() + ie_core = Core() net = ie_core.read_network(model=Path(test_net_xml), 
weights=Path(test_net_bin)) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) net = ie_core.read_network(model=test_net_xml, weights=Path(test_net_bin)) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) net = ie_core.read_network(model=Path(test_net_xml)) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) def test_read_network_from_onnx(): - ie_core = ov.Core() + ie_core = Core() net = ie_core.read_network(model=test_net_onnx) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) def test_read_network_from_onnx_as_path(): - ie_core = ov.Core() + ie_core = Core() net = ie_core.read_network(model=Path(test_net_onnx)) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) def test_read_net_from_buffer(): - ie_core = ov.Core() + ie_core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: xml = f.read() net = ie_core.read_network(model=xml, weights=bin) - assert isinstance(net, ov.IENetwork) + assert isinstance(net, IENetwork) def test_net_from_buffer_valid(): - ie_core = ov.Core() + ie_core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: @@ -165,7 +165,7 @@ def test_net_from_buffer_valid(): def test_get_version(device): - ie = ov.Core() + ie = Core() version = ie.get_versions(device) assert isinstance(version, dict), "Returned version must be a dictionary" assert device in version, "{} plugin version wasn't found in versions" @@ -176,14 +176,14 @@ def test_get_version(device): def test_available_devices(device): - ie = ov.Core() + ie = Core() devices = ie.available_devices assert device in devices, f"Current device '{device}' is not listed in " \ f"available devices '{', '.join(devices)}'" def test_get_config(): - ie = ov.Core() + ie = Core() conf = ie.get_config("CPU", "CPU_BIND_THREAD") assert conf == "YES" @@ -191,7 +191,7 @@ def test_get_config(): 
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") def test_get_metric_list_of_str(): - ie = ov.Core() + ie = Core() param = ie.get_metric("CPU", "OPTIMIZATION_CAPABILITIES") assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \ f"metric must be a list but {type(param)} is returned" @@ -202,7 +202,7 @@ def test_get_metric_list_of_str(): @pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") def test_get_metric_tuple_of_two_ints(): - ie = ov.Core() + ie = Core() param = ie.get_metric("CPU", "RANGE_FOR_STREAMS") assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \ f"metric must be tuple but {type(param)} is returned" @@ -213,7 +213,7 @@ def test_get_metric_tuple_of_two_ints(): @pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") def test_get_metric_tuple_of_three_ints(): - ie = ov.Core() + ie = Core() param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS") assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \ f"metric must be tuple but {type(param)} is returned" @@ -224,14 +224,14 @@ def test_get_metric_tuple_of_three_ints(): @pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") def test_get_metric_str(): - ie = ov.Core() + ie = Core() param = ie.get_metric("CPU", "FULL_DEVICE_NAME") assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \ f"metric must be string but {type(param)} is returned" def test_query_network(device): - ie = ov.Core() + ie = Core() net = ie.read_network(model=test_net_xml, weights=test_net_bin) query_res = 
ie.query_network(network=net, device_name=device) func_net = net.get_function() @@ -244,17 +244,17 @@ def test_query_network(device): @pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test") def test_register_plugin(): - ie = ov.Core() + ie = Core() ie.register_plugin("MKLDNNPlugin", "BLA") net = ie.read_network(model=test_net_xml, weights=test_net_bin) exec_net = ie.load_network(net, "BLA") - assert isinstance(exec_net, ov.ExecutableNetwork), \ + assert isinstance(exec_net, ExecutableNetwork), \ "Cannot load the network to the registered plugin with name 'BLA'" @pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test") def test_register_plugins(): - ie = ov.Core() + ie = Core() if platform == "linux" or platform == "linux2": ie.register_plugins(plugins_xml) elif platform == "darwin": @@ -265,17 +265,17 @@ net = ie.read_network(model=test_net_xml, weights=test_net_bin) exec_net = ie.load_network(net, "CUSTOM") assert isinstance(exec_net, - ov.ExecutableNetwork), "Cannot load the network to " \ - "the registered plugin with name 'CUSTOM' " \ - "registred in the XML file" + ExecutableNetwork), "Cannot load the network to " \ + "the registered plugin with name 'CUSTOM' " \ + "registered in the XML file" def test_create_IENetwork_from_nGraph(): element_type = Type.f32 param = Parameter(element_type, Shape([1, 3, 22, 22])) - relu = ng.relu(param) + relu = ov.relu(param) func = Function([relu], [param], "test") - cnnNetwork = ov.IENetwork(func) + cnnNetwork = IENetwork(func) assert cnnNetwork is not None func2 = cnnNetwork.get_function() assert func2 is not None diff --git a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py index e9034c51598..c01b5952163 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py +++ 
b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py @@ -4,28 +4,28 @@ import numpy as np import pytest from openvino import Tensor -import ngraph as ng +import openvino as ov @pytest.mark.parametrize("ov_type, numpy_dtype", [ - (ng.impl.Type.f32, np.float32), - (ng.impl.Type.f64, np.float64), - (ng.impl.Type.f16, np.float16), - (ng.impl.Type.bf16, np.float16), - (ng.impl.Type.i8, np.int8), - (ng.impl.Type.u8, np.uint8), - (ng.impl.Type.i32, np.int32), - (ng.impl.Type.u32, np.uint32), - (ng.impl.Type.i16, np.int16), - (ng.impl.Type.u16, np.uint16), - (ng.impl.Type.i64, np.int64), - (ng.impl.Type.u64, np.uint64), - (ng.impl.Type.boolean, np.bool), - (ng.impl.Type.u1, np.uint8), + (ov.impl.Type.f32, np.float32), + (ov.impl.Type.f64, np.float64), + (ov.impl.Type.f16, np.float16), + (ov.impl.Type.bf16, np.float16), + (ov.impl.Type.i8, np.int8), + (ov.impl.Type.u8, np.uint8), + (ov.impl.Type.i32, np.int32), + (ov.impl.Type.u32, np.uint32), + (ov.impl.Type.i16, np.int16), + (ov.impl.Type.u16, np.uint16), + (ov.impl.Type.i64, np.int64), + (ov.impl.Type.u64, np.uint64), + (ov.impl.Type.boolean, np.bool), + (ov.impl.Type.u1, np.uint8), ]) def test_init_with_ngraph(ov_type, numpy_dtype): ov_tensors = [] - ov_tensors.append(Tensor(type=ov_type, shape=ng.impl.Shape([1, 3, 32, 32]))) + ov_tensors.append(Tensor(type=ov_type, shape=ov.impl.Shape([1, 3, 32, 32]))) ov_tensors.append(Tensor(type=ov_type, shape=[1, 3, 32, 32])) assert np.all([list(ov_tensor.shape) == [1, 3, 32, 32] for ov_tensor in ov_tensors]) assert np.all(ov_tensor.element_type == ov_type for ov_tensor in ov_tensors) @@ -34,22 +34,22 @@ def test_init_with_ngraph(ov_type, numpy_dtype): @pytest.mark.parametrize("ov_type, numpy_dtype", [ - (ng.impl.Type.f32, np.float32), - (ng.impl.Type.f64, np.float64), - (ng.impl.Type.f16, np.float16), - (ng.impl.Type.i8, np.int8), - (ng.impl.Type.u8, np.uint8), - (ng.impl.Type.i32, np.int32), - (ng.impl.Type.u32, np.uint32), - (ng.impl.Type.i16, np.int16), - 
(ng.impl.Type.u16, np.uint16), - (ng.impl.Type.i64, np.int64), - (ng.impl.Type.u64, np.uint64), - (ng.impl.Type.boolean, np.bool) + (ov.impl.Type.f32, np.float32), + (ov.impl.Type.f64, np.float64), + (ov.impl.Type.f16, np.float16), + (ov.impl.Type.i8, np.int8), + (ov.impl.Type.u8, np.uint8), + (ov.impl.Type.i32, np.int32), + (ov.impl.Type.u32, np.uint32), + (ov.impl.Type.i16, np.int16), + (ov.impl.Type.u16, np.uint16), + (ov.impl.Type.i64, np.int64), + (ov.impl.Type.u64, np.uint64), + (ov.impl.Type.boolean, np.bool) ]) def test_init_with_numpy(ov_type, numpy_dtype): shape = (1, 3, 127, 127) - ov_shape = ng.impl.Shape(shape) + ov_shape = ov.impl.Shape(shape) ones_arr = np.ones(shape, numpy_dtype) ones_ov_tensor = Tensor(array=ones_arr) ov_tensors = [] @@ -81,47 +81,47 @@ def test_init_with_roi_tensor(): @pytest.mark.parametrize("ov_type, numpy_dtype", [ - (ng.impl.Type.f32, np.float32), - (ng.impl.Type.f64, np.float64), - (ng.impl.Type.f16, np.float16), - (ng.impl.Type.bf16, np.float16), - (ng.impl.Type.i8, np.int8), - (ng.impl.Type.u8, np.uint8), - (ng.impl.Type.i32, np.int32), - (ng.impl.Type.u32, np.uint32), - (ng.impl.Type.i16, np.int16), - (ng.impl.Type.u16, np.uint16), - (ng.impl.Type.i64, np.int64), - (ng.impl.Type.u64, np.uint64), - (ng.impl.Type.boolean, np.bool), - (ng.impl.Type.u1, np.uint8), + (ov.impl.Type.f32, np.float32), + (ov.impl.Type.f64, np.float64), + (ov.impl.Type.f16, np.float16), + (ov.impl.Type.bf16, np.float16), + (ov.impl.Type.i8, np.int8), + (ov.impl.Type.u8, np.uint8), + (ov.impl.Type.i32, np.int32), + (ov.impl.Type.u32, np.uint32), + (ov.impl.Type.i16, np.int16), + (ov.impl.Type.u16, np.uint16), + (ov.impl.Type.i64, np.int64), + (ov.impl.Type.u64, np.uint64), + (ov.impl.Type.boolean, np.bool), + (ov.impl.Type.u1, np.uint8), ]) def test_write_to_buffer(ov_type, numpy_dtype): - ov_tensor = Tensor(ov_type, ng.impl.Shape([1, 3, 32, 32])) + ov_tensor = Tensor(ov_type, ov.impl.Shape([1, 3, 32, 32])) ones_arr = np.ones([1, 3, 32, 32], 
numpy_dtype) ov_tensor.data[:] = ones_arr assert np.array_equal(ov_tensor.data, ones_arr) @pytest.mark.parametrize("ov_type, numpy_dtype", [ - (ng.impl.Type.f32, np.float32), - (ng.impl.Type.f64, np.float64), - (ng.impl.Type.f16, np.float16), - (ng.impl.Type.bf16, np.float16), - (ng.impl.Type.i8, np.int8), - (ng.impl.Type.u8, np.uint8), - (ng.impl.Type.i32, np.int32), - (ng.impl.Type.u32, np.uint32), - (ng.impl.Type.i16, np.int16), - (ng.impl.Type.u16, np.uint16), - (ng.impl.Type.i64, np.int64), - (ng.impl.Type.u64, np.uint64), - (ng.impl.Type.boolean, np.bool), - (ng.impl.Type.u1, np.uint8), + (ov.impl.Type.f32, np.float32), + (ov.impl.Type.f64, np.float64), + (ov.impl.Type.f16, np.float16), + (ov.impl.Type.bf16, np.float16), + (ov.impl.Type.i8, np.int8), + (ov.impl.Type.u8, np.uint8), + (ov.impl.Type.i32, np.int32), + (ov.impl.Type.u32, np.uint32), + (ov.impl.Type.i16, np.int16), + (ov.impl.Type.u16, np.uint16), + (ov.impl.Type.i64, np.int64), + (ov.impl.Type.u64, np.uint64), + (ov.impl.Type.boolean, np.bool), + (ov.impl.Type.u1, np.uint8), ]) def test_set_shape(ov_type, numpy_dtype): - shape = ng.impl.Shape([1, 3, 32, 32]) - ref_shape = ng.impl.Shape([1, 3, 48, 48]) + shape = ov.impl.Shape([1, 3, 32, 32]) + ref_shape = ov.impl.Shape([1, 3, 48, 48]) ref_shape_np = [1, 3, 28, 28] ov_tensor = Tensor(ov_type, shape) ov_tensor.shape = ref_shape diff --git a/runtime/bindings/python/tests/test_ngraph/__init__.py b/runtime/bindings/python/tests/test_ngraph/__init__.py index b274453fb17..4417c13d097 100644 --- a/runtime/bindings/python/tests/test_ngraph/__init__.py +++ b/runtime/bindings/python/tests/test_ngraph/__init__.py @@ -3,4 +3,4 @@ # ngraph.dll directory path visibility is needed to use _pyngraph module # import below causes adding this path to os.environ["PATH"] -import ngraph # noqa: F401 'imported but unused' +import openvino # noqa: F401 'imported but unused' diff --git a/runtime/bindings/python/tests/test_ngraph/test_adaptive_pool.py 
b/runtime/bindings/python/tests/test_ngraph/test_adaptive_pool.py index d1b9159f84e..7763a94e2b0 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_adaptive_pool.py +++ b/runtime/bindings/python/tests/test_ngraph/test_adaptive_pool.py @@ -1,4 +1,7 @@ -import ngraph as ng +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import openvino.opset8 as ov import numpy as np from tests.runtime import get_runtime @@ -12,10 +15,10 @@ def test_adaptive_avg_pool(): -1, -2, 3, 4, -3, -4, 1, 2, 0, -4, -5, -2, -2, -3, 2, 3, 1, -5, 2, -4, -2], (2, 3, 7)) - input_tensor = ng.constant(input) - output_shape = ng.constant(np.array([3], dtype=np.int32)) + input_tensor = ov.constant(input) + output_shape = ov.constant(np.array([3], dtype=np.int32)) - adaptive_pool_node = ng.adaptive_avg_pool(input_tensor, output_shape) + adaptive_pool_node = ov.adaptive_avg_pool(input_tensor, output_shape) computation = runtime.computation(adaptive_pool_node) adaptive_pool_results = computation() expected_results = np.reshape([1.66666663, 0.66666669, -3., @@ -38,10 +41,10 @@ def test_adaptive_max_pool(): -1, -2, 3, 4, -3, -4, 1, 2, 0, -4, -5, -2, -2, -3, 2, 3, 1, -5, 2, -4, -2], (2, 3, 7)) - input_tensor = ng.constant(input) - output_shape = ng.constant(np.array([3], dtype=np.int32)) + input_tensor = ov.constant(input) + output_shape = ov.constant(np.array([3], dtype=np.int32)) - adaptive_pool_node = ng.adaptive_max_pool(input_tensor, output_shape) + adaptive_pool_node = ov.adaptive_max_pool(input_tensor, output_shape) computation = runtime.computation(adaptive_pool_node) adaptive_pool_results = computation() expected_results = np.reshape([4, 3, -2, diff --git a/runtime/bindings/python/tests/test_ngraph/test_basic.py b/runtime/bindings/python/tests/test_ngraph/test_basic.py index 2ec656a800b..379b31f42e0 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_basic.py +++ b/runtime/bindings/python/tests/test_ngraph/test_basic.py @@ -6,21 +6,24 @@ import 
json import numpy as np import pytest -from _pyngraph import VariantInt, VariantString +import openvino.opset8 as ov -import ngraph as ng -from ngraph.exceptions import UserInputError -from ngraph.impl import Function, PartialShape, Shape, Type -from ngraph.impl.op import Parameter +from openvino.pyopenvino import VariantInt, VariantString + +from openvino.exceptions import UserInputError +from openvino.impl import Function, PartialShape, Shape, Type +from openvino.impl.op import Parameter from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node +from tests import skip_issue_67415 + def test_ngraph_function_api(): shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=np.float32, name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") - parameter_c = ng.parameter(shape, dtype=np.float32, name="C") + parameter_a = ov.parameter(shape, dtype=np.float32, name="A") + parameter_b = ov.parameter(shape, dtype=np.float32, name="B") + parameter_c = ov.parameter(shape, dtype=np.float32, name="C") model = (parameter_a + parameter_b) * parameter_c function = Function(model, [parameter_a, parameter_b, parameter_c], "TestFunction") @@ -44,7 +47,7 @@ def test_ngraph_function_api(): "dtype", [ np.float32, - np.float64, + pytest.param(np.float64, marks=skip_issue_67415), np.int8, np.int16, np.int32, @@ -59,9 +62,9 @@ def test_simple_computation_on_ndarrays(dtype): runtime = get_runtime() shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=dtype, name="A") - parameter_b = ng.parameter(shape, dtype=dtype, name="B") - parameter_c = ng.parameter(shape, dtype=dtype, name="C") + parameter_a = ov.parameter(shape, dtype=dtype, name="A") + parameter_b = ov.parameter(shape, dtype=dtype, name="B") + parameter_c = ov.parameter(shape, dtype=dtype, name="C") model = (parameter_a + parameter_b) * parameter_c computation = runtime.computation(model, parameter_a, parameter_b, parameter_c) @@ -81,9 +84,9 @@ def 
test_simple_computation_on_ndarrays(dtype): def test_serialization(): dtype = np.float32 shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=dtype, name="A") - parameter_b = ng.parameter(shape, dtype=dtype, name="B") - parameter_c = ng.parameter(shape, dtype=dtype, name="C") + parameter_a = ov.parameter(shape, dtype=dtype, name="A") + parameter_b = ov.parameter(shape, dtype=dtype, name="B") + parameter_c = ov.parameter(shape, dtype=dtype, name="C") model = (parameter_a + parameter_b) * parameter_c runtime = get_runtime() @@ -103,7 +106,7 @@ def test_broadcast_1(): new_shape = [3, 3] expected = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] - result = run_op_node([input_data], ng.broadcast, new_shape) + result = run_op_node([input_data], ov.broadcast, new_shape) assert np.allclose(result, expected) @@ -111,7 +114,7 @@ def test_broadcast_2(): input_data = np.arange(4, dtype=np.int32) new_shape = [3, 4, 2, 4] expected = np.broadcast_to(input_data, new_shape) - result = run_op_node([input_data], ng.broadcast, new_shape) + result = run_op_node([input_data], ov.broadcast, new_shape) assert np.allclose(result, expected) @@ -121,7 +124,7 @@ def test_broadcast_3(): axis_mapping = [0] expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]] - result = run_op_node([input_data], ng.broadcast, new_shape, axis_mapping, "EXPLICIT") + result = run_op_node([input_data], ov.broadcast, new_shape, axis_mapping, "EXPLICIT") assert np.allclose(result, expected) @@ -131,7 +134,7 @@ def test_broadcast_3(): ) def test_convert_to_bool(destination_type, input_data): expected = np.array(input_data, dtype=bool) - result = run_op_node([input_data], ng.convert, destination_type) + result = run_op_node([input_data], ov.convert, destination_type) assert np.allclose(result, expected) assert np.array(result).dtype == bool @@ -149,7 +152,7 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type) np.random.seed(133391) input_data = np.random.randint(*rand_range, size=(2, 2), dtype=in_dtype) 
expected = np.array(input_data, dtype=expected_type) - result = run_op_node([input_data], ng.convert, destination_type) + result = run_op_node([input_data], ov.convert, destination_type) assert np.allclose(result, expected) assert np.array(result).dtype == expected_type @@ -171,7 +174,7 @@ def test_convert_to_int(destination_type, expected_type): np.random.seed(133391) input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(np.float32) expected = np.array(input_data, dtype=expected_type) - result = run_op_node([input_data], ng.convert, destination_type) + result = run_op_node([input_data], ov.convert, destination_type) assert np.allclose(result, expected) assert np.array(result).dtype == expected_type @@ -193,14 +196,14 @@ def test_convert_to_uint(destination_type, expected_type): np.random.seed(133391) input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(np.float32) expected = np.array(input_data, dtype=expected_type) - result = run_op_node([input_data], ng.convert, destination_type) + result = run_op_node([input_data], ov.convert, destination_type) assert np.allclose(result, expected) assert np.array(result).dtype == expected_type def test_bad_data_shape(): - A = ng.parameter(shape=[2, 2], name="A", dtype=np.float32) - B = ng.parameter(shape=[2, 2], name="B") + A = ov.parameter(shape=[2, 2], name="A", dtype=np.float32) + B = ov.parameter(shape=[2, 2], name="B") model = A + B runtime = get_runtime() computation = runtime.computation(model, A, B) @@ -213,7 +216,7 @@ def test_bad_data_shape(): def test_constant_get_data_bool(): input_data = np.array([True, False, False, True]) - node = ng.constant(input_data, dtype=np.bool) + node = ov.constant(input_data, dtype=np.bool) retrieved_data = node.get_data() assert np.allclose(input_data, retrieved_data) @@ -225,7 +228,7 @@ def test_constant_get_data_floating_point(data_type): min_value = -1.0e20 max_value = 1.0e20 input_data = min_value + input_data * max_value * data_type(2) - node = 
ng.constant(input_data, dtype=data_type) + node = ov.constant(input_data, dtype=data_type) retrieved_data = node.get_data() assert np.allclose(input_data, retrieved_data) @@ -236,7 +239,7 @@ def test_constant_get_data_signed_integer(data_type): input_data = np.random.randint( np.iinfo(data_type).min, np.iinfo(data_type).max, size=[2, 3, 4], dtype=data_type ) - node = ng.constant(input_data, dtype=data_type) + node = ov.constant(input_data, dtype=data_type) retrieved_data = node.get_data() assert np.allclose(input_data, retrieved_data) @@ -248,7 +251,7 @@ def test_constant_get_data_unsigned_integer(data_type): input_data = ( np.iinfo(data_type).min + input_data * np.iinfo(data_type).max + input_data * np.iinfo(data_type).max ) - node = ng.constant(input_data, dtype=data_type) + node = ov.constant(input_data, dtype=data_type) retrieved_data = node.get_data() assert np.allclose(input_data, retrieved_data) @@ -260,10 +263,10 @@ def test_set_argument(): data2 = np.array([4, 5, 6]) data3 = np.array([7, 8, 9]) - node1 = ng.constant(data1, dtype=np.float32) - node2 = ng.constant(data2, dtype=np.float32) - node3 = ng.constant(data3, dtype=np.float32) - node_add = ng.add(node1, node2) + node1 = ov.constant(data1, dtype=np.float32) + node2 = ov.constant(data2, dtype=np.float32) + node3 = ov.constant(data3, dtype=np.float32) + node_add = ov.add(node1, node2) # Original arguments computation = runtime.computation(node_add) @@ -292,13 +295,13 @@ def test_set_argument(): def test_result(): - node = np.array([[11, 10], [1, 8], [3, 4]]) - result = run_op_node([node], ng.result) + node = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.float32) + result = run_op_node([node], ov.result) assert np.allclose(result, node) def test_node_friendly_name(): - dummy_node = ng.parameter(shape=[1], name="dummy_name") + dummy_node = ov.parameter(shape=[1], name="dummy_name") assert(dummy_node.friendly_name == "dummy_name") @@ -316,9 +319,9 @@ def test_node_output(): splits = 3 expected_shape = 
len(input_array) // splits - input_tensor = ng.constant(input_array, dtype=np.int32) - axis = ng.constant(0, dtype=np.int64) - split_node = ng.split(input_tensor, axis, splits) + input_tensor = ov.constant(input_array, dtype=np.int32) + axis = ov.constant(0, dtype=np.int64) + split_node = ov.split(input_tensor, axis, splits) split_node_outputs = split_node.outputs() @@ -346,8 +349,8 @@ def test_node_output(): def test_node_input(): shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=np.float32, name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") + parameter_a = ov.parameter(shape, dtype=np.float32, name="A") + parameter_b = ov.parameter(shape, dtype=np.float32, name="B") model = parameter_a + parameter_b @@ -375,8 +378,8 @@ def test_node_input(): def test_node_target_inputs_soruce_output(): shape = [2, 2] - parameter_a = ng.parameter(shape, dtype=np.float32, name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") + parameter_a = ov.parameter(shape, dtype=np.float32, name="A") + parameter_b = ov.parameter(shape, dtype=np.float32, name="B") model = parameter_a + parameter_b @@ -415,7 +418,7 @@ def test_runtime_info(): test_shape = PartialShape([1, 1, 1, 1]) test_type = Type.f32 test_param = Parameter(test_type, test_shape) - relu_node = ng.relu(test_param) + relu_node = ov.relu(test_param) runtime_info = relu_node.get_rt_info() runtime_info["affinity"] = "test_affinity" relu_node.set_friendly_name("testReLU") @@ -424,17 +427,17 @@ def test_runtime_info(): assert runtime_info_after["affinity"] == "test_affinity" -def test_mutiple_outputs(): +def test_multiple_outputs(): input_shape = [4, 4] - input_data = np.arange(-8, 8).reshape(input_shape) + input_data = np.arange(-8, 8).reshape(input_shape).astype(np.float32) expected_output = np.split(input_data, 2, axis=1)[0] expected_output[expected_output < 0] = 0 - test_param = ng.parameter(input_shape, dtype=np.float32, name="A") - split = ng.split(test_param, axis=1, 
num_splits=2) + test_param = ov.parameter(input_shape, dtype=np.float32, name="A") + split = ov.split(test_param, axis=1, num_splits=2) split_first_output = split.output(0) - relu = ng.relu(split_first_output) + relu = ov.relu(split_first_output) runtime = get_runtime() computation = runtime.computation(relu, test_param) @@ -444,11 +447,11 @@ def test_mutiple_outputs(): def test_sink_function_ctor(): - input_data = ng.parameter([2, 2], name="input_data", dtype=np.float32) - rv = ng.read_value(input_data, "var_id_667") - add = ng.add(rv, input_data, name="MemoryAdd") - node = ng.assign(add, "var_id_667") - res = ng.result(add, "res") + input_data = ov.parameter([2, 2], name="input_data", dtype=np.float32) + rv = ov.read_value(input_data, "var_id_667") + add = ov.add(rv, input_data, name="MemoryAdd") + node = ov.assign(add, "var_id_667") + res = ov.result(add, "res") function = Function(results=[res], sinks=[node], parameters=[input_data], name="TestFunction") ordered_ops = function.get_ordered_ops() @@ -466,7 +469,7 @@ def test_sink_function_ctor(): def test_node_version(): - node = ng.add([1], [2]) + node = ov.add([1], [2]) assert node.get_version() == 1 assert node.version == 1 diff --git a/runtime/bindings/python/tests/test_ngraph/test_convolution.py b/runtime/bindings/python/tests/test_ngraph/test_convolution.py index 6e6500ee63a..a6656559227 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_convolution.py +++ b/runtime/bindings/python/tests/test_ngraph/test_convolution.py @@ -3,7 +3,7 @@ import numpy as np -import ngraph as ng +import openvino.opset8 as ov from tests.runtime import get_runtime from tests.test_ngraph.test_ops import convolution2d from tests.test_ngraph.util import run_op_node @@ -38,7 +38,7 @@ def test_convolution_2d(): dilations = np.array([1, 1]) # convolution with padding=1 should produce 9 x 9 output: - result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + result = 
run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations) assert np.allclose( result, @@ -67,7 +67,7 @@ def test_convolution_2d(): pads_begin = np.array([0, 0]) pads_end = np.array([0, 0]) dilations = np.array([1, 1]) - result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + result = run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations) assert np.allclose( result, np.array( @@ -94,7 +94,7 @@ def test_convolution_2d(): dilations = np.array([1, 1]) # convolution with strides=2 should produce 4 x 4 output: - result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + result = run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations) assert np.allclose( result, @@ -119,7 +119,7 @@ def test_convolution_2d(): dilations = np.array([2, 2]) # convolution with dilation=2 should produce 5 x 5 output: - result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + result = run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations) assert np.allclose( result, np.array( @@ -147,11 +147,11 @@ def test_convolution_backprop_data(): data_shape = [1, 1, 7, 7] strides = [1, 1] - data_node = ng.parameter(shape=data_shape) - filter_node = ng.parameter(shape=filter_shape) - output_shape_node = ng.constant(np.array(output_spatial_shape, dtype=np.int64)) + data_node = ov.parameter(shape=data_shape) + filter_node = ov.parameter(shape=filter_shape) + output_shape_node = ov.constant(np.array(output_spatial_shape, dtype=np.int64)) - deconvolution = ng.convolution_backprop_data(data_node, filter_node, strides, output_shape_node) + deconvolution = ov.convolution_backprop_data(data_node, filter_node, strides, output_shape_node) input_data = np.array( [ @@ -212,7 +212,7 @@ def test_convolution_v1(): pads_end = 
np.array([0, 0]) dilations = np.array([1, 1]) - result = run_op_node([input_tensor, filters], ng.convolution, strides, pads_begin, pads_end, dilations) + result = run_op_node([input_tensor, filters], ov.convolution, strides, pads_begin, pads_end, dilations) expected = convolution2d(input_tensor[0, 0], filters[0, 0]).reshape(1, 1, 14, 14) diff --git a/runtime/bindings/python/tests/test_ngraph/test_core.py b/runtime/bindings/python/tests/test_ngraph/test_core.py index 9b7a6336508..be1ed89a945 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_core.py +++ b/runtime/bindings/python/tests/test_ngraph/test_core.py @@ -3,8 +3,8 @@ import numpy as np -import ngraph as ng -from ngraph.impl import Dimension, Function, PartialShape, Shape +import openvino.opset8 as ov +from openvino.impl import Dimension, Function, PartialShape, Shape def test_dimension(): @@ -224,8 +224,8 @@ def test_partial_shape_equals(): def test_repr_dynamic_shape(): shape = PartialShape([-1, 2]) - parameter_a = ng.parameter(shape, dtype=np.float32, name="A") - parameter_b = ng.parameter(shape, dtype=np.float32, name="B") + parameter_a = ov.parameter(shape, dtype=np.float32, name="A") + parameter_b = ov.parameter(shape, dtype=np.float32, name="B") model = parameter_a + parameter_b function = Function(model, [parameter_a, parameter_b], "simple_dyn_shapes_graph") @@ -238,12 +238,12 @@ def test_repr_dynamic_shape(): def test_discrete_type_info(): data_shape = [6, 12, 10, 24] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + data_parameter = ov.parameter(data_shape, name="Data", dtype=np.float32) k = np.int32(3) axis = np.int32(1) - n1 = ng.topk(data_parameter, k, axis, "max", "value") - n2 = ng.topk(data_parameter, k, axis, "max", "value") - n3 = ng.sin(0.2) + n1 = ov.topk(data_parameter, k, axis, "max", "value") + n2 = ov.topk(data_parameter, k, axis, "max", "value") + n3 = ov.sin(0.2) assert n1.type_info.name == "TopK" assert n3.type_info.name == "Sin" diff --git 
a/runtime/bindings/python/tests/test_ngraph/test_create_op.py b/runtime/bindings/python/tests/test_ngraph/test_create_op.py index 673d7a2ebf1..4672341b360 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_create_op.py +++ b/runtime/bindings/python/tests/test_ngraph/test_create_op.py @@ -3,12 +3,12 @@ import numpy as np import pytest -from _pyngraph import PartialShape, Dimension +from openvino.pyopenvino import PartialShape, Dimension -import ngraph as ng -import ngraph.opset1 as ng_opset1 -import ngraph.opset5 as ng_opset5 -from ngraph.impl import Type +import openvino.opset8 as ov +import openvino.opset1 as ov_opset1 +import openvino.opset5 as ov_opset5 +from openvino.impl import Type np_types = [np.float32, np.int32] integral_np_types = [ @@ -25,10 +25,10 @@ integral_np_types = [ @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_adaptive_avg_pool(dtype): - data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype) - output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) + data = ov.parameter([2, 24, 34, 62], name="input", dtype=dtype) + output_shape = ov.constant(np.array([16, 16], dtype=np.int32)) - node = ng.adaptive_avg_pool(data, output_shape) + node = ov.adaptive_avg_pool(data, output_shape) assert node.get_type_name() == "AdaptiveAvgPool" assert node.get_output_size() == 1 @@ -38,10 +38,10 @@ def test_adaptive_avg_pool(dtype): @pytest.mark.parametrize("dtype", [np.float32, np.float64]) @pytest.mark.parametrize("ind_type", ["i32", "i64"]) def test_adaptive_max_pool(dtype, ind_type): - data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype) - output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) + data = ov.parameter([2, 24, 34, 62], name="input", dtype=dtype) + output_shape = ov.constant(np.array([16, 16], dtype=np.int32)) - node = ng.adaptive_max_pool(data, output_shape, ind_type) + node = ov.adaptive_max_pool(data, output_shape, ind_type) assert node.get_type_name() == "AdaptiveMaxPool" assert 
node.get_output_size() == 2 @@ -63,10 +63,10 @@ def test_binary_convolution(dtype): input1_shape = [1, 1, 3, 3] expected_shape = [1, 1, 7, 7] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) - node = ng.binary_convolution( + node = ov.binary_convolution( parameter_input0, parameter_input1, strides, pads_begin, pads_end, dilations, mode, pad_value, ) @@ -81,10 +81,10 @@ def test_ctc_greedy_decoder(dtype): input1_shape = [20, 8] expected_shape = [8, 20, 1, 1] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) - node = ng.ctc_greedy_decoder(parameter_input0, parameter_input1) + node = ov.ctc_greedy_decoder(parameter_input0, parameter_input1) assert node.get_type_name() == "CTCGreedyDecoder" assert node.get_output_size() == 1 @@ -116,13 +116,13 @@ def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_r input2_shape = [1] expected_shape = [8, 20] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=fp_dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=int_dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=fp_dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=int_dtype) parameter_input2 = None if blank_index: - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=int_dtype) + parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=int_dtype) - node = ng.ctc_greedy_decoder_seq_len( + node = ov.ctc_greedy_decoder_seq_len( 
parameter_input0, parameter_input1, parameter_input2, merge_repeated, int_ci, int_sl ) @@ -143,11 +143,11 @@ def test_deformable_convolution_opset1(dtype): input2_shape = [1, 1, 3, 3] expected_shape = [1, 1, 7, 7] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype) - node = ng_opset1.deformable_convolution( + node = ov_opset1.deformable_convolution( parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, ) @@ -168,11 +168,11 @@ def test_deformable_convolution(dtype): input2_shape = [1, 1, 3, 3] expected_shape = [1, 1, 7, 7] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype) - node = ng.deformable_convolution( + node = ov.deformable_convolution( parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, ) @@ -194,12 +194,12 @@ def test_deformable_convolution_mask(dtype): input3_shape = [1, 9, 7, 7] expected_shape = [1, 1, 7, 7] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) - parameter_input3 = 
ng.parameter(input3_shape, name="Input3", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype) + parameter_input3 = ov.parameter(input3_shape, name="Input3", dtype=dtype) - node = ng.deformable_convolution( + node = ov.deformable_convolution( parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, parameter_input3 ) @@ -225,11 +225,11 @@ def test_deformable_psroi_pooling(dtype): input2_shape = [300, 2, 7, 7] expected_shape = [300, 8, 7, 7] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype) - node = ng.deformable_psroi_pooling( + node = ov.deformable_psroi_pooling( parameter_input0, parameter_input1, output_dim, @@ -254,10 +254,10 @@ def test_floor_mod(dtype): input1_shape = [7, 1, 5] expected_shape = [8, 7, 6, 5] - parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) - node = ng.floor_mod(parameter_input0, parameter_input1) + node = ov.floor_mod(parameter_input0, parameter_input1) assert node.get_type_name() == "FloorMod" assert node.get_output_size() == 1 @@ -272,12 +272,12 @@ def test_gather_tree(dtype): input3_shape = [] expected_shape = [100, 1, 10] - parameter_input0 = ng.parameter(input0_shape, 
name="Input0", dtype=dtype) - parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) - parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) - parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype) + parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype) + parameter_input3 = ov.parameter(input3_shape, name="Input3", dtype=dtype) - node = ng.gather_tree(parameter_input0, parameter_input1, parameter_input2, parameter_input3) + node = ov.gather_tree(parameter_input0, parameter_input1, parameter_input2, parameter_input3) assert node.get_type_name() == "GatherTree" assert node.get_output_size() == 1 @@ -297,16 +297,16 @@ def test_lstm_cell_operator(dtype): R_shape = [4 * hidden_size, hidden_size] B_shape = [4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) expected_shape = [1, 128] - node_default = ng.lstm_cell( + node_default = ov.lstm_cell( parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, ) @@ -320,7 +320,7 @@ def test_lstm_cell_operator(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 0.5 
- node_param = ng.lstm_cell( + node_param = ov.lstm_cell( parameter_X, parameter_H_t, parameter_C_t, @@ -353,16 +353,16 @@ def test_lstm_cell_operator_opset1(dtype): R_shape = [4 * hidden_size, hidden_size] B_shape = [4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) expected_shape = [1, 128] - node_default = ng_opset1.lstm_cell( + node_default = ov_opset1.lstm_cell( parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, ) @@ -376,7 +376,7 @@ def test_lstm_cell_operator_opset1(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 0.5 - node_param = ng_opset1.lstm_cell( + node_param = ov_opset1.lstm_cell( parameter_X, parameter_H_t, parameter_C_t, @@ -412,16 +412,16 @@ def test_lstm_sequence_operator_bidirectional_opset1(dtype): R_shape = [num_directions, 4 * hidden_size, hidden_size] B_shape = [num_directions, 4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - 
parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "BIDIRECTIONAL" - node = ng_opset1.lstm_sequence( + node = ov_opset1.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -441,7 +441,7 @@ def test_lstm_sequence_operator_bidirectional_opset1(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 1.22 - node_param = ng_opset1.lstm_sequence( + node_param = ov_opset1.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -477,17 +477,17 @@ def test_lstm_sequence_operator_reverse_opset1(dtype): R_shape = [num_directions, 4 * hidden_size, hidden_size] B_shape = [num_directions, 4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", 
dtype=dtype) direction = "REVERSE" - node_default = ng_opset1.lstm_sequence( + node_default = ov_opset1.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -507,7 +507,7 @@ def test_lstm_sequence_operator_reverse_opset1(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 1.22 - node_param = ng_opset1.lstm_sequence( + node_param = ov_opset1.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -543,17 +543,17 @@ def test_lstm_sequence_operator_forward_opset1(dtype): R_shape = [num_directions, 4 * hidden_size, hidden_size] B_shape = [num_directions, 4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "forward" - node_default = ng_opset1.lstm_sequence( + node_default = ov_opset1.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -573,7 +573,7 @@ def test_lstm_sequence_operator_forward_opset1(dtype): activation_beta = [1.0] clip = 0.5 - node = ng_opset1.lstm_sequence( + node = ov_opset1.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -604,15 +604,15 @@ def test_gru_cell_operator(): R_shape = [3 * hidden_size, hidden_size] B_shape = [3 * hidden_size] - 
parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + parameter_X = ov.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ov.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ov.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ov.parameter(B_shape, name="B", dtype=np.float32) expected_shape = [1, 128] - node_default = ng.gru_cell(parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size) + node_default = ov.gru_cell(parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size) assert node_default.get_type_name() == "GRUCell" assert node_default.get_output_size() == 1 @@ -626,9 +626,9 @@ def test_gru_cell_operator(): # If *linear_before_reset* is set True, then B tensor shape must be [4 * hidden_size] B_shape = [4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + parameter_B = ov.parameter(B_shape, name="B", dtype=np.float32) - node_param = ng.gru_cell( + node_param = ov.gru_cell( parameter_X, parameter_H_t, parameter_W, @@ -662,16 +662,16 @@ def test_gru_sequence(): R_shape = [num_directions, 3 * hidden_size, hidden_size] B_shape = [num_directions, 3 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + parameter_X = ov.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ov.parameter(H_t_shape, 
name="H_t", dtype=np.float32) + parameter_W = ov.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ov.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ov.parameter(B_shape, name="B", dtype=np.float32) expected_shape_y = [batch_size, num_directions, seq_len, hidden_size] expected_shape_h = [batch_size, num_directions, hidden_size] - node_default = ng.gru_sequence( + node_default = ov.gru_sequence( parameter_X, parameter_H_t, seq_lengths, @@ -695,9 +695,9 @@ def test_gru_sequence(): # If *linear_before_reset* is set True, then B tensor shape must be [4 * hidden_size] B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + parameter_B = ov.parameter(B_shape, name="B", dtype=np.float32) - node_param = ng.gru_sequence( + node_param = ov.gru_sequence( parameter_X, parameter_H_t, seq_lengths, @@ -734,16 +734,16 @@ def test_rnn_sequence(): R_shape = [num_directions, hidden_size, hidden_size] B_shape = [num_directions, hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + parameter_X = ov.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ov.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ov.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ov.parameter(B_shape, name="B", dtype=np.float32) expected_shape_y = [batch_size, num_directions, seq_len, hidden_size] expected_shape_h = [batch_size, num_directions, hidden_size] - node_default = ng.rnn_sequence( + node_default = ov.rnn_sequence( parameter_X, parameter_H_t, seq_lengths, @@ -764,7 +764,7 @@ def test_rnn_sequence(): activations_beta = 
[1.0] clip = 0.5 - node_param = ng.rnn_sequence( + node_param = ov.rnn_sequence( parameter_X, parameter_H_t, seq_lengths, @@ -786,7 +786,7 @@ def test_rnn_sequence(): def test_loop(): - from ngraph.utils.tensor_iterator_types import ( + from openvino.utils.tensor_iterator_types import ( GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, @@ -795,30 +795,30 @@ def test_loop(): TensorIteratorConcatOutputDesc, ) - condition = ng.constant(True, dtype=np.bool) - trip_count = ng.constant(16, dtype=np.int32) + condition = ov.constant(True, dtype=np.bool) + trip_count = ov.constant(16, dtype=np.int32) # Body parameters - body_timestep = ng.parameter([], np.int32, "timestep") - body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in") - body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma") - body_const_one = ng.parameter([], np.int32, "body_const_one") + body_timestep = ov.parameter([], np.int32, "timestep") + body_data_in = ov.parameter([1, 2, 2], np.float32, "body_in") + body_prev_cma = ov.parameter([2, 2], np.float32, "body_prev_cma") + body_const_one = ov.parameter([], np.int32, "body_const_one") # CMA = cumulative moving average - prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma) - curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0])) - elem_cnt = ng.add(body_const_one, body_timestep) - curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32")) - cma_hist = ng.unsqueeze(curr_cma, [0]) + prev_cum_sum = ov.multiply(ov.convert(body_timestep, "f32"), body_prev_cma) + curr_cum_sum = ov.add(prev_cum_sum, ov.squeeze(body_data_in, [0])) + elem_cnt = ov.add(body_const_one, body_timestep) + curr_cma = ov.divide(curr_cum_sum, ov.convert(elem_cnt, "f32")) + cma_hist = ov.unsqueeze(curr_cma, [0]) # TI inputs - data = ng.parameter([16, 2, 2], np.float32, "data") + data = ov.parameter([16, 2, 2], np.float32, "data") # Iterations count - zero = ng.constant(0, dtype=np.int32) - one = ng.constant(1, 
dtype=np.int32) - initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) - iter_cnt = ng.range(zero, np.int32(16), np.int32(1)) + zero = ov.constant(0, dtype=np.int32) + one = ov.constant(1, dtype=np.int32) + initial_cma = ov.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) + iter_cnt = ov.range(zero, np.int32(16), np.int32(1)) ti_inputs = [iter_cnt, data, initial_cma, one] - body_const_condition = ng.constant(True, dtype=np.bool) + body_const_condition = ov.constant(True, dtype=np.bool) graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist, body_const_condition]) @@ -848,7 +848,7 @@ def test_loop(): TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0), ] - node = ng.loop( + node = ov.loop( trip_count, condition, ti_inputs, @@ -871,9 +871,9 @@ def test_loop(): def test_roi_pooling(): - inputs = ng.parameter([2, 3, 4, 5], dtype=np.float32) - coords = ng.parameter([150, 5], dtype=np.float32) - node = ng.roi_pooling(inputs, coords, [6, 6], 0.0625, "Max") + inputs = ov.parameter([2, 3, 4, 5], dtype=np.float32) + coords = ov.parameter([150, 5], dtype=np.float32) + node = ov.roi_pooling(inputs, coords, [6, 6], 0.0625, "Max") assert node.get_type_name() == "ROIPooling" assert node.get_output_size() == [6, 6] @@ -882,9 +882,9 @@ def test_roi_pooling(): def test_psroi_pooling(): - inputs = ng.parameter([1, 72, 4, 5], dtype=np.float32) - coords = ng.parameter([150, 5], dtype=np.float32) - node = ng.psroi_pooling(inputs, coords, 2, 6, 0.0625, 0, 0, "average") + inputs = ov.parameter([1, 72, 4, 5], dtype=np.float32) + coords = ov.parameter([150, 5], dtype=np.float32) + node = ov.psroi_pooling(inputs, coords, 2, 6, 0.0625, 0, 0, "average") assert node.get_type_name() == "PSROIPooling" assert node.get_output_size() == 1 @@ -893,10 +893,10 @@ def test_psroi_pooling(): def test_convert_like(): - parameter_data = ng.parameter([1, 2, 3, 4], name="data", dtype=np.float32) - like = ng.constant(1, 
dtype=np.int8) + parameter_data = ov.parameter([1, 2, 3, 4], name="data", dtype=np.float32) + like = ov.constant(1, dtype=np.int8) - node = ng.convert_like(parameter_data, like) + node = ov.convert_like(parameter_data, like) assert node.get_type_name() == "ConvertLike" assert node.get_output_size() == 1 @@ -905,10 +905,10 @@ def test_convert_like(): def test_bucketize(): - data = ng.parameter([4, 3, 2, 1], name="data", dtype=np.float32) - buckets = ng.parameter([5], name="buckets", dtype=np.int64) + data = ov.parameter([4, 3, 2, 1], name="data", dtype=np.float32) + buckets = ov.parameter([5], name="buckets", dtype=np.int64) - node = ng.bucketize(data, buckets, "i32") + node = ov.bucketize(data, buckets, "i32") assert node.get_type_name() == "Bucketize" assert node.get_output_size() == 1 @@ -917,7 +917,7 @@ def test_bucketize(): def test_region_yolo(): - data = ng.parameter([1, 125, 13, 13], name="input", dtype=np.float32) + data = ov.parameter([1, 125, 13, 13], name="input", dtype=np.float32) num_coords = 4 num_classes = 80 num_regions = 1 @@ -926,7 +926,7 @@ def test_region_yolo(): end_axis = 3 do_softmax = False - node = ng.region_yolo(data, num_coords, num_classes, num_regions, do_softmax, mask, axis, end_axis) + node = ov.region_yolo(data, num_coords, num_classes, num_regions, do_softmax, mask, axis, end_axis) assert node.get_type_name() == "RegionYolo" assert node.get_output_size() == 1 @@ -935,10 +935,10 @@ def test_region_yolo(): def test_reorg_yolo(): - data = ng.parameter([2, 24, 34, 62], name="input", dtype=np.int32) + data = ov.parameter([2, 24, 34, 62], name="input", dtype=np.int32) stride = [2] - node = ng.reorg_yolo(data, stride) + node = ov.reorg_yolo(data, stride) assert node.get_type_name() == "ReorgYolo" assert node.get_output_size() == 1 @@ -947,12 +947,12 @@ def test_reorg_yolo(): def test_embedding_bag_offsets_sum_1(): - emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32) - indices = ng.parameter([4], name="indices", 
dtype=np.int64) - offsets = ng.parameter([3], name="offsets", dtype=np.int64) - default_index = ng.parameter([], name="default_index", dtype=np.int64) + emb_table = ov.parameter([5, 2], name="emb_table", dtype=np.float32) + indices = ov.parameter([4], name="indices", dtype=np.int64) + offsets = ov.parameter([3], name="offsets", dtype=np.int64) + default_index = ov.parameter([], name="default_index", dtype=np.int64) - node = ng.embedding_bag_offsets_sum(emb_table, indices, offsets, default_index) + node = ov.embedding_bag_offsets_sum(emb_table, indices, offsets, default_index) assert node.get_type_name() == "EmbeddingBagOffsetsSum" assert node.get_output_size() == 1 @@ -961,14 +961,14 @@ def test_embedding_bag_offsets_sum_1(): def test_embedding_segments_sum_all_inputs(): - emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32) - indices = ng.parameter([4], name="indices", dtype=np.int64) - segment_ids = ng.parameter([4], name="segment_ids", dtype=np.int64) - num_segments = ng.parameter([], name="num_segments", dtype=np.int64) - default_index = ng.parameter([], name="default_index", dtype=np.int64) - per_sample_weights = ng.parameter([4], name="per_sample_weights", dtype=np.float32) + emb_table = ov.parameter([5, 2], name="emb_table", dtype=np.float32) + indices = ov.parameter([4], name="indices", dtype=np.int64) + segment_ids = ov.parameter([4], name="segment_ids", dtype=np.int64) + num_segments = ov.parameter([], name="num_segments", dtype=np.int64) + default_index = ov.parameter([], name="default_index", dtype=np.int64) + per_sample_weights = ov.parameter([4], name="per_sample_weights", dtype=np.float32) - node = ng.embedding_segments_sum( + node = ov.embedding_segments_sum( emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights ) @@ -979,13 +979,13 @@ def test_embedding_segments_sum_all_inputs(): def test_embedding_segments_sum_with_some_opt_inputs(): - emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32) - 
indices = ng.parameter([4], name="indices", dtype=np.int64) - segment_ids = ng.parameter([4], name="segment_ids", dtype=np.int64) - num_segments = ng.parameter([], name="num_segments", dtype=np.int64) + emb_table = ov.parameter([5, 2], name="emb_table", dtype=np.float32) + indices = ov.parameter([4], name="indices", dtype=np.int64) + segment_ids = ov.parameter([4], name="segment_ids", dtype=np.int64) + num_segments = ov.parameter([], name="num_segments", dtype=np.int64) # only 1 out of 3 optional inputs - node = ng.embedding_segments_sum(emb_table, indices, segment_ids, num_segments) + node = ov.embedding_segments_sum(emb_table, indices, segment_ids, num_segments) assert node.get_type_name() == "EmbeddingSegmentsSum" assert node.get_output_size() == 1 @@ -994,12 +994,12 @@ def test_embedding_segments_sum_with_some_opt_inputs(): def test_embedding_bag_packed_sum(): - emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32) - indices = ng.parameter([3, 3], name="indices", dtype=np.int64) - per_sample_weights = ng.parameter([3, 3], name="per_sample_weights", dtype=np.float32) + emb_table = ov.parameter([5, 2], name="emb_table", dtype=np.float32) + indices = ov.parameter([3, 3], name="indices", dtype=np.int64) + per_sample_weights = ov.parameter([3, 3], name="per_sample_weights", dtype=np.float32) # only 1 out of 3 optional inputs - node = ng.embedding_bag_packed_sum(emb_table, indices, per_sample_weights) + node = ov.embedding_bag_packed_sum(emb_table, indices, per_sample_weights) assert node.get_type_name() == "EmbeddingBagPackedSum" assert node.get_output_size() == 1 @@ -1017,9 +1017,9 @@ def test_interpolate(dtype): "pads_begin": np.array([2, 2], dtype=dtype), } - image_node = ng.parameter(image_shape, dtype, name="Image") + image_node = ov.parameter(image_shape, dtype, name="Image") - node = ng.interpolate(image_node, output_shape, attributes) + node = ov.interpolate(image_node, output_shape, attributes) expected_shape = [1, 3, 64, 64] assert 
node.get_type_name() == "Interpolate" @@ -1051,9 +1051,9 @@ def test_prior_box(int_dtype, fp_dtype): "scale_all_sizes": False } - layer_shape = ng.constant(np.array([32, 32], dtype=int_dtype), int_dtype) + layer_shape = ov.constant(np.array([32, 32], dtype=int_dtype), int_dtype) - node = ng.prior_box(layer_shape, image_shape, attributes) + node = ov.prior_box(layer_shape, image_shape, attributes) assert node.get_type_name() == "PriorBox" assert node.get_output_size() == 1 @@ -1083,9 +1083,9 @@ def test_prior_box_clustered(int_dtype, fp_dtype): "height": np.array([1.0, 2.0, 1.0], dtype=fp_dtype), } - output_size = ng.constant(np.array([19, 19], dtype=int_dtype), int_dtype) + output_size = ov.constant(np.array([19, 19], dtype=int_dtype), int_dtype) - node = ng.prior_box_clustered(output_size, image_size, attributes) + node = ov.prior_box_clustered(output_size, image_size, attributes) assert node.get_type_name() == "PriorBoxClustered" assert node.get_output_size() == 1 @@ -1114,13 +1114,13 @@ def test_detection_output(int_dtype, fp_dtype): "nms_threshold": fp_dtype(0.645), } - box_logits = ng.parameter([4, 8], fp_dtype, "box_logits") - class_preds = ng.parameter([4, 170], fp_dtype, "class_preds") - proposals = ng.parameter([4, 2, 10], fp_dtype, "proposals") - aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds") - aux_box_preds = ng.parameter([4, 8], fp_dtype, "aux_box_preds") + box_logits = ov.parameter([4, 8], fp_dtype, "box_logits") + class_preds = ov.parameter([4, 170], fp_dtype, "class_preds") + proposals = ov.parameter([4, 2, 10], fp_dtype, "proposals") + aux_class_preds = ov.parameter([4, 4], fp_dtype, "aux_class_preds") + aux_box_preds = ov.parameter([4, 8], fp_dtype, "aux_box_preds") - node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) + node = ov.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) assert node.get_type_name() == "DetectionOutput" 
assert node.get_output_size() == 1 @@ -1151,10 +1151,10 @@ def test_proposal(int_dtype, fp_dtype): } batch_size = 7 - class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") - bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") - image_shape = ng.parameter([3], fp_dtype, "image_shape") - node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes) + class_probs = ov.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") + bbox_deltas = ov.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") + image_shape = ov.parameter([3], fp_dtype, "image_shape") + node = ov.proposal(class_probs, bbox_deltas, image_shape, attributes) assert node.get_type_name() == "Proposal" assert node.get_output_size() == 2 @@ -1162,7 +1162,7 @@ def test_proposal(int_dtype, fp_dtype): def test_tensor_iterator(): - from ngraph.utils.tensor_iterator_types import ( + from openvino.utils.tensor_iterator_types import ( GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, @@ -1172,25 +1172,25 @@ def test_tensor_iterator(): ) # Body parameters - body_timestep = ng.parameter([], np.int32, "timestep") - body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in") - body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma") - body_const_one = ng.parameter([], np.int32, "body_const_one") + body_timestep = ov.parameter([], np.int32, "timestep") + body_data_in = ov.parameter([1, 2, 2], np.float32, "body_in") + body_prev_cma = ov.parameter([2, 2], np.float32, "body_prev_cma") + body_const_one = ov.parameter([], np.int32, "body_const_one") # CMA = cumulative moving average - prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma) - curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0])) - elem_cnt = ng.add(body_const_one, body_timestep) - curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32")) - cma_hist = ng.unsqueeze(curr_cma, [0]) + prev_cum_sum = 
ov.multiply(ov.convert(body_timestep, "f32"), body_prev_cma) + curr_cum_sum = ov.add(prev_cum_sum, ov.squeeze(body_data_in, [0])) + elem_cnt = ov.add(body_const_one, body_timestep) + curr_cma = ov.divide(curr_cum_sum, ov.convert(elem_cnt, "f32")) + cma_hist = ov.unsqueeze(curr_cma, [0]) # TI inputs - data = ng.parameter([16, 2, 2], np.float32, "data") + data = ov.parameter([16, 2, 2], np.float32, "data") # Iterations count - zero = ng.constant(0, dtype=np.int32) - one = ng.constant(1, dtype=np.int32) - initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) - iter_cnt = ng.range(zero, np.int32(16), np.int32(1)) + zero = ov.constant(0, dtype=np.int32) + one = ov.constant(1, dtype=np.int32) + initial_cma = ov.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) + iter_cnt = ov.range(zero, np.int32(16), np.int32(1)) ti_inputs = [iter_cnt, data, initial_cma, one] graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist]) @@ -1220,7 +1220,7 @@ def test_tensor_iterator(): TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0), ] - node = ng.tensor_iterator( + node = ov.tensor_iterator( ti_inputs, graph_body, ti_slice_input_desc, @@ -1239,9 +1239,9 @@ def test_tensor_iterator(): def test_read_value_opset5(): - init_value = ng_opset5.parameter([2, 2], name="init_value", dtype=np.int32) + init_value = ov_opset5.parameter([2, 2], name="init_value", dtype=np.int32) - node = ng_opset5.read_value(init_value, "var_id_667") + node = ov_opset5.read_value(init_value, "var_id_667") assert node.get_type_name() == "ReadValue" assert node.get_output_size() == 1 @@ -1250,9 +1250,9 @@ def test_read_value_opset5(): def test_assign_opset5(): - input_data = ng_opset5.parameter([5, 7], name="input_data", dtype=np.int32) - rv = ng_opset5.read_value(input_data, "var_id_667") - node = ng_opset5.assign(rv, "var_id_667") + input_data = ov_opset5.parameter([5, 7], name="input_data", dtype=np.int32) + rv = 
ov_opset5.read_value(input_data, "var_id_667") + node = ov_opset5.assign(rv, "var_id_667") assert node.get_type_name() == "Assign" assert node.get_output_size() == 1 @@ -1261,9 +1261,9 @@ def test_assign_opset5(): def test_read_value(): - init_value = ng.parameter([2, 2], name="init_value", dtype=np.int32) + init_value = ov.parameter([2, 2], name="init_value", dtype=np.int32) - node = ng.read_value(init_value, "var_id_667") + node = ov.read_value(init_value, "var_id_667") assert node.get_type_name() == "ReadValue" assert node.get_output_size() == 1 @@ -1272,9 +1272,9 @@ def test_read_value(): def test_assign(): - input_data = ng.parameter([5, 7], name="input_data", dtype=np.int32) - rv = ng.read_value(input_data, "var_id_667") - node = ng.assign(rv, "var_id_667") + input_data = ov.parameter([5, 7], name="input_data", dtype=np.int32) + rv = ov.read_value(input_data, "var_id_667") + node = ov.assign(rv, "var_id_667") assert node.get_type_name() == "Assign" assert node.get_output_size() == 1 @@ -1283,12 +1283,12 @@ def test_assign(): def test_extract_image_patches(): - image = ng.parameter([64, 3, 10, 10], name="image", dtype=np.int32) + image = ov.parameter([64, 3, 10, 10], name="image", dtype=np.int32) sizes = [3, 3] strides = [5, 5] rates = [1, 1] padding = "VALID" - node = ng.extract_image_patches(image, sizes, strides, rates, padding) + node = ov.extract_image_patches(image, sizes, strides, rates, padding) assert node.get_type_name() == "ExtractImagePatches" assert node.get_output_size() == 1 @@ -1312,16 +1312,16 @@ def test_lstm_sequence_operator_bidirectional(dtype): R_shape = [num_directions, 4 * hidden_size, hidden_size] B_shape = [num_directions, 4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = 
ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "BIDIRECTIONAL" - node = ng.lstm_sequence( + node = ov.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -1341,7 +1341,7 @@ def test_lstm_sequence_operator_bidirectional(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 1.22 - node_param = ng.lstm_sequence( + node_param = ov.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -1377,17 +1377,17 @@ def test_lstm_sequence_operator_reverse(dtype): R_shape = [num_directions, 4 * hidden_size, hidden_size] B_shape = [num_directions, 4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, 
name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "REVERSE" - node_default = ng.lstm_sequence( + node_default = ov.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -1407,7 +1407,7 @@ def test_lstm_sequence_operator_reverse(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 1.22 - node_param = ng.lstm_sequence( + node_param = ov.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -1443,17 +1443,17 @@ def test_lstm_sequence_operator_forward(dtype): R_shape = [num_directions, 4 * hidden_size, hidden_size] B_shape = [num_directions, 4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ov.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "forward" - node_default = ng.lstm_sequence( + node_default = ov.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -1473,7 +1473,7 @@ def test_lstm_sequence_operator_forward(dtype): activation_beta = [1.0] clip = 0.5 - node = ng.lstm_sequence( + node = ov.lstm_sequence( parameter_X, parameter_H_t, parameter_C_t, @@ -1508,15 +1508,15 @@ def test_gru_sequence_operator_bidirectional(dtype): R_shape = [num_directions, 3 * hidden_size, 
hidden_size] B_shape = [num_directions, 3 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "BIDIRECTIONAL" - node = ng.gru_sequence( + node = ov.gru_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1536,9 +1536,9 @@ def test_gru_sequence_operator_bidirectional(dtype): clip = 1.22 linear_before_reset = True B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) - node_param = ng.gru_sequence( + node_param = ov.gru_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1573,16 +1573,16 @@ def test_gru_sequence_operator_reverse(dtype): R_shape = [num_directions, 3 * hidden_size, hidden_size] B_shape = [num_directions, 3 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + 
parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "REVERSE" - node_default = ng.gru_sequence( + node_default = ov.gru_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1602,9 +1602,9 @@ def test_gru_sequence_operator_reverse(dtype): clip = 1.22 linear_before_reset = True B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) - node_param = ng.gru_sequence( + node_param = ov.gru_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1639,16 +1639,16 @@ def test_gru_sequence_operator_forward(dtype): R_shape = [num_directions, 3 * hidden_size, hidden_size] B_shape = [num_directions, 3 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "forward" - node_default = ng.gru_sequence( + node_default = ov.gru_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1668,9 +1668,9 @@ def 
test_gru_sequence_operator_forward(dtype): clip = 0.5 linear_before_reset = True B_shape = [num_directions, 4 * hidden_size] - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) - node = ng.gru_sequence( + node = ov.gru_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1705,15 +1705,15 @@ def test_rnn_sequence_operator_bidirectional(dtype): R_shape = [num_directions, hidden_size, hidden_size] B_shape = [num_directions, hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "BIDIRECTIONAL" - node = ng.rnn_sequence( + node = ov.rnn_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1732,7 +1732,7 @@ def test_rnn_sequence_operator_bidirectional(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 1.22 - node_param = ng.rnn_sequence( + node_param = ov.rnn_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1766,16 +1766,16 @@ def test_rnn_sequence_operator_reverse(dtype): R_shape = [num_directions, hidden_size, hidden_size] B_shape = [num_directions, hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = 
ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "REVERSE" - node_default = ng.rnn_sequence( + node_default = ov.rnn_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1794,7 +1794,7 @@ def test_rnn_sequence_operator_reverse(dtype): activation_beta = [3.0, 2.0, 1.0] clip = 1.22 - node_param = ng.rnn_sequence( + node_param = ov.rnn_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1828,16 +1828,16 @@ def test_rnn_sequence_operator_forward(dtype): R_shape = [num_directions, hidden_size, hidden_size] B_shape = [num_directions, hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) - parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) - parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) - parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) - parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + parameter_X = ov.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_seq_len = ov.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ov.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ov.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ov.parameter(B_shape, name="B", dtype=dtype) direction = "forward" - node_default = 
ng.rnn_sequence( + node_default = ov.rnn_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1856,7 +1856,7 @@ def test_rnn_sequence_operator_forward(dtype): activation_beta = [1.0] clip = 0.5 - node = ng.rnn_sequence( + node = ov.rnn_sequence( parameter_X, parameter_H_t, parameter_seq_len, @@ -1880,13 +1880,13 @@ def test_multiclass_nms(): 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32") boxes_data = boxes_data.reshape([1, 6, 4]) - box = ng.constant(boxes_data, dtype=np.float) + box = ov.constant(boxes_data, dtype=np.float) scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32") scores_data = scores_data.reshape([1, 2, 6]) - score = ng.constant(scores_data, dtype=np.float) + score = ov.constant(scores_data, dtype=np.float) - nms_node = ng.multiclass_nms(box, score, output_type="i32", nms_top_k=3, + nms_node = ov.multiclass_nms(box, score, output_type="i32", nms_top_k=3, iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid", nms_eta=1.0) @@ -1905,13 +1905,13 @@ def test_matrix_nms(): 0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0, 0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32") boxes_data = boxes_data.reshape([1, 6, 4]) - box = ng.constant(boxes_data, dtype=np.float) + box = ov.constant(boxes_data, dtype=np.float) scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3, 0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32") scores_data = scores_data.reshape([1, 2, 6]) - score = ng.constant(scores_data, dtype=np.float) + score = ov.constant(scores_data, dtype=np.float) - nms_node = ng.matrix_nms(box, score, output_type="i32", nms_top_k=3, + nms_node = ov.matrix_nms(box, score, output_type="i32", nms_top_k=3, score_threshold=0.0, sort_result_type="score", background_class=0, decay_function="linear", gaussian_sigma=2.0, post_threshold=0.0) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ctc_loss.py 
b/runtime/bindings/python/tests/test_ngraph/test_ctc_loss.py index 7c977eacb75..4780c66d99d 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ctc_loss.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ctc_loss.py @@ -3,23 +3,23 @@ import numpy as np -import ngraph as ng -from ngraph.impl import Type +import openvino.opset8 as ov +from openvino.impl import Type def test_ctc_loss_props(): ind_dtype = np.int32 float_dtype = np.float32 - logits = ng.parameter([2, 100, 80], dtype=float_dtype, name="logits") - logit_length = ng.parameter([2], dtype=ind_dtype, name="logit_length") - labels = ng.parameter([2, 100], dtype=ind_dtype, name="labels") - label_length = ng.parameter([2], dtype=ind_dtype, name="label_length") - blank_index = ng.parameter([], dtype=ind_dtype, name="blank_index") + logits = ov.parameter([2, 100, 80], dtype=float_dtype, name="logits") + logit_length = ov.parameter([2], dtype=ind_dtype, name="logit_length") + labels = ov.parameter([2, 100], dtype=ind_dtype, name="labels") + label_length = ov.parameter([2], dtype=ind_dtype, name="label_length") + blank_index = ov.parameter([], dtype=ind_dtype, name="blank_index") preprocess_collapse_repeated = False ctc_merge_repeated = True unique = False - node = ng.ctc_loss(logits, logit_length, labels, label_length, blank_index, + node = ov.ctc_loss(logits, logit_length, labels, label_length, blank_index, preprocess_collapse_repeated, ctc_merge_repeated, unique) assert node.get_type_name() == "CTCLoss" assert node.get_output_size() == 1 diff --git a/runtime/bindings/python/tests/test_ngraph/test_data_movement.py b/runtime/bindings/python/tests/test_ngraph/test_data_movement.py index f9693147e97..0baac90c011 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_data_movement.py +++ b/runtime/bindings/python/tests/test_ngraph/test_data_movement.py @@ -3,11 +3,13 @@ import numpy as np -import ngraph as ng -from ngraph.impl import Type, Shape +import openvino.opset8 as ov +from openvino.impl 
import Type, Shape from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node +from tests import xfail_issue_67415 + def test_reverse_sequence(): input_data = np.array( @@ -67,9 +69,9 @@ def test_reverse_sequence(): batch_axis = 2 sequence_axis = 1 - input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) - seq_lengths_param = ng.parameter(seq_lengths.shape, name="sequence lengths", dtype=np.int32) - model = ng.reverse_sequence(input_param, seq_lengths_param, batch_axis, sequence_axis) + input_param = ov.parameter(input_data.shape, name="input", dtype=np.int32) + seq_lengths_param = ov.parameter(seq_lengths.shape, name="sequence lengths", dtype=np.int32) + model = ov.reverse_sequence(input_param, seq_lengths_param, batch_axis, sequence_axis) runtime = get_runtime() computation = runtime.computation(model, input_param, seq_lengths_param) @@ -131,12 +133,12 @@ def test_reverse_sequence(): def test_pad_edge(): - input_data = np.arange(1, 13).reshape([3, 4]) + input_data = np.arange(1, 13).reshape([3, 4]).astype(np.int32) pads_begin = np.array([0, 1], dtype=np.int32) pads_end = np.array([2, 3], dtype=np.int32) - input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) - model = ng.pad(input_param, pads_begin, pads_end, "edge") + input_param = ov.parameter(input_data.shape, name="input", dtype=np.int32) + model = ov.pad(input_param, pads_begin, pads_end, "edge") runtime = get_runtime() computation = runtime.computation(model, input_param) @@ -155,12 +157,12 @@ def test_pad_edge(): def test_pad_constant(): - input_data = np.arange(1, 13).reshape([3, 4]) + input_data = np.arange(1, 13).reshape([3, 4]).astype(np.int32) pads_begin = np.array([0, 1], dtype=np.int32) pads_end = np.array([2, 3], dtype=np.int32) - input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) - model = ng.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int32)) + input_param = 
ov.parameter(input_data.shape, name="input", dtype=np.int32) + model = ov.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int32)) runtime = get_runtime() computation = runtime.computation(model, input_param) @@ -178,25 +180,26 @@ def test_pad_constant(): assert np.allclose(result, expected) +@xfail_issue_67415 def test_select(): cond = np.array([[False, False], [True, False], [True, True]]) then_node = np.array([[-1, 0], [1, 2], [3, 4]], dtype=np.int32) else_node = np.array([[11, 10], [9, 8], [7, 6]], dtype=np.int32) excepted = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.int32) - result = run_op_node([cond, then_node, else_node], ng.select) + result = run_op_node([cond, then_node, else_node], ov.select) assert np.allclose(result, excepted) def test_gather_nd(): indices_type = np.int32 data_dtype = np.float32 - data = ng.parameter([2, 10, 80, 30, 50], dtype=data_dtype, name="data") - indices = ng.parameter([2, 10, 30, 40, 2], dtype=indices_type, name="indices") + data = ov.parameter([2, 10, 80, 30, 50], dtype=data_dtype, name="data") + indices = ov.parameter([2, 10, 30, 40, 2], dtype=indices_type, name="indices") batch_dims = 2 expected_shape = [20, 30, 40, 50] - node = ng.gather_nd(data, indices, batch_dims) + node = ov.gather_nd(data, indices, batch_dims) assert node.get_type_name() == "GatherND" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == expected_shape @@ -206,12 +209,12 @@ def test_gather_nd(): def test_gather_elements(): indices_type = np.int32 data_dtype = np.float32 - data = ng.parameter(Shape([2, 5]), dtype=data_dtype, name="data") - indices = ng.parameter(Shape([2, 100]), dtype=indices_type, name="indices") + data = ov.parameter(Shape([2, 5]), dtype=data_dtype, name="data") + indices = ov.parameter(Shape([2, 100]), dtype=indices_type, name="indices") axis = 1 expected_shape = [2, 100] - node = ng.gather_elements(data, indices, axis) + node = ov.gather_elements(data, indices, axis) 
assert node.get_type_name() == "GatherElements" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == expected_shape diff --git a/runtime/bindings/python/tests/test_ngraph/test_dft.py b/runtime/bindings/python/tests/test_ngraph/test_dft.py index 27645f02958..37002a8bc5e 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_dft.py +++ b/runtime/bindings/python/tests/test_ngraph/test_dft.py @@ -1,4 +1,7 @@ -import ngraph as ng +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import openvino.opset8 as ov import numpy as np from tests.runtime import get_runtime @@ -11,10 +14,10 @@ def build_fft_input_data(): def test_dft_1d(): runtime = get_runtime() input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([2], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([2], dtype=np.int64)) - dft_node = ng.dft(input_tensor, input_axes) + dft_node = ov.dft(input_tensor, input_axes) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), @@ -26,10 +29,10 @@ def test_dft_1d(): def test_dft_2d(): runtime = get_runtime() input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([1, 2], dtype=np.int64)) - dft_node = ng.dft(input_tensor, input_axes) + dft_node = ov.dft(input_tensor, input_axes) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), @@ -41,10 +44,10 @@ def test_dft_2d(): def test_dft_3d(): runtime = get_runtime() input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) + 
input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64)) - dft_node = ng.dft(input_tensor, input_axes) + dft_node = ov.dft(input_tensor, input_axes) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), @@ -56,11 +59,11 @@ def test_dft_3d(): def test_dft_1d_signal_size(): runtime = get_runtime() input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([-2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([20], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([-2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([20], dtype=np.int64)) - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + dft_node = ov.dft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20, @@ -72,11 +75,11 @@ def test_dft_1d_signal_size(): def test_dft_2d_signal_size_1(): runtime = get_runtime() input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([0, 2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64)) - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + dft_node = ov.dft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], @@ -88,11 +91,11 @@ def test_dft_2d_signal_size_1(): def test_dft_2d_signal_size_2(): runtime = get_runtime() 
input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([1, 2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64)) - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + dft_node = ov.dft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], @@ -104,11 +107,11 @@ def test_dft_2d_signal_size_2(): def test_dft_3d_signal_size(): runtime = get_runtime() input_data = build_fft_input_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5, 16], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([4, 5, 16], dtype=np.int64)) - dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + dft_node = ov.dft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), diff --git a/runtime/bindings/python/tests/test_ngraph/test_dyn_attributes.py b/runtime/bindings/python/tests/test_ngraph/test_dyn_attributes.py index a945ec91ef0..7081811a250 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_dyn_attributes.py +++ b/runtime/bindings/python/tests/test_ngraph/test_dyn_attributes.py @@ -4,7 +4,7 @@ import numpy as np import pytest -import ngraph as ng +import openvino.opset8 as ov @pytest.fixture() @@ -21,16 +21,16 @@ def _proposal_node(): } batch_size = 7 - class_probs = ng.parameter([batch_size, 
12, 34, 62], np.float64, "class_probs") - bbox_deltas = ng.parameter([batch_size, 24, 34, 62], np.float64, "bbox_deltas") - image_shape = ng.parameter([3], np.float64, "image_shape") - return ng.proposal(class_probs, bbox_deltas, image_shape, attributes) + class_probs = ov.parameter([batch_size, 12, 34, 62], np.float64, "class_probs") + bbox_deltas = ov.parameter([batch_size, 24, 34, 62], np.float64, "bbox_deltas") + image_shape = ov.parameter([3], np.float64, "image_shape") + return ov.proposal(class_probs, bbox_deltas, image_shape, attributes) def test_dynamic_attributes_softmax(): axis = 2 - data = ng.parameter([1, 2, 3, 4], np.float32, "data_in") - node = ng.softmax(data, axis) + data = ov.parameter([1, 2, 3, 4], np.float32, "data_in") + node = ov.softmax(data, axis) assert node.get_axis() == axis node.set_axis(3) @@ -72,13 +72,13 @@ def test_dynamic_get_attribute_value(int_dtype, fp_dtype): "objectness_score": fp_dtype(0.77), } - box_logits = ng.parameter([4, 680], fp_dtype, "box_logits") - class_preds = ng.parameter([4, 170], fp_dtype, "class_preds") - proposals = ng.parameter([4, 1, 8], fp_dtype, "proposals") - aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds") - aux_box_preds = ng.parameter([4, 680], fp_dtype, "aux_box_preds") + box_logits = ov.parameter([4, 680], fp_dtype, "box_logits") + class_preds = ov.parameter([4, 170], fp_dtype, "class_preds") + proposals = ov.parameter([4, 1, 8], fp_dtype, "proposals") + aux_class_preds = ov.parameter([4, 4], fp_dtype, "aux_class_preds") + aux_box_preds = ov.parameter([4, 680], fp_dtype, "aux_box_preds") - node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) + node = ov.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) assert node.get_num_classes() == int_dtype(85) assert node.get_background_label_id() == int_dtype(13) @@ -123,10 +123,10 @@ def test_dynamic_set_attribute_value(int_dtype, fp_dtype): } 
batch_size = 7 - class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") - bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") - image_shape = ng.parameter([3], fp_dtype, "image_shape") - node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes) + class_probs = ov.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") + bbox_deltas = ov.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") + image_shape = ov.parameter([3], fp_dtype, "image_shape") + node = ov.proposal(class_probs, bbox_deltas, image_shape, attributes) node.set_base_size(int_dtype(15)) node.set_pre_nms_topn(int_dtype(7)) @@ -193,11 +193,11 @@ def test_dynamic_attributes_simple(): R_shape = [3 * hidden_size, hidden_size] B_shape = [4 * hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + parameter_X = ov.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ov.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ov.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ov.parameter(B_shape, name="B", dtype=np.float32) activations = ["tanh", "relu"] activations_alpha = [1.0, 2.0] @@ -205,7 +205,7 @@ def test_dynamic_attributes_simple(): clip = 0.5 linear_before_reset = True - node = ng.gru_cell( + node = ov.gru_cell( parameter_X, parameter_H_t, parameter_W, diff --git a/runtime/bindings/python/tests/test_ngraph/test_einsum.py b/runtime/bindings/python/tests/test_ngraph/test_einsum.py index fb7581d9160..ce6c7f37595 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_einsum.py +++ 
b/runtime/bindings/python/tests/test_ngraph/test_einsum.py @@ -1,8 +1,8 @@ -import ngraph as ng +import openvino.opset8 as ov import numpy as np import pytest -from ngraph.utils.types import get_element_type +from openvino.utils.types import get_element_type from tests import xfail_issue_58033 from tests.runtime import get_runtime @@ -33,10 +33,10 @@ def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype, for i in range(num_inputs): input_i = np.random.random_integers(10, size=input_shapes[i]).astype(data_type) np_inputs.append(input_i) - ng_inputs.append(ng.parameter(input_i.shape, dtype=data_type)) + ng_inputs.append(ov.parameter(input_i.shape, dtype=data_type)) expected_result = np.einsum(equation, *np_inputs) - einsum_model = ng.einsum(ng_inputs, equation) + einsum_model = ov.einsum(ng_inputs, equation) # check the output shape and type assert einsum_model.get_type_name() == "Einsum" diff --git a/runtime/bindings/python/tests/test_ngraph/test_gather.py b/runtime/bindings/python/tests/test_ngraph/test_gather.py index 93714d6dcb4..ffa4c20b1d3 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_gather.py +++ b/runtime/bindings/python/tests/test_ngraph/test_gather.py @@ -1,7 +1,7 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import ngraph as ng +import openvino.opset8 as ov import numpy as np from tests import xfail_issue_54630 @@ -19,7 +19,7 @@ def test_gather(): (3, 1, 2) ) - result = run_op_node([input_data], ng.gather, input_indices, input_axis) + result = run_op_node([input_data], ov.gather, input_indices, input_axis) assert np.allclose(result, expected) @@ -34,7 +34,7 @@ def test_gather_with_scalar_axis(): (3, 1, 2) ) - result = run_op_node([input_data], ng.gather, input_indices, input_axis) + result = run_op_node([input_data], ov.gather, input_indices, input_axis) assert np.allclose(result, expected) @@ -51,7 +51,7 @@ def test_gather_batch_dims_1(): expected = np.array([[1, 1, 5], [10, 6, 
6]], np.float32) - result = run_op_node([input_data], ng.gather, input_indices, input_axis, batch_dims) + result = run_op_node([input_data], ov.gather, input_indices, input_axis, batch_dims) assert np.allclose(result, expected) @@ -67,7 +67,7 @@ def test_gather_negative_indices(): (3, 1, 2) ) - result = run_op_node([input_data], ng.gather, input_indices, input_axis) + result = run_op_node([input_data], ov.gather, input_indices, input_axis) assert np.allclose(result, expected) @@ -85,5 +85,5 @@ def test_gather_batch_dims_1_negative_indices(): expected = np.array([[1, 2, 4], [9, 6, 6]], np.float32) - result = run_op_node([input_data], ng.gather, input_indices, input_axis, batch_dims) + result = run_op_node([input_data], ov.gather, input_indices, input_axis, batch_dims) assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests/test_ngraph/test_idft.py b/runtime/bindings/python/tests/test_ngraph/test_idft.py index 472e8dfb4e5..13ab67fbba0 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_idft.py +++ b/runtime/bindings/python/tests/test_ngraph/test_idft.py @@ -1,4 +1,4 @@ -import ngraph as ng +import openvino.opset8 as ov import numpy as np from tests.runtime import get_runtime @@ -14,10 +14,10 @@ def test_idft_1d(): complex_input_data = np.fft.fft(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1), axis=2).astype(np.complex64) input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([2], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([2], dtype=np.int64)) - dft_node = ng.idft(input_tensor, input_axes) + dft_node = ov.idft(input_tensor, input_axes) computation = runtime.computation(dft_node) dft_results = computation() assert np.allclose(dft_results, expected_results, atol=0.000002) @@ -29,10 +29,10 @@ def test_idft_2d(): complex_input_data = 
np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1), axes=[1, 2]).astype(np.complex64) input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([1, 2], dtype=np.int64)) - dft_node = ng.idft(input_tensor, input_axes) + dft_node = ov.idft(input_tensor, input_axes) computation = runtime.computation(dft_node) dft_results = computation() assert np.allclose(dft_results, expected_results, atol=0.000002) @@ -44,10 +44,10 @@ def test_idft_3d(): complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1), axes=[0, 1, 2]).astype(np.complex64) input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64)) - dft_node = ng.idft(input_tensor, input_axes) + dft_node = ov.idft(input_tensor, input_axes) computation = runtime.computation(dft_node) dft_results = computation() assert np.allclose(dft_results, expected_results, atol=0.000003) @@ -56,11 +56,11 @@ def test_idft_3d(): def test_idft_1d_signal_size(): runtime = get_runtime() input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([-2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([20], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([-2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([20], dtype=np.int64)) - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + dft_node = ov.idft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = 
np.fft.ifft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20, @@ -72,11 +72,11 @@ def test_idft_1d_signal_size(): def test_idft_2d_signal_size_1(): runtime = get_runtime() input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([0, 2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64)) - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + dft_node = ov.idft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], @@ -88,11 +88,11 @@ def test_idft_2d_signal_size_1(): def test_idft_2d_signal_size_2(): runtime = get_runtime() input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = ov.constant(np.array([1, 2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64)) - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + dft_node = ov.idft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], @@ -104,11 +104,11 @@ def test_idft_2d_signal_size_2(): def test_idft_3d_signal_size(): runtime = get_runtime() input_data = get_data() - input_tensor = ng.constant(input_data) - input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) - input_signal_size = ng.constant(np.array([4, 5, 16], dtype=np.int64)) + input_tensor = ov.constant(input_data) + input_axes = 
ov.constant(np.array([0, 1, 2], dtype=np.int64)) + input_signal_size = ov.constant(np.array([4, 5, 16], dtype=np.int64)) - dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + dft_node = ov.idft(input_tensor, input_axes, input_signal_size) computation = runtime.computation(dft_node) dft_results = computation() np_results = np.fft.ifftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), diff --git a/runtime/bindings/python/tests/test_ngraph/test_input_validation.py b/runtime/bindings/python/tests/test_ngraph/test_input_validation.py index ca31e065b8f..0fa9091104f 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_input_validation.py +++ b/runtime/bindings/python/tests/test_ngraph/test_input_validation.py @@ -4,8 +4,8 @@ import numpy as np import pytest -from ngraph.exceptions import UserInputError -from ngraph.utils.input_validation import ( +from openvino.exceptions import UserInputError +from openvino.utils.input_validation import ( _check_value, check_valid_attribute, check_valid_attributes, diff --git a/runtime/bindings/python/tests/test_ngraph/test_log_softmax.py b/runtime/bindings/python/tests/test_ngraph/test_log_softmax.py index 2506c591c05..ba2b325f1f1 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_log_softmax.py +++ b/runtime/bindings/python/tests/test_ngraph/test_log_softmax.py @@ -2,15 +2,15 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -import ngraph as ng -from ngraph.impl import Shape, Type +import openvino.opset8 as ov +from openvino.impl import Shape, Type def test_log_softmax(): float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - node = ng.log_softmax(data, 1) + node = ov.log_softmax(data, 1) assert node.get_type_name() == "LogSoftmax" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 10] diff --git 
a/runtime/bindings/python/tests/test_ngraph/test_manager.py b/runtime/bindings/python/tests/test_ngraph/test_manager.py index 2c0c79e3f5a..2618a313842 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_manager.py +++ b/runtime/bindings/python/tests/test_ngraph/test_manager.py @@ -8,15 +8,15 @@ import json import numpy as np import pytest -import ngraph as ng -from ngraph.impl import Function, PartialShape, Shape -from ngraph.impl.passes import Manager +import openvino.opset8 as ov +from openvino.impl import Function, PartialShape, Shape +from openvino.impl.passes import Manager from tests.test_ngraph.util import count_ops_of_type def test_constant_folding(): - node_constant = ng.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32)) - node_ceil = ng.ceiling(node_constant) + node_constant = ov.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32)) + node_ceil = ov.ceiling(node_constant) func = Function(node_ceil, [], "TestFunction") assert count_ops_of_type(func, node_ceil) == 1 diff --git a/runtime/bindings/python/tests/test_ngraph/test_node_factory.py b/runtime/bindings/python/tests/test_ngraph/test_node_factory.py index 14fe3d62d04..c3cbfeb19ea 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_node_factory.py +++ b/runtime/bindings/python/tests/test_ngraph/test_node_factory.py @@ -2,17 +2,17 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -import ngraph as ng -from ngraph.exceptions import UserInputError -from ngraph.utils.node_factory import NodeFactory +import openvino.opset8 as ov +from openvino.exceptions import UserInputError +from openvino.utils.node_factory import NodeFactory from _pyngraph import NodeFactory as _NodeFactory def test_node_factory_add(): shape = [2, 2] dtype = np.int8 - parameter_a = ng.parameter(shape, dtype=dtype, name="A") - parameter_b = ng.parameter(shape, dtype=dtype, name="B") + parameter_a = ov.parameter(shape, dtype=dtype, name="A") + parameter_b = 
ov.parameter(shape, dtype=dtype, name="B") factory = _NodeFactory("opset1") arguments = NodeFactory._arguments_as_outputs([parameter_a, parameter_b]) @@ -26,10 +26,10 @@ def test_node_factory_add(): def test_node_factory_wrapper_add(): shape = [2, 2] dtype = np.int8 - parameter_a = ng.parameter(shape, dtype=dtype, name="A") - parameter_b = ng.parameter(shape, dtype=dtype, name="B") + parameter_a = ov.parameter(shape, dtype=dtype, name="A") + parameter_b = ov.parameter(shape, dtype=dtype, name="B") - node = ng.add(parameter_a, parameter_b, name="TestNode") + node = ov.add(parameter_a, parameter_b, name="TestNode") assert node.get_type_name() == "Add" assert node.get_output_size() == 1 @@ -39,8 +39,8 @@ def test_node_factory_wrapper_add(): def test_node_factory_topk(): dtype = np.int32 - data = ng.parameter([2, 10], dtype=dtype, name="A") - k = ng.constant(3, dtype=dtype, name="B") + data = ov.parameter([2, 10], dtype=dtype, name="A") + k = ov.constant(3, dtype=dtype, name="B") factory = _NodeFactory("opset1") arguments = NodeFactory._arguments_as_outputs([data, k]) node = factory.create( @@ -65,8 +65,8 @@ def test_node_factory_empty_topk(): def test_node_factory_empty_topk_with_args_and_attrs(): dtype = np.int32 - data = ng.parameter([2, 10], dtype=dtype, name="A") - k = ng.constant(3, dtype=dtype, name="B") + data = ov.parameter([2, 10], dtype=dtype, name="A") + k = ov.constant(3, dtype=dtype, name="B") factory = NodeFactory("opset1") arguments = NodeFactory._arguments_as_outputs([data, k]) node = factory.create("TopK", None, None) diff --git a/runtime/bindings/python/tests/test_ngraph/test_normalization.py b/runtime/bindings/python/tests/test_ngraph/test_normalization.py index 50d3a1c5a21..3051c8655b3 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_normalization.py +++ b/runtime/bindings/python/tests/test_ngraph/test_normalization.py @@ -3,7 +3,7 @@ import numpy as np -import ngraph as ng +import openvino.opset8 as ov from tests.runtime import 
get_runtime from tests.test_ngraph.util import run_op_node @@ -13,7 +13,7 @@ def test_lrn(): input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype("f") axes = np.array([1], dtype=np.int64) runtime = get_runtime() - model = ng.lrn(ng.constant(input_image), ng.constant(axes), alpha=1.0, beta=2.0, bias=1.0, size=3) + model = ov.lrn(ov.constant(input_image), ov.constant(axes), alpha=1.0, beta=2.0, bias=1.0, size=3) computation = runtime.computation(model) result = computation() assert np.allclose( @@ -28,7 +28,7 @@ def test_lrn(): ) # Test LRN default parameter values - model = ng.lrn(ng.constant(input_image), ng.constant(axes)) + model = ov.lrn(ov.constant(input_image), ov.constant(axes)) computation = runtime.computation(model) result = computation() assert np.allclose( @@ -83,7 +83,7 @@ def test_lrn_factory(): ], dtype=np.float32, ) - result = run_op_node([x], ng.lrn, axis, alpha, beta, bias, nsize) + result = run_op_node([x], ov.lrn, axis, alpha, beta, bias, nsize) assert np.allclose(result, excepted) @@ -97,7 +97,7 @@ def test_batch_norm_inference(): epsilon = 9.99e-06 excepted = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]], dtype=np.float32) - result = run_op_node([data, gamma, beta, mean, variance], ng.batch_norm_inference, epsilon) + result = run_op_node([data, gamma, beta, mean, variance], ov.batch_norm_inference, epsilon) assert np.allclose(result, excepted) @@ -114,7 +114,7 @@ def test_mvn_no_variance(): -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4], dtype=np.float32).reshape([1, 3, 3, 3]) - result = run_op_node([data], ng.mvn, axes, normalize_variance, epsilon, eps_mode) + result = run_op_node([data], ov.mvn, axes, normalize_variance, epsilon, eps_mode) assert np.allclose(result, excepted) @@ -137,6 +137,6 @@ def test_mvn(): -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934], dtype=np.float32).reshape([1, 3, 3, 3]) - result = run_op_node([data], ng.mvn, axes, normalize_variance, epsilon, 
eps_mode) + result = run_op_node([data], ov.mvn, axes, normalize_variance, epsilon, eps_mode) assert np.allclose(result, excepted) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops.py b/runtime/bindings/python/tests/test_ngraph/test_ops.py index 89993329d34..3a57632dfda 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops.py @@ -5,48 +5,50 @@ import numpy as np -import ngraph as ng -from ngraph.impl import AxisSet, Function, Shape, Type -from ngraph.impl.op import Constant, Parameter +import openvino.opset8 as ov +from openvino.impl import AxisSet, Function, Shape, Type +from openvino.impl.op import Constant, Parameter from tests.runtime import get_runtime +from tests import xfail_issue_67415 + def binary_op(op_str, a, b): if op_str == "+": return a + b elif op_str == "Add": - return ng.add(a, b) + return ov.add(a, b) elif op_str == "-": return a - b elif op_str == "Sub": - return ng.subtract(a, b) + return ov.subtract(a, b) elif op_str == "*": return a * b elif op_str == "Mul": - return ng.multiply(a, b) + return ov.multiply(a, b) elif op_str == "/": return a / b elif op_str == "Div": - return ng.divide(a, b) + return ov.divide(a, b) elif op_str == "Equal": - return ng.equal(a, b) + return ov.equal(a, b) elif op_str == "Greater": - return ng.greater(a, b) + return ov.greater(a, b) elif op_str == "GreaterEq": - return ng.greater_equal(a, b) + return ov.greater_equal(a, b) elif op_str == "Less": - return ng.less(a, b) + return ov.less(a, b) elif op_str == "LessEq": - return ng.less_equal(a, b) + return ov.less_equal(a, b) elif op_str == "Maximum": - return ng.maximum(a, b) + return ov.maximum(a, b) elif op_str == "Minimum": - return ng.minimum(a, b) + return ov.minimum(a, b) elif op_str == "NotEqual": - return ng.not_equal(a, b) + return ov.not_equal(a, b) elif op_str == "Power": - return ng.power(a, b) + return ov.power(a, b) def binary_op_ref(op_str, a, b): @@ -192,7 +194,7 @@ def 
test_add_with_mul(): B = Parameter(element_type, shape) C = Parameter(element_type, shape) parameter_list = [A, B, C] - function = Function([ng.multiply(ng.add(A, B), C)], parameter_list, "test") + function = Function([ov.multiply(ov.add(A, B), C)], parameter_list, "test") runtime = get_runtime() computation = runtime.computation(function, A, B, C) @@ -212,45 +214,45 @@ def test_add_with_mul(): def unary_op(op_str, a): if op_str == "Abs": - return ng.abs(a) + return ov.abs(a) elif op_str == "Acos": - return ng.acos(a) + return ov.acos(a) elif op_str == "Acosh": - return ng.acosh(a) + return ov.acosh(a) elif op_str == "Asin": - return ng.asin(a) + return ov.asin(a) elif op_str == "Asinh": - return ng.asinh(a) + return ov.asinh(a) elif op_str == "Atan": - return ng.atan(a) + return ov.atan(a) elif op_str == "Atanh": - return ng.atanh(a) + return ov.atanh(a) elif op_str == "Ceiling": - return ng.ceiling(a) + return ov.ceiling(a) elif op_str == "Cos": - return ng.cos(a) + return ov.cos(a) elif op_str == "Cosh": - return ng.cosh(a) + return ov.cosh(a) elif op_str == "Floor": - return ng.floor(a) + return ov.floor(a) elif op_str == "log": - return ng.log(a) + return ov.log(a) elif op_str == "exp": - return ng.exp(a) + return ov.exp(a) elif op_str == "negative": - return ng.negative(a) + return ov.negative(a) elif op_str == "Sign": - return ng.sign(a) + return ov.sign(a) elif op_str == "Sin": - return ng.sin(a) + return ov.sin(a) elif op_str == "Sinh": - return ng.sinh(a) + return ov.sinh(a) elif op_str == "Sqrt": - return ng.sqrt(a) + return ov.sqrt(a) elif op_str == "Tan": - return ng.tan(a) + return ov.tan(a) elif op_str == "Tanh": - return ng.tanh(a) + return ov.tanh(a) def unary_op_ref(op_str, a): @@ -442,7 +444,7 @@ def test_reshape(): shape = Shape([2, 3]) A = Parameter(element_type, shape) parameter_list = [A] - function = Function([ng.reshape(A, Shape([3, 2]), special_zero=False)], parameter_list, "test") + function = Function([ov.reshape(A, Shape([3, 2]), 
special_zero=False)], parameter_list, "test") runtime = get_runtime() computation = runtime.computation(function, *parameter_list) @@ -457,7 +459,7 @@ def test_broadcast(): element_type = Type.f32 A = Parameter(element_type, Shape([3])) parameter_list = [A] - function = Function([ng.broadcast(A, [3, 3])], parameter_list, "test") + function = Function([ov.broadcast(A, [3, 3])], parameter_list, "test") runtime = get_runtime() computation = runtime.computation(function, *parameter_list) @@ -490,7 +492,7 @@ def test_concat(): C = Parameter(element_type, Shape([1, 2])) parameter_list = [A, B, C] axis = 0 - function = Function([ng.concat([A, B, C], axis)], parameter_list, "test") + function = Function([ov.concat([A, B, C], axis)], parameter_list, "test") a_arr = np.array([[1, 2]], dtype=np.float32) b_arr = np.array([[5, 6]], dtype=np.float32) @@ -520,6 +522,7 @@ def test_axisset(): assert set(tuple_axisset) == set(set_axisset) +@xfail_issue_67415 def test_select(): element_type = Type.f32 A = Parameter(Type.boolean, Shape([1, 2])) @@ -527,7 +530,7 @@ def test_select(): C = Parameter(element_type, Shape([1, 2])) parameter_list = [A, B, C] - function = Function([ng.select(A, B, C)], parameter_list, "test") + function = Function([ov.select(A, B, C)], parameter_list, "test") runtime = get_runtime() computation = runtime.computation(function, *parameter_list) @@ -559,7 +562,7 @@ def test_max_pool(): auto_pad = "explicit" idx_elem_type = "i32" - model = ng.max_pool( + model = ov.max_pool( A, strides, dilations, @@ -584,7 +587,7 @@ def test_max_pool(): pads_begin = [0] * len(window_shape) pads_end = [0] * len(window_shape) - model = ng.max_pool( + model = ov.max_pool( A, strides, dilations, @@ -618,7 +621,7 @@ def test_max_pool(): pads_begin = [0, 0] pads_end = [0, 0] - model = ng.max_pool( + model = ov.max_pool( A, strides, dilations, @@ -643,7 +646,7 @@ def test_max_pool(): pads_begin = [0, 0] pads_end = [0, 0] - model = ng.max_pool( + model = ov.max_pool( A, strides, 
dilations, @@ -731,7 +734,7 @@ def test_convolution_simple(): pads_end = [0, 0] dilations = [1, 1] - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) function = Function([model], parameter_list, "test") runtime = get_runtime() @@ -759,7 +762,7 @@ def test_convolution_with_strides(): pads_end = [0, 0] dilations = [1, 1] - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) function = Function([model], parameter_list, "test") runtime = get_runtime() @@ -786,7 +789,7 @@ def test_convolution_with_filter_dilation(): pads_end = [0, 0] dilations = [2, 2] - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) function = Function([model], parameter_list, "test") runtime = get_runtime() @@ -814,7 +817,7 @@ def test_convolution_with_padding(): pads_begin = [0, 0] pads_end = [0, 0] - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) function = Function([model], parameter_list, "test") runtime = get_runtime() @@ -843,7 +846,7 @@ def test_convolution_with_non_zero_padding(): pads_begin = [2, 1] pads_end = [1, 2] - model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) function = Function([model], parameter_list, "test") runtime = get_runtime() diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py b/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py index 118c06faf6a..7a910eae3c5 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py +++ 
b/runtime/bindings/python/tests/test_ngraph/test_ops_binary.py @@ -6,35 +6,37 @@ import operator import numpy as np import pytest -import ngraph as ng +import openvino.opset8 as ov from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node +from tests import xfail_issue_67415 + @pytest.mark.parametrize( "ng_api_helper,numpy_function", [ - (ng.add, np.add), - (ng.divide, np.divide), - (ng.multiply, np.multiply), - (ng.subtract, np.subtract), - (ng.minimum, np.minimum), - (ng.maximum, np.maximum), - (ng.mod, np.mod), - (ng.equal, np.equal), - (ng.not_equal, np.not_equal), - (ng.greater, np.greater), - (ng.greater_equal, np.greater_equal), - (ng.less, np.less), - (ng.less_equal, np.less_equal), + (ov.add, np.add), + (ov.divide, np.divide), + (ov.multiply, np.multiply), + (ov.subtract, np.subtract), + (ov.minimum, np.minimum), + (ov.maximum, np.maximum), + (ov.mod, np.mod), + (ov.equal, np.equal), + (ov.not_equal, np.not_equal), + (ov.greater, np.greater), + (ov.greater_equal, np.greater_equal), + (ov.less, np.less), + (ov.less_equal, np.less_equal), ], ) def test_binary_op(ng_api_helper, numpy_function): runtime = get_runtime() shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) - parameter_b = ng.parameter(shape, name="B", dtype=np.float32) + parameter_a = ov.parameter(shape, name="A", dtype=np.float32) + parameter_b = ov.parameter(shape, name="B", dtype=np.float32) model = ng_api_helper(parameter_a, parameter_b) computation = runtime.computation(model, parameter_a, parameter_b) @@ -50,19 +52,19 @@ def test_binary_op(ng_api_helper, numpy_function): @pytest.mark.parametrize( "ng_api_helper,numpy_function", [ - (ng.add, np.add), - (ng.divide, np.divide), - (ng.multiply, np.multiply), - (ng.subtract, np.subtract), - (ng.minimum, np.minimum), - (ng.maximum, np.maximum), - (ng.mod, np.mod), - (ng.equal, np.equal), - (ng.not_equal, np.not_equal), - (ng.greater, np.greater), - (ng.greater_equal, np.greater_equal), - 
(ng.less, np.less), - (ng.less_equal, np.less_equal), + (ov.add, np.add), + (ov.divide, np.divide), + (ov.multiply, np.multiply), + (ov.subtract, np.subtract), + (ov.minimum, np.minimum), + (ov.maximum, np.maximum), + (ov.mod, np.mod), + (ov.equal, np.equal), + (ov.not_equal, np.not_equal), + (ov.greater, np.greater), + (ov.greater_equal, np.greater_equal), + (ov.less, np.less), + (ov.less_equal, np.less_equal), ], ) def test_binary_op_with_scalar(ng_api_helper, numpy_function): @@ -72,7 +74,7 @@ def test_binary_op_with_scalar(ng_api_helper, numpy_function): value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) + parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = ng_api_helper(parameter_a, value_b) computation = runtime.computation(model, parameter_a) @@ -82,16 +84,17 @@ def test_binary_op_with_scalar(ng_api_helper, numpy_function): assert np.allclose(result, expected) +@xfail_issue_67415 @pytest.mark.parametrize( "ng_api_helper,numpy_function", - [(ng.logical_and, np.logical_and), (ng.logical_or, np.logical_or), (ng.logical_xor, np.logical_xor)], + [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], ) def test_binary_logical_op(ng_api_helper, numpy_function): runtime = get_runtime() shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.bool) - parameter_b = ng.parameter(shape, name="B", dtype=np.bool) + parameter_a = ov.parameter(shape, name="A", dtype=np.bool) + parameter_b = ov.parameter(shape, name="B", dtype=np.bool) model = ng_api_helper(parameter_a, parameter_b) computation = runtime.computation(model, parameter_a, parameter_b) @@ -104,9 +107,10 @@ def test_binary_logical_op(ng_api_helper, numpy_function): assert np.allclose(result, expected) +@xfail_issue_67415 @pytest.mark.parametrize( "ng_api_helper,numpy_function", - [(ng.logical_and, np.logical_and), (ng.logical_or, np.logical_or), 
(ng.logical_xor, np.logical_xor)], + [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)], ) def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function): runtime = get_runtime() @@ -115,7 +119,7 @@ def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function): value_b = np.array([[False, True], [False, True]], dtype=np.bool) shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.bool) + parameter_a = ov.parameter(shape, name="A", dtype=np.bool) model = ng_api_helper(parameter_a, value_b) computation = runtime.computation(model, parameter_a) @@ -147,7 +151,7 @@ def test_binary_operators(operator, numpy_function): value_b = np.array([[4, 5], [1, 7]], dtype=np.float32) shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) + parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = operator(parameter_a, value_b) computation = runtime.computation(model, parameter_a) @@ -179,7 +183,7 @@ def test_binary_operators_with_scalar(operator, numpy_function): value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) shape = [2, 2] - parameter_a = ng.parameter(shape, name="A", dtype=np.float32) + parameter_a = ov.parameter(shape, name="A", dtype=np.float32) model = operator(parameter_a, value_b) computation = runtime.computation(model, parameter_a) @@ -194,7 +198,7 @@ def test_multiply(): B = np.arange(35, dtype=np.int32).reshape((7, 1, 5)) expected = np.multiply(A, B) - result = run_op_node([A, B], ng.multiply) + result = run_op_node([A, B], ov.multiply) assert np.allclose(result, expected) @@ -204,6 +208,6 @@ def test_power_v1(): B = np.arange(20, dtype=np.float32).reshape((4, 1, 5)) expected = np.power(A, B) - result = run_op_node([A, B], ng.power) + result = run_op_node([A, B], ov.power) assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_fused.py b/runtime/bindings/python/tests/test_ngraph/test_ops_fused.py index 
6db4a5f29c4..0ebec975d5a 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_fused.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_fused.py @@ -4,7 +4,7 @@ import numpy as np import pytest -import ngraph as ng +import openvino.opset8 as ov from tests.runtime import get_runtime from tests import xfail_issue_36486 @@ -15,7 +15,7 @@ def test_elu_operator_with_scalar_and_array(): data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) alpha_value = np.float32(3) - model = ng.elu(data_value, alpha_value) + model = ov.elu(data_value, alpha_value) computation = runtime.computation(model) result = computation() @@ -30,9 +30,9 @@ def test_elu_operator_with_scalar(): alpha_value = np.float32(3) data_shape = [2, 2] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.elu(parameter_data, alpha_value) + model = ov.elu(parameter_data, alpha_value) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -52,13 +52,13 @@ def test_fake_quantize(): data_shape = [1, 2, 3, 4] bound_shape = [] - parameter_data = ng.parameter(data_shape, name="data", dtype=np.float32) - parameter_input_low = ng.parameter(bound_shape, name="input_low", dtype=np.float32) - parameter_input_high = ng.parameter(bound_shape, name="input_high", dtype=np.float32) - parameter_output_low = ng.parameter(bound_shape, name="output_low", dtype=np.float32) - parameter_output_high = ng.parameter(bound_shape, name="output_high", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="data", dtype=np.float32) + parameter_input_low = ov.parameter(bound_shape, name="input_low", dtype=np.float32) + parameter_input_high = ov.parameter(bound_shape, name="input_high", dtype=np.float32) + parameter_output_low = ov.parameter(bound_shape, name="output_low", dtype=np.float32) + parameter_output_high = ov.parameter(bound_shape, name="output_high", 
dtype=np.float32) - model = ng.fake_quantize( + model = ov.fake_quantize( parameter_data, parameter_input_low, parameter_input_high, @@ -117,9 +117,9 @@ def test_depth_to_space(): block_size = np.float32(2) data_shape = [1, 4, 2, 3] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.depth_to_space(parameter_data, mode, block_size) + model = ov.depth_to_space(parameter_data, mode, block_size) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -140,9 +140,9 @@ def test_space_to_batch(): pads_begin = np.array([0, 0, 1, 0], dtype=np.int64) pads_end = np.array([0, 0, 0, 1], dtype=np.int64) - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.space_to_batch(parameter_data, block_shape, pads_begin, pads_end) + model = ov.space_to_batch(parameter_data, block_shape, pads_begin, pads_end) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -192,9 +192,9 @@ def test_batch_to_space(): crops_begin = np.array([0, 0, 1, 0], dtype=np.int64) crops_end = np.array([0, 0, 0, 1], dtype=np.int64) - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.batch_to_space(parameter_data, block_shape, crops_begin, crops_end) + model = ov.batch_to_space(parameter_data, block_shape, crops_begin, crops_end) computation = runtime.computation(model, parameter_data) result = computation(data) @@ -207,11 +207,11 @@ def test_clamp_operator(): runtime = get_runtime() data_shape = [2, 2] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) min_value = np.float32(3) max_value = np.float32(12) - model = 
ng.clamp(parameter_data, min_value, max_value) + model = ov.clamp(parameter_data, min_value, max_value) computation = runtime.computation(model, parameter_data) data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32) @@ -228,7 +228,7 @@ def test_clamp_operator_with_array(): min_value = np.float32(3) max_value = np.float32(12) - model = ng.clamp(data_value, min_value, max_value) + model = ov.clamp(data_value, min_value, max_value) computation = runtime.computation(model) result = computation() @@ -241,10 +241,10 @@ def test_squeeze_operator(): runtime = get_runtime() data_shape = [1, 2, 1, 3, 1, 1] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) data_value = np.arange(6.0, dtype=np.float32).reshape([1, 2, 1, 3, 1, 1]) axes = [2, 4] - model = ng.squeeze(parameter_data, axes) + model = ov.squeeze(parameter_data, axes) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -258,13 +258,13 @@ def test_squared_difference_operator(): x1_shape = [1, 2, 3, 4] x2_shape = [2, 3, 4] - parameter_x1 = ng.parameter(x1_shape, name="x1", dtype=np.float32) - parameter_x2 = ng.parameter(x2_shape, name="x2", dtype=np.float32) + parameter_x1 = ov.parameter(x1_shape, name="x1", dtype=np.float32) + parameter_x2 = ov.parameter(x2_shape, name="x2", dtype=np.float32) x1_value = np.arange(24.0, dtype=np.float32).reshape(x1_shape) x2_value = np.arange(start=4.0, stop=28.0, step=1.0, dtype=np.float32).reshape(x2_shape) - model = ng.squared_difference(parameter_x1, parameter_x2) + model = ov.squared_difference(parameter_x1, parameter_x2) computation = runtime.computation(model, parameter_x1, parameter_x2) result = computation(x1_value, x2_value) @@ -279,11 +279,11 @@ def test_shuffle_channels_operator(): axis = 1 groups = 5 - parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter = ov.parameter(data_shape, name="Data", 
dtype=np.float32) data_value = np.arange(60.0, dtype=np.float32).reshape(data_shape) - model = ng.shuffle_channels(parameter, axis, groups) + model = ov.shuffle_channels(parameter, axis, groups) computation = runtime.computation(model, parameter) result = computation(data_value) @@ -316,10 +316,10 @@ def test_unsqueeze(): runtime = get_runtime() data_shape = [3, 4, 5] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) data_value = np.arange(60.0, dtype=np.float32).reshape(3, 4, 5) axes = [0, 4] - model = ng.unsqueeze(parameter_data, axes) + model = ov.unsqueeze(parameter_data, axes) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -335,9 +335,9 @@ def test_grn_operator(): data_shape = [1, 2, 3, 4] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.grn(parameter_data, bias) + model = ov.grn(parameter_data, bias) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -370,10 +370,10 @@ def test_prelu_operator(): data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape) slope_value = np.arange(start=-10.0, stop=-4.0, dtype=np.float32).reshape(slope_shape) - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - parameter_slope = ng.parameter(slope_shape, name="Slope", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) + parameter_slope = ov.parameter(slope_shape, name="Slope", dtype=np.float32) - model = ng.prelu(parameter_data, parameter_slope) + model = ov.prelu(parameter_data, parameter_slope) computation = runtime.computation(model, parameter_data, parameter_slope) result = computation(data_value, slope_value) @@ -390,8 +390,8 @@ def test_selu_operator(): alpha = np.array(1.6733, 
dtype=np.float32) lambda_value = np.array(1.0507, dtype=np.float32) - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.selu(parameter_data, alpha, lambda_value) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) + model = ov.selu(parameter_data, alpha, lambda_value) computation = runtime.computation(model, parameter_data) result = computation(data) @@ -409,11 +409,11 @@ def test_hard_sigmoid_operator(): data_value = np.array([-1, 0, 1], dtype=np.float32) - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - parameter_alpha = ng.parameter([], name="Alpha", dtype=np.float32) - parameter_beta = ng.parameter([], name="Beta", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) + parameter_alpha = ov.parameter([], name="Alpha", dtype=np.float32) + parameter_beta = ov.parameter([], name="Beta", dtype=np.float32) - model = ng.hard_sigmoid(parameter_data, parameter_alpha, parameter_beta) + model = ov.hard_sigmoid(parameter_data, parameter_alpha, parameter_beta) computation = runtime.computation(model, parameter_data, parameter_alpha, parameter_beta) result = computation(data_value, alpha_value, beta_value) @@ -451,9 +451,9 @@ def test_mvn_operator(): dtype=np.float32, ) - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.mvn(parameter_data, axes, normalize_variance, eps, eps_mode) + model = ov.mvn(parameter_data, axes, normalize_variance, eps, eps_mode) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -490,9 +490,9 @@ def test_space_to_depth_operator(): mode = "blocks_first" block_size = 2 - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.space_to_depth(parameter_data, mode, block_size) + 
model = ov.space_to_depth(parameter_data, mode, block_size) computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -545,11 +545,11 @@ def test_space_to_depth_operator(): R_shape = [hidden_size, hidden_size] B_shape = [hidden_size] - parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) - parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) - parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) - parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) - parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + parameter_X = ov.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ov.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ov.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ov.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ov.parameter(B_shape, name="B", dtype=np.float32) X_value = np.array( [0.3432185, 0.612268, 0.20272376, 0.9513413, 0.30585995, 0.7265472], dtype=np.float32 @@ -591,7 +591,7 @@ def test_space_to_depth_operator(): activation_beta = [] clip = 2.88 - model = ng.rnn_cell( + model = ov.rnn_cell( parameter_X, parameter_H_t, parameter_W, @@ -621,8 +621,8 @@ def test_group_convolution_operator(): data_shape = [1, 4, 2, 2] filters_shape = [2, 1, 2, 1, 1] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) - parameter_filters = ng.parameter(filters_shape, name="Filters", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) + parameter_filters = ov.parameter(filters_shape, name="Filters", dtype=np.float32) data_value = np.arange(start=1.0, stop=17.0, dtype=np.float32).reshape(data_shape) filters_value = np.arange(start=1.0, stop=5.0, dtype=np.float32).reshape(filters_shape) @@ -631,7 +631,7 @@ def test_group_convolution_operator(): pads_begin = [0, 0] pads_end = [0, 0] - model = ng.group_convolution(parameter_data, parameter_filters, 
strides, pads_begin, pads_end, dilations) + model = ov.group_convolution(parameter_data, parameter_filters, strides, pads_begin, pads_end, dilations) computation = runtime.computation(model, parameter_data, parameter_filters) result = computation(data_value, filters_value) @@ -651,9 +651,9 @@ def test_group_convolution_backprop_data(): pads_begin = [1, 1] pads_end = [1, 1] - data_node = ng.parameter(data_shape, name="Data", dtype=np.float32) - filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32) - model = ng.group_convolution_backprop_data( + data_node = ov.parameter(data_shape, name="Data", dtype=np.float32) + filters_node = ov.parameter(filters_shape, name="Filters", dtype=np.float32) + model = ov.group_convolution_backprop_data( data_node, filters_node, strides, None, pads_begin, pads_end, output_padding=output_padding ) @@ -742,11 +742,11 @@ def test_group_convolution_backprop_data_output_shape(): filters_shape = [1, 1, 1, 1, 5] strides = [1, 1] - data_node = ng.parameter(data_shape, name="Data", dtype=np.float32) - filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32) - output_shape_node = ng.constant(np.array([1, 14], dtype=np.int64)) + data_node = ov.parameter(data_shape, name="Data", dtype=np.float32) + filters_node = ov.parameter(filters_shape, name="Filters", dtype=np.float32) + output_shape_node = ov.constant(np.array([1, 14], dtype=np.int64)) - model = ng.group_convolution_backprop_data( + model = ov.group_convolution_backprop_data( data_node, filters_node, strides, output_shape_node, auto_pad="same_upper" ) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_matmul.py b/runtime/bindings/python/tests/test_ngraph/test_ops_matmul.py index 330bb296758..a2ead563255 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_matmul.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_matmul.py @@ -4,7 +4,7 @@ import numpy as np import pytest -import ngraph as ng +import openvino.opset8 as 
ov from tests.test_ngraph.util import run_op_node @@ -29,7 +29,7 @@ def test_matmul(shape_a, shape_b, transpose_a, transpose_b): left_input = -100.0 + np.random.rand(*shape_a).astype(np.float32) * 200.0 right_input = -100.0 + np.random.rand(*shape_b).astype(np.float32) * 200.0 - result = run_op_node([left_input, right_input], ng.matmul, transpose_a, transpose_b) + result = run_op_node([left_input, right_input], ov.matmul, transpose_a, transpose_b) if transpose_a: left_input = np.transpose(left_input) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_multioutput.py b/runtime/bindings/python/tests/test_ngraph/test_ops_multioutput.py index 9f0c7efd46d..e80174c028a 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_multioutput.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_multioutput.py @@ -3,17 +3,17 @@ import numpy as np -import ngraph as ng +import openvino.opset8 as ov from tests.runtime import get_runtime def test_split(): runtime = get_runtime() - input_tensor = ng.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)) - axis = ng.constant(0, dtype=np.int64) + input_tensor = ov.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)) + axis = ov.constant(0, dtype=np.int64) splits = 3 - split_node = ng.split(input_tensor, axis, splits) + split_node = ov.split(input_tensor, axis, splits) computation = runtime.computation(split_node) split_results = computation() expected_results = np.array([[0, 1], [2, 3], [4, 5]], dtype=np.int32) @@ -22,11 +22,11 @@ def test_split(): def test_variadic_split(): runtime = get_runtime() - input_tensor = ng.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32)) - axis = ng.constant(1, dtype=np.int64) - splits = ng.constant(np.array([2, 4], dtype=np.int64)) + input_tensor = ov.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32)) + axis = ov.constant(1, dtype=np.int64) + splits = ov.constant(np.array([2, 4], dtype=np.int64)) - v_split_node = 
ng.variadic_split(input_tensor, axis, splits) + v_split_node = ov.variadic_split(input_tensor, axis, splits) computation = runtime.computation(v_split_node) results = computation() split0 = np.array([[0, 1], [6, 7]], dtype=np.int32) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_reshape.py b/runtime/bindings/python/tests/test_ngraph/test_ops_reshape.py index 8930c49ffc5..9a477a434db 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_reshape.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_reshape.py @@ -1,7 +1,7 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import ngraph as ng +import openvino.opset8 as ov import numpy as np import pytest @@ -10,15 +10,15 @@ from tests.test_ngraph.util import run_op_node, run_op_numeric_data def test_concat(): - a = np.array([[1, 2], [3, 4]]) - b = np.array([[5, 6]]) + a = np.array([[1, 2], [3, 4]]).astype(np.float32) + b = np.array([[5, 6]]).astype(np.float32) axis = 0 expected = np.concatenate((a, b), axis=0) runtime = get_runtime() - parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32) - parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32) - node = ng.concat([parameter_a, parameter_b], axis) + parameter_a = ov.parameter(list(a.shape), name="A", dtype=np.float32) + parameter_b = ov.parameter(list(b.shape), name="B", dtype=np.float32) + node = ov.concat([parameter_a, parameter_b], axis) computation = runtime.computation(node, parameter_a, parameter_b) result = computation(a, b) assert np.allclose(result, expected) @@ -29,7 +29,7 @@ def test_concat(): ) def test_constant_from_bool(val_type, value): expected = np.array(value, dtype=val_type) - result = run_op_numeric_data(value, ng.constant, val_type) + result = run_op_numeric_data(value, ov.constant, val_type) assert np.allclose(result, expected) @@ -50,7 +50,7 @@ def test_constant_from_bool(val_type, value): ) def test_constant_from_scalar(val_type, value): expected = 
np.array(value, dtype=val_type) - result = run_op_numeric_data(value, ng.constant, val_type) + result = run_op_numeric_data(value, ov.constant, val_type) assert np.allclose(result, expected) @@ -64,7 +64,7 @@ def test_constant_from_scalar(val_type, value): def test_constant_from_float_array(val_type): np.random.seed(133391) input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type) - result = run_op_numeric_data(input_data, ng.constant, val_type) + result = run_op_numeric_data(input_data, ov.constant, val_type) assert np.allclose(result, input_data) @@ -86,7 +86,7 @@ def test_constant_from_integer_array(val_type, range_start, range_end): input_data = np.array( np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type ) - result = run_op_numeric_data(input_data, ng.constant, val_type) + result = run_op_numeric_data(input_data, ov.constant, val_type) assert np.allclose(result, input_data) @@ -94,12 +94,12 @@ def test_broadcast_numpy(): data_shape = [16, 1, 1] target_shape_shape = [4] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - target_shape_parameter = ng.parameter( + data_parameter = ov.parameter(data_shape, name="Data", dtype=np.float32) + target_shape_parameter = ov.parameter( target_shape_shape, name="Target_shape", dtype=np.int64 ) - node = ng.broadcast(data_parameter, target_shape_parameter) + node = ov.broadcast(data_parameter, target_shape_parameter) assert node.get_type_name() == "Broadcast" assert node.get_output_size() == 1 @@ -109,12 +109,12 @@ def test_broadcast_bidirectional(): data_shape = [16, 1, 1] target_shape_shape = [4] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - target_shape_parameter = ng.parameter( + data_parameter = ov.parameter(data_shape, name="Data", dtype=np.float32) + target_shape_parameter = ov.parameter( target_shape_shape, name="Target_shape", dtype=np.int64 ) - node = ng.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL") + node = 
ov.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL") assert node.get_type_name() == "Broadcast" assert node.get_output_size() == 1 @@ -126,7 +126,7 @@ def test_transpose(): ) input_order = np.array([0, 2, 3, 1], dtype=np.int32) - result = run_op_node([input_tensor], ng.transpose, input_order) + result = run_op_node([input_tensor], ov.transpose, input_order) expected = np.transpose(input_tensor, input_order) @@ -140,7 +140,7 @@ def test_tile(): input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3)) repeats = np.array([2, 1], dtype=np.int32) - result = run_op_node([input_tensor], ng.tile, repeats) + result = run_op_node([input_tensor], ov.tile, repeats) expected = np.array([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]).reshape((2, 2, 3)) @@ -163,7 +163,7 @@ def test_strided_slice(): result = run_op_node( [input_tensor], - ng.strided_slice, + ov.strided_slice, begin, end, strides, @@ -188,7 +188,7 @@ def test_reshape_v1(): expected_shape = np.array([2, 150, 4]) expected = np.reshape(A, expected_shape) - result = run_op_node([A], ng.reshape, shape, special_zero) + result = run_op_node([A], ov.reshape, shape, special_zero) assert np.allclose(result, expected) @@ -196,6 +196,6 @@ def test_reshape_v1(): def test_shape_of(): input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32) - result = run_op_node([input_tensor], ng.shape_of) + result = run_op_node([input_tensor], ov.shape_of) assert np.allclose(result, [3, 3]) diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_scatter.py b/runtime/bindings/python/tests/test_ngraph/test_ops_scatter.py index b762f1068a1..1d843f29447 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_scatter.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_scatter.py @@ -3,18 +3,18 @@ import numpy as np -import ngraph as ng -from ngraph.impl import Type +import openvino.opset8 as ov +from openvino.impl import Type def test_scatter_update_props(): dtype = np.int8 - parameter_r = 
ng.parameter([2, 3, 4], dtype=dtype, name="data") - parameter_i = ng.parameter([2, 1], dtype=dtype, name="indices") - parameter_u = ng.parameter([2, 2, 1, 4], dtype=dtype, name="updates") + parameter_r = ov.parameter([2, 3, 4], dtype=dtype, name="data") + parameter_i = ov.parameter([2, 1], dtype=dtype, name="indices") + parameter_u = ov.parameter([2, 2, 1, 4], dtype=dtype, name="updates") axis = np.array([1], dtype=np.int8) - node = ng.scatter_update(parameter_r, parameter_i, parameter_u, axis) + node = ov.scatter_update(parameter_r, parameter_i, parameter_u, axis) assert node.get_type_name() == "ScatterUpdate" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [2, 3, 4] @@ -23,12 +23,12 @@ def test_scatter_update_props(): def test_scatter_update_elements_props(): dtype = np.int8 - parameter_r = ng.parameter([2, 4, 5, 7], dtype=dtype, name="data") - parameter_i = ng.parameter([2, 2, 2, 2], dtype=dtype, name="indices") - parameter_u = ng.parameter([2, 2, 2, 2], dtype=dtype, name="updates") + parameter_r = ov.parameter([2, 4, 5, 7], dtype=dtype, name="data") + parameter_i = ov.parameter([2, 2, 2, 2], dtype=dtype, name="indices") + parameter_u = ov.parameter([2, 2, 2, 2], dtype=dtype, name="updates") axis = np.array([1], dtype=np.int8) - node = ng.scatter_elements_update(parameter_r, parameter_i, parameter_u, axis) + node = ov.scatter_elements_update(parameter_r, parameter_i, parameter_u, axis) assert node.get_type_name() == "ScatterElementsUpdate" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [2, 4, 5, 7] diff --git a/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py b/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py index 5a56d12f38f..e3049141802 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py +++ b/runtime/bindings/python/tests/test_ngraph/test_ops_unary.py @@ -4,37 +4,39 @@ import numpy as np import pytest -import ngraph as ng -from ngraph.impl import Shape, Type 
+import openvino.opset8 as ov +from openvino.impl import Shape, Type from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node +from tests import xfail_issue_67415 + @pytest.mark.parametrize( "ng_api_fn, numpy_fn, range_start, range_end", [ - (ng.absolute, np.abs, -1, 1), - (ng.abs, np.abs, -1, 1), - (ng.acos, np.arccos, -1, 1), - (ng.acosh, np.arccosh, 1, 2), - (ng.asin, np.arcsin, -1, 1), - (ng.asinh, np.arcsinh, -1, 1), - (ng.atan, np.arctan, -100.0, 100.0), - (ng.atanh, np.arctanh, 0.0, 1.0), - (ng.ceiling, np.ceil, -100.0, 100.0), - (ng.ceil, np.ceil, -100.0, 100.0), - (ng.cos, np.cos, -100.0, 100.0), - (ng.cosh, np.cosh, -100.0, 100.0), - (ng.exp, np.exp, -100.0, 100.0), - (ng.floor, np.floor, -100.0, 100.0), - (ng.log, np.log, 0, 100.0), - (ng.relu, lambda x: np.maximum(0, x), -100.0, 100.0), - (ng.sign, np.sign, -100.0, 100.0), - (ng.sin, np.sin, -100.0, 100.0), - (ng.sinh, np.sinh, -100.0, 100.0), - (ng.sqrt, np.sqrt, 0.0, 100.0), - (ng.tan, np.tan, -1.0, 1.0), - (ng.tanh, np.tanh, -100.0, 100.0), + (ov.absolute, np.abs, -1, 1), + (ov.abs, np.abs, -1, 1), + (ov.acos, np.arccos, -1, 1), + (ov.acosh, np.arccosh, 1, 2), + (ov.asin, np.arcsin, -1, 1), + (ov.asinh, np.arcsinh, -1, 1), + (ov.atan, np.arctan, -100.0, 100.0), + (ov.atanh, np.arctanh, 0.0, 1.0), + (ov.ceiling, np.ceil, -100.0, 100.0), + (ov.ceil, np.ceil, -100.0, 100.0), + (ov.cos, np.cos, -100.0, 100.0), + (ov.cosh, np.cosh, -100.0, 100.0), + (ov.exp, np.exp, -100.0, 100.0), + (ov.floor, np.floor, -100.0, 100.0), + (ov.log, np.log, 0, 100.0), + (ov.relu, lambda x: np.maximum(0, x), -100.0, 100.0), + (ov.sign, np.sign, -100.0, 100.0), + (ov.sin, np.sin, -100.0, 100.0), + (ov.sinh, np.sinh, -100.0, 100.0), + (ov.sqrt, np.sqrt, 0.0, 100.0), + (ov.tan, np.tan, -1.0, 1.0), + (ov.tanh, np.tanh, -100.0, 100.0), ], ) def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end): @@ -49,25 +51,25 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end): 
@pytest.mark.parametrize( "ng_api_fn, numpy_fn, input_data", [ - pytest.param(ng.absolute, np.abs, np.float32(-3)), - pytest.param(ng.abs, np.abs, np.float32(-3)), - pytest.param(ng.acos, np.arccos, np.float32(-0.5)), - pytest.param(ng.asin, np.arcsin, np.float32(-0.5)), - pytest.param(ng.atan, np.arctan, np.float32(-0.5)), - pytest.param(ng.ceiling, np.ceil, np.float32(1.5)), - pytest.param(ng.ceil, np.ceil, np.float32(1.5)), - pytest.param(ng.cos, np.cos, np.float32(np.pi / 4.0)), - pytest.param(ng.cosh, np.cosh, np.float32(np.pi / 4.0)), - pytest.param(ng.exp, np.exp, np.float32(1.5)), - pytest.param(ng.floor, np.floor, np.float32(1.5)), - pytest.param(ng.log, np.log, np.float32(1.5)), - pytest.param(ng.relu, lambda x: np.maximum(0, x), np.float32(-0.125)), - pytest.param(ng.sign, np.sign, np.float32(0.0)), - pytest.param(ng.sin, np.sin, np.float32(np.pi / 4.0)), - pytest.param(ng.sinh, np.sinh, np.float32(0.0)), - pytest.param(ng.sqrt, np.sqrt, np.float32(3.5)), - pytest.param(ng.tan, np.tan, np.float32(np.pi / 4.0)), - pytest.param(ng.tanh, np.tanh, np.float32(0.1234)), + pytest.param(ov.absolute, np.abs, np.float32(-3)), + pytest.param(ov.abs, np.abs, np.float32(-3)), + pytest.param(ov.acos, np.arccos, np.float32(-0.5)), + pytest.param(ov.asin, np.arcsin, np.float32(-0.5)), + pytest.param(ov.atan, np.arctan, np.float32(-0.5)), + pytest.param(ov.ceiling, np.ceil, np.float32(1.5)), + pytest.param(ov.ceil, np.ceil, np.float32(1.5)), + pytest.param(ov.cos, np.cos, np.float32(np.pi / 4.0)), + pytest.param(ov.cosh, np.cosh, np.float32(np.pi / 4.0)), + pytest.param(ov.exp, np.exp, np.float32(1.5)), + pytest.param(ov.floor, np.floor, np.float32(1.5)), + pytest.param(ov.log, np.log, np.float32(1.5)), + pytest.param(ov.relu, lambda x: np.maximum(0, x), np.float32(-0.125)), + pytest.param(ov.sign, np.sign, np.float32(0.0)), + pytest.param(ov.sin, np.sin, np.float32(np.pi / 4.0)), + pytest.param(ov.sinh, np.sinh, np.float32(0.0)), + pytest.param(ov.sqrt, np.sqrt, 
np.float32(3.5)), + pytest.param(ov.tan, np.tan, np.float32(np.pi / 4.0)), + pytest.param(ov.tanh, np.tanh, np.float32(0.1234)), ], ) def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data): @@ -77,19 +79,20 @@ def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data): assert np.allclose(result, expected) +@xfail_issue_67415 @pytest.mark.parametrize( "input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))] ) def test_logical_not(input_data): expected = np.logical_not(input_data) - result = run_op_node([input_data], ng.logical_not) + result = run_op_node([input_data], ov.logical_not) assert np.allclose(result, expected) def test_sigmoid(): input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32) - result = run_op_node([input_data], ng.sigmoid) + result = run_op_node([input_data], ov.sigmoid) def sigmoid(x): return 1.0 / (1.0 + np.exp(-x)) @@ -103,7 +106,7 @@ def test_softmax(): axis = 1 input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) - result = run_op_node([input_tensor], ng.softmax, axis) + result = run_op_node([input_tensor], ov.softmax, axis) expected = [[0.09003056, 0.24472842, 0.6652409], [0.09003056, 0.24472842, 0.6652409]] @@ -114,15 +117,15 @@ def test_erf(): input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32) expected = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0] - result = run_op_node([input_tensor], ng.erf) + result = run_op_node([input_tensor], ov.erf) assert np.allclose(result, expected) def test_hswish(): float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - node = ng.hswish(data) + node = ov.hswish(data) assert node.get_type_name() == "HSwish" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 10] @@ -131,9 +134,9 @@ def test_hswish(): def test_round_even(): float_dtype = np.float32 - data = 
ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - node = ng.round(data, "HALF_TO_EVEN") + node = ov.round(data, "HALF_TO_EVEN") assert node.get_type_name() == "Round" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 10] @@ -142,15 +145,15 @@ def test_round_even(): input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32) expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0] - result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN") + result = run_op_node([input_tensor], ov.round, "HALF_TO_EVEN") assert np.allclose(result, expected) def test_round_away(): float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - node = ng.round(data, "HALF_AWAY_FROM_ZERO") + node = ov.round(data, "HALF_AWAY_FROM_ZERO") assert node.get_type_name() == "Round" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 10] @@ -159,15 +162,15 @@ def test_round_away(): input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32) expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0] - result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO") + result = run_op_node([input_tensor], ov.round, "HALF_AWAY_FROM_ZERO") assert np.allclose(result, expected) def test_hsigmoid(): float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - node = ng.hsigmoid(data) + node = ov.hsigmoid(data) assert node.get_type_name() == "HSigmoid" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 10] @@ -180,9 +183,9 @@ def test_gelu_operator_with_parameters(): data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) data_shape = [2, 2] - 
parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.gelu(parameter_data, "erf") + model = ov.gelu(parameter_data, "erf") computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -195,7 +198,7 @@ def test_gelu_operator_with_array(): data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) - model = ng.gelu(data_value, "erf") + model = ov.gelu(data_value, "erf") computation = runtime.computation(model) result = computation() @@ -209,9 +212,9 @@ def test_gelu_tanh_operator_with_parameters(): data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) data_shape = [2, 2] - parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) - model = ng.gelu(parameter_data, "tanh") + model = ov.gelu(parameter_data, "tanh") computation = runtime.computation(model, parameter_data) result = computation(data_value) @@ -224,7 +227,7 @@ def test_gelu_tanh_operator_with_array(): data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) - model = ng.gelu(data_value, "tanh") + model = ov.gelu(data_value, "tanh") computation = runtime.computation(model) result = computation() diff --git a/runtime/bindings/python/tests/test_ngraph/test_pooling.py b/runtime/bindings/python/tests/test_ngraph/test_pooling.py index 77da435ff5e..3145dc0068a 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_pooling.py +++ b/runtime/bindings/python/tests/test_ngraph/test_pooling.py @@ -4,7 +4,7 @@ import numpy as np import pytest -import ngraph as ng +import openvino.opset8 as ov from tests.runtime import get_runtime @@ -16,7 +16,7 @@ def _ndarray_1x1x4x4(): def test_avg_pool_2d(_ndarray_1x1x4x4): runtime = get_runtime() input_data = _ndarray_1x1x4x4 - param = ng.parameter(input_data.shape, name="A", dtype=np.float32) + param = ov.parameter(input_data.shape, name="A", 
dtype=np.float32) kernel_shape = [2, 2] spatial_dim_count = len(kernel_shape) @@ -26,14 +26,14 @@ def test_avg_pool_2d(_ndarray_1x1x4x4): exclude_pad = True expected = [[[[13.5, 15.5], [21.5, 23.5]]]] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) expected = [[[[13.5, 14.5, 15.5], [17.5, 18.5, 19.5], [21.5, 22.5, 23.5]]]] strides = [1, 1] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) @@ -44,14 +44,14 @@ def test_avg_pool_2d(_ndarray_1x1x4x4): exclude_pad = True expected = [[[[11.0, 12.5, 14.0], [17.0, 18.5, 20.0], [23.0, 24.5, 26.0]]]] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) exclude_pad = False expected = [[[[2.75, 6.25, 3.5], [8.5, 18.5, 10.0], [5.75, 12.25, 6.5]]]] - avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) computation = runtime.computation(avg_pool_node, param) result = computation(input_data) assert np.allclose(result, expected) @@ -61,7 +61,7 @@ def test_avg_pooling_3d(_ndarray_1x1x4x4): rt = get_runtime() data = _ndarray_1x1x4x4 data = np.broadcast_to(data, (1, 1, 4, 4, 4)) - param = ng.parameter(list(data.shape)) + 
param = ov.parameter(list(data.shape)) kernel_shape = [2, 2, 2] strides = [2, 2, 2] spatial_dim_count = len(kernel_shape) @@ -69,7 +69,7 @@ def test_avg_pooling_3d(_ndarray_1x1x4x4): pads_end = [0] * spatial_dim_count exclude_pad = True - avgpool = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + avgpool = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) comp = rt.computation(avgpool, param) result = comp(data) result_ref = [[[[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]]]] @@ -93,8 +93,8 @@ def test_max_pool_basic(): auto_pad = None index_et = "i32" - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( + data_node = ov.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ov.max_pool( data_node, strides, dilations, @@ -133,8 +133,8 @@ def test_max_pool_strides(): auto_pad = None index_et = "i32" - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( + data_node = ov.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ov.max_pool( data_node, strides, dilations, @@ -171,8 +171,8 @@ def test_max_pool_kernel_shape1x1(): auto_pad = None index_et = "i32" - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( + data_node = ov.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ov.max_pool( data_node, strides, dilations, @@ -207,8 +207,8 @@ def test_max_pool_kernel_shape3x3(): auto_pad = None index_et = "i32" - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( + data_node = ov.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ov.max_pool( data_node, strides, dilations, @@ -249,8 +249,8 @@ def test_max_pool_non_zero_pads(): auto_pad = None index_et = "i32" - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( + data_node = 
ov.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ov.max_pool( data_node, strides, dilations, @@ -318,8 +318,8 @@ def test_max_pool_same_upper_auto_pads(): rounding_type = "floor" index_et = "i32" - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( + data_node = ov.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ov.max_pool( data_node, strides, dilations, @@ -385,8 +385,8 @@ def test_max_pool_same_lower_auto_pads(): rounding_type = "floor" index_et = "i32" - data_node = ng.parameter(data.shape, name="A", dtype=np.float32) - maxpool_node = ng.max_pool( + data_node = ov.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ov.max_pool( data_node, strides, dilations, diff --git a/runtime/bindings/python/tests/test_ngraph/test_proposal.py b/runtime/bindings/python/tests/test_ngraph/test_proposal.py index 0c99934b1ba..d8ed7d1d17e 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_proposal.py +++ b/runtime/bindings/python/tests/test_ngraph/test_proposal.py @@ -2,17 +2,17 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -import ngraph as ng -from ngraph.impl import Shape, Type +import openvino.opset8 as ov +from openvino.impl import Shape, Type def test_proposal_props(): float_dtype = np.float32 batch_size = 1 post_nms_topn = 20 - probs = ng.parameter(Shape([batch_size, 8, 255, 255]), dtype=float_dtype, name="probs") - deltas = ng.parameter(Shape([batch_size, 16, 255, 255]), dtype=float_dtype, name="bbox_deltas") - im_info = ng.parameter(Shape([4]), dtype=float_dtype, name="im_info") + probs = ov.parameter(Shape([batch_size, 8, 255, 255]), dtype=float_dtype, name="probs") + deltas = ov.parameter(Shape([batch_size, 16, 255, 255]), dtype=float_dtype, name="bbox_deltas") + im_info = ov.parameter(Shape([4]), dtype=float_dtype, name="im_info") attrs = { "base_size": np.uint32(85), @@ -25,7 +25,7 @@ def test_proposal_props(): "scale": np.array([2, 3, 3, 4], 
dtype=np.float32), } - node = ng.proposal(probs, deltas, im_info, attrs) + node = ov.proposal(probs, deltas, im_info, attrs) assert node.get_type_name() == "Proposal" assert node.get_output_size() == 2 diff --git a/runtime/bindings/python/tests/test_ngraph/test_random_uniform.py b/runtime/bindings/python/tests/test_ngraph/test_random_uniform.py index cd1895e1aa9..58164aff274 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_random_uniform.py +++ b/runtime/bindings/python/tests/test_ngraph/test_random_uniform.py @@ -1,15 +1,15 @@ -import ngraph as ng +import openvino.opset8 as ov import numpy as np from tests.runtime import get_runtime def test_random_uniform(): runtime = get_runtime() - input_tensor = ng.constant(np.array([2, 4, 3], dtype=np.int32)) - min_val = ng.constant(np.array([-2.7], dtype=np.float32)) - max_val = ng.constant(np.array([3.5], dtype=np.float32)) + input_tensor = ov.constant(np.array([2, 4, 3], dtype=np.int32)) + min_val = ov.constant(np.array([-2.7], dtype=np.float32)) + max_val = ov.constant(np.array([3.5], dtype=np.float32)) - random_uniform_node = ng.random_uniform(input_tensor, min_val, max_val, + random_uniform_node = ov.random_uniform(input_tensor, min_val, max_val, output_type="f32", global_seed=7461, op_seed=1546) computation = runtime.computation(random_uniform_node) diff --git a/runtime/bindings/python/tests/test_ngraph/test_reduction.py b/runtime/bindings/python/tests/test_ngraph/test_reduction.py index 2222b98b027..12334fbe82f 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_reduction.py +++ b/runtime/bindings/python/tests/test_ngraph/test_reduction.py @@ -5,27 +5,29 @@ import numpy as np import pytest from _pyngraph import PartialShape, Dimension -import ngraph as ng -from ngraph.utils.types import make_constant_node +import openvino.opset8 as ov +from openvino.utils.types import make_constant_node from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node +from tests import 
xfail_issue_67415 + @pytest.mark.parametrize( "ng_api_helper, numpy_function, reduction_axes", [ - (ng.reduce_max, np.max, np.array([0, 1, 2, 3])), - (ng.reduce_min, np.min, np.array([0, 1, 2, 3])), - (ng.reduce_sum, np.sum, np.array([0, 1, 2, 3])), - (ng.reduce_prod, np.prod, np.array([0, 1, 2, 3])), - (ng.reduce_max, np.max, np.array([0])), - (ng.reduce_min, np.min, np.array([0])), - (ng.reduce_sum, np.sum, np.array([0])), - (ng.reduce_prod, np.prod, np.array([0])), - (ng.reduce_max, np.max, np.array([0, 2])), - (ng.reduce_min, np.min, np.array([0, 2])), - (ng.reduce_sum, np.sum, np.array([0, 2])), - (ng.reduce_prod, np.prod, np.array([0, 2])), + (ov.reduce_max, np.max, np.array([0, 1, 2, 3])), + (ov.reduce_min, np.min, np.array([0, 1, 2, 3])), + (ov.reduce_sum, np.sum, np.array([0, 1, 2, 3])), + (ov.reduce_prod, np.prod, np.array([0, 1, 2, 3])), + (ov.reduce_max, np.max, np.array([0])), + (ov.reduce_min, np.min, np.array([0])), + (ov.reduce_sum, np.sum, np.array([0])), + (ov.reduce_prod, np.prod, np.array([0])), + (ov.reduce_max, np.max, np.array([0, 2])), + (ov.reduce_min, np.min, np.array([0, 2])), + (ov.reduce_sum, np.sum, np.array([0, 2])), + (ov.reduce_prod, np.prod, np.array([0, 2])), ], ) def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes): @@ -38,15 +40,16 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes): assert np.allclose(result, expected) +@xfail_issue_67415 @pytest.mark.parametrize( "ng_api_helper, numpy_function, reduction_axes", [ - (ng.reduce_logical_and, np.logical_and.reduce, np.array([0])), - (ng.reduce_logical_or, np.logical_or.reduce, np.array([0])), - (ng.reduce_logical_and, np.logical_and.reduce, np.array([0, 2])), - (ng.reduce_logical_or, np.logical_or.reduce, np.array([0, 2])), - (ng.reduce_logical_and, np.logical_and.reduce, np.array([0, 1, 2, 3])), - (ng.reduce_logical_or, np.logical_or.reduce, np.array([0, 1, 2, 3])), + (ov.reduce_logical_and, np.logical_and.reduce, np.array([0])), + 
(ov.reduce_logical_or, np.logical_or.reduce, np.array([0])), + (ov.reduce_logical_and, np.logical_and.reduce, np.array([0, 2])), + (ov.reduce_logical_or, np.logical_or.reduce, np.array([0, 2])), + (ov.reduce_logical_and, np.logical_and.reduce, np.array([0, 1, 2, 3])), + (ov.reduce_logical_or, np.logical_or.reduce, np.array([0, 1, 2, 3])), ], ) def test_reduction_logical_ops(ng_api_helper, numpy_function, reduction_axes): @@ -61,10 +64,10 @@ def test_reduction_logical_ops(ng_api_helper, numpy_function, reduction_axes): def test_topk(): data_shape = [6, 12, 10, 24] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + data_parameter = ov.parameter(data_shape, name="Data", dtype=np.float32) K = np.int32(3) axis = np.int32(1) - node = ng.topk(data_parameter, K, axis, "max", "value") + node = ov.topk(data_parameter, K, axis, "max", "value") assert node.get_type_name() == "TopK" assert node.get_output_size() == 2 assert list(node.get_output_shape(0)) == [6, 3, 10, 24] @@ -74,9 +77,9 @@ def test_topk(): @pytest.mark.parametrize( "ng_api_helper, numpy_function, reduction_axes", [ - (ng.reduce_mean, np.mean, np.array([0, 1, 2, 3])), - (ng.reduce_mean, np.mean, np.array([0])), - (ng.reduce_mean, np.mean, np.array([0, 2])), + (ov.reduce_mean, np.mean, np.array([0, 1, 2, 3])), + (ov.reduce_mean, np.mean, np.array([0])), + (ov.reduce_mean, np.mean, np.array([0, 2])), ], ) def test_reduce_mean_op(ng_api_helper, numpy_function, reduction_axes): @@ -93,10 +96,10 @@ def test_non_max_suppression(): boxes_shape = [1, 1000, 4] scores_shape = [1, 1, 1000] - boxes_parameter = ng.parameter(boxes_shape, name="Boxes", dtype=np.float32) - scores_parameter = ng.parameter(scores_shape, name="Scores", dtype=np.float32) + boxes_parameter = ov.parameter(boxes_shape, name="Boxes", dtype=np.float32) + scores_parameter = ov.parameter(scores_shape, name="Scores", dtype=np.float32) - node = ng.non_max_suppression(boxes_parameter, scores_parameter, make_constant_node(1000, 
np.int64)) + node = ov.non_max_suppression(boxes_parameter, scores_parameter, make_constant_node(1000, np.int64)) assert node.get_type_name() == "NonMaxSuppression" assert node.get_output_size() == 3 @@ -109,9 +112,9 @@ def test_non_zero(): data_shape = [3, 10, 100, 200] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + data_parameter = ov.parameter(data_shape, name="Data", dtype=np.float32) - node = ng.non_zero(data_parameter) + node = ov.non_zero(data_parameter) assert node.get_type_name() == "NonZero" assert node.get_output_size() == 1 @@ -124,16 +127,16 @@ def test_roi_align(): batch_indices = [1000] expected_shape = [1000, 256, 6, 6] - data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) - rois_parameter = ng.parameter(rois, name="Rois", dtype=np.float32) - batch_indices_parameter = ng.parameter(batch_indices, name="Batch_indices", dtype=np.int32) + data_parameter = ov.parameter(data_shape, name="Data", dtype=np.float32) + rois_parameter = ov.parameter(rois, name="Rois", dtype=np.float32) + batch_indices_parameter = ov.parameter(batch_indices, name="Batch_indices", dtype=np.int32) pooled_h = 6 pooled_w = 6 sampling_ratio = 2 spatial_scale = np.float32(16) mode = "avg" - node = ng.roi_align( + node = ov.roi_align( data_parameter, rois_parameter, batch_indices_parameter, @@ -162,7 +165,7 @@ def test_cum_sum(input_shape, cumsum_axis, reverse): expected = np.cumsum(input_data, axis=cumsum_axis) runtime = get_runtime() - node = ng.cum_sum(input_data, cumsum_axis, reverse=reverse) + node = ov.cum_sum(input_data, cumsum_axis, reverse=reverse) computation = runtime.computation(node) result = computation() assert np.allclose(result, expected) @@ -177,7 +180,7 @@ def test_normalize_l2(): eps_mode = "add" runtime = get_runtime() - node = ng.normalize_l2(input_data, axes, eps, eps_mode) + node = ov.normalize_l2(input_data, axes, eps, eps_mode) computation = runtime.computation(node) result = computation() diff --git 
a/runtime/bindings/python/tests/test_ngraph/test_roll.py b/runtime/bindings/python/tests/test_ngraph/test_roll.py index 877e22d098e..425b393c2cf 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_roll.py +++ b/runtime/bindings/python/tests/test_ngraph/test_roll.py @@ -1,4 +1,4 @@ -import ngraph as ng +import openvino.opset8 as ov import numpy as np from tests.runtime import get_runtime @@ -6,11 +6,11 @@ from tests.runtime import get_runtime def test_roll(): runtime = get_runtime() input = np.reshape(np.arange(10), (2, 5)) - input_tensor = ng.constant(input) - input_shift = ng.constant(np.array([-10, 7], dtype=np.int32)) - input_axes = ng.constant(np.array([-1, 0], dtype=np.int32)) + input_tensor = ov.constant(input) + input_shift = ov.constant(np.array([-10, 7], dtype=np.int32)) + input_axes = ov.constant(np.array([-1, 0], dtype=np.int32)) - roll_node = ng.roll(input_tensor, input_shift, input_axes) + roll_node = ov.roll(input_tensor, input_shift, input_axes) computation = runtime.computation(roll_node) roll_results = computation() expected_results = np.roll(input, shift=(-10, 7), axis=(-1, 0)) diff --git a/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py b/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py index d3884763246..2fba3c8a9f6 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py +++ b/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py @@ -3,7 +3,7 @@ import numpy as np -import ngraph as ng +import openvino.opset8 as ov from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node from tests import (xfail_issue_47337, @@ -12,8 +12,8 @@ from tests import (xfail_issue_47337, def test_onehot(): runtime = get_runtime() - param = ng.parameter([3], dtype=np.int32) - model = ng.one_hot(param, 3, 1, 0, 0) + param = ov.parameter([3], dtype=np.int32) + model = ov.one_hot(param, 3, 1, 0, 0) computation = runtime.computation(model, param) expected 
= np.eye(3)[np.array([1, 0, 2])] @@ -31,7 +31,7 @@ def test_one_hot(): axis = -1 excepted = [[5, 10], [10, 5], [10, 10]] - result = run_op_node([data, depth, on_value, off_value], ng.one_hot, axis) + result = run_op_node([data, depth, on_value, off_value], ov.one_hot, axis) assert np.allclose(result, excepted) @@ -41,5 +41,5 @@ def test_range(): stop = 35 step = 5 - result = run_op_node([start, stop, step], ng.range) + result = run_op_node([start, stop, step], ov.range) assert np.allclose(result, [5, 10, 15, 20, 25, 30]) diff --git a/runtime/bindings/python/tests/test_ngraph/test_swish.py b/runtime/bindings/python/tests/test_ngraph/test_swish.py index 17b418beaca..338cf4fcf33 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_swish.py +++ b/runtime/bindings/python/tests/test_ngraph/test_swish.py @@ -2,16 +2,16 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -import ngraph as ng -from ngraph.impl import Shape, Type +import openvino.opset8 as ov +from openvino.impl import Shape, Type def test_swish_props_with_beta(): float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - beta = ng.parameter(Shape([]), dtype=float_dtype, name="beta") + data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + beta = ov.parameter(Shape([]), dtype=float_dtype, name="beta") - node = ng.swish(data, beta) + node = ov.swish(data, beta) assert node.get_type_name() == "Swish" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 10] @@ -20,9 +20,9 @@ def test_swish_props_with_beta(): def test_swish_props_without_beta(): float_dtype = np.float32 - data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") - node = ng.swish(data) + node = ov.swish(data) assert node.get_type_name() == "Swish" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 10] diff --git 
a/runtime/bindings/python/tests/test_ngraph/test_utils.py b/runtime/bindings/python/tests/test_ngraph/test_utils.py index 49b90017305..31a89b7894e 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_utils.py +++ b/runtime/bindings/python/tests/test_ngraph/test_utils.py @@ -2,17 +2,17 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -import ngraph as ng -from ngraph.impl import Shape +import openvino as ov +from openvino.impl import Shape def test_get_constant_from_source_success(): dtype = np.int - input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1") - input2 = ng.parameter(Shape([25]), dtype=dtype, name="input_2") - shape_of = ng.shape_of(input2, name="shape_of") - reshape = ng.reshape(input1, shape_of, special_zero=True) - folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) + input1 = ov.opset8.parameter(Shape([5, 5]), dtype=dtype, name="input_1") + input2 = ov.opset8.parameter(Shape([25]), dtype=dtype, name="input_2") + shape_of = ov.opset8.shape_of(input2, name="shape_of") + reshape = ov.opset8.reshape(input1, shape_of, special_zero=True) + folded_const = ov.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) assert folded_const is not None assert folded_const.get_vector() == [25] @@ -20,9 +20,9 @@ def test_get_constant_from_source_success(): def test_get_constant_from_source_failed(): dtype = np.int - input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1") - input2 = ng.parameter(Shape([1]), dtype=dtype, name="input_2") - reshape = ng.reshape(input1, input2, special_zero=True) - folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) + input1 = ov.opset8.parameter(Shape([5, 5]), dtype=dtype, name="input_1") + input2 = ov.opset8.parameter(Shape([1]), dtype=dtype, name="input_2") + reshape = ov.opset8.reshape(input1, input2, special_zero=True) + folded_const = 
ov.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) assert folded_const is None diff --git a/runtime/bindings/python/tests/test_ngraph/util.py b/runtime/bindings/python/tests/test_ngraph/util.py index 18fe01fc18e..24674b91868 100644 --- a/runtime/bindings/python/tests/test_ngraph/util.py +++ b/runtime/bindings/python/tests/test_ngraph/util.py @@ -5,8 +5,8 @@ from typing import Any, Callable, List, Union import numpy as np -import ngraph as ng -from ngraph.utils.types import NumericData +import openvino.opset8 as ov +from openvino.utils.types import NumericData from tests.runtime import get_runtime from string import ascii_uppercase @@ -37,9 +37,9 @@ def run_op_node(input_data, op_fun, *args): for idx, data in enumerate(input_data): node = None if np.isscalar(data): - node = ng.parameter([], name=ascii_uppercase[idx], dtype=_get_numpy_dtype(data)) + node = ov.parameter([], name=ascii_uppercase[idx], dtype=_get_numpy_dtype(data)) else: - node = ng.parameter(data.shape, name=ascii_uppercase[idx], dtype=data.dtype) + node = ov.parameter(data.shape, name=ascii_uppercase[idx], dtype=data.dtype) op_fun_args.append(node) comp_args.append(node) comp_inputs.append(data) diff --git a/runtime/bindings/python/tests/test_onnx/test_backend.py b/runtime/bindings/python/tests/test_onnx/test_backend.py index 6fd59dd924c..b92e64dd71e 100644 --- a/runtime/bindings/python/tests/test_onnx/test_backend.py +++ b/runtime/bindings/python/tests/test_onnx/test_backend.py @@ -7,6 +7,7 @@ import onnx.backend.test from tests import ( BACKEND_NAME, skip_rng_tests, + xfail_issue_67415, xfail_issue_33488, xfail_issue_33538, xfail_issue_33581, @@ -114,6 +115,46 @@ OnnxBackendPyTorchConvertedModelTest = None globals().update(backend_test.enable_report().test_cases) tests_expected_to_fail = [ + ( + xfail_issue_67415, + "OnnxBackendNodeModelTest.test_and2d_cpu", + "OnnxBackendNodeModelTest.test_and3d_cpu", + "OnnxBackendNodeModelTest.test_and4d_cpu", + 
"OnnxBackendNodeModelTest.test_and_bcast3v1d_cpu", + "OnnxBackendNodeModelTest.test_and_bcast3v2d_cpu", + "OnnxBackendNodeModelTest.test_and_bcast4v2d_cpu", + "OnnxBackendNodeModelTest.test_and_bcast4v3d_cpu", + "OnnxBackendNodeModelTest.test_and_bcast4v4d_cpu", + "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu", + "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_DOUBLE_expanded_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_FLOAT_expanded_cpu", + "OnnxBackendNodeModelTest.test_if_cpu", + "OnnxBackendNodeModelTest.test_max_float16_cpu", + "OnnxBackendNodeModelTest.test_min_float16_cpu", + "OnnxBackendNodeModelTest.test_mod_mixed_sign_float16_cpu", + "OnnxBackendNodeModelTest.test_not_2d_cpu", + "OnnxBackendNodeModelTest.test_not_3d_cpu", + "OnnxBackendNodeModelTest.test_not_4d_cpu", + "OnnxBackendNodeModelTest.test_or2d_cpu", + "OnnxBackendNodeModelTest.test_or3d_cpu", + "OnnxBackendNodeModelTest.test_or4d_cpu", + "OnnxBackendNodeModelTest.test_or_bcast3v1d_cpu", + "OnnxBackendNodeModelTest.test_or_bcast3v2d_cpu", + "OnnxBackendNodeModelTest.test_or_bcast4v2d_cpu", + "OnnxBackendNodeModelTest.test_or_bcast4v3d_cpu", + "OnnxBackendNodeModelTest.test_or_bcast4v4d_cpu", + "OnnxBackendNodeModelTest.test_where_example_cpu", + "OnnxBackendNodeModelTest.test_where_long_example_cpu", + "OnnxBackendNodeModelTest.test_xor2d_cpu", + "OnnxBackendNodeModelTest.test_xor3d_cpu", + "OnnxBackendNodeModelTest.test_xor4d_cpu", + "OnnxBackendNodeModelTest.test_xor_bcast3v1d_cpu", + "OnnxBackendNodeModelTest.test_xor_bcast3v2d_cpu", + "OnnxBackendNodeModelTest.test_xor_bcast4v2d_cpu", + "OnnxBackendNodeModelTest.test_xor_bcast4v3d_cpu", + "OnnxBackendNodeModelTest.test_xor_bcast4v4d_cpu", + ), ( xfail_issue_49207, "OnnxBackendNodeModelTest.test_rnn_seq_length_cpu", diff --git a/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py 
b/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py index 243119a23f7..cbeb316c79a 100644 --- a/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py +++ b/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py @@ -4,18 +4,17 @@ import os import numpy as np -import ngraph as ng -from openvino.inference_engine import IECore +from openvino import Core from tests.runtime import get_runtime def test_import_onnx_with_external_data(): model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx") - ie = IECore() - ie_network = ie.read_network(model=model_path) + ie = Core() + network = ie.read_network(model=model_path) - ng_function = ng.function_from_cnn(ie_network) + ng_function = network.get_function() dtype = np.float32 value_a = np.array([1.0, 3.0, 5.0], dtype=dtype) diff --git a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py index 6c2c5d41094..2886ff592f3 100644 --- a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py +++ b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py @@ -4,10 +4,9 @@ import os import numpy as np -import ngraph as ng import onnx from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info -from openvino.inference_engine import IECore +from openvino import Core from tests.runtime import get_runtime from tests.test_onnx.utils.onnx_helpers import import_onnx_model @@ -15,10 +14,10 @@ from tests.test_onnx.utils.onnx_helpers import import_onnx_model def test_import_onnx_function(): model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx") - ie = IECore() - ie_network = ie.read_network(model=model_path) + ie = Core() + network = ie.read_network(model=model_path) - ng_function = ng.function_from_cnn(ie_network) + ng_function = network.get_function() dtype = np.float32 value_a = np.array([1.0], dtype=dtype) diff --git 
a/runtime/bindings/python/tests/test_onnx/test_ops_binary.py b/runtime/bindings/python/tests/test_onnx/test_ops_binary.py index a0c1747e96b..ea0d3a6bb99 100644 --- a/runtime/bindings/python/tests/test_onnx/test_ops_binary.py +++ b/runtime/bindings/python/tests/test_onnx/test_ops_binary.py @@ -22,6 +22,7 @@ def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **no graph = make_graph([onnx_node], "compute_graph", input_tensors, output_tensors) model = make_model(graph, producer_name="ngraph ONNX Importer") model.opset_import[0].version = opset + inputs = [i.astype(np.float32) for i in inputs] # WA for new Python API return run_model(model, inputs)[0] diff --git a/runtime/bindings/python/tests/test_onnx/test_ops_logical.py b/runtime/bindings/python/tests/test_onnx/test_ops_logical.py index 150d5067358..aabd4304277 100644 --- a/runtime/bindings/python/tests/test_onnx/test_ops_logical.py +++ b/runtime/bindings/python/tests/test_onnx/test_ops_logical.py @@ -7,13 +7,15 @@ import pytest from tests.test_onnx.utils import run_node +from tests import xfail_issue_67415 + @pytest.mark.parametrize( "onnx_op, numpy_func, data_type", [ - pytest.param("And", np.logical_and, np.bool), - pytest.param("Or", np.logical_or, np.bool), - pytest.param("Xor", np.logical_xor, np.bool), + pytest.param("And", np.logical_and, np.bool, marks=xfail_issue_67415), + pytest.param("Or", np.logical_or, np.bool, marks=xfail_issue_67415), + pytest.param("Xor", np.logical_xor, np.bool, marks=xfail_issue_67415), pytest.param("Equal", np.equal, np.int32), pytest.param("Greater", np.greater, np.int32), pytest.param("Less", np.less, np.int32), @@ -35,6 +37,7 @@ def test_logical(onnx_op, numpy_func, data_type): assert np.array_equal(ng_results, [expected_output]) +@xfail_issue_67415 def test_logical_not(): input_data = np.array([[False, True, True], [False, True, False], [False, False, True]]) expected_output = np.logical_not(input_data) diff --git 
a/runtime/bindings/python/tests/test_onnx/test_ops_matmul.py b/runtime/bindings/python/tests/test_onnx/test_ops_matmul.py index b6ed1cd1d56..bdd8281dc55 100644 --- a/runtime/bindings/python/tests/test_onnx/test_ops_matmul.py +++ b/runtime/bindings/python/tests/test_onnx/test_ops_matmul.py @@ -27,8 +27,8 @@ def make_onnx_model_for_matmul_op(input_left, input_right): def import_and_compute_matmul(input_left, input_right): - input_data_left = np.array(input_left) - input_data_right = np.array(input_right) + input_data_left = np.array(input_left).astype(np.float32) + input_data_right = np.array(input_right).astype(np.float32) onnx_model = make_onnx_model_for_matmul_op(input_data_left, input_data_right) transformer = get_runtime() ng_model_function = import_onnx_model(onnx_model) @@ -37,7 +37,10 @@ def import_and_compute_matmul(input_left, input_right): def numpy_gemm(input_a, input_b, input_c, alpha=1, beta=1, trans_a=False, trans_b=False, broadcast=False): - input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c) + input_a = np.array(input_a).astype(np.float32) + input_b = np.array(input_b).astype(np.float32) + input_c = np.array(input_c).astype(np.float32) + if trans_a: input_a = input_a.T if trans_b: @@ -71,7 +74,9 @@ def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs): def import_and_compute_gemm(input_a, input_b, input_c, **kwargs): - input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c) + input_a = np.array(input_a).astype(np.float32) + input_b = np.array(input_b).astype(np.float32) + input_c = np.array(input_c).astype(np.float32) if kwargs.get("trans_a"): kwargs["transA"] = kwargs["trans_a"] diff --git a/runtime/bindings/python/tests/test_onnx/test_ops_unary.py b/runtime/bindings/python/tests/test_onnx/test_ops_unary.py index d7b12dd2227..041466663d2 100644 --- a/runtime/bindings/python/tests/test_onnx/test_ops_unary.py +++ b/runtime/bindings/python/tests/test_onnx/test_ops_unary.py @@ 
-7,10 +7,12 @@ import onnx.mapping import pytest from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info -from ngraph.exceptions import NgraphTypeError +from openvino.exceptions import NgraphTypeError from tests.runtime import get_runtime from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node +from tests import skip_issue_67415 + @pytest.mark.parametrize( "input_data", @@ -331,6 +333,7 @@ def test_cast_to_bool(val_type, input_data): assert np.allclose(result, expected) +@skip_issue_67415 @pytest.mark.parametrize( "val_type, range_start, range_end, in_dtype", [ diff --git a/runtime/bindings/python/tests/test_onnx/test_zoo_models.py b/runtime/bindings/python/tests/test_onnx/test_zoo_models.py index e2eb84f2060..2042d7a0f6d 100644 --- a/runtime/bindings/python/tests/test_onnx/test_zoo_models.py +++ b/runtime/bindings/python/tests/test_onnx/test_zoo_models.py @@ -11,6 +11,7 @@ from tests.test_onnx.utils import OpenVinoOnnxBackend from tests.test_onnx.utils.model_importer import ModelImportRunner from tests import ( + xfail_issue_67415, xfail_issue_38701, xfail_issue_45457, xfail_issue_37957, @@ -157,6 +158,12 @@ if len(zoo_models) > 0: test_cases = backend_test.test_cases["OnnxBackendModelExecutionTest"] if tests.MODEL_ZOO_XFAIL: execution_xfail_list = [ + # New Python API - fp16 blob + (xfail_issue_67415, "test_MSFT_opset7_fp16_inception_v1_onnxzoo_lotus_inception_v1_cpu"), + (xfail_issue_67415, "test_MSFT_opset7_fp16_shufflenet_onnxzoo_lotus_shufflenet_cpu"), + (xfail_issue_67415, "test_MSFT_opset8_fp16_inception_v1_onnxzoo_lotus_inception_v1_cpu"), + (xfail_issue_67415, "test_MSFT_opset8_fp16_shufflenet_onnxzoo_lotus_shufflenet_cpu"), + # ONNX Model Zoo (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_encoder_12_t5_encoder_cpu"), (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_decoder_with_lm_head_12_t5_decoder_with_lm_head_cpu"), diff --git 
a/runtime/bindings/python/tests/test_onnx/utils/onnx_backend.py b/runtime/bindings/python/tests/test_onnx/utils/onnx_backend.py index 20be8c9ccef..4a1460c755d 100644 --- a/runtime/bindings/python/tests/test_onnx/utils/onnx_backend.py +++ b/runtime/bindings/python/tests/test_onnx/utils/onnx_backend.py @@ -15,7 +15,7 @@ import onnx from onnx.backend.base import Backend, BackendRep from onnx.helper import make_graph, make_model, make_tensor_value_info -from ngraph.impl import Function +from openvino import Function from tests.runtime import get_runtime from tests.test_onnx.utils.onnx_helpers import import_onnx_model, np_dtype_to_tensor_type diff --git a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py index 6e4a9d99f23..53c5487d314 100644 --- a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py +++ b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py @@ -4,10 +4,9 @@ import numpy as np import onnx from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE -from openvino.inference_engine import IECore -import ngraph as ng -from ngraph.impl import Function +from openvino import Core, Blob, TensorDesc +from openvino.impl import Function def np_dtype_to_tensor_type(data_type: np.dtype) -> int: @@ -23,8 +22,8 @@ def import_onnx_model(model: onnx.ModelProto) -> Function: onnx.checker.check_model(model) model_byte_string = model.SerializeToString() - ie = IECore() - ie_network = ie.read_network(model=model_byte_string, weights=b"", init_from_buffer=True) + ie = Core() + ie_network = ie.read_network(bytes(model_byte_string), Blob(TensorDesc("U8", [], "C"))) - ng_function = ng.function_from_cnn(ie_network) + ng_function = ie_network.get_function() return ng_function diff --git a/runtime/bindings/python/tests_compatibility/__init__.py b/runtime/bindings/python/tests_compatibility/__init__.py new file mode 100644 index 00000000000..13fdeb51ebb --- /dev/null +++ 
b/runtime/bindings/python/tests_compatibility/__init__.py @@ -0,0 +1,152 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +# test.BACKEND_NAME is a configuration variable determining which +# nGraph backend tests will use. It's set during pytest configuration time. +# See `pytest_configure` hook in `conftest.py` for more details. +BACKEND_NAME = None + +# test.MODEL_ZOO_DIR is a configuration variable providing the path +# to the ZOO of ONNX models to test. It's set during pytest configuration time. +# See `pytest_configure` hook in `conftest.py` for more +# details. +MODEL_ZOO_DIR = None + +# test.MODEL_ZOO_XFAIL is a configuration variable which enable xfails for model zoo. +MODEL_ZOO_XFAIL = False + + +def xfail_test(reason="Mark the test as expected to fail", strict=True): + return pytest.mark.xfail(reason=reason, strict=strict) + + +skip_segfault = pytest.mark.skip(reason="Segmentation fault error") +xfail_issue_33488 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "MaxUnpool") +xfail_issue_33538 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "Scan") +skip_issue_38084 = pytest.mark.skip(reason="Aborted (core dumped) Assertion " + "`(layer->get_output_partial_shape(i).is_static())' failed.") +xfail_issue_33589 = xfail_test(reason="nGraph does not support the following ONNX operations: " + "IsNaN and isInf") +xfail_issue_33595 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "Unique") +xfail_issue_33596 = xfail_test(reason="RuntimeError: nGraph does not support different sequence operations: " + "ConcatFromSequence, SequenceConstruct, SequenceAt, SplitToSequence, " + "SequenceEmpty, SequenceInsert, SequenceErase, SequenceLength ") +xfail_issue_33606 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "Det") +xfail_issue_33651 = 
xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "TfIdfVectorizer") +xfail_issue_33581 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "GatherElements") +xfail_issue_33633 = xfail_test(reason="MaxPool: dilations unsupported") +xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported") +xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable") +xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted " + "to HardSigmoid_IE") +xfail_issue_38084 = xfail_test(reason="RuntimeError: AssertionFailed: layer->get_output_partial_shape(i)." + "is_static() nGraph operation with name: cannot be " + "converted to layer with name: because output " + "with index 0 contains dynamic shapes: {}. Try to use " + "CNNNetwork::reshape() method in order to specialize shapes " + "before the conversion.") +xfail_issue_38091 = xfail_test(reason="AssertionError: Mismatched elements") +xfail_issue_38699 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "ai.onnx.preview.training.Gradient") +xfail_issue_38701 = xfail_test(reason="RuntimeError: unsupported element type: STRING") +xfail_issue_38706 = xfail_test(reason="RuntimeError: output_3.0 has zero dimension which is not allowed") +xfail_issue_38708 = xfail_test(reason="RuntimeError: While validating ONNX node '': " + "Axes input must be constant") +xfail_issue_38710 = xfail_test(reason="RuntimeError: data has zero dimension which is not allowed") +xfail_issue_38713 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: " + "ai.onnx.preview.training.Momentum") +xfail_issue_45457 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v5::Loop " + "Not constant termination condition body output is not supported") +xfail_issue_38722 = 
xfail_test(reason="RuntimeError: While validating ONNX nodes MatMulInteger " + "and QLinearMatMul " + "Input0 scale and input0 zero point shape must be same and 1") +xfail_issue_38724 = xfail_test(reason="RuntimeError: While validating ONNX node '': " + "tf_crop_and_resize - this type of coordinate transformation mode " + "is not supported. Choose one of the following modes: " + "tf_half_pixel_for_nn, asymmetric, align_corners, pytorch_half_pixel, " + "half_pixel") +xfail_issue_38725 = xfail_test(reason="RuntimeError: While validating ONNX node 'get_input_partial_shape(i), autob)' failed at " + "/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:") +xfail_issue_39656 = xfail_test(reason="RuntimeError: Reshape reshaped has dynamic second input!") +xfail_issue_39658 = xfail_test(reason="RuntimeError: Tile operation has a form that is not supported." + " z should be converted to TileIE operation.") +xfail_issue_39659 = xfail_test(reason="RuntimeError: Broadcast operation has a form that is not supported." 
+ " y should be converted to Tile operation.") +xfail_issue_45344 = xfail_test(reason="Unsupported dynamic ops: v3::NonMaxSuppressionIE3") +xfail_issue_39662 = xfail_test(reason="RuntimeError: 'ScatterElementsUpdate' layer with name 'y' have " + "indices value that points to non-existing output tensor element") + + +xfail_issue_37973 = xfail_test(reason="TF Inception V2 - AssertionError: zoo models results mismatch") +xfail_issue_47430 = xfail_test(reason="FCN ResNet models - AssertionError: zoo models results mismatch") +xfail_issue_47495 = xfail_test(reason="BertSquad-10 from MSFT - AssertionError: zoo models results mismatch") +xfail_issue_49207 = xfail_test(reason="Function references undeclared parameters") +xfail_issue_48145 = xfail_test(reason="BertSquad-8 - AssertionError: Items are not equal: ACTUAL: 4 " + "DESIRED: 3") +xfail_issue_48190 = xfail_test(reason="RobertaBase-11 - AssertionError: Items are not equal: " + "ACTUAL: dtype('float64') DESIRED: dtype('float32')") +xfail_issue_49750 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v4::Interpolate") +xfail_issue_49752 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::Pad") +xfail_issue_49753 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::StridedSlice") +xfail_issue_49754 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::TopKIE") +xfail_issue_52463 = xfail_test(reason="test_operator_add_size1_singleton_broadcast_cpu - " + "Not equal to tolerance") +xfail_issue_58033 = xfail_test(reason="Einsum operation misses support for complex ellipsis equations") +xfail_issue_58676 = xfail_test(reason="AssertionError: Not equal to tolerance rtol=0.001, atol=1e-07") +xfail_issue_onnx_models_140 = xfail_test(reason="https://github.com/onnx/models/issues/140") +xfail_issue_54630 = xfail_test(reason="Gather with negative indices is not yet implemented on CPU") + +xfail_issue_63033 = xfail_test(reason="BatchNormalization: Training mode is not supported") 
+xfail_issue_63036 = xfail_test(reason="Changes in ConvTranspose padding") +xfail_issue_63039 = xfail_test(reason="Result mismatches with UINT8 operations") +xfail_issue_63043 = xfail_test(reason="Recurrent node expects constants as W, R, B inputs.") +xfail_issue_63044 = xfail_test(reason="ONNX opset 14 operation: Trilu") +xfail_issue_63045 = xfail_test(reason="Maxpool with strides, padding and dilations fail") + +skip_rng_tests = pytest.mark.skip(reason="Tests use random number generator with no seed.") +xfail_issue_63136 = xfail_test(reason="Unsupported operation: CastLike") +xfail_issue_63137 = xfail_test(reason="Unsupported operations: OptionalHasElement, OptionalGetElement") +xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support") +xfail_issue_63643 = xfail_test(reason="RuntimeError: Unsupported operation of type: Convolution name") diff --git a/runtime/bindings/python/tests_compatibility/conftest.py b/runtime/bindings/python/tests_compatibility/conftest.py new file mode 100644 index 00000000000..95bf61715f8 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/conftest.py @@ -0,0 +1,114 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import pytest + +import tests_compatibility + +from pathlib import Path + + +def image_path(): + path_to_repo = os.environ["DATA_PATH"] + path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp") + return path_to_img + + +def model_path(is_myriad=False): + path_to_repo = os.environ["MODELS_PATH"] + if not is_myriad: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") + else: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") + return (test_xml, test_bin) + + +def model_onnx_path(): + 
path_to_repo = os.environ["MODELS_PATH"] + test_onnx = os.path.join(path_to_repo, "models", "test_model", "test_model.onnx") + return test_onnx + + +def plugins_path(): + path_to_repo = os.environ["DATA_PATH"] + plugins_xml = os.path.join(path_to_repo, "ie_class", "plugins.xml") + plugins_win_xml = os.path.join(path_to_repo, "ie_class", "plugins_win.xml") + plugins_osx_xml = os.path.join(path_to_repo, "ie_class", "plugins_apple.xml") + return (plugins_xml, plugins_win_xml, plugins_osx_xml) + + +def _get_default_model_zoo_dir(): + return Path(os.getenv("ONNX_HOME", Path.home() / ".onnx/model_zoo")) + + +def pytest_addoption(parser): + parser.addoption( + "--backend", + default="CPU", + choices=["CPU", "GPU", "HDDL", "MYRIAD", "HETERO", "TEMPLATE"], + help="Select target device", + ) + parser.addoption( + "--model_zoo_dir", + default=_get_default_model_zoo_dir(), + type=str, + help="location of the model zoo", + ) + parser.addoption( + "--model_zoo_xfail", + action="store_true", + help="treat model zoo known issues as xfails instead of failures", + ) + + +def pytest_configure(config): + backend_name = config.getvalue("backend") + tests_compatibility.BACKEND_NAME = backend_name + tests_compatibility.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir")) + tests_compatibility.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail") + + # register additional markers + config.addinivalue_line("markers", "skip_on_cpu: Skip test on CPU") + config.addinivalue_line("markers", "skip_on_gpu: Skip test on GPU") + config.addinivalue_line("markers", "skip_on_hddl: Skip test on HDDL") + config.addinivalue_line("markers", "skip_on_myriad: Skip test on MYRIAD") + config.addinivalue_line("markers", "skip_on_hetero: Skip test on HETERO") + config.addinivalue_line("markers", "skip_on_template: Skip test on TEMPLATE") + config.addinivalue_line("markers", "onnx_coverage: Collect ONNX operator coverage") + + +def pytest_collection_modifyitems(config, items): + backend_name = 
config.getvalue("backend") + tests_compatibility.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir")) + tests_compatibility.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail") + + keywords = { + "CPU": "skip_on_cpu", + "GPU": "skip_on_gpu", + "HDDL": "skip_on_hddl", + "MYRIAD": "skip_on_myriad", + "HETERO": "skip_on_hetero", + "TEMPLATE": "skip_on_template", + } + + skip_markers = { + "CPU": pytest.mark.skip(reason="Skipping test on the CPU backend."), + "GPU": pytest.mark.skip(reason="Skipping test on the GPU backend."), + "HDDL": pytest.mark.skip(reason="Skipping test on the HDDL backend."), + "MYRIAD": pytest.mark.skip(reason="Skipping test on the MYRIAD backend."), + "HETERO": pytest.mark.skip(reason="Skipping test on the HETERO backend."), + "TEMPLATE": pytest.mark.skip(reason="Skipping test on the TEMPLATE backend."), + } + + for item in items: + skip_this_backend = keywords[backend_name] + if skip_this_backend in item.keywords: + item.add_marker(skip_markers[backend_name]) + + +@pytest.fixture(scope="session") +def device(): + return os.environ.get("TEST_DEVICE") if os.environ.get("TEST_DEVICE") else "CPU" diff --git a/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/CMakeLists.txt b/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/CMakeLists.txt new file mode 100644 index 00000000000..ac793895ad1 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/CMakeLists.txt @@ -0,0 +1,24 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_FE_NAME "mock_py_ngraph_frontend") + +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) +file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) + +source_group("src" FILES ${LIBRARY_SRC}) +source_group("include" FILES ${LIBRARY_HEADERS}) + +# Create shared library +add_library(${TARGET_FE_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS}) + 
+target_include_directories(${TARGET_FE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) + +target_link_libraries(${TARGET_FE_NAME} PRIVATE frontend_manager::static) + +add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) + +install(TARGETS ${TARGET_FE_NAME} + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT tests_compatibility EXCLUDE_FROM_ALL + LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT tests_compatibility EXCLUDE_FROM_ALL) diff --git a/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp b/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp new file mode 100644 index 00000000000..dd760e26246 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "mock_py_frontend.hpp" + +#include "frontend_manager/frontend_manager.hpp" +#include "frontend_manager/frontend_manager_defs.hpp" + +using namespace ngraph; +using namespace ngraph::frontend; + +extern "C" MOCK_API FrontEndVersion GetAPIVersion() { + return OV_FRONTEND_API_VERSION; +} + +extern "C" MOCK_API void* GetFrontEndData() { + FrontEndPluginInfo* res = new FrontEndPluginInfo(); + res->m_name = "mock_py"; + res->m_creator = []() { + return std::make_shared(); + }; + + return res; +} diff --git a/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp b/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp new file mode 100644 index 00000000000..3143aa96fda --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp @@ -0,0 +1,665 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "frontend_manager/frontend_manager.hpp" 
+#include "frontend_manager/frontend_manager_defs.hpp" +#include "ngraph/visibility.hpp" + +// Defined if we are building the plugin DLL (instead of using it) +#ifdef mock_py_ngraph_frontend_EXPORTS +# define MOCK_API NGRAPH_HELPER_DLL_EXPORT +#else +# define MOCK_API NGRAPH_HELPER_DLL_IMPORT +#endif // mock1_ngraph_frontend_EXPORTS + +// OK to have 'using' in mock header + +using namespace ngraph; +using namespace ngraph::frontend; + +//////////////////////////////// + +struct MOCK_API PlaceStat { + int m_get_names = 0; + int m_get_consuming_operations = 0; + int m_get_target_tensor = 0; + int m_get_producing_operation = 0; + int m_get_producing_port = 0; + int m_get_input_port = 0; + int m_get_output_port = 0; + int m_get_consuming_ports = 0; + int m_is_input = 0; + int m_is_output = 0; + int m_is_equal = 0; + int m_is_equal_data = 0; + int m_get_source_tensor = 0; + + // Arguments tracking + std::string m_lastArgString; + int m_lastArgInt; + Place::Ptr m_lastArgPlace = nullptr; + + // Getters + int get_names() const { + return m_get_names; + } + int get_consuming_operations() const { + return m_get_consuming_operations; + } + int get_target_tensor() const { + return m_get_target_tensor; + } + int get_producing_operation() const { + return m_get_producing_operation; + } + int get_producing_port() const { + return m_get_producing_port; + } + int get_input_port() const { + return m_get_input_port; + } + int get_output_port() const { + return m_get_output_port; + } + int get_consuming_ports() const { + return m_get_consuming_ports; + } + int is_input() const { + return m_is_input; + } + int is_output() const { + return m_is_output; + } + int is_equal() const { + return m_is_equal; + } + int is_equal_data() const { + return m_is_equal_data; + } + int get_source_tensor() const { + return m_get_source_tensor; + } + + // Arguments getters + std::string get_lastArgString() const { + return m_lastArgString; + } + int get_lastArgInt() const { + return m_lastArgInt; + } + 
Place::Ptr get_lastArgPlace() const { + return m_lastArgPlace; + } +}; + +class MOCK_API PlaceMockPy : public Place { + mutable PlaceStat m_stat; + +public: + std::vector get_names() const override { + m_stat.m_get_names++; + return {}; + } + + std::vector get_consuming_operations() const override { + m_stat.m_get_consuming_operations++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = ""; + return {std::make_shared()}; + } + + std::vector get_consuming_operations(int outputPortIndex) const override { + m_stat.m_get_consuming_operations++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = ""; + return {std::make_shared()}; + } + + std::vector get_consuming_operations(const std::string& outputName) const override { + m_stat.m_get_consuming_operations++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = outputName; + return {std::make_shared()}; + } + + std::vector get_consuming_operations(const std::string& outputName, + int outputPortIndex) const override { + m_stat.m_get_consuming_operations++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = outputName; + return {std::make_shared()}; + } + + Place::Ptr get_target_tensor() const override { + m_stat.m_get_target_tensor++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_target_tensor(int outputPortIndex) const override { + m_stat.m_get_target_tensor++; + m_stat.m_lastArgInt = outputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_target_tensor(const std::string& outputName) const override { + m_stat.m_get_target_tensor++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = outputName; + return {std::make_shared()}; + } + + Place::Ptr get_target_tensor(const std::string& outputName, int outputPortIndex) const override { + m_stat.m_get_target_tensor++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = outputName; + return {std::make_shared()}; + } + + Place::Ptr get_producing_operation() const override { 
+ m_stat.m_get_producing_operation++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_producing_operation(int inputPortIndex) const override { + m_stat.m_get_producing_operation++; + m_stat.m_lastArgInt = inputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_producing_operation(const std::string& inputName) const override { + m_stat.m_get_producing_operation++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = inputName; + return {std::make_shared()}; + } + + Place::Ptr get_producing_operation(const std::string& inputName, int inputPortIndex) const override { + m_stat.m_get_producing_operation++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = inputName; + return {std::make_shared()}; + } + + Place::Ptr get_producing_port() const override { + m_stat.m_get_producing_port++; + return std::make_shared(); + } + + Place::Ptr get_input_port() const override { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_input_port(int inputPortIndex) const override { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_input_port(const std::string& inputName) const override { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = inputName; + return std::make_shared(); + } + + Place::Ptr get_input_port(const std::string& inputName, int inputPortIndex) const override { + m_stat.m_get_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = inputName; + return std::make_shared(); + } + + Place::Ptr get_output_port() const override { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = -1; + return std::make_shared(); + } + + Place::Ptr get_output_port(int outputPortIndex) const override { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + return std::make_shared(); + } + + Place::Ptr get_output_port(const 
std::string& outputName) const override { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = outputName; + return std::make_shared(); + } + + Place::Ptr get_output_port(const std::string& outputName, int outputPortIndex) const override { + m_stat.m_get_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = outputName; + return std::make_shared(); + } + + std::vector get_consuming_ports() const override { + m_stat.m_get_consuming_ports++; + return {std::make_shared()}; + } + + bool is_input() const override { + m_stat.m_is_input++; + return false; + } + + bool is_output() const override { + m_stat.m_is_output++; + return false; + } + + bool is_equal(Ptr another) const override { + m_stat.m_is_equal++; + m_stat.m_lastArgPlace = another; + return false; + } + + bool is_equal_data(Ptr another) const override { + m_stat.m_is_equal_data++; + m_stat.m_lastArgPlace = another; + return false; + } + + Place::Ptr get_source_tensor(int inputPortIndex) const override { + m_stat.m_get_source_tensor++; + m_stat.m_lastArgInt = inputPortIndex; + return {std::make_shared()}; + } + + Place::Ptr get_source_tensor() const override { + m_stat.m_get_source_tensor++; + m_stat.m_lastArgInt = -1; + return {std::make_shared()}; + } + + Place::Ptr get_source_tensor(const std::string& inputName) const override { + m_stat.m_get_source_tensor++; + m_stat.m_lastArgInt = -1; + m_stat.m_lastArgString = inputName; + return {std::make_shared()}; + } + + Place::Ptr get_source_tensor(const std::string& inputName, int inputPortIndex) const override { + m_stat.m_get_source_tensor++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = inputName; + return {std::make_shared()}; + } + + //---------------Stat-------------------- + PlaceStat get_stat() const { + return m_stat; + } +}; + +//////////////////////////////// + +struct MOCK_API ModelStat { + int m_get_inputs = 0; + int m_get_outputs = 0; + int m_get_place_by_tensor_name = 
0; + int m_get_place_by_operation_name = 0; + int m_get_place_by_operation_and_input_port = 0; + int m_get_place_by_operation_and_output_port = 0; + int m_set_name_for_tensor = 0; + int m_add_name_for_tensor = 0; + int m_set_name_for_operation = 0; + int m_free_name_for_tensor = 0; + int m_free_name_for_operation = 0; + int m_set_name_for_dimension = 0; + int m_cut_and_add_new_input = 0; + int m_cut_and_add_new_output = 0; + int m_add_output = 0; + int m_remove_output = 0; + int m_set_partial_shape = 0; + int m_get_partial_shape = 0; + int m_set_element_type = 0; + + int m_extract_subgraph = 0; + int m_override_all_inputs = 0; + int m_override_all_outputs = 0; + + // Arguments tracking + std::string m_lastArgString; + int m_lastArgInt; + Place::Ptr m_lastArgPlace = nullptr; + std::vector m_lastArgInputPlaces; + std::vector m_lastArgOutputPlaces; + ngraph::element::Type m_lastArgElementType; + ngraph::PartialShape m_lastArgPartialShape; + + // Getters + int get_inputs() const { + return m_get_inputs; + } + int get_outputs() const { + return m_get_outputs; + } + int extract_subgraph() const { + return m_extract_subgraph; + } + int override_all_inputs() const { + return m_override_all_inputs; + } + int override_all_outputs() const { + return m_override_all_outputs; + } + int get_place_by_tensor_name() const { + return m_get_place_by_tensor_name; + } + int get_place_by_operation_name() const { + return m_get_place_by_operation_name; + } + int get_place_by_operation_and_input_port() const { + return m_get_place_by_operation_and_input_port; + } + int get_place_by_operation_and_output_port() const { + return m_get_place_by_operation_and_output_port; + } + int set_name_for_tensor() const { + return m_set_name_for_tensor; + } + int add_name_for_tensor() const { + return m_add_name_for_tensor; + } + int set_name_for_operation() const { + return m_set_name_for_operation; + } + int free_name_for_tensor() const { + return m_free_name_for_tensor; + } + int 
free_name_for_operation() const { + return m_free_name_for_operation; + } + int set_name_for_dimension() const { + return m_set_name_for_dimension; + } + int cut_and_add_new_input() const { + return m_cut_and_add_new_input; + } + int cut_and_add_new_output() const { + return m_cut_and_add_new_output; + } + int add_output() const { + return m_add_output; + } + int remove_output() const { + return m_remove_output; + } + int set_partial_shape() const { + return m_set_partial_shape; + } + int get_partial_shape() const { + return m_get_partial_shape; + } + int set_element_type() const { + return m_set_element_type; + } + + // Arguments getters + std::string get_lastArgString() const { + return m_lastArgString; + } + int get_lastArgInt() const { + return m_lastArgInt; + } + Place::Ptr get_lastArgPlace() const { + return m_lastArgPlace; + } + std::vector get_lastArgInputPlaces() const { + return m_lastArgInputPlaces; + } + std::vector get_lastArgOutputPlaces() const { + return m_lastArgOutputPlaces; + } + ngraph::element::Type get_lastArgElementType() const { + return m_lastArgElementType; + } + ngraph::PartialShape get_lastArgPartialShape() const { + return m_lastArgPartialShape; + } +}; + +class MOCK_API InputModelMockPy : public InputModel { + mutable ModelStat m_stat; + +public: + std::vector get_inputs() const override { + m_stat.m_get_inputs++; + return {std::make_shared()}; + } + + std::vector get_outputs() const override { + m_stat.m_get_outputs++; + return {std::make_shared()}; + } + + Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const override { + m_stat.m_get_place_by_tensor_name++; + m_stat.m_lastArgString = tensorName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_name(const std::string& operationName) const override { + m_stat.m_get_place_by_operation_name++; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_name_and_input_port(const std::string& 
operationName, + int inputPortIndex) override { + m_stat.m_get_place_by_operation_and_input_port++; + m_stat.m_lastArgInt = inputPortIndex; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + Place::Ptr get_place_by_operation_name_and_output_port(const std::string& operationName, + int outputPortIndex) override { + m_stat.m_get_place_by_operation_and_output_port++; + m_stat.m_lastArgInt = outputPortIndex; + m_stat.m_lastArgString = operationName; + return std::make_shared(); + } + + void set_name_for_tensor(Place::Ptr tensor, const std::string& newName) override { + m_stat.m_set_name_for_tensor++; + m_stat.m_lastArgPlace = tensor; + m_stat.m_lastArgString = newName; + } + + void add_name_for_tensor(Place::Ptr tensor, const std::string& newName) override { + m_stat.m_add_name_for_tensor++; + m_stat.m_lastArgPlace = tensor; + m_stat.m_lastArgString = newName; + } + + void set_name_for_operation(Place::Ptr operation, const std::string& newName) override { + m_stat.m_set_name_for_operation++; + m_stat.m_lastArgPlace = operation; + m_stat.m_lastArgString = newName; + } + + void free_name_for_tensor(const std::string& name) override { + m_stat.m_free_name_for_tensor++; + m_stat.m_lastArgString = name; + } + + void free_name_for_operation(const std::string& name) override { + m_stat.m_free_name_for_operation++; + m_stat.m_lastArgString = name; + } + + void set_name_for_dimension(Place::Ptr place, size_t shapeDimIndex, const std::string& dimName) override { + m_stat.m_set_name_for_dimension++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgInt = static_cast(shapeDimIndex); + m_stat.m_lastArgString = dimName; + } + + void cut_and_add_new_input(Place::Ptr place, const std::string& newNameOptional) override { + m_stat.m_cut_and_add_new_input++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgString = newNameOptional; + } + + void cut_and_add_new_output(Place::Ptr place, const std::string& newNameOptional) override { + 
m_stat.m_cut_and_add_new_output++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgString = newNameOptional; + } + + Place::Ptr add_output(Place::Ptr place) override { + m_stat.m_add_output++; + m_stat.m_lastArgPlace = place; + return std::make_shared(); + } + + void remove_output(Place::Ptr place) override { + m_stat.m_remove_output++; + m_stat.m_lastArgPlace = place; + } + + void override_all_outputs(const std::vector& outputs) override { + m_stat.m_override_all_outputs++; + m_stat.m_lastArgOutputPlaces = outputs; + } + + void override_all_inputs(const std::vector& inputs) override { + m_stat.m_override_all_inputs++; + m_stat.m_lastArgInputPlaces = inputs; + } + + void extract_subgraph(const std::vector& inputs, const std::vector& outputs) override { + m_stat.m_extract_subgraph++; + m_stat.m_lastArgInputPlaces = inputs; + m_stat.m_lastArgOutputPlaces = outputs; + } + + // Setting tensor properties + void set_partial_shape(Place::Ptr place, const ngraph::PartialShape& shape) override { + m_stat.m_set_partial_shape++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgPartialShape = shape; + } + + ngraph::PartialShape get_partial_shape(Place::Ptr place) const override { + m_stat.m_get_partial_shape++; + m_stat.m_lastArgPlace = place; + return {}; + } + + void set_element_type(Place::Ptr place, const ngraph::element::Type& type) override { + m_stat.m_set_element_type++; + m_stat.m_lastArgPlace = place; + m_stat.m_lastArgElementType = type; + } + + //---------------Stat-------------------- + ModelStat get_stat() const { + return m_stat; + } +}; + +///////////////////////////////////////////////////////// + +struct MOCK_API FeStat { + std::vector m_load_paths; + int m_convert_model = 0; + int m_convert = 0; + int m_convert_partially = 0; + int m_decode = 0; + int m_normalize = 0; + int m_get_name = 0; + int m_supported = 0; + // Getters + std::vector load_paths() const { + return m_load_paths; + } + int convert_model() const { + return m_convert_model; + } + int 
convert() const { + return m_convert; + } + int convert_partially() const { + return m_convert_partially; + } + int decode() const { + return m_decode; + } + int normalize() const { + return m_normalize; + } + int get_name() const { + return m_get_name; + } + int supported() const { + return m_supported; + } +}; + +class MOCK_API FrontEndMockPy : public FrontEnd { + mutable FeStat m_stat; + +public: + FrontEndMockPy() {} + + InputModel::Ptr load_impl(const std::vector>& params) const override { + if (params.size() > 0 && ov::is_type>(params[0])) + m_stat.m_load_paths.push_back(ov::as_type_ptr>(params[0])->get()); + return std::make_shared(); + } + + bool supported_impl(const std::vector>& params) const override { + m_stat.m_supported++; + if (params.size() > 0 && ov::is_type>(params[0])) { + auto path = ov::as_type_ptr>(params[0])->get(); + if (path.find(".test_mock_py_mdl") != std::string::npos) { + return true; + } + } + return false; + } + + std::shared_ptr convert(InputModel::Ptr model) const override { + m_stat.m_convert_model++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + void convert(std::shared_ptr func) const override { + m_stat.m_convert++; + } + + std::shared_ptr convert_partially(InputModel::Ptr model) const override { + m_stat.m_convert_partially++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + std::shared_ptr decode(InputModel::Ptr model) const override { + m_stat.m_decode++; + return std::make_shared(NodeVector{}, ParameterVector{}); + } + + void normalize(std::shared_ptr function) const override { + m_stat.m_normalize++; + } + + std::string get_name() const override { + m_stat.m_get_name++; + return "mock_py"; + } + + FeStat get_stat() const { + return m_stat; + } +}; diff --git a/runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/CMakeLists.txt b/runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/CMakeLists.txt new file mode 100644 index 00000000000..957ab59e0a0 --- 
/dev/null +++ b/runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/CMakeLists.txt @@ -0,0 +1,20 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_FE_NAME "mock_py_ngraph_frontend") +set(PYBIND_FE_NAME "pybind_mock_frontend") + +set(PYBIND_FE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/pyngraph_mock_frontend_api.cpp) + +source_group("src" FILES ${PYBIND_FE_SRC}) + +pybind11_add_module(${PYBIND_FE_NAME} MODULE ${PYBIND_FE_SRC}) + +target_link_libraries(${PYBIND_FE_NAME} PRIVATE ${TARGET_FE_NAME} frontend_manager::static) + +add_clang_format_target(${PYBIND_FE_NAME}_clang FOR_TARGETS ${PYBIND_FE_NAME}) + +install(TARGETS ${PYBIND_FE_NAME} + DESTINATION python/${PYTHON_VERSION} + COMPONENT tests_compatibility EXCLUDE_FROM_ALL) diff --git a/runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp b/runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp new file mode 100644 index 00000000000..54e022b978f --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/mock/pyngraph_fe_mock_api/pyngraph_mock_frontend_api.cpp @@ -0,0 +1,127 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "../mock_py_ngraph_frontend/mock_py_frontend.hpp" + +namespace py = pybind11; +using namespace ngraph; +using namespace ngraph::frontend; + +static void register_mock_frontend_stat(py::module m) { + m.def( + "get_fe_stat", + [](const std::shared_ptr& fe) { + std::shared_ptr ptr = std::dynamic_pointer_cast(fe); + if (ptr) { + auto stat = ptr->get_stat(); + return stat; + } + return FeStat(); + }, + py::arg("frontend")); + + py::class_ feStat(m, "FeStat", py::dynamic_attr()); + feStat.def_property_readonly("load_paths", &FeStat::load_paths); + feStat.def_property_readonly("convert_model", &FeStat::convert_model); + feStat.def_property_readonly("convert", 
&FeStat::convert); + feStat.def_property_readonly("convert_partially", &FeStat::convert_partially); + feStat.def_property_readonly("decode", &FeStat::decode); + feStat.def_property_readonly("normalize", &FeStat::normalize); + feStat.def_property_readonly("get_name", &FeStat::get_name); + feStat.def_property_readonly("supported", &FeStat::supported); +} + +static void register_mock_model_stat(py::module m) { + m.def( + "get_mdl_stat", + [](const std::shared_ptr& mdl) { + std::shared_ptr ptr = std::dynamic_pointer_cast(mdl); + if (ptr) { + auto stat = ptr->get_stat(); + return stat; + } + return ModelStat(); + }, + py::arg("model")); + + py::class_ mdlStat(m, "ModelStat", py::dynamic_attr()); + mdlStat.def_property_readonly("get_inputs", &ModelStat::get_inputs); + mdlStat.def_property_readonly("get_outputs", &ModelStat::get_outputs); + mdlStat.def_property_readonly("get_place_by_tensor_name", &ModelStat::get_place_by_tensor_name); + mdlStat.def_property_readonly("get_place_by_operation_name", &ModelStat::get_place_by_operation_name); + mdlStat.def_property_readonly("get_place_by_operation_and_input_port", + &ModelStat::get_place_by_operation_and_input_port); + mdlStat.def_property_readonly("get_place_by_operation_and_output_port", + &ModelStat::get_place_by_operation_and_output_port); + + mdlStat.def_property_readonly("set_name_for_tensor", &ModelStat::set_name_for_tensor); + mdlStat.def_property_readonly("add_name_for_tensor", &ModelStat::add_name_for_tensor); + mdlStat.def_property_readonly("set_name_for_operation", &ModelStat::set_name_for_operation); + mdlStat.def_property_readonly("free_name_for_tensor", &ModelStat::free_name_for_tensor); + mdlStat.def_property_readonly("free_name_for_operation", &ModelStat::free_name_for_operation); + mdlStat.def_property_readonly("set_name_for_dimension", &ModelStat::set_name_for_dimension); + mdlStat.def_property_readonly("cut_and_add_new_input", &ModelStat::cut_and_add_new_input); + 
mdlStat.def_property_readonly("cut_and_add_new_output", &ModelStat::cut_and_add_new_output); + mdlStat.def_property_readonly("add_output", &ModelStat::add_output); + mdlStat.def_property_readonly("remove_output", &ModelStat::remove_output); + mdlStat.def_property_readonly("set_partial_shape", &ModelStat::set_partial_shape); + mdlStat.def_property_readonly("get_partial_shape", &ModelStat::get_partial_shape); + mdlStat.def_property_readonly("set_element_type", &ModelStat::set_element_type); + mdlStat.def_property_readonly("extract_subgraph", &ModelStat::extract_subgraph); + mdlStat.def_property_readonly("override_all_inputs", &ModelStat::override_all_inputs); + mdlStat.def_property_readonly("override_all_outputs", &ModelStat::override_all_outputs); + + // Arguments tracking + mdlStat.def_property_readonly("lastArgString", &ModelStat::get_lastArgString); + mdlStat.def_property_readonly("lastArgInt", &ModelStat::get_lastArgInt); + mdlStat.def_property_readonly("lastArgPlace", &ModelStat::get_lastArgPlace); + mdlStat.def_property_readonly("lastArgInputPlaces", &ModelStat::get_lastArgInputPlaces); + mdlStat.def_property_readonly("lastArgOutputPlaces", &ModelStat::get_lastArgOutputPlaces); + mdlStat.def_property_readonly("lastArgElementType", &ModelStat::get_lastArgElementType); + mdlStat.def_property_readonly("lastArgPartialShape", &ModelStat::get_lastArgPartialShape); +} + +static void register_mock_place_stat(py::module m) { + m.def( + "get_place_stat", + [](const Place::Ptr& fe) { + std::shared_ptr ptr = std::dynamic_pointer_cast(fe); + if (ptr) { + auto stat = ptr->get_stat(); + return stat; + } + return PlaceStat(); + }, + py::arg("place")); + + py::class_ placeStat(m, "PlaceStat", py::dynamic_attr()); + + placeStat.def_property_readonly("lastArgString", &PlaceStat::get_lastArgString); + placeStat.def_property_readonly("lastArgInt", &PlaceStat::get_lastArgInt); + placeStat.def_property_readonly("lastArgPlace", &PlaceStat::get_lastArgPlace); + + 
placeStat.def_property_readonly("get_names", &PlaceStat::get_names); + placeStat.def_property_readonly("get_consuming_operations", &PlaceStat::get_consuming_operations); + placeStat.def_property_readonly("get_target_tensor", &PlaceStat::get_target_tensor); + placeStat.def_property_readonly("get_producing_operation", &PlaceStat::get_producing_operation); + placeStat.def_property_readonly("get_producing_port", &PlaceStat::get_producing_port); + placeStat.def_property_readonly("get_input_port", &PlaceStat::get_input_port); + placeStat.def_property_readonly("get_output_port", &PlaceStat::get_output_port); + placeStat.def_property_readonly("get_consuming_ports", &PlaceStat::get_consuming_ports); + placeStat.def_property_readonly("is_input", &PlaceStat::is_input); + placeStat.def_property_readonly("is_output", &PlaceStat::is_output); + placeStat.def_property_readonly("is_equal", &PlaceStat::is_equal); + placeStat.def_property_readonly("is_equal_data", &PlaceStat::is_equal_data); + placeStat.def_property_readonly("get_source_tensor", &PlaceStat::get_source_tensor); +} + +PYBIND11_MODULE(pybind_mock_frontend, m) { + m.doc() = "Mock frontend call counters for testing Pyngraph frontend bindings"; + register_mock_frontend_stat(m); + register_mock_model_stat(m); + register_mock_place_stat(m); +} diff --git a/runtime/bindings/python/tests_compatibility/runtime.py b/runtime/bindings/python/tests_compatibility/runtime.py new file mode 100644 index 00000000000..9a5305c09ff --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/runtime.py @@ -0,0 +1,193 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Provide a layer of abstraction for an OpenVINO runtime environment.""" + +import logging +from typing import Dict, List, Union + +import numpy as np +from openvino.inference_engine import IECore, IENetwork, Blob, DataPtr + +from ngraph.exceptions import UserInputError +from ngraph.impl import Function, Node, PartialShape, Type +from 
ngraph.opset1.ops import result +from ngraph.utils.types import NumericData, get_shape, get_dtype + +import tests_compatibility + +log = logging.getLogger(__name__) + + +def runtime(backend_name: str = "CPU") -> "Runtime": + """Create a Runtime object (helper factory).""" + return Runtime(backend_name) + + +def get_runtime(): + """Return runtime object.""" + if tests_compatibility.BACKEND_NAME is not None: + return runtime(backend_name=tests_compatibility.BACKEND_NAME) + else: + return runtime() + + +def _convert_inputs(cnn_network: IENetwork) -> None: + """WA converts unsupported input images formats.""" + precision_map = { + "FP64": "FP32", + "U32": "I32", + } + + for cnn_input in cnn_network.input_info: + try: + _precision = precision_map[cnn_network.input_info[cnn_input].precision] + cnn_network.input_info[cnn_input].precision = _precision + except KeyError: + pass + + +def apply_ng_type(output: DataPtr, ng_type: Type): + ng_ie_supported_type_map = { + Type.boolean.get_type_name(): "BOOL", + Type.f32.get_type_name(): "FP32", + Type.i8.get_type_name(): "I8", + Type.i32.get_type_name(): "I32", + Type.u8.get_type_name(): "U8", + } + if ng_type.get_type_name() in ng_ie_supported_type_map: + output.precision = ng_ie_supported_type_map[ng_type.get_type_name()] + + +class Runtime(object): + """Represents an nGraph runtime environment.""" + + def __init__(self, backend_name: str) -> None: + self.backend_name = backend_name + log.debug("Creating Inference Engine for %s" % backend_name) + self.backend = IECore() + assert backend_name in self.backend.available_devices, ( + 'The requested device "' + backend_name + '" is not supported!' 
+ ) + + def set_config(self, config: Dict[str, str]) -> None: + """Set the inference engine configuration.""" + self.backend.set_config(config, device_name=self.backend_name) + + def __repr__(self) -> str: + return "".format(self.backend_name) + + def computation(self, node_or_function: Union[Node, Function], *inputs: Node) -> "Computation": + """Return a callable Computation object.""" + if isinstance(node_or_function, Node): + ng_function = Function(node_or_function, inputs, node_or_function.name) + return Computation(self, ng_function) + elif isinstance(node_or_function, Function): + return Computation(self, node_or_function) + else: + raise TypeError( + "Runtime.computation must be called with an nGraph Function object " + "or an nGraph node object an optionally Parameter node objects. " + "Called with: %s", + node_or_function, + ) + + +class Computation(object): + """nGraph callable computation object.""" + + def __init__(self, runtime: Runtime, ng_function: Function) -> None: + self.runtime = runtime + self.function = ng_function + self.parameters = ng_function.get_parameters() + self.results = ng_function.get_results() + self.network_cache = {} + + def __repr__(self) -> str: + params_string = ", ".join([param.name for param in self.parameters]) + return "".format(self.function.get_name(), params_string) + + def _get_ie_output_blob_name(self, outputs: Dict, ng_result: result) -> str: + if len(self.results) == 1: + return next(iter(outputs.keys())) + else: + prev_layer = ng_result.input(0).get_source_output() + out_name = prev_layer.get_node().get_friendly_name() + if prev_layer.get_node().get_output_size() != 1: + out_name += "." 
+ str(prev_layer.get_index()) + return out_name + + def _get_ie_output_blob_buffer(self, output_blobs: Dict[str, Blob], ng_result: result) -> np.ndarray: + out_name = self._get_ie_output_blob_name(output_blobs, ng_result) + out_blob = output_blobs[out_name] + + if out_blob.tensor_desc.layout == "SCALAR": + return out_blob.buffer.reshape(()) + else: + return out_blob.buffer + + def convert_buffers(self, source_buffers, target_dtypes): + converted_buffers = [] + for i in range(len(source_buffers)): + target_dtype = target_dtypes[i] + # custom conversion for bf16 + if self.results[i].get_output_element_type(0) == Type.bf16: + converted_buffers.append((source_buffers[i].view(np.uint32) >> 16).astype(np.uint16)) + else: + converted_buffers.append(source_buffers[i].astype(target_dtype)) + return converted_buffers + + def __call__(self, *input_values: NumericData) -> List[NumericData]: + """Run computation on input values and return result.""" + # Input validation + if len(input_values) < len(self.parameters): + raise UserInputError( + "Expected %s params, received not enough %s values.", len(self.parameters), len(input_values) + ) + # ignore not needed input values + input_values = input_values[:len(self.parameters)] + + input_values = [np.array(input_value) for input_value in input_values] + input_shapes = [get_shape(input_value) for input_value in input_values] + + param_names = [param.friendly_name for param in self.parameters] + + if self.network_cache.get(str(input_shapes)) is None: + capsule = Function.to_capsule(self.function) + cnn_network = IENetwork(capsule) + if self.function.is_dynamic(): + cnn_network.reshape(dict(zip(param_names, input_shapes))) + # Convert unsupported inputs of the network + _convert_inputs(cnn_network) + self.network_cache[str(input_shapes)] = cnn_network + else: + cnn_network = self.network_cache[str(input_shapes)] + + # set output blobs precission based on nG results + for ng_result in self.results: + ie_out_name = 
self._get_ie_output_blob_name(cnn_network.outputs, ng_result) + apply_ng_type(cnn_network.outputs[ie_out_name], ng_result.get_output_element_type(0)) + + executable_network = self.runtime.backend.load_network(cnn_network, self.runtime.backend_name) + + for parameter, input in zip(self.parameters, input_values): + parameter_shape = parameter.get_output_partial_shape(0) + input_shape = PartialShape(input.shape) + if len(input.shape) > 0 and not parameter_shape.compatible(input_shape): + raise UserInputError( + "Provided tensor's shape: %s does not match the expected: %s.", + input_shape, + parameter_shape, + ) + + request = executable_network.requests[0] + request.infer(dict(zip(param_names, input_values))) + + # Set order of output blobs compatible with nG Function + result_buffers = [self._get_ie_output_blob_buffer(request.output_blobs, result) + for result in self.results] + + # Since OV overwrite result data type we have to convert results to the original one. + original_dtypes = [get_dtype(result.get_output_element_type(0)) for result in self.results] + converted_buffers = self.convert_buffers(result_buffers, original_dtypes) + return converted_buffers diff --git a/runtime/bindings/python/tests/test_frontend/test_frontend_onnx.py b/runtime/bindings/python/tests_compatibility/test_frontend/test_frontend_onnx.py similarity index 98% rename from runtime/bindings/python/tests/test_frontend/test_frontend_onnx.py rename to runtime/bindings/python/tests_compatibility/test_frontend/test_frontend_onnx.py index f75e3713448..221a5aa3372 100644 --- a/runtime/bindings/python/tests/test_frontend/test_frontend_onnx.py +++ b/runtime/bindings/python/tests_compatibility/test_frontend/test_frontend_onnx.py @@ -8,7 +8,7 @@ from onnx.helper import make_graph, make_model, make_tensor_value_info import pytest from ngraph.frontend import FrontEndManager -from tests.runtime import get_runtime +from tests_compatibility.runtime import get_runtime def create_onnx_model(): diff --git 
a/runtime/bindings/python/tests/test_frontend/test_frontend_onnx_editor.py b/runtime/bindings/python/tests_compatibility/test_frontend/test_frontend_onnx_editor.py similarity index 100% rename from runtime/bindings/python/tests/test_frontend/test_frontend_onnx_editor.py rename to runtime/bindings/python/tests_compatibility/test_frontend/test_frontend_onnx_editor.py diff --git a/runtime/bindings/python/tests/test_frontend/test_frontendmanager.py b/runtime/bindings/python/tests_compatibility/test_frontend/test_frontendmanager.py similarity index 100% rename from runtime/bindings/python/tests/test_frontend/test_frontendmanager.py rename to runtime/bindings/python/tests_compatibility/test_frontend/test_frontendmanager.py diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/__init__.py b/runtime/bindings/python/tests_compatibility/test_ngraph/__init__.py new file mode 100644 index 00000000000..b274453fb17 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/__init__.py @@ -0,0 +1,6 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# ngraph.dll directory path visibility is needed to use _pyngraph module +# import below causes adding this path to os.environ["PATH"] +import ngraph # noqa: F401 'imported but unused' diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py new file mode 100644 index 00000000000..ba7fe7b28f3 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_adaptive_pool.py @@ -0,0 +1,63 @@ +import ngraph as ng +import numpy as np +from tests_compatibility.runtime import get_runtime + + +def test_adaptive_avg_pool(): + runtime = get_runtime() + input = np.reshape([0.0, 4, 1, 3, -2, -5, -2, + -2, 1, -3, 1, -3, -4, 0, + -2, 1, -1, -2, 3, -1, -3, + + -1, -2, 3, 4, -3, -4, 1, + 2, 0, -4, -5, -2, -2, -3, + 2, 3, 1, -5, 2, -4, -2], (2, 3, 7)) + 
input_tensor = ng.constant(input) + output_shape = ng.constant(np.array([3], dtype=np.int32)) + + adaptive_pool_node = ng.adaptive_avg_pool(input_tensor, output_shape) + computation = runtime.computation(adaptive_pool_node) + adaptive_pool_results = computation() + expected_results = np.reshape([1.66666663, 0.66666669, -3., + -1.33333337, -1.66666663, -2.33333325, + -0.66666669, 0., -0.33333334, + + 0., 1.33333337, -2., + -0.66666669, -3.66666675, -2.33333325, + 2., -0.66666669, -1.33333337], (2, 3, 3)) + + assert np.allclose(adaptive_pool_results, expected_results) + + +def test_adaptive_max_pool(): + runtime = get_runtime() + input = np.reshape([0, 4, 1, 3, -2, -5, -2, + -2, 1, -3, 1, -3, -4, 0, + -2, 1, -1, -2, 3, -1, -3, + + -1, -2, 3, 4, -3, -4, 1, + 2, 0, -4, -5, -2, -2, -3, + 2, 3, 1, -5, 2, -4, -2], (2, 3, 7)) + input_tensor = ng.constant(input) + output_shape = ng.constant(np.array([3], dtype=np.int32)) + + adaptive_pool_node = ng.adaptive_max_pool(input_tensor, output_shape) + computation = runtime.computation(adaptive_pool_node) + adaptive_pool_results = computation() + expected_results = np.reshape([4, 3, -2, + 1, 1, 0, + 1, 3, 3, + + 3, 4, 1, + 2, -2, -2, + 3, 2, 2], (2, 3, 3)) + + expected_indices = np.reshape([1, 3, 4, + 1, 3, 6, + 1, 4, 4, + + 2, 3, 6, + 0, 4, 4, + 1, 4, 4], (2, 3, 3)) + + assert np.allclose(adaptive_pool_results, [expected_results, expected_indices]) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_basic.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_basic.py new file mode 100644 index 00000000000..dc7198e364a --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_basic.py @@ -0,0 +1,472 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import json + +import numpy as np +import pytest + +from _pyngraph import VariantInt, VariantString + +import ngraph as ng +from ngraph.exceptions import UserInputError +from ngraph.impl 
import Function, PartialShape, Shape, Type +from ngraph.impl.op import Parameter +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node + + +def test_ngraph_function_api(): + shape = [2, 2] + parameter_a = ng.parameter(shape, dtype=np.float32, name="A") + parameter_b = ng.parameter(shape, dtype=np.float32, name="B") + parameter_c = ng.parameter(shape, dtype=np.float32, name="C") + model = (parameter_a + parameter_b) * parameter_c + function = Function(model, [parameter_a, parameter_b, parameter_c], "TestFunction") + + function.get_parameters()[1].set_partial_shape(PartialShape([3, 4, 5])) + + ordered_ops = function.get_ordered_ops() + op_types = [op.get_type_name() for op in ordered_ops] + assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"] + assert len(function.get_ops()) == 6 + assert function.get_output_size() == 1 + assert function.get_output_op(0).get_type_name() == "Result" + assert function.get_output_element_type(0) == parameter_a.get_element_type() + assert list(function.get_output_shape(0)) == [2, 2] + assert (function.get_parameters()[1].get_partial_shape()) == PartialShape([3, 4, 5]) + assert len(function.get_parameters()) == 3 + assert len(function.get_results()) == 1 + assert function.get_friendly_name() == "TestFunction" + + +@pytest.mark.parametrize( + "dtype", + [ + np.float32, + np.float64, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ], +) +def test_simple_computation_on_ndarrays(dtype): + runtime = get_runtime() + + shape = [2, 2] + parameter_a = ng.parameter(shape, dtype=dtype, name="A") + parameter_b = ng.parameter(shape, dtype=dtype, name="B") + parameter_c = ng.parameter(shape, dtype=dtype, name="C") + model = (parameter_a + parameter_b) * parameter_c + computation = runtime.computation(model, parameter_a, parameter_b, parameter_c) + + value_a = np.array([[1, 2], [3, 4]], dtype=dtype) + 
value_b = np.array([[5, 6], [7, 8]], dtype=dtype) + value_c = np.array([[2, 3], [4, 5]], dtype=dtype) + result = computation(value_a, value_b, value_c) + assert np.allclose(result, np.array([[12, 24], [40, 60]], dtype=dtype)) + + value_a = np.array([[9, 10], [11, 12]], dtype=dtype) + value_b = np.array([[13, 14], [15, 16]], dtype=dtype) + value_c = np.array([[5, 4], [3, 2]], dtype=dtype) + result = computation(value_a, value_b, value_c) + assert np.allclose(result, np.array([[110, 96], [78, 56]], dtype=dtype)) + + +def test_serialization(): + dtype = np.float32 + shape = [2, 2] + parameter_a = ng.parameter(shape, dtype=dtype, name="A") + parameter_b = ng.parameter(shape, dtype=dtype, name="B") + parameter_c = ng.parameter(shape, dtype=dtype, name="C") + model = (parameter_a + parameter_b) * parameter_c + + runtime = get_runtime() + computation = runtime.computation(model, parameter_a, parameter_b, parameter_c) + try: + serialized = computation.serialize(2) + serial_json = json.loads(serialized) + + assert serial_json[0]["name"] != "" + assert 10 == len(serial_json[0]["ops"]) + except Exception: + pass + + +def test_broadcast_1(): + input_data = np.array([1, 2, 3], dtype=np.int32) + + new_shape = [3, 3] + expected = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] + result = run_op_node([input_data], ng.broadcast, new_shape) + assert np.allclose(result, expected) + + +def test_broadcast_2(): + input_data = np.arange(4, dtype=np.int32) + new_shape = [3, 4, 2, 4] + expected = np.broadcast_to(input_data, new_shape) + result = run_op_node([input_data], ng.broadcast, new_shape) + assert np.allclose(result, expected) + + +def test_broadcast_3(): + input_data = np.array([1, 2, 3], dtype=np.int32) + new_shape = [3, 3] + axis_mapping = [0] + expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]] + + result = run_op_node([input_data], ng.broadcast, new_shape, axis_mapping, "EXPLICIT") + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "destination_type, input_data", + [(bool, 
np.zeros((2, 2), dtype=np.int32)), ("boolean", np.zeros((2, 2), dtype=np.int32))], +) +def test_convert_to_bool(destination_type, input_data): + expected = np.array(input_data, dtype=bool) + result = run_op_node([input_data], ng.convert, destination_type) + assert np.allclose(result, expected) + assert np.array(result).dtype == bool + + +@pytest.mark.parametrize( + "destination_type, rand_range, in_dtype, expected_type", + [ + pytest.param(np.float32, (-8, 8), np.int32, np.float32), + pytest.param(np.float64, (-16383, 16383), np.int64, np.float64), + pytest.param("f32", (-8, 8), np.int32, np.float32), + pytest.param("f64", (-16383, 16383), np.int64, np.float64), + ], +) +def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type): + np.random.seed(133391) + input_data = np.random.randint(*rand_range, size=(2, 2), dtype=in_dtype) + expected = np.array(input_data, dtype=expected_type) + result = run_op_node([input_data], ng.convert, destination_type) + assert np.allclose(result, expected) + assert np.array(result).dtype == expected_type + + +@pytest.mark.parametrize( + "destination_type, expected_type", + [ + (np.int8, np.int8), + (np.int16, np.int16), + (np.int32, np.int32), + (np.int64, np.int64), + ("i8", np.int8), + ("i16", np.int16), + ("i32", np.int32), + ("i64", np.int64), + ], +) +def test_convert_to_int(destination_type, expected_type): + np.random.seed(133391) + input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(np.float32) + expected = np.array(input_data, dtype=expected_type) + result = run_op_node([input_data], ng.convert, destination_type) + assert np.allclose(result, expected) + assert np.array(result).dtype == expected_type + + +@pytest.mark.parametrize( + "destination_type, expected_type", + [ + (np.uint8, np.uint8), + (np.uint16, np.uint16), + (np.uint32, np.uint32), + (np.uint64, np.uint64), + ("u8", np.uint8), + ("u16", np.uint16), + ("u32", np.uint32), + ("u64", np.uint64), + ], +) +def 
test_convert_to_uint(destination_type, expected_type): + np.random.seed(133391) + input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(np.float32) + expected = np.array(input_data, dtype=expected_type) + result = run_op_node([input_data], ng.convert, destination_type) + assert np.allclose(result, expected) + assert np.array(result).dtype == expected_type + + +def test_bad_data_shape(): + A = ng.parameter(shape=[2, 2], name="A", dtype=np.float32) + B = ng.parameter(shape=[2, 2], name="B") + model = A + B + runtime = get_runtime() + computation = runtime.computation(model, A, B) + + value_a = np.array([[1, 2]], dtype=np.float32) + value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) + with pytest.raises(UserInputError): + computation(value_a, value_b) + + +def test_constant_get_data_bool(): + input_data = np.array([True, False, False, True]) + node = ng.constant(input_data, dtype=np.bool) + retrieved_data = node.get_data() + assert np.allclose(input_data, retrieved_data) + + +@pytest.mark.parametrize("data_type", [np.float32, np.float64]) +def test_constant_get_data_floating_point(data_type): + np.random.seed(133391) + input_data = np.random.randn(2, 3, 4).astype(data_type) + min_value = -1.0e20 + max_value = 1.0e20 + input_data = min_value + input_data * max_value * data_type(2) + node = ng.constant(input_data, dtype=data_type) + retrieved_data = node.get_data() + assert np.allclose(input_data, retrieved_data) + + +@pytest.mark.parametrize("data_type", [np.int64, np.int32, np.int16, np.int8]) +def test_constant_get_data_signed_integer(data_type): + np.random.seed(133391) + input_data = np.random.randint( + np.iinfo(data_type).min, np.iinfo(data_type).max, size=[2, 3, 4], dtype=data_type + ) + node = ng.constant(input_data, dtype=data_type) + retrieved_data = node.get_data() + assert np.allclose(input_data, retrieved_data) + + +@pytest.mark.parametrize("data_type", [np.uint64, np.uint32, np.uint16, np.uint8]) +def 
test_constant_get_data_unsigned_integer(data_type): + np.random.seed(133391) + input_data = np.random.randn(2, 3, 4).astype(data_type) + input_data = ( + np.iinfo(data_type).min + input_data * np.iinfo(data_type).max + input_data * np.iinfo(data_type).max + ) + node = ng.constant(input_data, dtype=data_type) + retrieved_data = node.get_data() + assert np.allclose(input_data, retrieved_data) + + +def test_set_argument(): + runtime = get_runtime() + + data1 = np.array([1, 2, 3]) + data2 = np.array([4, 5, 6]) + data3 = np.array([7, 8, 9]) + + node1 = ng.constant(data1, dtype=np.float32) + node2 = ng.constant(data2, dtype=np.float32) + node3 = ng.constant(data3, dtype=np.float32) + node_add = ng.add(node1, node2) + + # Original arguments + computation = runtime.computation(node_add) + output = computation() + assert np.allclose(data1 + data2, output) + + # Arguments changed by set_argument + node_add.set_argument(1, node3.output(0)) + output = computation() + assert np.allclose(data1 + data3, output) + + # Arguments changed by set_argument + node_add.set_argument(0, node3.output(0)) + output = computation() + assert np.allclose(data3 + data3, output) + + # Arguments changed by set_argument(OutputVector) + node_add.set_arguments([node2.output(0), node3.output(0)]) + output = computation() + assert np.allclose(data2 + data3, output) + + # Arguments changed by set_arguments(NodeVector) + node_add.set_arguments([node1, node2]) + output = computation() + assert np.allclose(data1 + data2, output) + + +def test_result(): + node = np.array([[11, 10], [1, 8], [3, 4]]) + result = run_op_node([node], ng.result) + assert np.allclose(result, node) + + +def test_node_friendly_name(): + dummy_node = ng.parameter(shape=[1], name="dummy_name") + + assert(dummy_node.friendly_name == "dummy_name") + + dummy_node.set_friendly_name("changed_name") + + assert(dummy_node.get_friendly_name() == "changed_name") + + dummy_node.friendly_name = "new_name" + + assert(dummy_node.get_friendly_name() 
== "new_name") + + +def test_node_output(): + input_array = np.array([0, 1, 2, 3, 4, 5]) + splits = 3 + expected_shape = len(input_array) // splits + + input_tensor = ng.constant(input_array, dtype=np.int32) + axis = ng.constant(0, dtype=np.int64) + split_node = ng.split(input_tensor, axis, splits) + + split_node_outputs = split_node.outputs() + + assert len(split_node_outputs) == splits + assert [output_node.get_index() for output_node in split_node_outputs] == [0, 1, 2] + assert np.equal( + [output_node.get_element_type() for output_node in split_node_outputs], + input_tensor.get_element_type(), + ).all() + assert np.equal( + [output_node.get_shape() for output_node in split_node_outputs], + Shape([expected_shape]), + ).all() + assert np.equal( + [output_node.get_partial_shape() for output_node in split_node_outputs], + PartialShape([expected_shape]), + ).all() + + output0 = split_node.output(0) + output1 = split_node.output(1) + output2 = split_node.output(2) + + assert [output0.get_index(), output1.get_index(), output2.get_index()] == [0, 1, 2] + + +def test_node_input(): + shape = [2, 2] + parameter_a = ng.parameter(shape, dtype=np.float32, name="A") + parameter_b = ng.parameter(shape, dtype=np.float32, name="B") + + model = parameter_a + parameter_b + + model_inputs = model.inputs() + + assert len(model_inputs) == 2 + assert [input_node.get_index() for input_node in model_inputs] == [0, 1] + assert np.equal( + [input_node.get_element_type() for input_node in model_inputs], + model.get_element_type(), + ).all() + assert np.equal( + [input_node.get_shape() for input_node in model_inputs], Shape(shape) + ).all() + assert np.equal( + [input_node.get_partial_shape() for input_node in model_inputs], + PartialShape(shape), + ).all() + + input0 = model.input(0) + input1 = model.input(1) + + assert [input0.get_index(), input1.get_index()] == [0, 1] + + +def test_node_target_inputs_soruce_output(): + shape = [2, 2] + parameter_a = ng.parameter(shape, dtype=np.float32, 
name="A") + parameter_b = ng.parameter(shape, dtype=np.float32, name="B") + + model = parameter_a + parameter_b + + out_a = list(parameter_a.output(0).get_target_inputs())[0] + out_b = list(parameter_b.output(0).get_target_inputs())[0] + + assert out_a.get_node().name == model.name + assert out_b.get_node().name == model.name + assert np.equal([out_a.get_shape()], [model.get_output_shape(0)]).all() + assert np.equal([out_b.get_shape()], [model.get_output_shape(0)]).all() + + in_model0 = model.input(0).get_source_output() + in_model1 = model.input(1).get_source_output() + + assert in_model0.get_node().name == parameter_a.name + assert in_model1.get_node().name == parameter_b.name + assert np.equal([in_model0.get_shape()], [model.get_output_shape(0)]).all() + assert np.equal([in_model1.get_shape()], [model.get_output_shape(0)]).all() + + +def test_variants(): + variant_int = VariantInt(32) + variant_str = VariantString("test_text") + + assert variant_int.get() == 32 + assert variant_str.get() == "test_text" + + variant_int.set(777) + variant_str.set("another_text") + + assert variant_int.get() == 777 + assert variant_str.get() == "another_text" + + +def test_runtime_info(): + test_shape = PartialShape([1, 1, 1, 1]) + test_type = Type.f32 + test_param = Parameter(test_type, test_shape) + relu_node = ng.relu(test_param) + runtime_info = relu_node.get_rt_info() + runtime_info["affinity"] = "test_affinity" + relu_node.set_friendly_name("testReLU") + runtime_info_after = relu_node.get_rt_info() + + assert runtime_info_after["affinity"] == "test_affinity" + + +def test_mutiple_outputs(): + input_shape = [4, 4] + input_data = np.arange(-8, 8).reshape(input_shape) + + expected_output = np.split(input_data, 2, axis=1)[0] + expected_output[expected_output < 0] = 0 + + test_param = ng.parameter(input_shape, dtype=np.float32, name="A") + split = ng.split(test_param, axis=1, num_splits=2) + split_first_output = split.output(0) + relu = ng.relu(split_first_output) + + runtime = 
get_runtime() + computation = runtime.computation(relu, test_param) + output = computation(input_data) + + assert np.equal(output, expected_output).all() + + +def test_sink_function_ctor(): + input_data = ng.parameter([2, 2], name="input_data", dtype=np.float32) + rv = ng.read_value(input_data, "var_id_667") + add = ng.add(rv, input_data, name="MemoryAdd") + node = ng.assign(add, "var_id_667") + res = ng.result(add, "res") + function = Function(results=[res], sinks=[node], parameters=[input_data], name="TestFunction") + + ordered_ops = function.get_ordered_ops() + op_types = [op.get_type_name() for op in ordered_ops] + assert op_types == ["Parameter", "ReadValue", "Add", "Assign", "Result"] + assert len(function.get_ops()) == 5 + assert function.get_output_size() == 1 + assert function.get_output_op(0).get_type_name() == "Result" + assert function.get_output_element_type(0) == input_data.get_element_type() + assert list(function.get_output_shape(0)) == [2, 2] + assert (function.get_parameters()[0].get_partial_shape()) == PartialShape([2, 2]) + assert len(function.get_parameters()) == 1 + assert len(function.get_results()) == 1 + assert function.get_friendly_name() == "TestFunction" + + +def test_node_version(): + node = ng.add([1], [2]) + + assert node.get_version() == 1 + assert node.version == 1 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_convolution.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_convolution.py new file mode 100644 index 00000000000..da736f6c8ad --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_convolution.py @@ -0,0 +1,219 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import ngraph as ng +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.test_ops import convolution2d +from tests_compatibility.test_ngraph.util import run_op_node + + +def test_convolution_2d(): + + 
# input_x should have shape N(batch) x C x H x W + input_x = np.array( + [ + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ).reshape(1, 1, 9, 9) + + # filter weights should have shape M x C x kH x kW + input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( + 1, 1, 3, 3 + ) + + strides = np.array([1, 1]) + pads_begin = np.array([1, 1]) + pads_end = np.array([1, 1]) + dilations = np.array([1, 1]) + + # convolution with padding=1 should produce 9 x 9 output: + result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + + assert np.allclose( + result, + np.array( + [ + [ + [ + [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], + ] + ] + ], + dtype=np.float32, + ), + ) + + # convolution with padding=0 should produce 7 x 7 output: + strides = np.array([1, 1]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([1, 1]) + result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + assert np.allclose( + result, + np.array( + [ + [ + [ + [-20, -20, 20, 20, 
0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + ] + ] + ], + dtype=np.float32, + ), + ) + + strides = np.array([2, 2]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([1, 1]) + + # convolution with strides=2 should produce 4 x 4 output: + result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + + assert np.allclose( + result, + np.array( + [ + [ + [ + [-20.0, 20.0, 0.0, 0.0], + [-20.0, 20.0, 0.0, 0.0], + [-20.0, 20.0, 0.0, 0.0], + [-20.0, 20.0, 0.0, 0.0], + ] + ] + ], + dtype=np.float32, + ), + ) + + strides = np.array([1, 1]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([2, 2]) + + # convolution with dilation=2 should produce 5 x 5 output: + result = run_op_node([input_x, input_filter], ng.convolution, strides, pads_begin, pads_end, dilations) + assert np.allclose( + result, + np.array( + [ + [ + [ + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + ] + ] + ], + dtype=np.float32, + ), + ) + + +def test_convolution_backprop_data(): + runtime = get_runtime() + + output_spatial_shape = [9, 9] + filter_shape = [1, 1, 3, 3] + data_shape = [1, 1, 7, 7] + strides = [1, 1] + + data_node = ng.parameter(shape=data_shape) + filter_node = ng.parameter(shape=filter_shape) + output_shape_node = ng.constant(np.array(output_spatial_shape, dtype=np.int64)) + + deconvolution = ng.convolution_backprop_data(data_node, filter_node, strides, output_shape_node) + + input_data = np.array( + [ + [ + [ + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + ] + ] + ], + dtype=np.float32, + ) + + 
filter_data = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( + 1, 1, 3, 3 + ) + + model = runtime.computation(deconvolution, data_node, filter_node) + result = model(input_data, filter_data) + assert np.allclose( + result, + np.array( + [ + [ + [ + [-20.0, -20.0, 40.0, 40.0, -20.0, -20.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 120.0, 120.0, -60.0, -60.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 120.0, 120.0, -60.0, -60.0, 0.0, 0.0, 0.0], + [-20.0, -20.0, 40.0, 40.0, -20.0, -20.0, 0.0, 0.0, 0.0], + ] + ] + ], + dtype=np.float32, + ), + ) + + +def test_convolution_v1(): + input_tensor = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16) + filters = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3) + filters[0, 0, 0, 0] = -1 + filters[0, 0, 1, 1] = -1 + filters[0, 0, 2, 2] = -1 + filters[0, 0, 0, 2] = -1 + filters[0, 0, 2, 0] = -1 + strides = np.array([1, 1]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([1, 1]) + + result = run_op_node([input_tensor, filters], ng.convolution, strides, pads_begin, pads_end, dilations) + + expected = convolution2d(input_tensor[0, 0], filters[0, 0]).reshape(1, 1, 14, 14) + + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_core.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_core.py new file mode 100644 index 00000000000..9b7a6336508 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_core.py @@ -0,0 +1,262 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import ngraph as ng +from 
ngraph.impl import Dimension, Function, PartialShape, Shape + + +def test_dimension(): + dim = Dimension() + assert dim.is_dynamic + assert not dim.is_static + assert repr(dim) == "" + + dim = Dimension.dynamic() + assert dim.is_dynamic + assert not dim.is_static + assert repr(dim) == "" + + dim = Dimension(10) + assert dim.is_static + assert len(dim) == 10 + assert dim.get_length() == 10 + assert dim.get_min_length() == 10 + assert dim.get_max_length() == 10 + assert repr(dim) == "" + + dim = Dimension(5, 15) + assert dim.is_dynamic + assert dim.get_min_length() == 5 + assert dim.get_max_length() == 15 + assert repr(dim) == "" + + +def test_dimension_comparisons(): + d1 = Dimension.dynamic() + d2 = Dimension.dynamic() + assert d1 == d2 + assert d1 == -1 + assert d1.refines(d2) + assert d1.relaxes(d2) + assert d2.refines(d1) + assert d2.relaxes(d1) + assert d2.compatible(d1) + assert d2.same_scheme(d1) + + d1 = Dimension.dynamic() + d2 = Dimension(3) + assert d1 != d2 + assert d2 == 3 + assert not d1.refines(d2) + assert d1.relaxes(d2) + assert d2.refines(d1) + assert not d2.relaxes(d1) + assert d2.compatible(d1) + assert not d2.same_scheme(d1) + + d1 = Dimension(3) + d2 = Dimension(3) + assert d1 == d2 + assert d1.refines(d2) + assert d1.relaxes(d2) + assert d2.refines(d1) + assert d2.relaxes(d1) + assert d2.compatible(d1) + assert d2.same_scheme(d1) + + d1 = Dimension(4) + d2 = Dimension(3) + assert d1 != d2 + assert not d1.refines(d2) + assert not d1.relaxes(d2) + assert not d2.refines(d1) + assert not d2.relaxes(d1) + assert not d2.compatible(d1) + assert not d2.same_scheme(d1) + + +def test_partial_shape(): + ps = PartialShape([1, 2, 3, 4]) + assert ps.is_static + assert not ps.is_dynamic + assert ps.rank == 4 + assert repr(ps) == "" + assert ps.get_dimension(0) == Dimension(1) + assert ps.get_dimension(1) == Dimension(2) + assert ps.get_dimension(2) == Dimension(3) + assert ps.get_dimension(3) == Dimension(4) + + shape = Shape([1, 2, 3]) + ps = 
PartialShape(shape) + assert ps.is_static + assert not ps.is_dynamic + assert ps.all_non_negative + assert ps.rank == 3 + assert list(ps.get_shape()) == [1, 2, 3] + assert list(ps.get_max_shape()) == [1, 2, 3] + assert list(ps.get_min_shape()) == [1, 2, 3] + assert list(ps.to_shape()) == [1, 2, 3] + assert repr(shape) == "" + assert repr(ps) == "" + + ps = PartialShape([Dimension(1), Dimension(2), Dimension(3), Dimension.dynamic()]) + assert not ps.is_static + assert ps.is_dynamic + assert ps.all_non_negative + assert ps.rank == 4 + assert list(ps.get_min_shape()) == [1, 2, 3, 0] + assert list(ps.get_max_shape())[3] > 1000000000 + assert repr(ps) == "" + assert ps.get_dimension(0) == Dimension(1) + assert ps.get_dimension(1) == Dimension(2) + assert ps.get_dimension(2) == Dimension(3) + assert ps.get_dimension(3) == Dimension.dynamic() + + ps = PartialShape([1, 2, 3, -1]) + assert not ps.is_static + assert ps.is_dynamic + assert ps.all_non_negative + assert ps.rank == 4 + assert list(ps.get_min_shape()) == [1, 2, 3, 0] + assert list(ps.get_max_shape())[3] > 1000000000 + assert repr(ps) == "" + + ps = PartialShape.dynamic() + assert not ps.is_static + assert ps.is_dynamic + assert ps.rank == Dimension.dynamic() + assert list(ps.get_min_shape()) == [] + assert list(ps.get_max_shape()) == [] + assert repr(ps) == "" + + ps = PartialShape.dynamic(r=Dimension(2)) + assert not ps.is_static + assert ps.is_dynamic + assert ps.rank == 2 + assert 2 == ps.rank + assert list(ps.get_min_shape()) == [0, 0] + assert list(ps.get_max_shape())[0] > 1000000000 + assert repr(ps) == "" + + +def test_partial_shape_compatible(): + ps1 = PartialShape.dynamic() + ps2 = PartialShape.dynamic() + assert ps1.compatible(ps2) + + ps1 = PartialShape([3]) + ps2 = PartialShape.dynamic() + assert ps1.compatible(ps2) + + ps1 = PartialShape.dynamic() + ps2 = PartialShape([4]) + assert ps1.compatible(ps2) + + ps1 = PartialShape([2, -1, 3, -1, 5]) + ps2 = PartialShape([2, -1, -1, 4, 5]) + assert 
ps1.compatible(ps2) + + ps1 = PartialShape([2, -1, 3, -1, 5]) + ps2 = PartialShape([1, -1, -1, 4, 5]) + assert not ps1.compatible(ps2) + + +def test_partial_shape_same_scheme(): + ps1 = PartialShape([1, 2, -1]) + ps2 = PartialShape([1, 3, -1]) + assert not ps1.same_scheme(ps2) + + ps1 = PartialShape([1, 2, -1]) + ps2 = PartialShape([1, 2, -1]) + assert ps1.same_scheme(ps2) + + ps1 = PartialShape([1, 2, 3]) + ps2 = PartialShape([1, 2, 3]) + assert ps1.same_scheme(ps2) + + ps1 = PartialShape([-1, 2, 3]) + ps2 = PartialShape([1, -1, 3]) + assert not ps1.same_scheme(ps2) + + ps1 = PartialShape.dynamic() + ps2 = PartialShape.dynamic() + assert ps1.same_scheme(ps2) + + +def test_partial_shape_refinement(): + ps1 = PartialShape.dynamic() + ps2 = PartialShape.dynamic() + assert ps1.refines(ps2) + assert ps1.relaxes(ps2) + assert ps2.refines(ps1) + assert ps2.relaxes(ps1) + + ps1 = PartialShape.dynamic() + ps2 = PartialShape([3, -1, 7, 9]) + assert not ps1.refines(ps2) + assert ps1.relaxes(ps2) + assert ps2.refines(ps1) + assert not ps2.relaxes(ps1) + + ps1 = PartialShape.dynamic() + ps2 = PartialShape([3, 5, 7, 9]) + assert not ps1.refines(ps2) + assert ps1.relaxes(ps2) + assert ps2.refines(ps1) + assert not ps2.relaxes(ps1) + + +def test_partial_shape_equals(): + ps1 = PartialShape.dynamic() + ps2 = PartialShape.dynamic() + assert ps1 == ps2 + + ps1 = PartialShape([1, 2, 3]) + ps2 = PartialShape([1, 2, 3]) + assert ps1 == ps2 + + shape = Shape([1, 2, 3]) + ps = PartialShape([1, 2, 3]) + assert shape == ps + + +def test_repr_dynamic_shape(): + shape = PartialShape([-1, 2]) + parameter_a = ng.parameter(shape, dtype=np.float32, name="A") + parameter_b = ng.parameter(shape, dtype=np.float32, name="B") + model = parameter_a + parameter_b + function = Function(model, [parameter_a, parameter_b], "simple_dyn_shapes_graph") + + assert repr(function) == "" + + ops = function.get_ordered_ops() + for op in ops: + assert "{?,2}" in repr(op) + + +def test_discrete_type_info(): + 
data_shape = [6, 12, 10, 24] + data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + k = np.int32(3) + axis = np.int32(1) + n1 = ng.topk(data_parameter, k, axis, "max", "value") + n2 = ng.topk(data_parameter, k, axis, "max", "value") + n3 = ng.sin(0.2) + + assert n1.type_info.name == "TopK" + assert n3.type_info.name == "Sin" + assert n1.get_type_info().name == "TopK" + assert n3.get_type_info().name == "Sin" + assert n1.type_info.name == n2.type_info.name + assert n1.type_info.version == n2.type_info.version + assert n1.type_info.parent == n2.type_info.parent + assert n1.get_type_info().name == n2.get_type_info().name + assert n1.get_type_info().version == n2.get_type_info().version + assert n1.get_type_info().parent == n2.get_type_info().parent + assert n1.get_type_info().name != n3.get_type_info().name + assert n1.get_type_info().name > n3.get_type_info().name + assert n1.get_type_info().name >= n3.get_type_info().name + assert n3.get_type_info().name < n1.get_type_info().name + assert n3.get_type_info().name <= n1.get_type_info().name diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py new file mode 100644 index 00000000000..673d7a2ebf1 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_create_op.py @@ -0,0 +1,1925 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +from _pyngraph import PartialShape, Dimension + +import ngraph as ng +import ngraph.opset1 as ng_opset1 +import ngraph.opset5 as ng_opset5 +from ngraph.impl import Type + +np_types = [np.float32, np.int32] +integral_np_types = [ + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, +] + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_adaptive_avg_pool(dtype): + data = ng.parameter([2, 24, 34, 62], 
name="input", dtype=dtype) + output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) + + node = ng.adaptive_avg_pool(data, output_shape) + + assert node.get_type_name() == "AdaptiveAvgPool" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [2, 24, 16, 16] + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +@pytest.mark.parametrize("ind_type", ["i32", "i64"]) +def test_adaptive_max_pool(dtype, ind_type): + data = ng.parameter([2, 24, 34, 62], name="input", dtype=dtype) + output_shape = ng.constant(np.array([16, 16], dtype=np.int32)) + + node = ng.adaptive_max_pool(data, output_shape, ind_type) + + assert node.get_type_name() == "AdaptiveMaxPool" + assert node.get_output_size() == 2 + assert list(node.get_output_shape(0)) == [2, 24, 16, 16] + assert list(node.get_output_shape(1)) == [2, 24, 16, 16] + assert node.get_output_element_type(1) == Type.i32 if ind_type == "i32" else Type.i64 + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_binary_convolution(dtype): + strides = np.array([1, 1]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([1, 1]) + mode = "xnor-popcount" + pad_value = 0.0 + + input0_shape = [1, 1, 9, 9] + input1_shape = [1, 1, 3, 3] + expected_shape = [1, 1, 7, 7] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + + node = ng.binary_convolution( + parameter_input0, parameter_input1, strides, pads_begin, pads_end, dilations, mode, pad_value, + ) + + assert node.get_type_name() == "BinaryConvolution" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", np_types) +def test_ctc_greedy_decoder(dtype): + input0_shape = [20, 8, 128] + input1_shape = [20, 8] + expected_shape = [8, 20, 1, 1] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) 
+ parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + + node = ng.ctc_greedy_decoder(parameter_input0, parameter_input1) + + assert node.get_type_name() == "CTCGreedyDecoder" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index", + [ + (np.float32, np.int32, "i32", "i32", True, True), + (np.float32, np.int32, "i64", "i32", True, True), + (np.float32, np.int32, "i32", "i64", True, True), + (np.float32, np.int32, "i64", "i64", True, True), + (np.float64, np.int64, "i32", "i32", False, True), + (np.float64, np.int64, "i64", "i32", False, True), + (np.float64, np.int64, "i32", "i64", False, True), + (np.float64, np.int64, "i64", "i64", False, True), + (np.float32, np.int32, "i32", "i32", True, False), + (np.float32, np.int32, "i64", "i32", True, False), + (np.float32, np.int32, "i32", "i64", True, False), + (np.float32, np.int32, "i64", "i64", True, False), + (np.float64, np.int64, "i32", "i32", False, False), + (np.float64, np.int64, "i64", "i32", False, False), + (np.float64, np.int64, "i32", "i64", False, False), + (np.float64, np.int64, "i64", "i64", False, False) + ],) +def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index): + input0_shape = [8, 20, 128] + input1_shape = [8] + input2_shape = [1] + expected_shape = [8, 20] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=fp_dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=int_dtype) + parameter_input2 = None + if blank_index: + parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=int_dtype) + + node = ng.ctc_greedy_decoder_seq_len( + parameter_input0, parameter_input1, parameter_input2, merge_repeated, int_ci, int_sl + ) + + assert node.get_type_name() == "CTCGreedyDecoderSeqLen" + assert node.get_output_size() == 2 + assert 
list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", np_types) +def test_deformable_convolution_opset1(dtype): + strides = np.array([1, 1]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([1, 1]) + + input0_shape = [1, 1, 9, 9] + input1_shape = [1, 18, 7, 7] + input2_shape = [1, 1, 3, 3] + expected_shape = [1, 1, 7, 7] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + + node = ng_opset1.deformable_convolution( + parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, + ) + + assert node.get_type_name() == "DeformableConvolution" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", np_types) +def test_deformable_convolution(dtype): + strides = np.array([1, 1]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([1, 1]) + + input0_shape = [1, 1, 9, 9] + input1_shape = [1, 18, 7, 7] + input2_shape = [1, 1, 3, 3] + expected_shape = [1, 1, 7, 7] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + + node = ng.deformable_convolution( + parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations, + ) + + assert node.get_type_name() == "DeformableConvolution" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", np_types) +def test_deformable_convolution_mask(dtype): + strides = np.array([1, 1]) + pads_begin = np.array([0, 0]) + pads_end = np.array([0, 0]) + dilations = np.array([1, 
1]) + + input0_shape = [1, 1, 9, 9] + input1_shape = [1, 18, 7, 7] + input2_shape = [1, 1, 3, 3] + input3_shape = [1, 9, 7, 7] + expected_shape = [1, 1, 7, 7] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype) + + node = ng.deformable_convolution( + parameter_input0, parameter_input1, parameter_input2, strides, + pads_begin, pads_end, dilations, parameter_input3 + ) + + assert node.get_type_name() == "DeformableConvolution" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", np_types) +def test_deformable_psroi_pooling(dtype): + output_dim = 8 + spatial_scale = 0.0625 + group_size = 7 + mode = "bilinear_deformable" + spatial_bins_x = 4 + spatial_bins_y = 4 + trans_std = 0.1 + part_size = 7 + + input0_shape = [1, 392, 38, 63] + input1_shape = [300, 5] + input2_shape = [300, 2, 7, 7] + expected_shape = [300, 8, 7, 7] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + + node = ng.deformable_psroi_pooling( + parameter_input0, + parameter_input1, + output_dim, + spatial_scale, + group_size, + mode, + spatial_bins_x, + spatial_bins_y, + trans_std, + part_size, + offsets=parameter_input2, + ) + + assert node.get_type_name() == "DeformablePSROIPooling" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", np_types) +def test_floor_mod(dtype): + input0_shape = [8, 1, 6, 1] + input1_shape = [7, 1, 5] + expected_shape = [8, 7, 6, 5] + + parameter_input0 = ng.parameter(input0_shape, 
name="Input0", dtype=dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + + node = ng.floor_mod(parameter_input0, parameter_input1) + + assert node.get_type_name() == "FloorMod" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", np_types) +def test_gather_tree(dtype): + input0_shape = [100, 1, 10] + input1_shape = [100, 1, 10] + input2_shape = [1] + input3_shape = [] + expected_shape = [100, 1, 10] + + parameter_input0 = ng.parameter(input0_shape, name="Input0", dtype=dtype) + parameter_input1 = ng.parameter(input1_shape, name="Input1", dtype=dtype) + parameter_input2 = ng.parameter(input2_shape, name="Input2", dtype=dtype) + parameter_input3 = ng.parameter(input3_shape, name="Input3", dtype=dtype) + + node = ng.gather_tree(parameter_input0, parameter_input1, parameter_input2, parameter_input3) + + assert node.get_type_name() == "GatherTree" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_lstm_cell_operator(dtype): + batch_size = 1 + input_size = 16 + hidden_size = 128 + + X_shape = [batch_size, input_size] + H_t_shape = [batch_size, hidden_size] + C_t_shape = [batch_size, hidden_size] + W_shape = [4 * hidden_size, input_size] + R_shape = [4 * hidden_size, hidden_size] + B_shape = [4 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + + expected_shape = [1, 128] + + node_default = ng.lstm_cell( + parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, + ) 
+ + assert node_default.get_type_name() == "LSTMCell" + assert node_default.get_output_size() == 2 + assert list(node_default.get_output_shape(0)) == expected_shape + assert list(node_default.get_output_shape(1)) == expected_shape + + activations = ["tanh", "Sigmoid", "RELU"] + activation_alpha = [1.0, 2.0, 3.0] + activation_beta = [3.0, 2.0, 1.0] + clip = 0.5 + + node_param = ng.lstm_cell( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + activations, + activation_alpha, + activation_beta, + clip, + ) + + assert node_param.get_type_name() == "LSTMCell" + assert node_param.get_output_size() == 2 + assert list(node_param.get_output_shape(0)) == expected_shape + assert list(node_param.get_output_shape(1)) == expected_shape + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_lstm_cell_operator_opset1(dtype): + batch_size = 1 + input_size = 16 + hidden_size = 128 + + X_shape = [batch_size, input_size] + H_t_shape = [batch_size, hidden_size] + C_t_shape = [batch_size, hidden_size] + W_shape = [4 * hidden_size, input_size] + R_shape = [4 * hidden_size, hidden_size] + B_shape = [4 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + + expected_shape = [1, 128] + + node_default = ng_opset1.lstm_cell( + parameter_X, parameter_H_t, parameter_C_t, parameter_W, parameter_R, parameter_B, hidden_size, + ) + + assert node_default.get_type_name() == "LSTMCell" + assert node_default.get_output_size() == 2 + assert list(node_default.get_output_shape(0)) == expected_shape + assert list(node_default.get_output_shape(1)) == expected_shape + + activations = ["tanh", 
"Sigmoid", "RELU"] + activation_alpha = [1.0, 2.0, 3.0] + activation_beta = [3.0, 2.0, 1.0] + clip = 0.5 + + node_param = ng_opset1.lstm_cell( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + activations, + activation_alpha, + activation_beta, + clip, + ) + + assert node_param.get_type_name() == "LSTMCell" + assert node_param.get_output_size() == 2 + assert list(node_param.get_output_shape(0)) == expected_shape + assert list(node_param.get_output_shape(1)) == expected_shape + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_lstm_sequence_operator_bidirectional_opset1(dtype): + batch_size = 1 + input_size = 16 + hidden_size = 128 + num_directions = 2 + seq_length = 2 + + X_shape = [batch_size, seq_length, input_size] + H_t_shape = [batch_size, num_directions, hidden_size] + C_t_shape = [batch_size, num_directions, hidden_size] + seq_len_shape = [batch_size] + W_shape = [num_directions, 4 * hidden_size, input_size] + R_shape = [num_directions, 4 * hidden_size, hidden_size] + B_shape = [num_directions, 4 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + + direction = "BIDIRECTIONAL" + node = ng_opset1.lstm_sequence( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + ) + + assert node.get_type_name() == "LSTMSequence" + assert node.get_output_size() == 3 + + activations = ["RELU", "tanh", "Sigmoid"] + activation_alpha = [1.0, 2.0, 3.0] + activation_beta = [3.0, 2.0, 1.0] 
+ clip = 1.22 + + node_param = ng_opset1.lstm_sequence( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + activations, + activation_alpha, + activation_beta, + clip, + ) + + assert node_param.get_type_name() == "LSTMSequence" + assert node_param.get_output_size() == 3 + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_lstm_sequence_operator_reverse_opset1(dtype): + batch_size = 2 + input_size = 4 + hidden_size = 3 + num_directions = 1 + seq_length = 2 + + X_shape = [batch_size, seq_length, input_size] + H_t_shape = [batch_size, num_directions, hidden_size] + C_t_shape = [batch_size, num_directions, hidden_size] + seq_len_shape = [batch_size] + W_shape = [num_directions, 4 * hidden_size, input_size] + R_shape = [num_directions, 4 * hidden_size, hidden_size] + B_shape = [num_directions, 4 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + + direction = "REVERSE" + + node_default = ng_opset1.lstm_sequence( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + ) + + assert node_default.get_type_name() == "LSTMSequence" + assert node_default.get_output_size() == 3 + + activations = ["RELU", "tanh", "Sigmoid"] + activation_alpha = [1.0, 2.0, 3.0] + activation_beta = [3.0, 2.0, 1.0] + clip = 1.22 + + node_param = ng_opset1.lstm_sequence( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_seq_len, + parameter_W, + parameter_R, + 
parameter_B, + hidden_size, + direction, + activations, + activation_alpha, + activation_beta, + clip, + ) + + assert node_param.get_type_name() == "LSTMSequence" + assert node_param.get_output_size() == 3 + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_lstm_sequence_operator_forward_opset1(dtype): + batch_size = 2 + input_size = 4 + hidden_size = 3 + num_directions = 1 + seq_length = 2 + + X_shape = [batch_size, seq_length, input_size] + H_t_shape = [batch_size, num_directions, hidden_size] + C_t_shape = [batch_size, num_directions, hidden_size] + seq_len_shape = [batch_size] + W_shape = [num_directions, 4 * hidden_size, input_size] + R_shape = [num_directions, 4 * hidden_size, hidden_size] + B_shape = [num_directions, 4 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype) + parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + + direction = "forward" + + node_default = ng_opset1.lstm_sequence( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + ) + + assert node_default.get_type_name() == "LSTMSequence" + assert node_default.get_output_size() == 3 + + activations = ["RELU", "tanh", "Sigmoid"] + activation_alpha = [2.0] + activation_beta = [1.0] + clip = 0.5 + + node = ng_opset1.lstm_sequence( + parameter_X, + parameter_H_t, + parameter_C_t, + parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + activations, + activation_alpha, + activation_beta, + clip, + ) + + assert node.get_type_name() == "LSTMSequence" + assert node.get_output_size() == 3 
+ + +def test_gru_cell_operator(): + batch_size = 1 + input_size = 16 + hidden_size = 128 + + X_shape = [batch_size, input_size] + H_t_shape = [batch_size, hidden_size] + W_shape = [3 * hidden_size, input_size] + R_shape = [3 * hidden_size, hidden_size] + B_shape = [3 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + + expected_shape = [1, 128] + + node_default = ng.gru_cell(parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size) + + assert node_default.get_type_name() == "GRUCell" + assert node_default.get_output_size() == 1 + assert list(node_default.get_output_shape(0)) == expected_shape + + activations = ["tanh", "relu"] + activations_alpha = [1.0, 2.0] + activations_beta = [1.0, 2.0] + clip = 0.5 + linear_before_reset = True + + # If *linear_before_reset* is set True, then B tensor shape must be [4 * hidden_size] + B_shape = [4 * hidden_size] + parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + + node_param = ng.gru_cell( + parameter_X, + parameter_H_t, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + activations, + activations_alpha, + activations_beta, + clip, + linear_before_reset, + ) + + assert node_param.get_type_name() == "GRUCell" + assert node_param.get_output_size() == 1 + assert list(node_param.get_output_shape(0)) == expected_shape + + +def test_gru_sequence(): + batch_size = 2 + input_size = 16 + hidden_size = 32 + seq_len = 8 + seq_lengths = [seq_len] * batch_size + num_directions = 1 + direction = "FORWARD" + + X_shape = [batch_size, seq_len, input_size] + H_t_shape = [batch_size, num_directions, hidden_size] + W_shape = [num_directions, 3 * hidden_size, input_size] + R_shape = [num_directions, 
3 * hidden_size, hidden_size] + B_shape = [num_directions, 3 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + + expected_shape_y = [batch_size, num_directions, seq_len, hidden_size] + expected_shape_h = [batch_size, num_directions, hidden_size] + + node_default = ng.gru_sequence( + parameter_X, + parameter_H_t, + seq_lengths, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + ) + + assert node_default.get_type_name() == "GRUSequence" + assert node_default.get_output_size() == 2 + assert list(node_default.get_output_shape(0)) == expected_shape_y + assert list(node_default.get_output_shape(1)) == expected_shape_h + + activations = ["tanh", "relu"] + activations_alpha = [1.0, 2.0] + activations_beta = [1.0, 2.0] + clip = 0.5 + linear_before_reset = True + + # If *linear_before_reset* is set True, then B tensor shape must be [4 * hidden_size] + B_shape = [num_directions, 4 * hidden_size] + parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + + node_param = ng.gru_sequence( + parameter_X, + parameter_H_t, + seq_lengths, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + activations, + activations_alpha, + activations_beta, + clip, + linear_before_reset, + ) + + assert node_param.get_type_name() == "GRUSequence" + assert node_param.get_output_size() == 2 + assert list(node_param.get_output_shape(0)) == expected_shape_y + assert list(node_param.get_output_shape(1)) == expected_shape_h + + +def test_rnn_sequence(): + batch_size = 2 + input_size = 16 + hidden_size = 32 + seq_len = 8 + seq_lengths = [seq_len] * batch_size + num_directions = 1 + direction = "FORWARD" + + X_shape = [batch_size, seq_len, input_size] + 
H_t_shape = [batch_size, num_directions, hidden_size] + W_shape = [num_directions, hidden_size, input_size] + R_shape = [num_directions, hidden_size, hidden_size] + B_shape = [num_directions, hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + + expected_shape_y = [batch_size, num_directions, seq_len, hidden_size] + expected_shape_h = [batch_size, num_directions, hidden_size] + + node_default = ng.rnn_sequence( + parameter_X, + parameter_H_t, + seq_lengths, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + ) + + assert node_default.get_type_name() == "RNNSequence" + assert node_default.get_output_size() == 2 + assert list(node_default.get_output_shape(0)) == expected_shape_y + assert list(node_default.get_output_shape(1)) == expected_shape_h + + activations = ["relu"] + activations_alpha = [2.0] + activations_beta = [1.0] + clip = 0.5 + + node_param = ng.rnn_sequence( + parameter_X, + parameter_H_t, + seq_lengths, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + activations, + activations_alpha, + activations_beta, + clip, + ) + + assert node_param.get_type_name() == "RNNSequence" + assert node_param.get_output_size() == 2 + assert list(node_param.get_output_shape(0)) == expected_shape_y + assert list(node_param.get_output_shape(1)) == expected_shape_h + + +def test_loop(): + from ngraph.utils.tensor_iterator_types import ( + GraphBody, + TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, + ) + + condition = ng.constant(True, dtype=np.bool) + trip_count = ng.constant(16, dtype=np.int32) + # Body parameters + 
body_timestep = ng.parameter([], np.int32, "timestep") + body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in") + body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma") + body_const_one = ng.parameter([], np.int32, "body_const_one") + + # CMA = cumulative moving average + prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma) + curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0])) + elem_cnt = ng.add(body_const_one, body_timestep) + curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32")) + cma_hist = ng.unsqueeze(curr_cma, [0]) + + # TI inputs + data = ng.parameter([16, 2, 2], np.float32, "data") + # Iterations count + zero = ng.constant(0, dtype=np.int32) + one = ng.constant(1, dtype=np.int32) + initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) + iter_cnt = ng.range(zero, np.int32(16), np.int32(1)) + ti_inputs = [iter_cnt, data, initial_cma, one] + body_const_condition = ng.constant(True, dtype=np.bool) + + graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], + [curr_cma, cma_hist, body_const_condition]) + ti_slice_input_desc = [ + # timestep + # input_idx, body_param_idx, start, stride, part_size, end, axis + TensorIteratorSliceInputDesc(2, 0, 0, 1, 1, -1, 0), + # data + TensorIteratorSliceInputDesc(3, 1, 0, 1, 1, -1, 0), + ] + ti_merged_input_desc = [ + # body prev/curr_cma + TensorIteratorMergedInputDesc(4, 2, 0), + ] + ti_invariant_input_desc = [ + # body const one + TensorIteratorInvariantInputDesc(5, 3), + ] + + # TI outputs + ti_body_output_desc = [ + # final average + TensorIteratorBodyOutputDesc(0, 0, -1), + ] + ti_concat_output_desc = [ + # history of cma + TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0), + ] + + node = ng.loop( + trip_count, + condition, + ti_inputs, + graph_body, + ti_slice_input_desc, + ti_merged_input_desc, + ti_invariant_input_desc, + ti_body_output_desc, + ti_concat_output_desc, + 2, + -1, + ) + + assert 
def test_roi_pooling():
    """ROIPooling: max-pool 150 ROI crops from a [2, 3, 4, 5] feature map into 6x6 bins."""
    inputs = ng.parameter([2, 3, 4, 5], dtype=np.float32)
    coords = ng.parameter([150, 5], dtype=np.float32)
    node = ng.roi_pooling(inputs, coords, [6, 6], 0.0625, "Max")

    assert node.get_type_name() == "ROIPooling"
    # BUG FIX: get_output_size() returns the number of node outputs (an int);
    # the original compared it against the pooled bin shape [6, 6], which can
    # never be equal. Every sibling test compares against an integer.
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [150, 3, 6, 6]
    assert node.get_output_element_type(0) == Type.f32


def test_psroi_pooling():
    """PSROIPooling: 72 input channels folded into 2 output channels with 6x6 groups."""
    inputs = ng.parameter([1, 72, 4, 5], dtype=np.float32)
    coords = ng.parameter([150, 5], dtype=np.float32)
    node = ng.psroi_pooling(inputs, coords, 2, 6, 0.0625, 0, 0, "average")

    assert node.get_type_name() == "PSROIPooling"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [150, 2, 6, 6]
    assert node.get_output_element_type(0) == Type.f32


def test_convert_like():
    """ConvertLike casts the data input to the element type of `like` (i8 here)."""
    parameter_data = ng.parameter([1, 2, 3, 4], name="data", dtype=np.float32)
    like = ng.constant(1, dtype=np.int8)

    node = ng.convert_like(parameter_data, like)

    assert node.get_type_name() == "ConvertLike"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [1, 2, 3, 4]
    assert node.get_output_element_type(0) == Type.i8


def test_bucketize():
    """Bucketize maps each element to an i32 bucket index given 5 boundaries."""
    data = ng.parameter([4, 3, 2, 1], name="data", dtype=np.float32)
    buckets = ng.parameter([5], name="buckets", dtype=np.int64)

    node = ng.bucketize(data, buckets, "i32")

    assert node.get_type_name() == "Bucketize"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [4, 3, 2, 1]
    assert node.get_output_element_type(0) == Type.i32
def test_reorg_yolo():
    """ReorgYolo with stride 2: channels x4, spatial dims halved (floor)."""
    data = ng.parameter([2, 24, 34, 62], name="input", dtype=np.int32)

    op = ng.reorg_yolo(data, [2])

    assert op.get_type_name() == "ReorgYolo"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [2, 96, 17, 31]
    assert op.get_output_element_type(0) == Type.i32


def test_embedding_bag_offsets_sum_1():
    """EmbeddingBagOffsetsSum gathers rows of a [5, 2] table into 3 bags."""
    emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    indices = ng.parameter([4], name="indices", dtype=np.int64)
    offsets = ng.parameter([3], name="offsets", dtype=np.int64)
    default_index = ng.parameter([], name="default_index", dtype=np.int64)

    op = ng.embedding_bag_offsets_sum(emb_table, indices, offsets, default_index)

    assert op.get_type_name() == "EmbeddingBagOffsetsSum"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [3, 2]
    assert op.get_output_element_type(0) == Type.f32
def test_embedding_segments_sum_with_some_opt_inputs():
    """EmbeddingSegmentsSum given only num_segments among the optional inputs."""
    emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    indices = ng.parameter([4], name="indices", dtype=np.int64)
    segment_ids = ng.parameter([4], name="segment_ids", dtype=np.int64)
    num_segments = ng.parameter([], name="num_segments", dtype=np.int64)

    # only 1 out of 3 optional inputs
    op = ng.embedding_segments_sum(emb_table, indices, segment_ids, num_segments)

    assert op.get_type_name() == "EmbeddingSegmentsSum"
    assert op.get_output_size() == 1
    assert op.get_output_partial_shape(0).same_scheme(PartialShape([-1, 2]))
    assert op.get_output_element_type(0) == Type.f32


def test_embedding_bag_packed_sum():
    """EmbeddingBagPackedSum over packed [3, 3] indices with per-sample weights."""
    emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    indices = ng.parameter([3, 3], name="indices", dtype=np.int64)
    per_sample_weights = ng.parameter([3, 3], name="per_sample_weights", dtype=np.float32)

    # per_sample_weights is this op's single optional input (the previous
    # comment, copied from the segments-sum test above, said "1 out of 3").
    op = ng.embedding_bag_packed_sum(emb_table, indices, per_sample_weights)

    assert op.get_type_name() == "EmbeddingBagPackedSum"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [3, 2]
    assert op.get_output_element_type(0) == Type.f32


@pytest.mark.parametrize("dtype", integral_np_types)
def test_interpolate(dtype):
    """Interpolate resizes the spatial axes (2, 3) of a 1024x1024 image to 64x64."""
    image_node = ng.parameter([1, 3, 1024, 1024], dtype, name="Image")
    attributes = {
        "axes": [2, 3],
        "mode": "cubic",
        "pads_begin": np.array([2, 2], dtype=dtype),
    }

    op = ng.interpolate(image_node, [64, 64], attributes)

    assert op.get_type_name() == "Interpolate"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [1, 3, 64, 64]
@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.int8, np.float32),
        (np.int16, np.float32),
        (np.int32, np.float32),
        (np.int64, np.float32),
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.int32, np.float16),
        (np.int32, np.float64),
    ],
)
def test_prior_box(int_dtype, fp_dtype):
    """PriorBox on a 32x32 layer over a 64x64 image yields shape [2, 20480]."""
    image_shape = np.array([64, 64], dtype=int_dtype)
    attributes = {
        "offset": fp_dtype(0),
        "min_size": np.array([2, 3], dtype=fp_dtype),
        "aspect_ratio": np.array([1.5, 2.0, 2.5], dtype=fp_dtype),
        "scale_all_sizes": False
    }
    layer_shape = ng.constant(np.array([32, 32], dtype=int_dtype), int_dtype)

    op = ng.prior_box(layer_shape, image_shape, attributes)

    assert op.get_type_name() == "PriorBox"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [2, 20480]


@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.int8, np.float32),
        (np.int16, np.float32),
        (np.int32, np.float32),
        (np.int64, np.float32),
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.int32, np.float16),
        (np.int32, np.float64),
    ],
)
def test_prior_box_clustered(int_dtype, fp_dtype):
    """PriorBoxClustered with 3 fixed box shapes on a 19x19 grid -> [2, 4332]."""
    image_size = np.array([64, 64], dtype=int_dtype)
    attributes = {
        "offset": fp_dtype(0.5),
        "width": np.array([4.0, 2.0, 3.2], dtype=fp_dtype),
        "height": np.array([1.0, 2.0, 1.0], dtype=fp_dtype),
    }
    output_size = ng.constant(np.array([19, 19], dtype=int_dtype), int_dtype)

    op = ng.prior_box_clustered(output_size, image_size, attributes)

    assert op.get_type_name() == "PriorBoxClustered"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [2, 4332]
@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.int8, np.float32),
        (np.int16, np.float32),
        (np.int32, np.float32),
        (np.int64, np.float32),
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.int32, np.float16),
        (np.int32, np.float64),
    ],
)
def test_detection_output(int_dtype, fp_dtype):
    """DetectionOutput with auxiliary class/box predictions -> [1, 1, 256, 7]."""
    attributes = {
        "num_classes": int_dtype(85),
        "keep_top_k": np.array([64], dtype=int_dtype),
        "nms_threshold": fp_dtype(0.645),
    }

    box_logits = ng.parameter([4, 8], fp_dtype, "box_logits")
    class_preds = ng.parameter([4, 170], fp_dtype, "class_preds")
    proposals = ng.parameter([4, 2, 10], fp_dtype, "proposals")
    aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds")
    aux_box_preds = ng.parameter([4, 8], fp_dtype, "aux_box_preds")

    op = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds)

    assert op.get_type_name() == "DetectionOutput"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [1, 1, 256, 7]


@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.uint32, np.float16),
        (np.uint32, np.float64),
    ],
)
def test_proposal(int_dtype, fp_dtype):
    """Proposal: first output is [batch * post_nms_topn, 5] candidate boxes."""
    attributes = {
        "base_size": int_dtype(1),
        "pre_nms_topn": int_dtype(20),
        "post_nms_topn": int_dtype(64),
        "nms_thresh": fp_dtype(0.34),
        "feat_stride": int_dtype(16),
        "min_size": int_dtype(32),
        "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype),
        "scale": np.array([2, 3, 3, 4], dtype=fp_dtype),
    }
    batch_size = 7

    class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs")
    bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas")
    image_shape = ng.parameter([3], fp_dtype, "image_shape")

    op = ng.proposal(class_probs, bbox_deltas, image_shape, attributes)

    assert op.get_type_name() == "Proposal"
    assert op.get_output_size() == 2
    assert list(op.get_output_shape(0)) == [batch_size * attributes["post_nms_topn"], 5]
TensorIteratorSliceInputDesc, + TensorIteratorMergedInputDesc, + TensorIteratorInvariantInputDesc, + TensorIteratorBodyOutputDesc, + TensorIteratorConcatOutputDesc, + ) + + # Body parameters + body_timestep = ng.parameter([], np.int32, "timestep") + body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in") + body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma") + body_const_one = ng.parameter([], np.int32, "body_const_one") + + # CMA = cumulative moving average + prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma) + curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0])) + elem_cnt = ng.add(body_const_one, body_timestep) + curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32")) + cma_hist = ng.unsqueeze(curr_cma, [0]) + + # TI inputs + data = ng.parameter([16, 2, 2], np.float32, "data") + # Iterations count + zero = ng.constant(0, dtype=np.int32) + one = ng.constant(1, dtype=np.int32) + initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32) + iter_cnt = ng.range(zero, np.int32(16), np.int32(1)) + ti_inputs = [iter_cnt, data, initial_cma, one] + + graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist]) + ti_slice_input_desc = [ + # timestep + # input_idx, body_param_idx, start, stride, part_size, end, axis + TensorIteratorSliceInputDesc(0, 0, 0, 1, 1, -1, 0), + # data + TensorIteratorSliceInputDesc(1, 1, 0, 1, 1, -1, 0), + ] + ti_merged_input_desc = [ + # body prev/curr_cma + TensorIteratorMergedInputDesc(2, 2, 0), + ] + ti_invariant_input_desc = [ + # body const one + TensorIteratorInvariantInputDesc(3, 3), + ] + + # TI outputs + ti_body_output_desc = [ + # final average + TensorIteratorBodyOutputDesc(0, 0, -1), + ] + ti_concat_output_desc = [ + # history of cma + TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0), + ] + + node = ng.tensor_iterator( + ti_inputs, + graph_body, + ti_slice_input_desc, + ti_merged_input_desc, + 
def test_read_value_opset5():
    """opset5 ReadValue forwards the shape/type of its initializing input."""
    init_value = ng_opset5.parameter([2, 2], name="init_value", dtype=np.int32)

    op = ng_opset5.read_value(init_value, "var_id_667")

    assert op.get_type_name() == "ReadValue"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [2, 2]
    assert op.get_output_element_type(0) == Type.i32


def test_assign_opset5():
    """opset5 Assign stores a ReadValue result back into the same variable."""
    input_data = ng_opset5.parameter([5, 7], name="input_data", dtype=np.int32)
    rv = ng_opset5.read_value(input_data, "var_id_667")

    op = ng_opset5.assign(rv, "var_id_667")

    assert op.get_type_name() == "Assign"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [5, 7]
    assert op.get_output_element_type(0) == Type.i32


def test_read_value():
    """Default-opset ReadValue forwards the shape/type of its initializer."""
    init_value = ng.parameter([2, 2], name="init_value", dtype=np.int32)

    op = ng.read_value(init_value, "var_id_667")

    assert op.get_type_name() == "ReadValue"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [2, 2]
    assert op.get_output_element_type(0) == Type.i32


def test_assign():
    """Default-opset Assign stores a ReadValue result back into the variable."""
    input_data = ng.parameter([5, 7], name="input_data", dtype=np.int32)
    rv = ng.read_value(input_data, "var_id_667")

    op = ng.assign(rv, "var_id_667")

    assert op.get_type_name() == "Assign"
    assert op.get_output_size() == 1
    assert list(op.get_output_shape(0)) == [5, 7]
    assert op.get_output_element_type(0) == Type.i32
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_bidirectional(dtype):
    """LSTMSequence (BIDIRECTIONAL) builds with default and with explicit activations."""
    batch_size, input_size, hidden_size = 1, 16, 128
    num_directions, seq_length = 2, 2

    parameter_X = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    parameter_H_t = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter([batch_size, num_directions, hidden_size], name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    # LSTM gate dimension is 4 * hidden_size.
    parameter_W = ng.parameter([num_directions, 4 * hidden_size, input_size], name="W", dtype=dtype)
    parameter_R = ng.parameter([num_directions, 4 * hidden_size, hidden_size], name="R", dtype=dtype)
    parameter_B = ng.parameter([num_directions, 4 * hidden_size], name="B", dtype=dtype)

    direction = "BIDIRECTIONAL"
    node = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )

    assert node.get_type_name() == "LSTMSequence"
    assert node.get_output_size() == 3

    # Same graph again, now with explicit activation functions and clipping.
    node_param = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        ["RELU", "tanh", "Sigmoid"],  # activations
        [1.0, 2.0, 3.0],              # activation alpha
        [3.0, 2.0, 1.0],              # activation beta
        1.22,                         # clip
    )

    assert node_param.get_type_name() == "LSTMSequence"
    assert node_param.get_output_size() == 3


@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_reverse(dtype):
    """LSTMSequence (REVERSE) builds with default and with explicit activations."""
    batch_size, input_size, hidden_size = 2, 4, 3
    num_directions, seq_length = 1, 2

    parameter_X = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    parameter_H_t = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter([batch_size, num_directions, hidden_size], name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter([num_directions, 4 * hidden_size, input_size], name="W", dtype=dtype)
    parameter_R = ng.parameter([num_directions, 4 * hidden_size, hidden_size], name="R", dtype=dtype)
    parameter_B = ng.parameter([num_directions, 4 * hidden_size], name="B", dtype=dtype)

    direction = "REVERSE"

    node_default = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )

    assert node_default.get_type_name() == "LSTMSequence"
    assert node_default.get_output_size() == 3

    node_param = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        ["RELU", "tanh", "Sigmoid"],  # activations
        [1.0, 2.0, 3.0],              # activation alpha
        [3.0, 2.0, 1.0],              # activation beta
        1.22,                         # clip
    )

    assert node_param.get_type_name() == "LSTMSequence"
    assert node_param.get_output_size() == 3
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_gru_sequence_operator_bidirectional(dtype):
    """GRUSequence (BIDIRECTIONAL) builds with default and with explicit activations."""
    batch_size, input_size, hidden_size = 1, 16, 128
    num_directions, seq_length = 2, 2

    parameter_X = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    parameter_H_t = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    parameter_seq_len = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    # GRU gate dimension is 3 * hidden_size.
    parameter_W = ng.parameter([num_directions, 3 * hidden_size, input_size], name="W", dtype=dtype)
    parameter_R = ng.parameter([num_directions, 3 * hidden_size, hidden_size], name="R", dtype=dtype)
    parameter_B = ng.parameter([num_directions, 3 * hidden_size], name="B", dtype=dtype)

    direction = "BIDIRECTIONAL"
    node = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )

    assert node.get_type_name() == "GRUSequence"
    assert node.get_output_size() == 2

    activations = ["RELU", "tanh"]
    activation_alpha = [1.0, 2.0, 3.0]
    activation_beta = [3.0, 2.0, 1.0]
    clip = 1.22
    linear_before_reset = True
    # With linear_before_reset, B grows to 4 * hidden_size.
    parameter_B = ng.parameter([num_directions, 4 * hidden_size], name="B", dtype=dtype)

    node_param = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
        linear_before_reset
    )

    assert node_param.get_type_name() == "GRUSequence"
    assert node_param.get_output_size() == 2
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_gru_sequence_operator_forward(dtype):
    """GRUSequence (forward) builds with default and with explicit activations."""
    batch_size, input_size, hidden_size = 2, 4, 3
    num_directions, seq_length = 1, 2

    parameter_X = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    parameter_H_t = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    parameter_seq_len = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter([num_directions, 3 * hidden_size, input_size], name="W", dtype=dtype)
    parameter_R = ng.parameter([num_directions, 3 * hidden_size, hidden_size], name="R", dtype=dtype)
    parameter_B = ng.parameter([num_directions, 3 * hidden_size], name="B", dtype=dtype)

    direction = "forward"

    node_default = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )

    assert node_default.get_type_name() == "GRUSequence"
    assert node_default.get_output_size() == 2

    activations = ["RELU", "tanh"]
    activation_alpha = [2.0]
    activation_beta = [1.0]
    clip = 0.5
    linear_before_reset = True
    # With linear_before_reset, B grows to 4 * hidden_size.
    parameter_B = ng.parameter([num_directions, 4 * hidden_size], name="B", dtype=dtype)

    node = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
        linear_before_reset
    )

    assert node.get_type_name() == "GRUSequence"
    assert node.get_output_size() == 2
parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + ) + + assert node.get_type_name() == "RNNSequence" + assert node.get_output_size() == 2 + + activations = ["RELU", "tanh"] + activation_alpha = [1.0, 2.0, 3.0] + activation_beta = [3.0, 2.0, 1.0] + clip = 1.22 + + node_param = ng.rnn_sequence( + parameter_X, + parameter_H_t, + parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + activations, + activation_alpha, + activation_beta, + clip, + ) + + assert node_param.get_type_name() == "RNNSequence" + assert node_param.get_output_size() == 2 + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_rnn_sequence_operator_reverse(dtype): + batch_size = 2 + input_size = 4 + hidden_size = 3 + num_directions = 1 + seq_length = 2 + + X_shape = [batch_size, seq_length, input_size] + H_t_shape = [batch_size, num_directions, hidden_size] + seq_len_shape = [batch_size] + W_shape = [num_directions, hidden_size, input_size] + R_shape = [num_directions, hidden_size, hidden_size] + B_shape = [num_directions, hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=dtype) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype) + parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32) + parameter_W = ng.parameter(W_shape, name="W", dtype=dtype) + parameter_R = ng.parameter(R_shape, name="R", dtype=dtype) + parameter_B = ng.parameter(B_shape, name="B", dtype=dtype) + + direction = "REVERSE" + + node_default = ng.rnn_sequence( + parameter_X, + parameter_H_t, + parameter_seq_len, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + direction, + ) + + assert node_default.get_type_name() == "RNNSequence" + assert node_default.get_output_size() == 2 + + activations = ["RELU", "tanh"] + activation_alpha = [1.0, 2.0, 3.0] + activation_beta = [3.0, 2.0, 1.0] + clip = 1.22 + + node_param = ng.rnn_sequence( + parameter_X, + parameter_H_t, + 
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_rnn_sequence_operator_forward(dtype):
    """RNNSequence (forward) builds with default and with explicit activations."""
    batch_size = 2
    input_size = 4
    hidden_size = 3
    num_directions = 1
    seq_length = 2

    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    W_shape = [num_directions, hidden_size, input_size]
    R_shape = [num_directions, hidden_size, hidden_size]
    B_shape = [num_directions, hidden_size]

    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)

    direction = "forward"

    node_default = ng.rnn_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )

    assert node_default.get_type_name() == "RNNSequence"
    assert node_default.get_output_size() == 2

    activations = ["RELU", "tanh"]
    activation_alpha = [2.0]
    activation_beta = [1.0]
    clip = 0.5

    node = ng.rnn_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )

    assert node.get_type_name() == "RNNSequence"
    assert node.get_output_size() == 2


def test_multiclass_nms():
    """MulticlassNms on 1 batch x 2 classes x 6 boxes, sorted by class id."""
    boxes_data = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.1, 1.0, 1.1,
                           0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
                           0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32")
    boxes_data = boxes_data.reshape([1, 6, 4])
    # BUG FIX: `dtype=np.float` is the deprecated alias of builtin float
    # (float64, removed in NumPy 1.24); the data is float32 and the test below
    # asserts the selected-outputs element type is Type.f32.
    box = ng.constant(boxes_data, dtype=np.float32)
    scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3,
                            0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32")
    scores_data = scores_data.reshape([1, 2, 6])
    score = ng.constant(scores_data, dtype=np.float32)

    nms_node = ng.multiclass_nms(box, score, output_type="i32", nms_top_k=3,
                                 iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid",
                                 nms_eta=1.0)

    assert nms_node.get_type_name() == "MulticlassNms"
    assert nms_node.get_output_size() == 3
    # Outputs: selected boxes, selected indices, valid-output count per batch.
    assert nms_node.outputs()[0].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(6)])
    assert nms_node.outputs()[1].get_partial_shape() == PartialShape([Dimension(0, 6), Dimension(1)])
    assert list(nms_node.outputs()[2].get_shape()) == [1, ]
    assert nms_node.get_output_element_type(0) == Type.f32
    assert nms_node.get_output_element_type(1) == Type.i32
    assert nms_node.get_output_element_type(2) == Type.i32
== PartialShape([Dimension(0, 6), Dimension(1)]) + assert list(nms_node.outputs()[2].get_shape()) == [1, ] + assert nms_node.get_output_element_type(0) == Type.f32 + assert nms_node.get_output_element_type(1) == Type.i32 + assert nms_node.get_output_element_type(2) == Type.i32 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ctc_loss.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ctc_loss.py new file mode 100644 index 00000000000..7c977eacb75 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ctc_loss.py @@ -0,0 +1,27 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import ngraph as ng +from ngraph.impl import Type + + +def test_ctc_loss_props(): + ind_dtype = np.int32 + float_dtype = np.float32 + logits = ng.parameter([2, 100, 80], dtype=float_dtype, name="logits") + logit_length = ng.parameter([2], dtype=ind_dtype, name="logit_length") + labels = ng.parameter([2, 100], dtype=ind_dtype, name="labels") + label_length = ng.parameter([2], dtype=ind_dtype, name="label_length") + blank_index = ng.parameter([], dtype=ind_dtype, name="blank_index") + preprocess_collapse_repeated = False + ctc_merge_repeated = True + unique = False + + node = ng.ctc_loss(logits, logit_length, labels, label_length, blank_index, + preprocess_collapse_repeated, ctc_merge_repeated, unique) + assert node.get_type_name() == "CTCLoss" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [2] + assert node.get_output_element_type(0) == Type.f32 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_data_movement.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_data_movement.py new file mode 100644 index 00000000000..5873057f679 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_data_movement.py @@ -0,0 +1,218 @@ +# Copyright (C) 2018-2021 Intel Corporation +# 
SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import ngraph as ng +from ngraph.impl import Type, Shape +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node + + +def test_reverse_sequence(): + input_data = np.array( + [ + 0, + 0, + 3, + 0, + 6, + 0, + 9, + 0, + 1, + 0, + 4, + 0, + 7, + 0, + 10, + 0, + 2, + 0, + 5, + 0, + 8, + 0, + 11, + 0, + 12, + 0, + 15, + 0, + 18, + 0, + 21, + 0, + 13, + 0, + 16, + 0, + 19, + 0, + 22, + 0, + 14, + 0, + 17, + 0, + 20, + 0, + 23, + 0, + ], + dtype=np.int32, + ).reshape([2, 3, 4, 2]) + seq_lengths = np.array([1, 2, 1, 2], dtype=np.int32) + batch_axis = 2 + sequence_axis = 1 + + input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) + seq_lengths_param = ng.parameter(seq_lengths.shape, name="sequence lengths", dtype=np.int32) + model = ng.reverse_sequence(input_param, seq_lengths_param, batch_axis, sequence_axis) + + runtime = get_runtime() + computation = runtime.computation(model, input_param, seq_lengths_param) + result = computation(input_data, seq_lengths) + + expected = np.array( + [ + 0, + 0, + 4, + 0, + 6, + 0, + 10, + 0, + 1, + 0, + 3, + 0, + 7, + 0, + 9, + 0, + 2, + 0, + 5, + 0, + 8, + 0, + 11, + 0, + 12, + 0, + 16, + 0, + 18, + 0, + 22, + 0, + 13, + 0, + 15, + 0, + 19, + 0, + 21, + 0, + 14, + 0, + 17, + 0, + 20, + 0, + 23, + 0, + ], + ).reshape([1, 2, 3, 4, 2]) + assert np.allclose(result, expected) + + +def test_pad_edge(): + input_data = np.arange(1, 13).reshape([3, 4]) + pads_begin = np.array([0, 1], dtype=np.int32) + pads_end = np.array([2, 3], dtype=np.int32) + + input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) + model = ng.pad(input_param, pads_begin, pads_end, "edge") + + runtime = get_runtime() + computation = runtime.computation(model, input_param) + result = computation(input_data) + + expected = np.array( + [ + [1, 1, 2, 3, 4, 4, 4, 4], + [5, 5, 6, 7, 8, 8, 8, 8], + [9, 9, 10, 11, 12, 12, 
12, 12], + [9, 9, 10, 11, 12, 12, 12, 12], + [9, 9, 10, 11, 12, 12, 12, 12], + ] + ) + assert np.allclose(result, expected) + + +def test_pad_constant(): + input_data = np.arange(1, 13).reshape([3, 4]) + pads_begin = np.array([0, 1], dtype=np.int32) + pads_end = np.array([2, 3], dtype=np.int32) + + input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) + model = ng.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int32)) + + runtime = get_runtime() + computation = runtime.computation(model, input_param) + result = computation(input_data) + + expected = np.array( + [ + [100, 1, 2, 3, 4, 100, 100, 100], + [100, 5, 6, 7, 8, 100, 100, 100], + [100, 9, 10, 11, 12, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100, 100], + ] + ) + assert np.allclose(result, expected) + + +def test_select(): + cond = np.array([[False, False], [True, False], [True, True]]) + then_node = np.array([[-1, 0], [1, 2], [3, 4]], dtype=np.int32) + else_node = np.array([[11, 10], [9, 8], [7, 6]], dtype=np.int32) + excepted = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.int32) + + result = run_op_node([cond, then_node, else_node], ng.select) + assert np.allclose(result, excepted) + + +def test_gather_nd(): + indices_type = np.int32 + data_dtype = np.float32 + data = ng.parameter([2, 10, 80, 30, 50], dtype=data_dtype, name="data") + indices = ng.parameter([2, 10, 30, 40, 2], dtype=indices_type, name="indices") + batch_dims = 2 + expected_shape = [20, 30, 40, 50] + + node = ng.gather_nd(data, indices, batch_dims) + assert node.get_type_name() == "GatherND" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + assert node.get_output_element_type(0) == Type.f32 + + +def test_gather_elements(): + indices_type = np.int32 + data_dtype = np.float32 + data = ng.parameter(Shape([2, 5]), dtype=data_dtype, name="data") + indices = ng.parameter(Shape([2, 100]), 
dtype=indices_type, name="indices") + axis = 1 + expected_shape = [2, 100] + + node = ng.gather_elements(data, indices, axis) + assert node.get_type_name() == "GatherElements" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + assert node.get_output_element_type(0) == Type.f32 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_dft.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_dft.py new file mode 100644 index 00000000000..44ea887d845 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_dft.py @@ -0,0 +1,117 @@ +import ngraph as ng +import numpy as np +from tests_compatibility.runtime import get_runtime + + +def build_fft_input_data(): + np.random.seed(202104) + return np.random.uniform(0, 1, (2, 10, 10, 2)).astype(np.float32) + + +def test_dft_1d(): + runtime = get_runtime() + input_data = build_fft_input_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([2], dtype=np.int64)) + + dft_node = ng.dft(input_tensor, input_axes) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), + axis=2).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.00001) + + +def test_dft_2d(): + runtime = get_runtime() + input_data = build_fft_input_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) + + dft_node = ng.dft(input_tensor, input_axes) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), + axes=[1, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.000062) + 
+ +def test_dft_3d(): + runtime = get_runtime() + input_data = build_fft_input_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) + + dft_node = ng.dft(input_tensor, input_axes) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), + axes=[0, 1, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.0002) + + +def test_dft_1d_signal_size(): + runtime = get_runtime() + input_data = build_fft_input_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([-2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([20], dtype=np.int64)) + + dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20, + axis=-2).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.00001) + + +def test_dft_2d_signal_size_1(): + runtime = get_runtime() + input_data = build_fft_input_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([0, 2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + + dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], + axes=[0, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.000062) + + +def test_dft_2d_signal_size_2(): + runtime = 
get_runtime() + input_data = build_fft_input_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + + dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], + axes=[1, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.000062) + + +def test_dft_3d_signal_size(): + runtime = get_runtime() + input_data = build_fft_input_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([4, 5, 16], dtype=np.int64)) + + dft_node = ng.dft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), + s=[4, 5, 16], axes=[0, 1, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.0002) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_dyn_attributes.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_dyn_attributes.py new file mode 100644 index 00000000000..a945ec91ef0 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_dyn_attributes.py @@ -0,0 +1,227 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +import ngraph as ng + + +@pytest.fixture() +def _proposal_node(): + attributes = { + "base_size": np.uint16(1), + "pre_nms_topn": np.uint16(20), + "post_nms_topn": np.uint16(64), + "nms_thresh": 
np.float64(0.34), + "feat_stride": np.uint16(16), + "min_size": np.uint16(32), + "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float64), + "scale": np.array([2, 3, 3, 4], dtype=np.float64), + } + batch_size = 7 + + class_probs = ng.parameter([batch_size, 12, 34, 62], np.float64, "class_probs") + bbox_deltas = ng.parameter([batch_size, 24, 34, 62], np.float64, "bbox_deltas") + image_shape = ng.parameter([3], np.float64, "image_shape") + return ng.proposal(class_probs, bbox_deltas, image_shape, attributes) + + +def test_dynamic_attributes_softmax(): + axis = 2 + data = ng.parameter([1, 2, 3, 4], np.float32, "data_in") + node = ng.softmax(data, axis) + + assert node.get_axis() == axis + node.set_axis(3) + assert node.get_axis() == 3 + + +@pytest.mark.parametrize( + "int_dtype, fp_dtype", + [ + (np.int8, np.float32), + (np.int16, np.float32), + (np.int32, np.float32), + (np.int64, np.float32), + (np.uint8, np.float32), + (np.uint16, np.float32), + (np.uint32, np.float32), + (np.uint64, np.float32), + (np.int32, np.float16), + (np.int32, np.float64), + ], +) +def test_dynamic_get_attribute_value(int_dtype, fp_dtype): + attributes = { + "num_classes": int_dtype(85), + "background_label_id": int_dtype(13), + "top_k": int_dtype(16), + "variance_encoded_in_target": True, + "keep_top_k": np.array([64, 32, 16, 8], dtype=int_dtype), + "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", + "share_location": False, + "nms_threshold": fp_dtype(0.645), + "confidence_threshold": fp_dtype(0.111), + "clip_after_nms": True, + "clip_before_nms": False, + "decrease_label_id": True, + "normalized": True, + "input_height": int_dtype(86), + "input_width": int_dtype(79), + "objectness_score": fp_dtype(0.77), + } + + box_logits = ng.parameter([4, 680], fp_dtype, "box_logits") + class_preds = ng.parameter([4, 170], fp_dtype, "class_preds") + proposals = ng.parameter([4, 1, 8], fp_dtype, "proposals") + aux_class_preds = ng.parameter([4, 4], fp_dtype, "aux_class_preds") + aux_box_preds = 
ng.parameter([4, 680], fp_dtype, "aux_box_preds") + + node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds) + + assert node.get_num_classes() == int_dtype(85) + assert node.get_background_label_id() == int_dtype(13) + assert node.get_top_k() == int_dtype(16) + assert node.get_variance_encoded_in_target() + assert np.all(np.equal(node.get_keep_top_k(), np.array([64, 32, 16, 8], dtype=int_dtype))) + assert node.get_code_type() == "caffe.PriorBoxParameter.CENTER_SIZE" + assert not node.get_share_location() + assert np.isclose(node.get_nms_threshold(), fp_dtype(0.645)) + assert np.isclose(node.get_confidence_threshold(), fp_dtype(0.111)) + assert node.get_clip_after_nms() + assert not node.get_clip_before_nms() + assert node.get_decrease_label_id() + assert node.get_normalized() + assert node.get_input_height() == int_dtype(86) + assert node.get_input_width() == int_dtype(79) + assert np.isclose(node.get_objectness_score(), fp_dtype(0.77)) + assert node.get_num_classes() == int_dtype(85) + + +@pytest.mark.parametrize( + "int_dtype, fp_dtype", + [ + (np.uint8, np.float32), + (np.uint16, np.float32), + (np.uint32, np.float32), + (np.uint64, np.float32), + (np.uint32, np.float16), + (np.uint32, np.float64), + ], +) +def test_dynamic_set_attribute_value(int_dtype, fp_dtype): + attributes = { + "base_size": int_dtype(1), + "pre_nms_topn": int_dtype(20), + "post_nms_topn": int_dtype(64), + "nms_thresh": fp_dtype(0.34), + "feat_stride": int_dtype(16), + "min_size": int_dtype(32), + "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype), + "scale": np.array([2, 3, 3, 4], dtype=fp_dtype), + } + batch_size = 7 + + class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs") + bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas") + image_shape = ng.parameter([3], fp_dtype, "image_shape") + node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes) + + 
node.set_base_size(int_dtype(15)) + node.set_pre_nms_topn(int_dtype(7)) + node.set_post_nms_topn(int_dtype(33)) + node.set_nms_thresh(fp_dtype(1.55)) + node.set_feat_stride(int_dtype(8)) + node.set_min_size(int_dtype(123)) + node.set_ratio(np.array([1.1, 2.5, 3.0, 4.5], dtype=fp_dtype)) + node.set_scale(np.array([2.1, 3.2, 3.3, 4.4], dtype=fp_dtype)) + node.set_clip_before_nms(True) + node.set_clip_after_nms(True) + node.set_normalize(True) + node.set_box_size_scale(fp_dtype(1.34)) + node.set_box_coordinate_scale(fp_dtype(0.88)) + node.set_framework("OpenVINO") + + assert node.get_base_size() == int_dtype(15) + assert node.get_pre_nms_topn() == int_dtype(7) + assert node.get_post_nms_topn() == int_dtype(33) + assert np.isclose(node.get_nms_thresh(), fp_dtype(1.55)) + assert node.get_feat_stride() == int_dtype(8) + assert node.get_min_size() == int_dtype(123) + assert np.allclose(node.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=fp_dtype)) + assert np.allclose(node.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=fp_dtype)) + assert node.get_clip_before_nms() + assert node.get_clip_after_nms() + assert node.get_normalize() + assert np.isclose(node.get_box_size_scale(), fp_dtype(1.34)) + assert np.isclose(node.get_box_coordinate_scale(), fp_dtype(0.88)) + assert node.get_framework() == "OpenVINO" + + +def test_dynamic_attr_cache(_proposal_node): + node = _proposal_node + + assert not node._attr_cache_valid + node.set_nms_thresh(1.3453678102) + assert not node._attr_cache_valid + assert np.isclose(node.get_nms_thresh(), np.float64(1.3453678102)) + assert node._attr_cache_valid + + +def test_dynamic_attr_transitivity(_proposal_node): + node = _proposal_node + node2 = node + + node.set_ratio(np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64)) + assert np.allclose(node.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64)) + assert np.allclose(node2.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64)) + + node2.set_scale(np.array([2.1, 3.2, 3.3, 
4.4], dtype=np.float64)) + assert np.allclose(node2.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64)) + assert np.allclose(node.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64)) + + +def test_dynamic_attributes_simple(): + batch_size = 1 + input_size = 16 + hidden_size = 128 + + X_shape = [batch_size, input_size] + H_t_shape = [batch_size, hidden_size] + W_shape = [3 * hidden_size, input_size] + R_shape = [3 * hidden_size, hidden_size] + B_shape = [4 * hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + + activations = ["tanh", "relu"] + activations_alpha = [1.0, 2.0] + activations_beta = [1.0, 2.0] + clip = 0.5 + linear_before_reset = True + + node = ng.gru_cell( + parameter_X, + parameter_H_t, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + activations, + activations_alpha, + activations_beta, + clip, + linear_before_reset, + ) + + assert node.get_hidden_size() == hidden_size + assert all(map(lambda x, y: x == y, node.get_activations(), activations)) + assert all(np.equal(node.get_activations_alpha(), activations_alpha)) + assert all(np.equal(node.get_activations_beta(), activations_beta)) + assert node.get_linear_before_reset() == linear_before_reset + assert np.isclose(node.get_clip(), clip) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_einsum.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_einsum.py new file mode 100644 index 00000000000..94aa451503f --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_einsum.py @@ -0,0 +1,98 @@ +import ngraph as ng +import numpy as np +import pytest + +from ngraph.utils.types import get_element_type +from 
tests_compatibility import xfail_issue_58033 +from tests_compatibility.runtime import get_runtime + + +def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype, + with_value=False, seed=202104): + """Test Einsum operation for given input shapes, equation, and data type. + + It generates input data of given shapes and type, receives reference results using numpy, + and tests IE implementation by matching with reference numpy results. + :param input_shapes: a list of tuples with shapes + :param equation: Einsum equation + :param data_type: a type of input data + :param with_value: if True - tests output data shape and type along with its value, + otherwise, tests only the output shape and type + :param seed: a seed for random generation of input data + """ + np.random.seed(seed) + num_inputs = len(input_shapes) + runtime = get_runtime() + + # set absolute tolerance based on the data type + atol = 0.0 if np.issubdtype(data_type, np.integer) else 1e-04 + + # generate input tensors + ng_inputs = [] + np_inputs = [] + for i in range(num_inputs): + input_i = np.random.random_integers(10, size=input_shapes[i]).astype(data_type) + np_inputs.append(input_i) + ng_inputs.append(ng.parameter(input_i.shape, dtype=data_type)) + + expected_result = np.einsum(equation, *np_inputs) + einsum_model = ng.einsum(ng_inputs, equation) + + # check the output shape and type + assert einsum_model.get_type_name() == "Einsum" + assert einsum_model.get_output_size() == 1 + assert list(einsum_model.get_output_shape(0)) == list(expected_result.shape) + assert einsum_model.get_output_element_type(0) == get_element_type(data_type) + + # check inference result + if with_value: + computation = runtime.computation(einsum_model, *ng_inputs) + actual_result = computation(*np_inputs) + np.allclose(actual_result, expected_result, atol=atol) + + +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_dot_product(data_type): + einsum_op_exec([5, 5], "i,i->", data_type) + + 
+@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_matrix_multiplication(data_type): + einsum_op_exec([(2, 3), (3, 4)], "ab,bc->ac", data_type) + + +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_batch_trace(data_type): + einsum_op_exec([(2, 3, 3)], "kii->k", data_type) + + +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_diagonal_extraction(data_type): + einsum_op_exec([(6, 5, 5)], "kii->ki", data_type) + + +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_transpose(data_type): + einsum_op_exec([(1, 2, 3)], "ijk->kij", data_type) + + +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_multiple_multiplication(data_type): + einsum_op_exec([(2, 5), (5, 3, 6), (5, 3)], "ab,bcd,bc->ca", data_type) + + +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_simple_ellipsis(data_type): + einsum_op_exec([(5, 3, 4)], "a...->...", data_type) + + +@xfail_issue_58033 +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_multiple_ellipsis(data_type): + einsum_op_exec([(3, 5), 1], "a...,...->a...", data_type, with_value=True) + + +@xfail_issue_58033 +@pytest.mark.parametrize("data_type", [np.float32, np.int32]) +def test_broadcasting_ellipsis(data_type): + einsum_op_exec([(9, 1, 4, 3), (3, 11, 7, 1)], "a...b,b...->a...", data_type, with_value=True) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_gather.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_gather.py new file mode 100644 index 00000000000..13ad452ee18 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_gather.py @@ -0,0 +1,89 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import ngraph as ng +import numpy as np + +from tests_compatibility import xfail_issue_54630 +from tests_compatibility.test_ngraph.util import run_op_node + + +def test_gather(): + 
input_data = np.array( + [1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32 + ).reshape((3, 3)) + input_indices = np.array([0, 2], np.int32).reshape(1, 2) + input_axis = np.array([1], np.int32) + + expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape( + (3, 1, 2) + ) + + result = run_op_node([input_data], ng.gather, input_indices, input_axis) + assert np.allclose(result, expected) + + +def test_gather_with_scalar_axis(): + input_data = np.array( + [1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32 + ).reshape((3, 3)) + input_indices = np.array([0, 2], np.int32).reshape(1, 2) + input_axis = np.array(1, np.int32) + + expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape( + (3, 1, 2) + ) + + result = run_op_node([input_data], ng.gather, input_indices, input_axis) + assert np.allclose(result, expected) + + +def test_gather_batch_dims_1(): + + input_data = np.array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], np.float32) + + input_indices = np.array([[0, 0, 4], + [4, 0, 0]], np.int32) + input_axis = np.array([1], np.int32) + batch_dims = 1 + + expected = np.array([[1, 1, 5], + [10, 6, 6]], np.float32) + + result = run_op_node([input_data], ng.gather, input_indices, input_axis, batch_dims) + assert np.allclose(result, expected) + + +@xfail_issue_54630 +def test_gather_negative_indices(): + input_data = np.array( + [1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32 + ).reshape((3, 3)) + input_indices = np.array([0, -1], np.int32).reshape(1, 2) + input_axis = np.array([1], np.int32) + + expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape( + (3, 1, 2) + ) + + result = run_op_node([input_data], ng.gather, input_indices, input_axis) + assert np.allclose(result, expected) + + +@xfail_issue_54630 +def test_gather_batch_dims_1_negative_indices(): + + input_data = np.array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 10]], np.float32) + + input_indices = np.array([[0, 1, -2], + [-2, 0, 0]], np.int32) + 
input_axis = np.array([1], np.int32) + batch_dims = 1 + + expected = np.array([[1, 2, 4], + [9, 6, 6]], np.float32) + + result = run_op_node([input_data], ng.gather, input_indices, input_axis, batch_dims) + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_idft.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_idft.py new file mode 100644 index 00000000000..3ff0efb763c --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_idft.py @@ -0,0 +1,117 @@ +import ngraph as ng +import numpy as np +from tests_compatibility.runtime import get_runtime + + +def get_data(): + np.random.seed(202104) + return np.random.uniform(0, 1, (2, 10, 10, 2)).astype(np.float32) + + +def test_idft_1d(): + runtime = get_runtime() + expected_results = get_data() + complex_input_data = np.fft.fft(np.squeeze(expected_results.view(dtype=np.complex64), + axis=-1), axis=2).astype(np.complex64) + input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([2], dtype=np.int64)) + + dft_node = ng.idft(input_tensor, input_axes) + computation = runtime.computation(dft_node) + dft_results = computation() + assert np.allclose(dft_results, expected_results, atol=0.000002) + + +def test_idft_2d(): + runtime = get_runtime() + expected_results = get_data() + complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1), + axes=[1, 2]).astype(np.complex64) + input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) + + dft_node = ng.idft(input_tensor, input_axes) + computation = runtime.computation(dft_node) + dft_results = computation() + assert np.allclose(dft_results, expected_results, atol=0.000002) + + +def test_idft_3d(): + runtime = get_runtime() 
+ expected_results = get_data() + complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1), + axes=[0, 1, 2]).astype(np.complex64) + input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1) + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) + + dft_node = ng.idft(input_tensor, input_axes) + computation = runtime.computation(dft_node) + dft_results = computation() + assert np.allclose(dft_results, expected_results, atol=0.000003) + + +def test_idft_1d_signal_size(): + runtime = get_runtime() + input_data = get_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([-2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([20], dtype=np.int64)) + + dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.ifft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20, + axis=-2).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.000002) + + +def test_idft_2d_signal_size_1(): + runtime = get_runtime() + input_data = get_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([0, 2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + + dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], + axes=[0, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.000002) + + +def test_idft_2d_signal_size_2(): + runtime = get_runtime() + input_data = get_data() + 
input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([1, 2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([4, 5], dtype=np.int64)) + + dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5], + axes=[1, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.000002) + + +def test_idft_3d_signal_size(): + runtime = get_runtime() + input_data = get_data() + input_tensor = ng.constant(input_data) + input_axes = ng.constant(np.array([0, 1, 2], dtype=np.int64)) + input_signal_size = ng.constant(np.array([4, 5, 16], dtype=np.int64)) + + dft_node = ng.idft(input_tensor, input_axes, input_signal_size) + computation = runtime.computation(dft_node) + dft_results = computation() + np_results = np.fft.ifftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), + s=[4, 5, 16], axes=[0, 1, 2]).astype(np.complex64) + expected_results = np.stack((np_results.real, np_results.imag), axis=-1) + assert np.allclose(dft_results, expected_results, atol=0.000002) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_input_validation.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_input_validation.py new file mode 100644 index 00000000000..ca31e065b8f --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_input_validation.py @@ -0,0 +1,157 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from ngraph.exceptions import UserInputError +from ngraph.utils.input_validation import ( + _check_value, + check_valid_attribute, + check_valid_attributes, + is_non_negative_value, + is_positive_value, +) + + +@pytest.mark.parametrize("dtype", 
[np.int8, np.int16, np.int32, np.int64, np.float32, np.float64]) +def test_is_positive_value_signed_type(dtype): + assert is_positive_value(dtype(16)) + assert not is_positive_value(dtype(-16)) + + +@pytest.mark.parametrize("dtype", [np.uint8, np.uint16, np.uint32, np.uint64]) +def test_is_positive_value_unsigned_type(dtype): + assert is_positive_value(dtype(16)) + + +@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64]) +def test_is_non_negative_value_signed_type(dtype): + assert is_non_negative_value(dtype(16)) + assert is_non_negative_value(dtype(0)) + assert not is_non_negative_value(dtype(-1)) + assert not is_non_negative_value(dtype(-16)) + + +@pytest.mark.parametrize("dtype", [np.uint8, np.uint16, np.uint32, np.uint64]) +def test_is_non_negative_value_unsigned_type(dtype): + assert is_non_negative_value(dtype(16)) + assert is_non_negative_value(dtype(0)) + + +@pytest.mark.parametrize( + "value, val_type", + [ + (np.int8(64), np.integer), + (np.int16(64), np.integer), + (np.int32(64), np.integer), + (np.int64(64), np.integer), + (np.uint8(64), np.unsignedinteger), + (np.uint16(64), np.unsignedinteger), + (np.uint32(64), np.unsignedinteger), + (np.uint64(64), np.unsignedinteger), + (np.float32(64), np.floating), + (np.float64(64), np.floating), + ], +) +def test_check_value(value, val_type): + def is_even(x): + return x % 2 == 0 + + assert _check_value("TestOp", "test_attr", value, val_type, is_even) + + +@pytest.mark.parametrize( + "value, val_type", + [ + (np.int8(64), np.floating), + (np.int16(64), np.floating), + (np.int32(64), np.floating), + (np.int64(64), np.floating), + (np.uint8(64), np.floating), + (np.uint16(64), np.floating), + (np.uint32(64), np.floating), + (np.uint64(64), np.floating), + (np.float32(64), np.integer), + (np.float64(64), np.integer), + ], +) +def test_check_value_fail_type(value, val_type): + try: + _check_value("TestOp", "test_attr", value, val_type, None) + except UserInputError: + 
pass + else: + raise AssertionError("Type validation has unexpectedly passed.") + + +@pytest.mark.parametrize( + "value, val_type", + [ + (np.int8(61), np.integer), + (np.int16(61), np.integer), + (np.int32(61), np.integer), + (np.int64(61), np.integer), + (np.uint8(61), np.unsignedinteger), + (np.uint16(61), np.unsignedinteger), + (np.uint32(61), np.unsignedinteger), + (np.uint64(61), np.unsignedinteger), + (np.float32(61), np.floating), + (np.float64(61), np.floating), + ], +) +def test_check_value_fail_cond(value, val_type): + def is_even(x): + return x % 2 == 0 + + try: + _check_value("TestOp", "test_attr", value, val_type, is_even) + except UserInputError: + pass + else: + raise AssertionError("Condition validation has unexpectedly passed.") + + +def test_check_valid_attribute(): + attr_dict = { + "mode": "bilinear", + "coefficients": [1, 2, 3, 4, 5], + } + + assert check_valid_attribute("TestOp", attr_dict, "width", np.unsignedinteger, required=False) + assert check_valid_attribute("TestOp", attr_dict, "mode", np.str_, required=True) + assert check_valid_attribute("TestOp", attr_dict, "coefficients", np.integer, required=True) + + try: + check_valid_attribute("TestOp", attr_dict, "alpha", np.floating, required=True) + except UserInputError: + pass + else: + raise AssertionError("Validation of missing required attribute has unexpectedly passed.") + + +def test_check_valid_attributes(): + attr_dict = { + "mode": "bilinear", + "coefficients": [1, 2, 3, 4, 5], + } + + def _is_supported_mode(x): + return x in ["linear", "area", "cubic", "bilinear"] + + requirements = [ + ("width", False, np.unsignedinteger, None), + ("mode", True, np.str_, _is_supported_mode), + ("coefficients", True, np.integer, lambda x: x > 0), + ("alpha", False, np.float64, None), + ] + + assert check_valid_attributes("TestOp", attr_dict, requirements) + + requirements[3] = ("alpha", True, np.float64, None) + try: + check_valid_attributes("TestOp", attr_dict, requirements) + except 
UserInputError: + pass + else: + raise AssertionError("Validation of missing required attribute has unexpectedly passed.") diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_log_softmax.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_log_softmax.py new file mode 100644 index 00000000000..2506c591c05 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_log_softmax.py @@ -0,0 +1,17 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import ngraph as ng +from ngraph.impl import Shape, Type + + +def test_log_softmax(): + float_dtype = np.float32 + data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + + node = ng.log_softmax(data, 1) + assert node.get_type_name() == "LogSoftmax" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [3, 10] + assert node.get_output_element_type(0) == Type.f32 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_manager.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_manager.py new file mode 100644 index 00000000000..eb3f420ac12 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_manager.py @@ -0,0 +1,37 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# flake8: noqa + +import json + +import numpy as np +import pytest + +import ngraph as ng +from ngraph.impl import Function, PartialShape, Shape +from ngraph.impl.passes import Manager +from tests_compatibility.test_ngraph.util import count_ops_of_type + + +def test_constant_folding(): + node_constant = ng.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32)) + node_ceil = ng.ceiling(node_constant) + func = Function(node_ceil, [], "TestFunction") + + assert count_ops_of_type(func, node_ceil) == 1 + assert count_ops_of_type(func, node_constant) == 1 + + pass_manager = Manager() + 
pass_manager.register_pass("ConstantFolding") + pass_manager.run_passes(func) + + assert count_ops_of_type(func, node_ceil) == 0 + assert count_ops_of_type(func, node_constant) == 1 + + new_const = func.get_results()[0].input(0).get_source_output().get_node() + + values_out = new_const.get_vector() + values_expected = [0.0, 1.0, 0.0, -2.0, 3.0, 3.0] + + assert np.allclose(values_out, values_expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py new file mode 100644 index 00000000000..14fe3d62d04 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_node_factory.py @@ -0,0 +1,94 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import ngraph as ng +from ngraph.exceptions import UserInputError +from ngraph.utils.node_factory import NodeFactory +from _pyngraph import NodeFactory as _NodeFactory + + +def test_node_factory_add(): + shape = [2, 2] + dtype = np.int8 + parameter_a = ng.parameter(shape, dtype=dtype, name="A") + parameter_b = ng.parameter(shape, dtype=dtype, name="B") + + factory = _NodeFactory("opset1") + arguments = NodeFactory._arguments_as_outputs([parameter_a, parameter_b]) + node = factory.create("Add", arguments, {}) + + assert node.get_type_name() == "Add" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [2, 2] + + +def test_node_factory_wrapper_add(): + shape = [2, 2] + dtype = np.int8 + parameter_a = ng.parameter(shape, dtype=dtype, name="A") + parameter_b = ng.parameter(shape, dtype=dtype, name="B") + + node = ng.add(parameter_a, parameter_b, name="TestNode") + + assert node.get_type_name() == "Add" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [2, 2] + assert node.friendly_name == "TestNode" + + +def test_node_factory_topk(): + dtype = np.int32 + data = ng.parameter([2, 10], 
dtype=dtype, name="A") + k = ng.constant(3, dtype=dtype, name="B") + factory = _NodeFactory("opset1") + arguments = NodeFactory._arguments_as_outputs([data, k]) + node = factory.create( + "TopK", arguments, {"axis": 1, "mode": "max", "sort": "value"} + ) + attributes = node.get_attributes() + + assert node.get_type_name() == "TopK" + assert node.get_output_size() == 2 + assert list(node.get_output_shape(0)) == [2, 3] + assert attributes["axis"] == 1 + assert attributes["mode"] == "max" + assert attributes["sort"] == "value" + + +def test_node_factory_empty_topk(): + factory = NodeFactory("opset1") + node = factory.create("TopK") + + assert node.get_type_name() == "TopK" + + +def test_node_factory_empty_topk_with_args_and_attrs(): + dtype = np.int32 + data = ng.parameter([2, 10], dtype=dtype, name="A") + k = ng.constant(3, dtype=dtype, name="B") + factory = NodeFactory("opset1") + arguments = NodeFactory._arguments_as_outputs([data, k]) + node = factory.create("TopK", None, None) + node.set_arguments(arguments) + node.set_attribute("axis", 1) + node.set_attribute("mode", "max") + node.set_attribute("sort", "value") + node.validate() + + assert node.get_type_name() == "TopK" + assert node.get_output_size() == 2 + assert list(node.get_output_shape(0)) == [2, 3] + + +def test_node_factory_validate_missing_arguments(): + factory = NodeFactory("opset1") + + try: + factory.create( + "TopK", None, {"axis": 1, "mode": "max", "sort": "value"} + ) + except UserInputError: + pass + else: + raise AssertionError("Validation of missing arguments has unexpectedly passed.") diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_normalization.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_normalization.py new file mode 100644 index 00000000000..12ba3181f8d --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_normalization.py @@ -0,0 +1,142 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: 
Apache-2.0 + +import numpy as np + +import ngraph as ng +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node + + +def test_lrn(): + input_image_shape = (2, 3, 2, 1) + input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype("f") + axes = np.array([1], dtype=np.int64) + runtime = get_runtime() + model = ng.lrn(ng.constant(input_image), ng.constant(axes), alpha=1.0, beta=2.0, bias=1.0, size=3) + computation = runtime.computation(model) + result = computation() + assert np.allclose( + result, + np.array( + [ + [[[0.0], [0.05325444]], [[0.03402646], [0.01869806]], [[0.06805293], [0.03287071]]], + [[[0.00509002], [0.00356153]], [[0.00174719], [0.0012555]], [[0.00322708], [0.00235574]]], + ], + dtype=np.float32, + ), + ) + + # Test LRN default parameter values + model = ng.lrn(ng.constant(input_image), ng.constant(axes)) + computation = runtime.computation(model) + result = computation() + assert np.allclose( + result, + np.array( + [ + [[[0.0], [0.35355338]], [[0.8944272], [1.0606602]], [[1.7888544], [1.767767]]], + [[[0.93704253], [0.97827977]], [[1.2493901], [1.2577883]], [[1.5617375], [1.5372968]]], + ], + dtype=np.float32, + ), + ) + + +def test_lrn_factory(): + alpha = 0.0002 + beta = 0.5 + bias = 2.0 + nsize = 3 + axis = np.array([1], dtype=np.int32) + x = np.array( + [ + [ + [ + [0.31403765, -0.16793324, 1.388258, -0.6902954], + [-0.3994045, -0.7833511, -0.30992958, 0.3557573], + [-0.4682631, 1.1741459, -2.414789, -0.42783254], + ], + [ + [-0.82199496, -0.03900861, -0.43670088, -0.53810567], + [-0.10769883, 0.75242394, -0.2507971, 1.0447186], + [-1.4777364, 0.19993274, 0.925649, -2.282516], + ], + ] + ], + dtype=np.float32, + ) + excepted = np.array( + [ + [ + [ + [0.22205527, -0.11874668, 0.98161197, -0.4881063], + [-0.2824208, -0.553902, -0.21915273, 0.2515533], + [-0.33109877, 0.8302269, -1.7073234, -0.3024961], + ], + [ + [-0.5812307, -0.02758324, -0.30878326, 
-0.38049328], + [-0.07615435, 0.53203356, -0.17733987, 0.7387126], + [-1.0448756, 0.14137045, 0.6544598, -1.6138376], + ], + ] + ], + dtype=np.float32, + ) + result = run_op_node([x], ng.lrn, axis, alpha, beta, bias, nsize) + + assert np.allclose(result, excepted) + + +def test_batch_norm_inference(): + data = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float32) + gamma = np.array([2.0, 3.0, 4.0], dtype=np.float32) + beta = np.array([0.0, 0.0, 0.0], dtype=np.float32) + mean = np.array([0.0, 0.0, 0.0], dtype=np.float32) + variance = np.array([1.0, 1.0, 1.0], dtype=np.float32) + epsilon = 9.99e-06 + excepted = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]], dtype=np.float32) + + result = run_op_node([data, gamma, beta, mean, variance], ng.batch_norm_inference, epsilon) + + assert np.allclose(result, excepted) + + +def test_mvn_no_variance(): + data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32).reshape([1, 3, 3, 3]) + axes = np.array([2, 3], dtype=np.int64) + epsilon = 1e-9 + normalize_variance = False + eps_mode = "outside_sqrt" + excepted = np.array([-4, -3, -2, -1, 0, 1, 2, 3, 4, + -4, -3, -2, -1, 0, 1, 2, 3, 4, + -4, -3, -2, -1, 0, 1, 2, 3, 4], dtype=np.float32).reshape([1, 3, 3, 3]) + + result = run_op_node([data], ng.mvn, axes, normalize_variance, epsilon, eps_mode) + + assert np.allclose(result, excepted) + + +def test_mvn(): + data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32).reshape([1, 3, 3, 3]) + axes = np.array([2, 3], dtype=np.int64) + epsilon = 1e-9 + normalize_variance = True + eps_mode = "outside_sqrt" + excepted = np.array([-1.5491934, -1.161895, -0.7745967, + -0.38729835, 0., 0.38729835, + 0.7745967, 1.161895, 1.5491934, + -1.5491934, -1.161895, -0.7745967, + -0.38729835, 0., 0.38729835, + 0.7745967, 1.161895, 1.5491934, + -1.5491934, -1.161895, -0.7745967, + -0.38729835, 0., 0.38729835, + 
0.7745967, 1.161895, 1.5491934], dtype=np.float32).reshape([1, 3, 3, 3]) + + result = run_op_node([data], ng.mvn, axes, normalize_variance, epsilon, eps_mode) + + assert np.allclose(result, excepted) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops.py new file mode 100644 index 00000000000..ba468fa6fe1 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops.py @@ -0,0 +1,856 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# flake8: noqa + +import numpy as np + +import ngraph as ng +from ngraph.impl import AxisSet, Function, Shape, Type +from ngraph.impl.op import Constant, Parameter +from tests_compatibility.runtime import get_runtime + + +def binary_op(op_str, a, b): + + if op_str == "+": + return a + b + elif op_str == "Add": + return ng.add(a, b) + elif op_str == "-": + return a - b + elif op_str == "Sub": + return ng.subtract(a, b) + elif op_str == "*": + return a * b + elif op_str == "Mul": + return ng.multiply(a, b) + elif op_str == "/": + return a / b + elif op_str == "Div": + return ng.divide(a, b) + elif op_str == "Equal": + return ng.equal(a, b) + elif op_str == "Greater": + return ng.greater(a, b) + elif op_str == "GreaterEq": + return ng.greater_equal(a, b) + elif op_str == "Less": + return ng.less(a, b) + elif op_str == "LessEq": + return ng.less_equal(a, b) + elif op_str == "Maximum": + return ng.maximum(a, b) + elif op_str == "Minimum": + return ng.minimum(a, b) + elif op_str == "NotEqual": + return ng.not_equal(a, b) + elif op_str == "Power": + return ng.power(a, b) + + +def binary_op_ref(op_str, a, b): + + if op_str == "+" or op_str == "Add": + return a + b + elif op_str == "-" or op_str == "Sub": + return a - b + elif op_str == "*" or op_str == "Mul": + return a * b + elif op_str == "/" or op_str == "Div": + return a / b + elif op_str == "Dot": + return np.dot(a, b) + elif op_str 
== "Equal": + return np.equal(a, b) + elif op_str == "Greater": + return np.greater(a, b) + elif op_str == "GreaterEq": + return np.greater_equal(a, b) + elif op_str == "Less": + return np.less(a, b) + elif op_str == "LessEq": + return np.less_equal(a, b) + elif op_str == "Maximum": + return np.maximum(a, b) + elif op_str == "Minimum": + return np.minimum(a, b) + elif op_str == "NotEqual": + return np.not_equal(a, b) + elif op_str == "Power": + return np.power(a, b) + + +def binary_op_exec(op_str): + + element_type = Type.f32 + shape = Shape([2, 2]) + A = Parameter(element_type, shape) + B = Parameter(element_type, shape) + parameter_list = [A, B] + function = Function([binary_op(op_str, A, B)], parameter_list, "test") + + a_arr = np.array([[1, 6], [7, 4]], dtype=np.float32) + b_arr = np.array([[5, 2], [3, 8]], dtype=np.float32) + + runtime = get_runtime() + computation = runtime.computation(function, A, B) + result = computation(a_arr, b_arr)[0] + + expected = binary_op_ref(op_str, a_arr, b_arr) + assert np.allclose(result, expected) + + +def binary_op_comparison(op_str): + + element_type = Type.f32 + shape = Shape([2, 2]) + A = Parameter(element_type, shape) + B = Parameter(element_type, shape) + parameter_list = [A, B] + function = Function([binary_op(op_str, A, B)], parameter_list, "test") + a_arr = np.array([[1, 5], [3, 2]], dtype=np.float32) + b_arr = np.array([[2, 4], [3, 1]], dtype=np.float32) + + runtime = get_runtime() + computation = runtime.computation(function, A, B) + result = computation(a_arr, b_arr)[0] + + expected = binary_op_ref(op_str, a_arr, b_arr) + assert np.allclose(result, expected) + + +def test_add(): + binary_op_exec("+") + + +def test_add_op(): + binary_op_exec("Add") + + +def test_sub(): + binary_op_exec("-") + + +def test_sub_op(): + binary_op_exec("Sub") + + +def test_mul(): + binary_op_exec("*") + + +def test_mul_op(): + binary_op_exec("Mul") + + +def test_div(): + binary_op_exec("/") + + +def test_div_op(): + binary_op_exec("Div") 
+ + +def test_maximum(): + binary_op_exec("Maximum") + + +def test_minimum(): + binary_op_exec("Minimum") + + +def test_power(): + binary_op_exec("Power") + + +def test_greater(): + binary_op_comparison("Greater") + + +def test_greater_eq(): + binary_op_comparison("GreaterEq") + + +def test_less(): + binary_op_comparison("Less") + + +def test_less_eq(): + binary_op_comparison("LessEq") + + +def test_not_equal(): + binary_op_comparison("NotEqual") + + +def test_add_with_mul(): + + element_type = Type.f32 + shape = Shape([4]) + A = Parameter(element_type, shape) + B = Parameter(element_type, shape) + C = Parameter(element_type, shape) + parameter_list = [A, B, C] + function = Function([ng.multiply(ng.add(A, B), C)], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, A, B, C) + result = computation( + np.array([1, 2, 3, 4], dtype=np.float32), + np.array([5, 6, 7, 8], dtype=np.float32), + np.array([9, 10, 11, 12], dtype=np.float32), + )[0] + + a_arr = np.array([1, 2, 3, 4], dtype=np.float32) + b_arr = np.array([5, 6, 7, 8], dtype=np.float32) + c_arr = np.array([9, 10, 11, 12], dtype=np.float32) + result_arr_ref = (a_arr + b_arr) * c_arr + + assert np.allclose(result, result_arr_ref) + + +def unary_op(op_str, a): + if op_str == "Abs": + return ng.abs(a) + elif op_str == "Acos": + return ng.acos(a) + elif op_str == "Acosh": + return ng.acosh(a) + elif op_str == "Asin": + return ng.asin(a) + elif op_str == "Asinh": + return ng.asinh(a) + elif op_str == "Atan": + return ng.atan(a) + elif op_str == "Atanh": + return ng.atanh(a) + elif op_str == "Ceiling": + return ng.ceiling(a) + elif op_str == "Cos": + return ng.cos(a) + elif op_str == "Cosh": + return ng.cosh(a) + elif op_str == "Floor": + return ng.floor(a) + elif op_str == "log": + return ng.log(a) + elif op_str == "exp": + return ng.exp(a) + elif op_str == "negative": + return ng.negative(a) + elif op_str == "Sign": + return ng.sign(a) + elif op_str == "Sin": + return 
ng.sin(a) + elif op_str == "Sinh": + return ng.sinh(a) + elif op_str == "Sqrt": + return ng.sqrt(a) + elif op_str == "Tan": + return ng.tan(a) + elif op_str == "Tanh": + return ng.tanh(a) + + +def unary_op_ref(op_str, a): + if op_str == "Abs": + return np.abs(a) + elif op_str == "Acos": + return np.arccos(a) + elif op_str == "Acosh": + return np.arccosh(a) + elif op_str == "Asin": + return np.arcsin(a) + elif op_str == "Asinh": + return np.arcsinh(a) + elif op_str == "Atan": + return np.arctan(a) + elif op_str == "Atanh": + return np.arctanh(a) + elif op_str == "Ceiling": + return np.ceil(a) + elif op_str == "Cos": + return np.cos(a) + elif op_str == "Cosh": + return np.cosh(a) + elif op_str == "Floor": + return np.floor(a) + elif op_str == "log": + return np.log(a) + elif op_str == "exp": + return np.exp(a) + elif op_str == "negative": + return np.negative(a) + elif op_str == "Reverse": + return np.fliplr(a) + elif op_str == "Sign": + return np.sign(a) + elif op_str == "Sin": + return np.sin(a) + elif op_str == "Sinh": + return np.sinh(a) + elif op_str == "Sqrt": + return np.sqrt(a) + elif op_str == "Tan": + return np.tan(a) + elif op_str == "Tanh": + return np.tanh(a) + + +def unary_op_exec(op_str, input_list): + """ + input_list needs to have deep length of 4 + """ + element_type = Type.f32 + shape = Shape(np.array(input_list).shape) + A = Parameter(element_type, shape) + parameter_list = [A] + function = Function([unary_op(op_str, A)], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(np.array(input_list, dtype=np.float32))[0] + + expected = unary_op_ref(op_str, np.array(input_list, dtype=np.float32)) + assert np.allclose(result, expected) + + +def test_abs(): + input_list = [-1, 0, 1, 2] + op_str = "Abs" + unary_op_exec(op_str, input_list) + + +def test_acos(): + input_list = [-1, 0, 0.5, 1] + op_str = "Acos" + unary_op_exec(op_str, input_list) + + +def test_acosh(): + 
input_list = [2., 3., 1.5, 1.0] + op_str = "Acosh" + unary_op_exec(op_str, input_list) + + +def test_asin(): + input_list = [-1, 0, 0.5, 1] + op_str = "Asin" + unary_op_exec(op_str, input_list) + + +def test_asinh(): + input_list = [-1, 0, 0.5, 1] + op_str = "Asinh" + unary_op_exec(op_str, input_list) + + +def test_atan(): + input_list = [-1, 0, 0.5, 1] + op_str = "Atan" + unary_op_exec(op_str, input_list) + + +def test_atanh(): + input_list = [-1, 0, 0.5, 1] + op_str = "Atanh" + unary_op_exec(op_str, input_list) + + +def test_ceiling(): + input_list = [0.5, 0, 0.4, 0.5] + op_str = "Ceiling" + unary_op_exec(op_str, input_list) + + +def test_cos(): + input_list = [0, 0.7, 1.7, 3.4] + op_str = "Cos" + unary_op_exec(op_str, input_list) + + +def test_cosh(): + input_list = [-1, 0.0, 0.5, 1] + op_str = "Cosh" + unary_op_exec(op_str, input_list) + + +def test_floor(): + input_list = [-0.5, 0, 0.4, 0.5] + op_str = "Floor" + unary_op_exec(op_str, input_list) + + +def test_log(): + input_list = [1, 2, 3, 4] + op_str = "log" + unary_op_exec(op_str, input_list) + + +def test_exp(): + input_list = [-1, 0, 1, 2] + op_str = "exp" + unary_op_exec(op_str, input_list) + + +def test_negative(): + input_list = [-1, 0, 1, 2] + op_str = "negative" + unary_op_exec(op_str, input_list) + + +def test_sign(): + input_list = [-1, 0, 0.5, 1] + op_str = "Sign" + unary_op_exec(op_str, input_list) + + +def test_sin(): + input_list = [0, 0.7, 1.7, 3.4] + op_str = "Sin" + unary_op_exec(op_str, input_list) + + +def test_sinh(): + input_list = [-1, 0.0, 0.5, 1] + op_str = "Sinh" + unary_op_exec(op_str, input_list) + + +def test_sqrt(): + input_list = [0.0, 0.5, 1, 2] + op_str = "Sqrt" + unary_op_exec(op_str, input_list) + + +def test_tan(): + input_list = [-np.pi / 4, 0, np.pi / 8, np.pi / 8] + op_str = "Tan" + unary_op_exec(op_str, input_list) + + +def test_tanh(): + input_list = [-1, 0, 0.5, 1] + op_str = "Tanh" + unary_op_exec(op_str, input_list) + + +def test_reshape(): + + element_type = 
Type.f32 + shape = Shape([2, 3]) + A = Parameter(element_type, shape) + parameter_list = [A] + function = Function([ng.reshape(A, Shape([3, 2]), special_zero=False)], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(np.array(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), dtype=np.float32))[0] + + expected = np.reshape(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), (3, 2)) + assert np.allclose(result, expected) + + +def test_broadcast(): + + element_type = Type.f32 + A = Parameter(element_type, Shape([3])) + parameter_list = [A] + function = Function([ng.broadcast(A, [3, 3])], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(np.array([1, 2, 3], dtype=np.float32))[0] + + a_arr = np.array([[0], [0], [0]], dtype=np.float32) + b_arr = np.array([[1, 2, 3]], dtype=np.float32) + expected = np.add(a_arr, b_arr) + assert np.allclose(result, expected) + + +def test_constant(): + element_type = Type.f32 + parameter_list = [] + function = Function([Constant(element_type, Shape([3, 3]), list(range(9)))], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation()[0] + + expected = np.arange(9).reshape(3, 3) + assert np.allclose(result, expected) + + +def test_concat(): + + element_type = Type.f32 + A = Parameter(element_type, Shape([1, 2])) + B = Parameter(element_type, Shape([1, 2])) + C = Parameter(element_type, Shape([1, 2])) + parameter_list = [A, B, C] + axis = 0 + function = Function([ng.concat([A, B, C], axis)], parameter_list, "test") + + a_arr = np.array([[1, 2]], dtype=np.float32) + b_arr = np.array([[5, 6]], dtype=np.float32) + c_arr = np.array([[7, 8]], dtype=np.float32) + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(a_arr, b_arr, 
c_arr)[0] + + expected = np.concatenate((a_arr, b_arr, c_arr), axis) + assert np.allclose(result, expected) + + +def test_axisset(): + + set_axisset = AxisSet({1, 2, 3}) + list_axisset = AxisSet([1, 2, 3]) + tuple_axisset = AxisSet((1, 2, 3)) + + assert len(set_axisset) == 3 + assert set(set_axisset) == {1, 2, 3} + + assert len(list_axisset) == 3 + assert set(list_axisset) == set(set_axisset) + + assert len(tuple_axisset) == 3 + assert set(tuple_axisset) == set(set_axisset) + + +def test_select(): + element_type = Type.f32 + A = Parameter(Type.boolean, Shape([1, 2])) + B = Parameter(element_type, Shape([1, 2])) + C = Parameter(element_type, Shape([1, 2])) + parameter_list = [A, B, C] + + function = Function([ng.select(A, B, C)], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation( + np.array([[True, False]], dtype=np.bool), + np.array([[5, 6]], dtype=np.float32), + np.array([[7, 8]], dtype=np.float32), + )[0] + + expected = np.array([[5, 8]]) + assert np.allclose(result, expected) + + +def test_max_pool(): + # test 1d + element_type = Type.f32 + shape = Shape([1, 1, 10]) + A = Parameter(element_type, shape) + parameter_list = [A] + + input_arr = np.arange(10, dtype=np.float32).reshape([1, 1, 10]) + window_shape = [3] + + strides = [1] * len(window_shape) + dilations = [1] * len(window_shape) + pads_begin = [0] * len(window_shape) + pads_end = [0] * len(window_shape) + rounding_type = "floor" + auto_pad = "explicit" + idx_elem_type = "i32" + + model = ng.max_pool( + A, + strides, + dilations, + pads_begin, + pads_end, + window_shape, + rounding_type, + auto_pad, + idx_elem_type, + ) + function = Function([model], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(input_arr)[0] + + expected = (np.arange(8) + 2).reshape(1, 1, 8) + assert np.allclose(result, expected) + + # test 1d with strides + 
strides = [2] + pads_begin = [0] * len(window_shape) + pads_end = [0] * len(window_shape) + + model = ng.max_pool( + A, + strides, + dilations, + pads_begin, + pads_end, + window_shape, + rounding_type, + auto_pad, + idx_elem_type, + ) + function = Function([model], parameter_list, "test") + + size = 4 + computation = runtime.computation(function, *parameter_list) + result = computation(input_arr)[0] + + expected = ((np.arange(size) + 1) * 2).reshape(1, 1, size) + assert np.allclose(result, expected) + + # test 2d + element_type = Type.f32 + shape = Shape([1, 1, 10, 10]) + A = Parameter(element_type, shape) + parameter_list = [A] + + input_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10) + window_shape = [3, 3] + + strides = [1, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + + model = ng.max_pool( + A, + strides, + dilations, + pads_begin, + pads_end, + window_shape, + rounding_type, + auto_pad, + idx_elem_type, + ) + function = Function([model], parameter_list, "test") + + computation = runtime.computation(function, *parameter_list) + result = computation(input_arr)[0] + + expected = ((np.arange(100).reshape(10, 10))[2:, 2:]).reshape(1, 1, 8, 8) + assert np.allclose(result, expected) + + # test 2d with strides + strides = [2, 2] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + + model = ng.max_pool( + A, + strides, + dilations, + pads_begin, + pads_end, + window_shape, + rounding_type, + auto_pad, + idx_elem_type, + ) + function = Function([model], parameter_list, "test") + computation = runtime.computation(function, *parameter_list) + result = computation(input_arr)[0] + + size = 4 + expected = ((np.arange(100).reshape(10, 10))[2::2, 2::2]).reshape(1, 1, size, size) + assert np.allclose(result, expected) + + +def convolution2d( + image, + filterit, + strides=(1, 1), + dilation=(1, 1), + padding_below=(0, 0), + padding_above=(0, 0), + data_dilation=(1, 1), +): + def dilate(arr, dil=(1, 1)): + m, n = arr.shape + 
new_m, new_n = (m - 1) * dil[0] + 1, (n - 1) * dil[1] + 1 + new_arr = np.zeros(new_m * new_n, dtype=np.float32).reshape(new_m, new_n) + for i in range(m): + for j in range(n): + new_arr[dil[0] * i][dil[1] * j] = arr[i][j] + return new_arr + + i_m, i_n = image.shape + new_image = np.zeros( + (i_m + padding_below[0] + padding_above[0]) * (i_n + padding_below[1] + padding_above[1]), + dtype=np.float32, + ).reshape(i_m + padding_below[0] + padding_above[0], i_n + padding_below[1] + padding_above[1]) + new_image[padding_below[0] : padding_below[0] + i_m, padding_below[1] : padding_below[1] + i_n] = image + image = new_image + image = image if data_dilation[0] == data_dilation[1] == 1 else dilate(image, data_dilation) + i_m, i_n = image.shape + + filterit = filterit if dilation[0] == dilation[1] == 1 else dilate(filterit, dilation) + f_m, f_n = filterit.shape + + # result_shape + r_m = i_m - f_m + 1 + r_n = i_n - f_n + 1 + r_m //= strides[0] + r_n //= strides[1] + + result = np.zeros(r_m * r_n, dtype=np.float32).reshape(r_m, r_n) + + for i in range(r_m): + for j in range(r_n): + sub_m = image[i * strides[0] : i * strides[0] + f_m, j * strides[1] : j * strides[1] + f_n] + result[i][j] = np.sum(sub_m * filterit) + return result + + +def test_convolution_simple(): + + element_type = Type.f32 + image_shape = Shape([1, 1, 16, 16]) + filter_shape = Shape([1, 1, 3, 3]) + data = Parameter(element_type, image_shape) + filters = Parameter(element_type, filter_shape) + parameter_list = [data, filters] + + image_arr = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16) + filter_arr = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3) + filter_arr[0][0][0][0] = -1 + filter_arr[0][0][1][1] = -1 + filter_arr[0][0][2][2] = -1 + filter_arr[0][0][0][2] = -1 + filter_arr[0][0][2][0] = -1 + + strides = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + dilations = [1, 1] + + model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + function = 
Function([model], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(image_arr, filter_arr)[0] + + expected = convolution2d(image_arr[0][0], filter_arr[0][0]).reshape(1, 1, 14, 14) + assert np.allclose(result, expected) + + +def test_convolution_with_strides(): + + element_type = Type.f32 + image_shape = Shape([1, 1, 10, 10]) + filter_shape = Shape([1, 1, 3, 3]) + data = Parameter(element_type, image_shape) + filters = Parameter(element_type, filter_shape) + parameter_list = [data, filters] + + image_arr = np.arange(100, dtype=np.float32).reshape([1, 1, 10, 10]) + filter_arr = np.zeros(9, dtype=np.float32).reshape([1, 1, 3, 3]) + filter_arr[0][0][1][1] = 1 + strides = [2, 2] + pads_begin = [0, 0] + pads_end = [0, 0] + dilations = [1, 1] + + model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + function = Function([model], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(image_arr, filter_arr)[0] + + expected = convolution2d(image_arr[0][0], filter_arr[0][0], strides).reshape(1, 1, 4, 4) + assert np.allclose(result, expected) + + +def test_convolution_with_filter_dilation(): + + element_type = Type.f32 + image_shape = Shape([1, 1, 10, 10]) + filter_shape = Shape([1, 1, 3, 3]) + data = Parameter(element_type, image_shape) + filters = Parameter(element_type, filter_shape) + parameter_list = [data, filters] + + image_arr = np.arange(100, dtype=np.float32).reshape([1, 1, 10, 10]) + filter_arr = np.ones(9, dtype=np.float32).reshape([1, 1, 3, 3]) + strides = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + dilations = [2, 2] + + model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + function = Function([model], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = 
computation(image_arr, filter_arr)[0] + + expected = convolution2d(image_arr[0][0], filter_arr[0][0], strides, dilations).reshape([1, 1, 6, 6]) + assert np.allclose(result, expected) + + +def test_convolution_with_padding(): + + element_type = Type.f32 + image_shape = Shape([1, 1, 10, 10]) + filter_shape = Shape([1, 1, 3, 3]) + data = Parameter(element_type, image_shape) + filters = Parameter(element_type, filter_shape) + parameter_list = [data, filters] + + image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10) + filter_arr = np.zeros(9, dtype=np.float32).reshape(1, 1, 3, 3) + filter_arr[0][0][1][1] = 1 + strides = [1, 1] + dilations = [2, 2] + pads_begin = [0, 0] + pads_end = [0, 0] + + model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + function = Function([model], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(image_arr, filter_arr)[0] + + expected = convolution2d( + image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end + ).reshape([1, 1, 6, 6]) + assert np.allclose(result, expected) + + +def test_convolution_with_non_zero_padding(): + element_type = Type.f32 + image_shape = Shape([1, 1, 10, 10]) + filter_shape = Shape([1, 1, 3, 3]) + data = Parameter(element_type, image_shape) + filters = Parameter(element_type, filter_shape) + parameter_list = [data, filters] + + image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10) + filter_arr = (np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)) * -1 + filter_arr[0][0][1][1] = 1 + strides = [1, 1] + dilations = [2, 2] + pads_begin = [2, 1] + pads_end = [1, 2] + + model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations) + function = Function([model], parameter_list, "test") + + runtime = get_runtime() + computation = runtime.computation(function, *parameter_list) + result = computation(image_arr, filter_arr)[0] + + expected = 
convolution2d( + image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end + ).reshape([1, 1, 9, 9]) + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_binary.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_binary.py new file mode 100644 index 00000000000..d0a77c8a404 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_binary.py @@ -0,0 +1,209 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import operator + +import numpy as np +import pytest + +import ngraph as ng +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node + + +@pytest.mark.parametrize( + "ng_api_helper,numpy_function", + [ + (ng.add, np.add), + (ng.divide, np.divide), + (ng.multiply, np.multiply), + (ng.subtract, np.subtract), + (ng.minimum, np.minimum), + (ng.maximum, np.maximum), + (ng.mod, np.mod), + (ng.equal, np.equal), + (ng.not_equal, np.not_equal), + (ng.greater, np.greater), + (ng.greater_equal, np.greater_equal), + (ng.less, np.less), + (ng.less_equal, np.less_equal), + ], +) +def test_binary_op(ng_api_helper, numpy_function): + runtime = get_runtime() + + shape = [2, 2] + parameter_a = ng.parameter(shape, name="A", dtype=np.float32) + parameter_b = ng.parameter(shape, name="B", dtype=np.float32) + + model = ng_api_helper(parameter_a, parameter_b) + computation = runtime.computation(model, parameter_a, parameter_b) + + value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) + value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) + + result = computation(value_a, value_b) + expected = numpy_function(value_a, value_b) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "ng_api_helper,numpy_function", + [ + (ng.add, np.add), + (ng.divide, np.divide), + (ng.multiply, np.multiply), + (ng.subtract, np.subtract), + (ng.minimum, np.minimum), + 
(ng.maximum, np.maximum), + (ng.mod, np.mod), + (ng.equal, np.equal), + (ng.not_equal, np.not_equal), + (ng.greater, np.greater), + (ng.greater_equal, np.greater_equal), + (ng.less, np.less), + (ng.less_equal, np.less_equal), + ], +) +def test_binary_op_with_scalar(ng_api_helper, numpy_function): + runtime = get_runtime() + + value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) + value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) + + shape = [2, 2] + parameter_a = ng.parameter(shape, name="A", dtype=np.float32) + + model = ng_api_helper(parameter_a, value_b) + computation = runtime.computation(model, parameter_a) + + result = computation(value_a) + expected = numpy_function(value_a, value_b) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "ng_api_helper,numpy_function", + [(ng.logical_and, np.logical_and), (ng.logical_or, np.logical_or), (ng.logical_xor, np.logical_xor)], +) +def test_binary_logical_op(ng_api_helper, numpy_function): + runtime = get_runtime() + + shape = [2, 2] + parameter_a = ng.parameter(shape, name="A", dtype=np.bool) + parameter_b = ng.parameter(shape, name="B", dtype=np.bool) + + model = ng_api_helper(parameter_a, parameter_b) + computation = runtime.computation(model, parameter_a, parameter_b) + + value_a = np.array([[True, False], [False, True]], dtype=np.bool) + value_b = np.array([[False, True], [False, True]], dtype=np.bool) + + result = computation(value_a, value_b) + expected = numpy_function(value_a, value_b) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "ng_api_helper,numpy_function", + [(ng.logical_and, np.logical_and), (ng.logical_or, np.logical_or), (ng.logical_xor, np.logical_xor)], +) +def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function): + runtime = get_runtime() + + value_a = np.array([[True, False], [False, True]], dtype=np.bool) + value_b = np.array([[False, True], [False, True]], dtype=np.bool) + + shape = [2, 2] + parameter_a = ng.parameter(shape, 
name="A", dtype=np.bool) + + model = ng_api_helper(parameter_a, value_b) + computation = runtime.computation(model, parameter_a) + + result = computation(value_a) + expected = numpy_function(value_a, value_b) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "operator,numpy_function", + [ + (operator.add, np.add), + (operator.sub, np.subtract), + (operator.mul, np.multiply), + (operator.truediv, np.divide), + (operator.eq, np.equal), + (operator.ne, np.not_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.lt, np.less), + (operator.le, np.less_equal), + ], +) +def test_binary_operators(operator, numpy_function): + runtime = get_runtime() + + value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) + value_b = np.array([[4, 5], [1, 7]], dtype=np.float32) + + shape = [2, 2] + parameter_a = ng.parameter(shape, name="A", dtype=np.float32) + + model = operator(parameter_a, value_b) + computation = runtime.computation(model, parameter_a) + + result = computation(value_a) + expected = numpy_function(value_a, value_b) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "operator,numpy_function", + [ + (operator.add, np.add), + (operator.sub, np.subtract), + (operator.mul, np.multiply), + (operator.truediv, np.divide), + (operator.eq, np.equal), + (operator.ne, np.not_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.lt, np.less), + (operator.le, np.less_equal), + ], +) +def test_binary_operators_with_scalar(operator, numpy_function): + runtime = get_runtime() + + value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) + value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) + + shape = [2, 2] + parameter_a = ng.parameter(shape, name="A", dtype=np.float32) + + model = operator(parameter_a, value_b) + computation = runtime.computation(model, parameter_a) + + result = computation(value_a) + expected = numpy_function(value_a, value_b) + assert np.allclose(result, expected) 
+ + +def test_multiply(): + A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1)) + B = np.arange(35, dtype=np.int32).reshape((7, 1, 5)) + + expected = np.multiply(A, B) + result = run_op_node([A, B], ng.multiply) + + assert np.allclose(result, expected) + + +def test_power_v1(): + A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1)) + B = np.arange(20, dtype=np.float32).reshape((4, 1, 5)) + + expected = np.power(A, B) + result = run_op_node([A, B], ng.power) + + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_fused.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_fused.py new file mode 100644 index 00000000000..2d62518e6f0 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_fused.py @@ -0,0 +1,766 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +import ngraph as ng +from tests_compatibility.runtime import get_runtime +from tests_compatibility import xfail_issue_36486 + + +def test_elu_operator_with_scalar_and_array(): + runtime = get_runtime() + + data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) + alpha_value = np.float32(3) + + model = ng.elu(data_value, alpha_value) + computation = runtime.computation(model) + + result = computation() + expected = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32) + assert np.allclose(result, expected) + + +def test_elu_operator_with_scalar(): + runtime = get_runtime() + + data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) + alpha_value = np.float32(3) + + data_shape = [2, 2] + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + + model = ng.elu(parameter_data, alpha_value) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32) + assert 
np.allclose(result, expected) + + +def test_fake_quantize(): + runtime = get_runtime() + + data_value = np.arange(24.0, dtype=np.float32).reshape(1, 2, 3, 4) + input_low_value = np.float32(0) + input_high_value = np.float32(23) + output_low_value = np.float32(2) + output_high_value = np.float32(16) + levels = np.float32(4) + + data_shape = [1, 2, 3, 4] + bound_shape = [] + parameter_data = ng.parameter(data_shape, name="data", dtype=np.float32) + parameter_input_low = ng.parameter(bound_shape, name="input_low", dtype=np.float32) + parameter_input_high = ng.parameter(bound_shape, name="input_high", dtype=np.float32) + parameter_output_low = ng.parameter(bound_shape, name="output_low", dtype=np.float32) + parameter_output_high = ng.parameter(bound_shape, name="output_high", dtype=np.float32) + + model = ng.fake_quantize( + parameter_data, + parameter_input_low, + parameter_input_high, + parameter_output_low, + parameter_output_high, + levels, + ) + computation = runtime.computation( + model, + parameter_data, + parameter_input_low, + parameter_input_high, + parameter_output_low, + parameter_output_high, + ) + + result = computation(data_value, input_low_value, input_high_value, output_low_value, output_high_value) + + expected = np.array( + [ + [ + [ + [ + [2.0, 2.0, 2.0, 2.0], + [6.6666669, 6.6666669, 6.6666669, 6.6666669], + [6.6666669, 6.6666669, 6.6666669, 6.6666669], + ], + [ + [11.33333301, 11.33333301, 11.33333301, 11.33333301], + [11.33333301, 11.33333301, 11.33333301, 11.33333301], + [16.0, 16.0, 16.0, 16.0], + ], + ] + ] + ], + dtype=np.float32, + ) + assert np.allclose(result, expected) + + +def test_depth_to_space(): + runtime = get_runtime() + + data_value = np.array( + [ + [ + [[0, 1, 2], [3, 4, 5]], + [[6, 7, 8], [9, 10, 11]], + [[12, 13, 14], [15, 16, 17]], + [[18, 19, 20], [21, 22, 23]], + ] + ], + dtype=np.float32, + ) + mode = "blocks_first" + block_size = np.float32(2) + + data_shape = [1, 4, 2, 3] + parameter_data = ng.parameter(data_shape, 
name="Data", dtype=np.float32) + + model = ng.depth_to_space(parameter_data, mode, block_size) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.array( + [[[[0, 6, 1, 7, 2, 8], [12, 18, 13, 19, 14, 20], [3, 9, 4, 10, 5, 11], [15, 21, 16, 22, 17, 23]]]], + dtype=np.float32, + ) + assert np.allclose(result, expected) + + +def test_space_to_batch(): + runtime = get_runtime() + + data_value = np.array([[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32) + data_shape = data_value.shape + + block_shape = np.array([1, 2, 3, 2], dtype=np.int64) + pads_begin = np.array([0, 0, 1, 0], dtype=np.int64) + pads_end = np.array([0, 0, 0, 1], dtype=np.int64) + + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + + model = ng.space_to_batch(parameter_data, block_shape, pads_begin, pads_end) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.array( + [ + [[[0, 0]]], + [[[0, 0]]], + [[[0, 2]]], + [[[1, 0]]], + [[[3, 5]]], + [[[4, 0]]], + [[[0, 0]]], + [[[0, 0]]], + [[[6, 8]]], + [[[7, 0]]], + [[[9, 11]]], + [[[10, 0]]], + ], + dtype=np.float32, + ) + assert np.allclose(result, expected) + + +def test_batch_to_space(): + runtime = get_runtime() + + data = np.array( + [ + [[[0, 0]]], + [[[0, 0]]], + [[[0, 2]]], + [[[1, 0]]], + [[[3, 5]]], + [[[4, 0]]], + [[[0, 0]]], + [[[0, 0]]], + [[[6, 8]]], + [[[7, 0]]], + [[[9, 11]]], + [[[10, 0]]], + ], + dtype=np.float32, + ) + data_shape = data.shape + + block_shape = np.array([1, 2, 3, 2], dtype=np.int64) + crops_begin = np.array([0, 0, 1, 0], dtype=np.int64) + crops_end = np.array([0, 0, 0, 1], dtype=np.int64) + + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + + model = ng.batch_to_space(parameter_data, block_shape, crops_begin, crops_end) + computation = runtime.computation(model, parameter_data) + + result = computation(data) + expected = 
np.array([[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32) + + assert np.allclose(result, expected) + + +def test_clamp_operator(): + runtime = get_runtime() + + data_shape = [2, 2] + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + min_value = np.float32(3) + max_value = np.float32(12) + + model = ng.clamp(parameter_data, min_value, max_value) + computation = runtime.computation(model, parameter_data) + + data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32) + + result = computation(data_value) + expected = np.clip(data_value, min_value, max_value) + assert np.allclose(result, expected) + + +def test_clamp_operator_with_array(): + runtime = get_runtime() + + data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32) + min_value = np.float32(3) + max_value = np.float32(12) + + model = ng.clamp(data_value, min_value, max_value) + computation = runtime.computation(model) + + result = computation() + expected = np.clip(data_value, min_value, max_value) + + assert np.allclose(result, expected) + + +def test_squeeze_operator(): + runtime = get_runtime() + + data_shape = [1, 2, 1, 3, 1, 1] + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + data_value = np.arange(6.0, dtype=np.float32).reshape([1, 2, 1, 3, 1, 1]) + axes = [2, 4] + model = ng.squeeze(parameter_data, axes) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.arange(6.0, dtype=np.float32).reshape([1, 2, 3, 1]) + assert np.allclose(result, expected) + + +def test_squared_difference_operator(): + runtime = get_runtime() + + x1_shape = [1, 2, 3, 4] + x2_shape = [2, 3, 4] + + parameter_x1 = ng.parameter(x1_shape, name="x1", dtype=np.float32) + parameter_x2 = ng.parameter(x2_shape, name="x2", dtype=np.float32) + + x1_value = np.arange(24.0, dtype=np.float32).reshape(x1_shape) + x2_value = np.arange(start=4.0, stop=28.0, step=1.0, dtype=np.float32).reshape(x2_shape) + + model 
= ng.squared_difference(parameter_x1, parameter_x2) + computation = runtime.computation(model, parameter_x1, parameter_x2) + + result = computation(x1_value, x2_value) + expected = np.square(np.subtract(x1_value, x2_value)) + assert np.allclose(result, expected) + + +def test_shuffle_channels_operator(): + runtime = get_runtime() + + data_shape = [1, 15, 2, 2] + axis = 1 + groups = 5 + + parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + + data_value = np.arange(60.0, dtype=np.float32).reshape(data_shape) + + model = ng.shuffle_channels(parameter, axis, groups) + computation = runtime.computation(model, parameter) + + result = computation(data_value) + expected = np.array( + [ + [ + [[0.0, 1.0], [2.0, 3.0]], + [[12.0, 13.0], [14.0, 15.0]], + [[24.0, 25.0], [26.0, 27.0]], + [[36.0, 37.0], [38.0, 39.0]], + [[48.0, 49.0], [50.0, 51.0]], + [[4.0, 5.0], [6.0, 7.0]], + [[16.0, 17.0], [18.0, 19.0]], + [[28.0, 29.0], [30.0, 31.0]], + [[40.0, 41.0], [42.0, 43.0]], + [[52.0, 53.0], [54.0, 55.0]], + [[8.0, 9.0], [10.0, 11.0]], + [[20.0, 21.0], [22.0, 23.0]], + [[32.0, 33.0], [34.0, 35.0]], + [[44.0, 45.0], [46.0, 47.0]], + [[56.0, 57.0], [58.0, 59.0]], + ] + ], + dtype=np.float32, + ) + assert np.allclose(result, expected) + + +def test_unsqueeze(): + runtime = get_runtime() + + data_shape = [3, 4, 5] + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + data_value = np.arange(60.0, dtype=np.float32).reshape(3, 4, 5) + axes = [0, 4] + model = ng.unsqueeze(parameter_data, axes) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.arange(60.0, dtype=np.float32).reshape([1, 3, 4, 5, 1]) + assert np.allclose(result, expected) + + +def test_grn_operator(): + runtime = get_runtime() + + data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape([1, 2, 3, 4]) + bias = np.float32(1e-6) + + data_shape = [1, 2, 3, 4] + + parameter_data = ng.parameter(data_shape, name="Data", 
dtype=np.float32) + + model = ng.grn(parameter_data, bias) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.array( + [ + [ + [ + [0.0766965, 0.14142136, 0.19611613, 0.24253564], + [0.28216633, 0.31622776, 0.34570536, 0.37139067], + [0.39391932, 0.41380295, 0.4314555, 0.4472136], + ], + [ + [0.9970545, 0.98994946, 0.9805807, 0.97014254], + [0.9593655, 0.9486833, 0.9383431, 0.9284767], + [0.91914505, 0.9103665, 0.9021342, 0.8944272], + ], + ] + ], + dtype=np.float32, + ) + + assert np.allclose(result, expected) + + +def test_prelu_operator(): + runtime = get_runtime() + + data_shape = [1, 2, 3, 4] + slope_shape = [2, 3, 1] + + data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape) + slope_value = np.arange(start=-10.0, stop=-4.0, dtype=np.float32).reshape(slope_shape) + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_slope = ng.parameter(slope_shape, name="Slope", dtype=np.float32) + + model = ng.prelu(parameter_data, parameter_slope) + computation = runtime.computation(model, parameter_data, parameter_slope) + + result = computation(data_value, slope_value) + expected = np.clip(data_value, 0, np.inf) + np.clip(data_value, -np.inf, 0) * slope_value + assert np.allclose(result, expected) + + +def test_selu_operator(): + runtime = get_runtime() + + data_shape = [4, 2, 3, 1] + + data = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape) + alpha = np.array(1.6733, dtype=np.float32) + lambda_value = np.array(1.0507, dtype=np.float32) + + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + model = ng.selu(parameter_data, alpha, lambda_value) + computation = runtime.computation(model, parameter_data) + + result = computation(data) + expected = lambda_value * ((data > 0) * data + (data <= 0) * (alpha * np.exp(data) - alpha)) + assert np.allclose(result, expected) + + +@xfail_issue_36486 +def 
test_hard_sigmoid_operator(): + runtime = get_runtime() + + data_shape = [3] + alpha_value = np.float32(0.5) + beta_value = np.float32(0.6) + + data_value = np.array([-1, 0, 1], dtype=np.float32) + + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_alpha = ng.parameter([], name="Alpha", dtype=np.float32) + parameter_beta = ng.parameter([], name="Beta", dtype=np.float32) + + model = ng.hard_sigmoid(parameter_data, parameter_alpha, parameter_beta) + computation = runtime.computation(model, parameter_data, parameter_alpha, parameter_beta) + + result = computation(data_value, alpha_value, beta_value) + expected = [0.1, 0.6, 1.0] + assert np.allclose(result, expected) + + +def test_mvn_operator(): + runtime = get_runtime() + + data_shape = [3, 3, 3, 1] + axes = [0, 2, 3] + normalize_variance = True + eps = np.float32(1e-9) + eps_mode = "outside_sqrt" + + data_value = np.array( + [ + [ + [[0.8439683], [0.5665144], [0.05836735]], + [[0.02916367], [0.12964272], [0.5060197]], + [[0.79538304], [0.9411346], [0.9546573]], + ], + [ + [[0.17730942], [0.46192095], [0.26480448]], + [[0.6746842], [0.01665257], [0.62473077]], + [[0.9240844], [0.9722341], [0.11965699]], + ], + [ + [[0.41356155], [0.9129373], [0.59330076]], + [[0.81929934], [0.7862604], [0.11799799]], + [[0.69248444], [0.54119414], [0.07513223]], + ], + ], + dtype=np.float32, + ) + + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + + model = ng.mvn(parameter_data, axes, normalize_variance, eps, eps_mode) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + + expected = np.array( + [ + [ + [[1.3546423], [0.33053496], [-1.5450814]], + [[-1.2106764], [-0.8925952], [0.29888135]], + [[0.38083088], [0.81808794], [0.85865635]], + ], + [ + [[-1.1060555], [-0.05552877], [-0.78310335]], + [[0.83281356], [-1.250282], [0.67467856]], + [[0.7669372], [0.9113869], [-1.6463585]], + ], + [ + [[-0.23402764], [1.6092131], 
[0.42940593]], + [[1.2906139], [1.1860244], [-0.92945826]], + [[0.0721334], [-0.38174], [-1.7799333]], + ], + ], + dtype=np.float32, + ) + + assert np.allclose(result, expected) + + +def test_space_to_depth_operator(): + runtime = get_runtime() + + data_shape = [1, 2, 4, 4] + data_value = np.arange(start=0, stop=32, step=1.0, dtype=np.float32).reshape(data_shape) + mode = "blocks_first" + block_size = 2 + + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + + model = ng.space_to_depth(parameter_data, mode, block_size) + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.array( + [ + 0, + 2, + 8, + 10, + 16, + 18, + 24, + 26, + 1, + 3, + 9, + 11, + 17, + 19, + 25, + 27, + 4, + 6, + 12, + 14, + 20, + 22, + 28, + 30, + 5, + 7, + 13, + 15, + 21, + 23, + 29, + 31, + ], + dtype=np.float32, + ).reshape(1, 8, 2, 2) + assert np.allclose(result, expected) + + batch_size = 2 + input_size = 3 + hidden_size = 3 + + X_shape = [batch_size, input_size] + H_t_shape = [batch_size, hidden_size] + W_shape = [hidden_size, input_size] + R_shape = [hidden_size, hidden_size] + B_shape = [hidden_size] + + parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32) + parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32) + parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32) + parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32) + parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32) + + X_value = np.array( + [0.3432185, 0.612268, 0.20272376, 0.9513413, 0.30585995, 0.7265472], dtype=np.float32 + ).reshape(X_shape) + H_t_value = np.array( + [0.12444675, 0.52055854, 0.46489045, 0.4983964, 0.7730452, 0.28439692], dtype=np.float32 + ).reshape(H_t_shape) + W_value = np.array( + [ + 0.41930267, + 0.7872176, + 0.89940447, + 0.23659843, + 0.24676207, + 0.17101714, + 0.3147149, + 0.6555601, + 0.4559603, + ], + dtype=np.float32, + ).reshape(W_shape) + R_value = 
np.array( + [ + 0.8374871, + 0.86660194, + 0.82114047, + 0.71549815, + 0.18775631, + 0.3182116, + 0.25392973, + 0.38301638, + 0.85531586, + ], + dtype=np.float32, + ).reshape(R_shape) + B_value = np.array([1.0289404, 1.6362579, 0.4370661], dtype=np.float32).reshape(B_shape) + activations = ["sigmoid"] + activation_alpha = [] + activation_beta = [] + clip = 2.88 + + model = ng.rnn_cell( + parameter_X, + parameter_H_t, + parameter_W, + parameter_R, + parameter_B, + hidden_size, + activations, + activation_alpha, + activation_beta, + clip, + ) + computation = runtime.computation( + model, parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B + ) + + result = computation(X_value, H_t_value, W_value, R_value, B_value) + expected = np.array( + [0.94126844, 0.9036043, 0.841243, 0.9468489, 0.934215, 0.873708], dtype=np.float32 + ).reshape(batch_size, hidden_size) + + assert np.allclose(result, expected) + + +def test_group_convolution_operator(): + runtime = get_runtime() + + data_shape = [1, 4, 2, 2] + filters_shape = [2, 1, 2, 1, 1] + + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + parameter_filters = ng.parameter(filters_shape, name="Filters", dtype=np.float32) + + data_value = np.arange(start=1.0, stop=17.0, dtype=np.float32).reshape(data_shape) + filters_value = np.arange(start=1.0, stop=5.0, dtype=np.float32).reshape(filters_shape) + strides = [1, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + + model = ng.group_convolution(parameter_data, parameter_filters, strides, pads_begin, pads_end, dilations) + computation = runtime.computation(model, parameter_data, parameter_filters) + result = computation(data_value, filters_value) + + expected = np.array([11, 14, 17, 20, 79, 86, 93, 100], dtype=np.float32).reshape(1, 2, 2, 2) + + assert np.allclose(result, expected) + + +@pytest.mark.xfail(reason="Computation mismatch") +def test_group_convolution_backprop_data(): + runtime = get_runtime() + + data_shape = [1, 
1, 3, 3] + filters_shape = [1, 1, 1, 3, 3] + strides = [2, 2] + output_padding = [1, 1] + pads_begin = [1, 1] + pads_end = [1, 1] + + data_node = ng.parameter(data_shape, name="Data", dtype=np.float32) + filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32) + model = ng.group_convolution_backprop_data( + data_node, filters_node, strides, None, pads_begin, pads_end, output_padding=output_padding + ) + + data_value = np.array( + [ + 0.16857791, + -0.15161794, + 0.08540368, + 0.1820628, + -0.21746576, + 0.08245695, + 0.1431433, + -0.43156421, + 0.30591947, + ], + dtype=np.float32, + ).reshape(data_shape) + + filters_value = np.array( + [ + -0.06230065, + 0.37932432, + -0.25388849, + 0.33878803, + 0.43709868, + -0.22477469, + 0.04118127, + -0.44696793, + 0.06373066, + ], + dtype=np.float32, + ).reshape(filters_shape) + + computation = runtime.computation(model, data_node, filters_node) + result = computation(data_value, filters_value) + + expected = np.array( + [ + 0.07368518, + -0.08925839, + -0.06627201, + 0.06301362, + 0.03732984, + -0.01919658, + -0.00628807, + -0.02817563, + -0.01472169, + 0.04392925, + -0.00689478, + -0.01549204, + 0.07957941, + -0.11459791, + -0.09505399, + 0.07681622, + 0.03604182, + -0.01853423, + -0.0270785, + -0.00680824, + -0.06650258, + 0.08004665, + 0.07918708, + 0.0724144, + 0.06256775, + -0.17838378, + -0.18863615, + 0.20064656, + 0.133717, + -0.06876295, + -0.06398046, + -0.00864975, + 0.19289537, + -0.01490572, + -0.13673618, + 0.01949645, + ], + dtype=np.float32, + ).reshape(1, 1, 6, 6) + + assert np.allclose(result, expected) + + +def test_group_convolution_backprop_data_output_shape(): + runtime = get_runtime() + + data_shape = [1, 1, 1, 10] + filters_shape = [1, 1, 1, 1, 5] + strides = [1, 1] + + data_node = ng.parameter(data_shape, name="Data", dtype=np.float32) + filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32) + output_shape_node = ng.constant(np.array([1, 14], dtype=np.int64)) 
+ + model = ng.group_convolution_backprop_data( + data_node, filters_node, strides, output_shape_node, auto_pad="same_upper" + ) + + data_value = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], dtype=np.float32).reshape( + data_shape + ) + + filters_value = np.array([1.0, 2.0, 3.0, 2.0, 1.0], dtype=np.float32).reshape(filters_shape) + + computation = runtime.computation(model, data_node, filters_node) + result = computation(data_value, filters_value) + + expected = np.array( + [0.0, 1.0, 4.0, 10.0, 18.0, 27.0, 36.0, 45.0, 54.0, 63.0, 62.0, 50.0, 26.0, 9.0], dtype=np.float32, + ).reshape(1, 1, 1, 14) + + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_matmul.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_matmul.py new file mode 100644 index 00000000000..126089119fa --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_matmul.py @@ -0,0 +1,40 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +import ngraph as ng +from tests_compatibility.test_ngraph.util import run_op_node + + +@pytest.mark.parametrize( + "shape_a, shape_b, transpose_a, transpose_b", + [ + # matrix, vector + ([2, 4], [4], False, False), + ([4], [4, 2], False, False), + # matrix, matrix + ([2, 4], [4, 2], False, False), + # tensor, vector + ([2, 4, 5], [5], False, False), + # # tensor, matrix + ([2, 4, 5], [5, 4], False, False), + # # tensor, tensor + ([2, 2, 4], [2, 4, 2], False, False), + ], +) +def test_matmul(shape_a, shape_b, transpose_a, transpose_b): + np.random.seed(133391) + left_input = -100.0 + np.random.rand(*shape_a).astype(np.float32) * 200.0 + right_input = -100.0 + np.random.rand(*shape_b).astype(np.float32) * 200.0 + + result = run_op_node([left_input, right_input], ng.matmul, transpose_a, transpose_b) + + if transpose_a: + left_input = np.transpose(left_input) + if transpose_b: 
+ right_input = np.transpose(right_input) + + expected = np.matmul(left_input, right_input) + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py new file mode 100644 index 00000000000..f780df73c75 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py @@ -0,0 +1,36 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import ngraph as ng +from tests_compatibility.runtime import get_runtime + + +def test_split(): + runtime = get_runtime() + input_tensor = ng.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)) + axis = ng.constant(0, dtype=np.int64) + splits = 3 + + split_node = ng.split(input_tensor, axis, splits) + computation = runtime.computation(split_node) + split_results = computation() + expected_results = np.array([[0, 1], [2, 3], [4, 5]], dtype=np.int32) + assert np.allclose(split_results, expected_results) + + +def test_variadic_split(): + runtime = get_runtime() + input_tensor = ng.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32)) + axis = ng.constant(1, dtype=np.int64) + splits = ng.constant(np.array([2, 4], dtype=np.int64)) + + v_split_node = ng.variadic_split(input_tensor, axis, splits) + computation = runtime.computation(v_split_node) + results = computation() + split0 = np.array([[0, 1], [6, 7]], dtype=np.int32) + split1 = np.array([[2, 3, 4, 5], [8, 9, 10, 11]], dtype=np.int32) + + assert np.allclose(results[0], split0) + assert np.allclose(results[1], split1) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_reshape.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_reshape.py new file mode 100644 index 00000000000..e2e0d253f3e --- /dev/null +++ 
b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_reshape.py @@ -0,0 +1,201 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import ngraph as ng +import numpy as np +import pytest + +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node, run_op_numeric_data + + +def test_concat(): + a = np.array([[1, 2], [3, 4]]) + b = np.array([[5, 6]]) + axis = 0 + expected = np.concatenate((a, b), axis=0) + + runtime = get_runtime() + parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32) + parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32) + node = ng.concat([parameter_a, parameter_b], axis) + computation = runtime.computation(node, parameter_a, parameter_b) + result = computation(a, b) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))] +) +def test_constant_from_bool(val_type, value): + expected = np.array(value, dtype=val_type) + result = run_op_numeric_data(value, ng.constant, val_type) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "val_type, value", + [ + pytest.param(np.int16, np.int16(-12345)), + pytest.param(np.int64, np.int64(-1234567)), + pytest.param(np.uint16, np.uint16(12345)), + pytest.param(np.uint32, np.uint32(123456)), + pytest.param(np.uint64, np.uint64(1234567)), + pytest.param(np.float64, np.float64(0.1234)), + pytest.param(np.float32, np.float32(0.1234)), + pytest.param(np.int8, np.int8(-63)), + pytest.param(np.int32, np.int32(-123456)), + pytest.param(np.uint8, np.uint8(63)), + ], +) +def test_constant_from_scalar(val_type, value): + expected = np.array(value, dtype=val_type) + result = run_op_numeric_data(value, ng.constant, val_type) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "val_type", + [ + pytest.param(np.float64), + pytest.param(np.float32), + ], +) +def 
test_constant_from_float_array(val_type): + np.random.seed(133391) + input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type) + result = run_op_numeric_data(input_data, ng.constant, val_type) + assert np.allclose(result, input_data) + + +@pytest.mark.parametrize( + "val_type, range_start, range_end", + [ + pytest.param(np.int16, -64, 64), + pytest.param(np.int64, -16383, 16383), + pytest.param(np.uint16, 0, 64), + pytest.param(np.uint32, 0, 1024), + pytest.param(np.uint64, 0, 16383), + pytest.param(np.int8, -8, 8), + pytest.param(np.int32, -1024, 1024), + pytest.param(np.uint8, 0, 8), + ], +) +def test_constant_from_integer_array(val_type, range_start, range_end): + np.random.seed(133391) + input_data = np.array( + np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type + ) + result = run_op_numeric_data(input_data, ng.constant, val_type) + assert np.allclose(result, input_data) + + +def test_broadcast_numpy(): + data_shape = [16, 1, 1] + target_shape_shape = [4] + + data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + target_shape_parameter = ng.parameter( + target_shape_shape, name="Target_shape", dtype=np.int64 + ) + + node = ng.broadcast(data_parameter, target_shape_parameter) + + assert node.get_type_name() == "Broadcast" + assert node.get_output_size() == 1 + + +def test_broadcast_bidirectional(): + data_shape = [16, 1, 1] + target_shape_shape = [4] + + data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + target_shape_parameter = ng.parameter( + target_shape_shape, name="Target_shape", dtype=np.int64 + ) + + node = ng.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL") + + assert node.get_type_name() == "Broadcast" + assert node.get_output_size() == 1 + + +def test_transpose(): + input_tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape( + (3, 3, 224, 224) + ) + input_order = np.array([0, 2, 3, 1], dtype=np.int32) + + result = run_op_node([input_tensor], 
ng.transpose, input_order) + + expected = np.transpose(input_tensor, input_order) + + assert np.allclose(result, expected) + + +@pytest.mark.xfail( + reason="Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation." +) +def test_tile(): + input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3)) + repeats = np.array([2, 1], dtype=np.int32) + + result = run_op_node([input_tensor], ng.tile, repeats) + + expected = np.array([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]).reshape((2, 2, 3)) + + assert np.allclose(result, expected) + + +@pytest.mark.xfail( + reason="RuntimeError: Check 'shape_size(get_input_shape(0)) == shape_size(output_shape)'" +) +def test_strided_slice(): + input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4)) + begin = np.array([1, 0], dtype=np.int32) + end = np.array([0, 0], dtype=np.int32) + strides = np.array([1, 1], dtype=np.int32) + begin_mask = np.array([0, 0, 0], dtype=np.int32) + end_mask = np.array([0, 0, 0], dtype=np.int32) + new_axis_mask = np.array([0, 1, 0], dtype=np.int32) + shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32) + ellipsis_mask = np.array([0, 0, 0], dtype=np.int32) + + result = run_op_node( + [input_tensor], + ng.strided_slice, + begin, + end, + strides, + begin_mask, + end_mask, + new_axis_mask, + shrink_axis_mask, + ellipsis_mask, + ) + + expected = np.array( + [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32 + ).reshape((1, 3, 4)) + + assert np.allclose(result, expected) + + +def test_reshape_v1(): + A = np.arange(1200, dtype=np.float32).reshape((2, 5, 5, 24)) + shape = np.array([0, -1, 4], dtype=np.int32) + special_zero = True + + expected_shape = np.array([2, 150, 4]) + expected = np.reshape(A, expected_shape) + result = run_op_node([A], ng.reshape, shape, special_zero) + + assert np.allclose(result, expected) + + +def test_shape_of(): + input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32) + + result = 
run_op_node([input_tensor], ng.shape_of) + + assert np.allclose(result, [3, 3]) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py new file mode 100644 index 00000000000..b762f1068a1 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_scatter.py @@ -0,0 +1,35 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import ngraph as ng +from ngraph.impl import Type + + +def test_scatter_update_props(): + dtype = np.int8 + parameter_r = ng.parameter([2, 3, 4], dtype=dtype, name="data") + parameter_i = ng.parameter([2, 1], dtype=dtype, name="indices") + parameter_u = ng.parameter([2, 2, 1, 4], dtype=dtype, name="updates") + axis = np.array([1], dtype=np.int8) + + node = ng.scatter_update(parameter_r, parameter_i, parameter_u, axis) + assert node.get_type_name() == "ScatterUpdate" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [2, 3, 4] + assert node.get_output_element_type(0) == Type.i8 + + +def test_scatter_update_elements_props(): + dtype = np.int8 + parameter_r = ng.parameter([2, 4, 5, 7], dtype=dtype, name="data") + parameter_i = ng.parameter([2, 2, 2, 2], dtype=dtype, name="indices") + parameter_u = ng.parameter([2, 2, 2, 2], dtype=dtype, name="updates") + axis = np.array([1], dtype=np.int8) + + node = ng.scatter_elements_update(parameter_r, parameter_i, parameter_u, axis) + assert node.get_type_name() == "ScatterElementsUpdate" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [2, 4, 5, 7] + assert node.get_output_element_type(0) == Type.i8 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_unary.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_unary.py new file mode 100644 index 00000000000..4eac079f0e3 --- /dev/null +++ 
b/runtime/bindings/python/tests_compatibility/test_ngraph/test_ops_unary.py @@ -0,0 +1,233 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +import ngraph as ng +from ngraph.impl import Shape, Type +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node + + +@pytest.mark.parametrize( + "ng_api_fn, numpy_fn, range_start, range_end", + [ + (ng.absolute, np.abs, -1, 1), + (ng.abs, np.abs, -1, 1), + (ng.acos, np.arccos, -1, 1), + (ng.acosh, np.arccosh, 1, 2), + (ng.asin, np.arcsin, -1, 1), + (ng.asinh, np.arcsinh, -1, 1), + (ng.atan, np.arctan, -100.0, 100.0), + (ng.atanh, np.arctanh, 0.0, 1.0), + (ng.ceiling, np.ceil, -100.0, 100.0), + (ng.ceil, np.ceil, -100.0, 100.0), + (ng.cos, np.cos, -100.0, 100.0), + (ng.cosh, np.cosh, -100.0, 100.0), + (ng.exp, np.exp, -100.0, 100.0), + (ng.floor, np.floor, -100.0, 100.0), + (ng.log, np.log, 0, 100.0), + (ng.relu, lambda x: np.maximum(0, x), -100.0, 100.0), + (ng.sign, np.sign, -100.0, 100.0), + (ng.sin, np.sin, -100.0, 100.0), + (ng.sinh, np.sinh, -100.0, 100.0), + (ng.sqrt, np.sqrt, 0.0, 100.0), + (ng.tan, np.tan, -1.0, 1.0), + (ng.tanh, np.tanh, -100.0, 100.0), + ], +) +def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end): + np.random.seed(133391) + input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32) + expected = numpy_fn(input_data) + + result = run_op_node([input_data], ng_api_fn) + assert np.allclose(result, expected, rtol=0.001) + + +@pytest.mark.parametrize( + "ng_api_fn, numpy_fn, input_data", + [ + pytest.param(ng.absolute, np.abs, np.float32(-3)), + pytest.param(ng.abs, np.abs, np.float32(-3)), + pytest.param(ng.acos, np.arccos, np.float32(-0.5)), + pytest.param(ng.asin, np.arcsin, np.float32(-0.5)), + pytest.param(ng.atan, np.arctan, np.float32(-0.5)), + pytest.param(ng.ceiling, np.ceil, np.float32(1.5)), + 
pytest.param(ng.ceil, np.ceil, np.float32(1.5)), + pytest.param(ng.cos, np.cos, np.float32(np.pi / 4.0)), + pytest.param(ng.cosh, np.cosh, np.float32(np.pi / 4.0)), + pytest.param(ng.exp, np.exp, np.float32(1.5)), + pytest.param(ng.floor, np.floor, np.float32(1.5)), + pytest.param(ng.log, np.log, np.float32(1.5)), + pytest.param(ng.relu, lambda x: np.maximum(0, x), np.float32(-0.125)), + pytest.param(ng.sign, np.sign, np.float32(0.0)), + pytest.param(ng.sin, np.sin, np.float32(np.pi / 4.0)), + pytest.param(ng.sinh, np.sinh, np.float32(0.0)), + pytest.param(ng.sqrt, np.sqrt, np.float32(3.5)), + pytest.param(ng.tan, np.tan, np.float32(np.pi / 4.0)), + pytest.param(ng.tanh, np.tanh, np.float32(0.1234)), + ], +) +def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data): + expected = numpy_fn(input_data) + + result = run_op_node([input_data], ng_api_fn) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "input_data", [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))] +) +def test_logical_not(input_data): + expected = np.logical_not(input_data) + + result = run_op_node([input_data], ng.logical_not) + assert np.allclose(result, expected) + + +def test_sigmoid(): + input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32) + result = run_op_node([input_data], ng.sigmoid) + + def sigmoid(x): + return 1.0 / (1.0 + np.exp(-x)) + + expected = np.array(list(map(sigmoid, input_data))) + + assert np.allclose(result, expected) + + +def test_softmax(): + axis = 1 + input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + result = run_op_node([input_tensor], ng.softmax, axis) + + expected = [[0.09003056, 0.24472842, 0.6652409], [0.09003056, 0.24472842, 0.6652409]] + + assert np.allclose(result, expected) + + +def test_erf(): + input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32) + expected = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0] + + result = 
run_op_node([input_tensor], ng.erf) + assert np.allclose(result, expected) + + +def test_hswish(): + float_dtype = np.float32 + data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + + node = ng.hswish(data) + assert node.get_type_name() == "HSwish" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [3, 10] + assert node.get_output_element_type(0) == Type.f32 + + +def test_round_even(): + float_dtype = np.float32 + data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + + node = ng.round(data, "HALF_TO_EVEN") + assert node.get_type_name() == "Round" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [3, 10] + assert node.get_output_element_type(0) == Type.f32 + + input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32) + expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0] + + result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN") + assert np.allclose(result, expected) + + +def test_round_away(): + float_dtype = np.float32 + data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + + node = ng.round(data, "HALF_AWAY_FROM_ZERO") + assert node.get_type_name() == "Round" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [3, 10] + assert node.get_output_element_type(0) == Type.f32 + + input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32) + expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0] + + result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO") + assert np.allclose(result, expected) + + +def test_hsigmoid(): + float_dtype = np.float32 + data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + + node = ng.hsigmoid(data) + assert node.get_type_name() == "HSigmoid" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [3, 10] + assert node.get_output_element_type(0) == Type.f32 + + +def 
test_gelu_operator_with_parameters(): + runtime = get_runtime() + + data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) + + data_shape = [2, 2] + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + + model = ng.gelu(parameter_data, "erf") + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.array([[-1.6391277e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32) + assert np.allclose(result, expected, 1e-6, 1e-6) + + +def test_gelu_operator_with_array(): + runtime = get_runtime() + + data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) + + model = ng.gelu(data_value, "erf") + computation = runtime.computation(model) + + result = computation() + expected = np.array([[-1.6391277e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32) + assert np.allclose(result, expected, 1e-6, 1e-6) + + +def test_gelu_tanh_operator_with_parameters(): + runtime = get_runtime() + + data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) + + data_shape = [2, 2] + parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32) + + model = ng.gelu(parameter_data, "tanh") + computation = runtime.computation(model, parameter_data) + + result = computation(data_value) + expected = np.array([[0.0, 0.841192], [-0.04540223, 2.9963627]], dtype=np.float32) + assert np.allclose(result, expected, 1e-6, 1e-6) + + +def test_gelu_tanh_operator_with_array(): + runtime = get_runtime() + + data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) + + model = ng.gelu(data_value, "tanh") + computation = runtime.computation(model) + + result = computation() + expected = np.array([[0.0, 0.841192], [-0.04540223, 2.9963627]], dtype=np.float32) + + assert np.allclose(result, expected, 1e-6, 1e-6) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py new file mode 100644 
index 00000000000..423c519272b --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_pooling.py @@ -0,0 +1,430 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +import ngraph as ng +from tests_compatibility.runtime import get_runtime + + +@pytest.fixture +def _ndarray_1x1x4x4(): + return np.arange(11, 27, dtype=np.float32).reshape(1, 1, 4, 4) + + +def test_avg_pool_2d(_ndarray_1x1x4x4): + runtime = get_runtime() + input_data = _ndarray_1x1x4x4 + param = ng.parameter(input_data.shape, name="A", dtype=np.float32) + + kernel_shape = [2, 2] + spatial_dim_count = len(kernel_shape) + pads_begin = [0] * spatial_dim_count + pads_end = [0] * spatial_dim_count + strides = [2, 2] + exclude_pad = True + expected = [[[[13.5, 15.5], [21.5, 23.5]]]] + + avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + computation = runtime.computation(avg_pool_node, param) + result = computation(input_data) + assert np.allclose(result, expected) + + expected = [[[[13.5, 14.5, 15.5], [17.5, 18.5, 19.5], [21.5, 22.5, 23.5]]]] + strides = [1, 1] + avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + computation = runtime.computation(avg_pool_node, param) + result = computation(input_data) + assert np.allclose(result, expected) + + pads_begin = [1, 1] + pads_end = [1, 1] + strides = [2, 2] + exclude_pad = True + + expected = [[[[11.0, 12.5, 14.0], [17.0, 18.5, 20.0], [23.0, 24.5, 26.0]]]] + avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + computation = runtime.computation(avg_pool_node, param) + result = computation(input_data) + assert np.allclose(result, expected) + + exclude_pad = False + expected = [[[[2.75, 6.25, 3.5], [8.5, 18.5, 10.0], [5.75, 12.25, 6.5]]]] + avg_pool_node = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + 
computation = runtime.computation(avg_pool_node, param) + result = computation(input_data) + assert np.allclose(result, expected) + + +def test_avg_pooling_3d(_ndarray_1x1x4x4): + rt = get_runtime() + data = _ndarray_1x1x4x4 + data = np.broadcast_to(data, (1, 1, 4, 4, 4)) + param = ng.parameter(list(data.shape)) + kernel_shape = [2, 2, 2] + strides = [2, 2, 2] + spatial_dim_count = len(kernel_shape) + pads_begin = [0] * spatial_dim_count + pads_end = [0] * spatial_dim_count + exclude_pad = True + + avgpool = ng.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) + comp = rt.computation(avgpool, param) + result = comp(data) + result_ref = [[[[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]]]] + assert np.allclose(result, result_ref) + + +def test_max_pool_basic(): + rt = get_runtime() + + # array([[[[ 0.5, 1.5, 2.5, 3.5], + # [ 4.5, 5.5, 6.5, 7.5], + # [ 8.5, 9.5, 10.5, 11.5], + # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) + data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) + strides = [1, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + kernel_shape = [2, 2] + rounding_type = "floor" + auto_pad = None + index_et = "i32" + + data_node = ng.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ng.max_pool( + data_node, + strides, + dilations, + pads_begin, + pads_end, + kernel_shape, + rounding_type, + auto_pad, + index_et, + ) + comp = rt.computation(maxpool_node, data_node) + result = comp(data) + + expected = np.array( + [[[[5.5, 6.5, 7.5], [9.5, 10.5, 11.5], [13.5, 14.5, 15.5]]]], dtype=np.float32 + ) + expected_idx = np.array([[[[5, 6, 7], [9, 10, 11], [13, 14, 15]]]], dtype=np.int32) + assert np.allclose(result[0], expected) + assert np.allclose(result[1], expected_idx) + + +def test_max_pool_strides(): + rt = get_runtime() + + # array([[[[ 0.5, 1.5, 2.5, 3.5], + # [ 4.5, 5.5, 6.5, 7.5], + # [ 8.5, 9.5, 10.5, 11.5], + # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) + data = 
np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) + strides = [2, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + kernel_shape = [2, 2] + rounding_type = "floor" + auto_pad = None + index_et = "i32" + + data_node = ng.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ng.max_pool( + data_node, + strides, + dilations, + pads_begin, + pads_end, + kernel_shape, + rounding_type, + auto_pad, + index_et, + ) + comp = rt.computation(maxpool_node, data_node) + result = comp(data) + + expected = np.array([[[[5.5, 6.5, 7.5], [13.5, 14.5, 15.5]]]], dtype=np.float32) + expected_idx = np.array([[[[5, 6, 7], [13, 14, 15]]]], dtype=np.int32) + assert np.allclose(result[0], expected) + assert np.allclose(result[1], expected_idx) + + +def test_max_pool_kernel_shape1x1(): + rt = get_runtime() + + # array([[[[ 0.5, 1.5, 2.5, 3.5], + # [ 4.5, 5.5, 6.5, 7.5], + # [ 8.5, 9.5, 10.5, 11.5], + # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) + data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) + strides = [1, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + kernel_shape = [1, 1] + rounding_type = "floor" + auto_pad = None + index_et = "i32" + + data_node = ng.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ng.max_pool( + data_node, + strides, + dilations, + pads_begin, + pads_end, + kernel_shape, + rounding_type, + auto_pad, + index_et, + ) + comp = rt.computation(maxpool_node, data_node) + result = comp(data) + + assert np.allclose(result[0], data) + assert np.allclose(result[1], np.arange(0, 16, dtype=np.int32).reshape((1, 1, 4, 4))) + + +def test_max_pool_kernel_shape3x3(): + rt = get_runtime() + + # array([[[[ 0.5, 1.5, 2.5, 3.5], + # [ 4.5, 5.5, 6.5, 7.5], + # [ 8.5, 9.5, 10.5, 11.5], + # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) + data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) + strides = [1, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + 
kernel_shape = [3, 3] + rounding_type = "floor" + auto_pad = None + index_et = "i32" + + data_node = ng.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ng.max_pool( + data_node, + strides, + dilations, + pads_begin, + pads_end, + kernel_shape, + rounding_type, + auto_pad, + index_et, + ) + comp = rt.computation(maxpool_node, data_node) + result = comp(data) + + expected = np.array([[[[10.5, 11.5], [14.5, 15.5]]]], dtype=np.float32) + assert np.allclose(result[0], expected) + + +def test_max_pool_non_zero_pads(): + rt = get_runtime() + + # array([[[[ 0.5, 1.5, 2.5, 3.5], + # [ 4.5, 5.5, 6.5, 7.5], + # [ 8.5, 9.5, 10.5, 11.5], + # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) + data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) + strides = [1, 1] + dilations = [1, 1] + pads_begin = [1, 1] + pads_end = [1, 1] + # 0 0 , 0 , 0 , 0, 0 + # 0 [ 0.5, 1.5, 2.5, 3.5], 0, + # 0 [ 4.5, 5.5, 6.5, 7.5], 0, + # 0 [ 8.5, 9.5, 10.5, 11.5], 0, + # 0 [12.5, 13.5, 14.5, 15.5], 0 + # 0 0 , 0 , 0 , 0, 0 + kernel_shape = [2, 2] + rounding_type = "floor" + auto_pad = None + index_et = "i32" + + data_node = ng.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ng.max_pool( + data_node, + strides, + dilations, + pads_begin, + pads_end, + kernel_shape, + rounding_type, + auto_pad, + index_et, + ) + comp = rt.computation(maxpool_node, data_node) + result = comp(data) + + expected = np.array( + [ + [ + [ + [0.5, 1.5, 2.5, 3.5, 3.5], + [4.5, 5.5, 6.5, 7.5, 7.5], + [8.5, 9.5, 10.5, 11.5, 11.5], + [12.5, 13.5, 14.5, 15.5, 15.5], + [12.5, 13.5, 14.5, 15.5, 15.5], + ] + ] + ], + dtype=np.float32, + ) + expected_idx = np.array( + [ + [ + [ + [0, 1, 2, 3, 3], + [4, 5, 6, 7, 7], + [8, 9, 10, 11, 11], + [12, 13, 14, 15, 15], + [12, 13, 14, 15, 15], + ] + ] + ], + dtype=np.int32, + ) + assert np.allclose(result[0], expected) + assert np.allclose(result[1], expected_idx) + + +def test_max_pool_same_upper_auto_pads(): + rt = get_runtime() + + # array([[[[ 
0.5, 1.5, 2.5, 3.5], + # [ 4.5, 5.5, 6.5, 7.5], + # [ 8.5, 9.5, 10.5, 11.5], + # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) + data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) + strides = [1, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + # [ 0.5, 1.5, 2.5, 3.5], 0, + # [ 4.5, 5.5, 6.5, 7.5], 0, + # [ 8.5, 9.5, 10.5, 11.5], 0, + # [12.5, 13.5, 14.5, 15.5], 0 + # 0 , 0 , 0 , 0, 0 + kernel_shape = [2, 2] + auto_pad = "same_upper" + rounding_type = "floor" + index_et = "i32" + + data_node = ng.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ng.max_pool( + data_node, + strides, + dilations, + pads_begin, + pads_end, + kernel_shape, + rounding_type, + auto_pad, + index_et, + ) + comp = rt.computation(maxpool_node, data_node) + result = comp(data) + + expected = np.array( + [ + [ + [ + [5.5, 6.5, 7.5, 7.5], + [9.5, 10.5, 11.5, 11.5], + [13.5, 14.5, 15.5, 15.5], + [13.5, 14.5, 15.5, 15.5], + ] + ] + ], + dtype=np.float32, + ) + expected_idx = np.array( + [ + [ + [ + [5, 6, 7, 7], + [9, 10, 11, 11], + [13, 14, 15, 15], + [13, 14, 15, 15], + ] + ] + ], + dtype=np.int32, + ) + assert np.allclose(result[0], expected) + assert np.allclose(result[1], expected_idx) + + +def test_max_pool_same_lower_auto_pads(): + rt = get_runtime() + + # array([[[[ 0.5, 1.5, 2.5, 3.5], + # [ 4.5, 5.5, 6.5, 7.5], + # [ 8.5, 9.5, 10.5, 11.5], + # [12.5, 13.5, 14.5, 15.5]]]], dtype=float32) + data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) + strides = [1, 1] + dilations = [1, 1] + pads_begin = [0, 0] + pads_end = [0, 0] + # 0 0 , 0 , 0 , 0, + # 0 [ 0.5, 1.5, 2.5, 3.5], + # 0 [ 4.5, 5.5, 6.5, 7.5], + # 0 [ 8.5, 9.5, 10.5, 11.5], + # 0 [12.5, 13.5, 14.5, 15.5], + kernel_shape = [2, 2] + auto_pad = "same_lower" + rounding_type = "floor" + index_et = "i32" + + data_node = ng.parameter(data.shape, name="A", dtype=np.float32) + maxpool_node = ng.max_pool( + data_node, + strides, + dilations, + pads_begin, + pads_end, + 
kernel_shape, + rounding_type, + auto_pad, + index_et, + ) + comp = rt.computation(maxpool_node, data_node) + result = comp(data) + + expected = np.array( + [ + [ + [ + [0.5, 1.5, 2.5, 3.5], + [4.5, 5.5, 6.5, 7.5], + [8.5, 9.5, 10.5, 11.5], + [12.5, 13.5, 14.5, 15.5], + ] + ] + ], + dtype=np.float32, + ) + expected_idx = np.array( + [ + [ + [ + [0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11], + [12, 13, 14, 15], + ] + ] + ], + dtype=np.int32, + ) + assert np.allclose(result[0], expected) + assert np.allclose(result[1], expected_idx) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_proposal.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_proposal.py new file mode 100644 index 00000000000..0c99934b1ba --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_proposal.py @@ -0,0 +1,36 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import ngraph as ng +from ngraph.impl import Shape, Type + + +def test_proposal_props(): + float_dtype = np.float32 + batch_size = 1 + post_nms_topn = 20 + probs = ng.parameter(Shape([batch_size, 8, 255, 255]), dtype=float_dtype, name="probs") + deltas = ng.parameter(Shape([batch_size, 16, 255, 255]), dtype=float_dtype, name="bbox_deltas") + im_info = ng.parameter(Shape([4]), dtype=float_dtype, name="im_info") + + attrs = { + "base_size": np.uint32(85), + "pre_nms_topn": np.uint32(10), + "post_nms_topn": np.uint32(post_nms_topn), + "nms_thresh": np.float32(0.34), + "feat_stride": np.uint32(16), + "min_size": np.uint32(32), + "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float32), + "scale": np.array([2, 3, 3, 4], dtype=np.float32), + } + + node = ng.proposal(probs, deltas, im_info, attrs) + + assert node.get_type_name() == "Proposal" + assert node.get_output_size() == 2 + + assert list(node.get_output_shape(0)) == [batch_size * post_nms_topn, 5] + assert list(node.get_output_shape(1)) == [batch_size * 
post_nms_topn] + assert node.get_output_element_type(0) == Type.f32 + assert node.get_output_element_type(1) == Type.f32 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py new file mode 100644 index 00000000000..c82654c7167 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_random_uniform.py @@ -0,0 +1,27 @@ +import ngraph as ng +import numpy as np +from tests_compatibility.runtime import get_runtime + + +def test_random_uniform(): + runtime = get_runtime() + input_tensor = ng.constant(np.array([2, 4, 3], dtype=np.int32)) + min_val = ng.constant(np.array([-2.7], dtype=np.float32)) + max_val = ng.constant(np.array([3.5], dtype=np.float32)) + + random_uniform_node = ng.random_uniform(input_tensor, min_val, max_val, + output_type="f32", global_seed=7461, + op_seed=1546) + computation = runtime.computation(random_uniform_node) + random_uniform_results = computation() + expected_results = np.array([[[2.8450181, -2.3457108, 2.2134445], + [-1.0436587, 0.79548645, 1.3023183], + [0.34447956, -2.0267959, 1.3989122], + [0.9607613, 1.5363653, 3.117298]], + + [[1.570041, 2.2782724, 2.3193843], + [3.3393657, 0.63299894, 0.41231918], + [3.1739233, 0.03919673, -0.2136085], + [-1.4519991, -2.277353, 2.630727]]], dtype=np.float32) + + assert np.allclose(random_uniform_results, expected_results) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_reduction.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_reduction.py new file mode 100644 index 00000000000..c360f75d815 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_reduction.py @@ -0,0 +1,213 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +from _pyngraph import PartialShape, Dimension + +import ngraph as ng +from ngraph.utils.types import 
make_constant_node +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node + + +@pytest.mark.parametrize( + "ng_api_helper, numpy_function, reduction_axes", + [ + (ng.reduce_max, np.max, np.array([0, 1, 2, 3])), + (ng.reduce_min, np.min, np.array([0, 1, 2, 3])), + (ng.reduce_sum, np.sum, np.array([0, 1, 2, 3])), + (ng.reduce_prod, np.prod, np.array([0, 1, 2, 3])), + (ng.reduce_max, np.max, np.array([0])), + (ng.reduce_min, np.min, np.array([0])), + (ng.reduce_sum, np.sum, np.array([0])), + (ng.reduce_prod, np.prod, np.array([0])), + (ng.reduce_max, np.max, np.array([0, 2])), + (ng.reduce_min, np.min, np.array([0, 2])), + (ng.reduce_sum, np.sum, np.array([0, 2])), + (ng.reduce_prod, np.prod, np.array([0, 2])), + ], +) +def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.randn(*shape).astype(np.float32) + + expected = numpy_function(input_data, axis=tuple(reduction_axes)) + result = run_op_node([input_data], ng_api_helper, reduction_axes) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "ng_api_helper, numpy_function, reduction_axes", + [ + (ng.reduce_logical_and, np.logical_and.reduce, np.array([0])), + (ng.reduce_logical_or, np.logical_or.reduce, np.array([0])), + (ng.reduce_logical_and, np.logical_and.reduce, np.array([0, 2])), + (ng.reduce_logical_or, np.logical_or.reduce, np.array([0, 2])), + (ng.reduce_logical_and, np.logical_and.reduce, np.array([0, 1, 2, 3])), + (ng.reduce_logical_or, np.logical_or.reduce, np.array([0, 1, 2, 3])), + ], +) +def test_reduction_logical_ops(ng_api_helper, numpy_function, reduction_axes): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.randn(*shape).astype(np.bool) + + expected = numpy_function(input_data, axis=tuple(reduction_axes)) + result = run_op_node([input_data], ng_api_helper, reduction_axes) + assert np.allclose(result, 
expected) + + +def test_topk(): + data_shape = [6, 12, 10, 24] + data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + K = np.int32(3) + axis = np.int32(1) + node = ng.topk(data_parameter, K, axis, "max", "value") + assert node.get_type_name() == "TopK" + assert node.get_output_size() == 2 + assert list(node.get_output_shape(0)) == [6, 3, 10, 24] + assert list(node.get_output_shape(1)) == [6, 3, 10, 24] + + +@pytest.mark.parametrize( + "ng_api_helper, numpy_function, reduction_axes", + [ + (ng.reduce_mean, np.mean, np.array([0, 1, 2, 3])), + (ng.reduce_mean, np.mean, np.array([0])), + (ng.reduce_mean, np.mean, np.array([0, 2])), + ], +) +def test_reduce_mean_op(ng_api_helper, numpy_function, reduction_axes): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.randn(*shape).astype(np.float32) + + expected = numpy_function(input_data, axis=tuple(reduction_axes)) + result = run_op_node([input_data], ng_api_helper, reduction_axes) + assert np.allclose(result, expected) + + +def test_non_max_suppression(): + + boxes_shape = [1, 1000, 4] + scores_shape = [1, 1, 1000] + boxes_parameter = ng.parameter(boxes_shape, name="Boxes", dtype=np.float32) + scores_parameter = ng.parameter(scores_shape, name="Scores", dtype=np.float32) + + node = ng.non_max_suppression(boxes_parameter, scores_parameter, make_constant_node(1000, np.int64)) + + assert node.get_type_name() == "NonMaxSuppression" + assert node.get_output_size() == 3 + assert node.get_output_partial_shape(0) == PartialShape([Dimension(0, 1000), Dimension(3)]) + assert node.get_output_partial_shape(1) == PartialShape([Dimension(0, 1000), Dimension(3)]) + assert list(node.get_output_shape(2)) == [1] + + +def test_non_zero(): + + data_shape = [3, 10, 100, 200] + + data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + + node = ng.non_zero(data_parameter) + + assert node.get_type_name() == "NonZero" + assert node.get_output_size() == 1 + + +def test_roi_align(): + 
+ data_shape = [7, 256, 200, 200] + rois = [1000, 4] + batch_indices = [1000] + expected_shape = [1000, 256, 6, 6] + + data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32) + rois_parameter = ng.parameter(rois, name="Rois", dtype=np.float32) + batch_indices_parameter = ng.parameter(batch_indices, name="Batch_indices", dtype=np.int32) + pooled_h = 6 + pooled_w = 6 + sampling_ratio = 2 + spatial_scale = np.float32(16) + mode = "avg" + + node = ng.roi_align( + data_parameter, + rois_parameter, + batch_indices_parameter, + pooled_h, + pooled_w, + sampling_ratio, + spatial_scale, + mode, + ) + + assert node.get_type_name() == "ROIAlign" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == expected_shape + + +@pytest.mark.parametrize( + "input_shape, cumsum_axis, reverse", + [([5, 2], 0, False), ([5, 2], 1, False), ([5, 2, 6], 2, False), ([5, 2], 0, True)], +) +def test_cum_sum(input_shape, cumsum_axis, reverse): + input_data = np.arange(np.prod(input_shape)).reshape(input_shape) + + if reverse: + expected = np.cumsum(input_data[::-1], axis=cumsum_axis)[::-1] + else: + expected = np.cumsum(input_data, axis=cumsum_axis) + + runtime = get_runtime() + node = ng.cum_sum(input_data, cumsum_axis, reverse=reverse) + computation = runtime.computation(node) + result = computation() + assert np.allclose(result, expected) + + +def test_normalize_l2(): + input_shape = [1, 2, 3, 4] + input_data = np.arange(np.prod(input_shape)).reshape(input_shape).astype(np.float32) + input_data += 1 + axes = np.array([1, 2, 3]).astype(np.int64) + eps = 1e-6 + eps_mode = "add" + + runtime = get_runtime() + node = ng.normalize_l2(input_data, axes, eps, eps_mode) + computation = runtime.computation(node) + result = computation() + + expected = np.array( + [ + 0.01428571, + 0.02857143, + 0.04285714, + 0.05714286, + 0.07142857, + 0.08571429, + 0.1, + 0.11428571, + 0.12857144, + 0.14285715, + 0.15714286, + 0.17142858, + 0.18571429, + 0.2, + 0.21428572, + 
0.22857143, + 0.24285714, + 0.25714287, + 0.27142859, + 0.2857143, + 0.30000001, + 0.31428573, + 0.32857144, + 0.34285715, + ] + ).reshape(input_shape) + + assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_roll.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_roll.py new file mode 100644 index 00000000000..3a090b2ad30 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_roll.py @@ -0,0 +1,18 @@ +import ngraph as ng +import numpy as np +from tests_compatibility.runtime import get_runtime + + +def test_roll(): + runtime = get_runtime() + input = np.reshape(np.arange(10), (2, 5)) + input_tensor = ng.constant(input) + input_shift = ng.constant(np.array([-10, 7], dtype=np.int32)) + input_axes = ng.constant(np.array([-1, 0], dtype=np.int32)) + + roll_node = ng.roll(input_tensor, input_shift, input_axes) + computation = runtime.computation(roll_node) + roll_results = computation() + expected_results = np.roll(input, shift=(-10, 7), axis=(-1, 0)) + + assert np.allclose(roll_results, expected_results) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_sequence_processing.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_sequence_processing.py new file mode 100644 index 00000000000..6b96724569a --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_sequence_processing.py @@ -0,0 +1,45 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import ngraph as ng +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_ngraph.util import run_op_node +from tests_compatibility import (xfail_issue_47337, + xfail_issue_44848) + + +def test_onehot(): + runtime = get_runtime() + param = ng.parameter([3], dtype=np.int32) + model = ng.one_hot(param, 3, 1, 0, 0) + computation = runtime.computation(model, param) + + expected = 
np.eye(3)[np.array([1, 0, 2])] + input_data = np.array([1, 0, 2], dtype=np.int32) + result = computation(input_data) + assert np.allclose(result, expected) + + +@xfail_issue_47337 +def test_one_hot(): + data = np.array([0, 1, 2], dtype=np.int32) + depth = 2 + on_value = 5 + off_value = 10 + axis = -1 + excepted = [[5, 10], [10, 5], [10, 10]] + + result = run_op_node([data, depth, on_value, off_value], ng.one_hot, axis) + assert np.allclose(result, excepted) + + +@xfail_issue_44848 +def test_range(): + start = 5 + stop = 35 + step = 5 + + result = run_op_node([start, stop, step], ng.range) + assert np.allclose(result, [5, 10, 15, 20, 25, 30]) diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_swish.py b/runtime/bindings/python/tests_compatibility/test_ngraph/test_swish.py new file mode 100644 index 00000000000..17b418beaca --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_swish.py @@ -0,0 +1,29 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import ngraph as ng +from ngraph.impl import Shape, Type + + +def test_swish_props_with_beta(): + float_dtype = np.float32 + data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + beta = ng.parameter(Shape([]), dtype=float_dtype, name="beta") + + node = ng.swish(data, beta) + assert node.get_type_name() == "Swish" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [3, 10] + assert node.get_output_element_type(0) == Type.f32 + + +def test_swish_props_without_beta(): + float_dtype = np.float32 + data = ng.parameter(Shape([3, 10]), dtype=float_dtype, name="data") + + node = ng.swish(data) + assert node.get_type_name() == "Swish" + assert node.get_output_size() == 1 + assert list(node.get_output_shape(0)) == [3, 10] + assert node.get_output_element_type(0) == Type.f32 diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/test_utils.py 
b/runtime/bindings/python/tests_compatibility/test_ngraph/test_utils.py new file mode 100644 index 00000000000..49b90017305 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/test_utils.py @@ -0,0 +1,28 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import ngraph as ng +from ngraph.impl import Shape + + +def test_get_constant_from_source_success(): + dtype = np.int + input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1") + input2 = ng.parameter(Shape([25]), dtype=dtype, name="input_2") + shape_of = ng.shape_of(input2, name="shape_of") + reshape = ng.reshape(input1, shape_of, special_zero=True) + folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) + + assert folded_const is not None + assert folded_const.get_vector() == [25] + + +def test_get_constant_from_source_failed(): + dtype = np.int + input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1") + input2 = ng.parameter(Shape([1]), dtype=dtype, name="input_2") + reshape = ng.reshape(input1, input2, special_zero=True) + folded_const = ng.impl.util.get_constant_from_source(reshape.input(1).get_source_output()) + + assert folded_const is None diff --git a/runtime/bindings/python/tests_compatibility/test_ngraph/util.py b/runtime/bindings/python/tests_compatibility/test_ngraph/util.py new file mode 100644 index 00000000000..bb75222452b --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_ngraph/util.py @@ -0,0 +1,79 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from typing import Any, Callable, List, Union + +import numpy as np + +import ngraph as ng +from ngraph.utils.types import NumericData +from tests_compatibility.runtime import get_runtime +from string import ascii_uppercase + + +def _get_numpy_dtype(scalar): + return np.array([scalar]).dtype + + +def run_op_node(input_data, op_fun, *args): + # type: 
(Union[NumericData, List[NumericData]], Callable, *Any) -> List[NumericData] + """Run computation on node performing `op_fun`. + + `op_fun` has to accept a node as an argument. + + This function converts passed raw input data to nGraph Constant Node and that form is passed + to `op_fun`. + + :param input_data: The input data for performed computation. + :param op_fun: The function handler for operation we want to carry out. + :param args: The arguments passed to operation we want to carry out. + :return: The result from computations. + """ + runtime = get_runtime() + comp_args = [] + op_fun_args = [] + comp_inputs = [] + + for idx, data in enumerate(input_data): + node = None + if np.isscalar(data): + node = ng.parameter([], name=ascii_uppercase[idx], dtype=_get_numpy_dtype(data)) + else: + node = ng.parameter(data.shape, name=ascii_uppercase[idx], dtype=data.dtype) + op_fun_args.append(node) + comp_args.append(node) + comp_inputs.append(data) + + op_fun_args.extend(args) + node = op_fun(*op_fun_args) + computation = runtime.computation(node, *comp_args) + return computation(*comp_inputs) + + +def run_op_numeric_data(input_data, op_fun, *args): + # type: (NumericData, Callable, *Any) -> List[NumericData] + """Run computation on node performing `op_fun`. + + `op_fun` has to accept a scalar or an array. + + This function passes input data AS IS. This means that in case they're a scalar (integral, + or floating point value) or a NumPy's ndarray object they will be automatically converted + to nGraph's Constant Nodes. + + :param input_data: The input data for performed computation. + :param op_fun: The function handler for operation we want to carry out. + :param args: The arguments passed to operation we want to carry out. + :return: The result from computations.
+ """ + runtime = get_runtime() + node = op_fun(input_data, *args) + computation = runtime.computation(node) + return computation() + + +def count_ops_of_type(func, op_type): + count = 0 + for op in func.get_ops(): + if (type(op) is type(op_type)): + count += 1 + return count diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/__init__.py b/runtime/bindings/python/tests_compatibility/test_onnx/__init__.py new file mode 100644 index 00000000000..dc8ba3c4598 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh b/runtime/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh new file mode 100755 index 00000000000..e7b5a891cf1 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh @@ -0,0 +1,170 @@ +#!/bin/bash + +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -e + +# default ONNX Model Zoo commit hash ID: +ONNX_SHA=d58213534f2a4d1c4b19ba62b3bb5f544353256e + +MODELS_DIR="$HOME/.onnx/model_zoo" +ENABLE_MSFT=false +ENABLE_ONNX_MODELS_ZOO=false +ENABLE_MSFT_MODELS=false +FORCE_MODE=false + +function print_help { + echo "Model preprocessing options:" + echo " -h display this help message" + echo " -d set location of the models (for onnx model ZOO and MSFT models)" + printf " By default the models location is: %s\n" "$HOME/.onnx/model_zoo" + echo " -o update Onnx Model Zoo models" + echo " -s Onnx Model Zoo commit SHA" + echo " -m update MSFT models" + echo " -f force update of a chosen model" + echo "" + echo "Note: This script requires wget, GNU tar (not bsdtar) and git with LFS support." +} + +while getopts "homfd:s:" opt; do + case ${opt} in + h ) + print_help + ;; + \? 
) + print_help + ;; + : ) + print_help + ;; + d ) + MODELS_DIR="$OPTARG" + ;; + o ) + ENABLE_ONNX_MODELS_ZOO=true + ;; + s ) + ONNX_SHA="$OPTARG" + ;; + m ) + ENABLE_MSFT_MODELS=true + ;; + f ) + FORCE_MODE=true + ;; + esac +done +shift $((OPTIND -1)) + +MODEL_ZOO_DIR="$MODELS_DIR/model_zoo" +ONNX_MODELS_DIR="$MODEL_ZOO_DIR/onnx_model_zoo_$ONNX_SHA" +MSFT_MODELS_DIR="$MODEL_ZOO_DIR/MSFT" + +function pull_and_postprocess_onnx_model_zoo() { + git fetch + git reset HEAD --hard + + git checkout -f $ONNX_SHA + + echo "Pulling models data via Git LFS for onnx model zoo repository" + git lfs pull --include="*" --exclude="*.onnx" + find "$ONNX_MODELS_DIR" -name "*.onnx" | while read filename; do rm "$filename"; done; + + printf "Extracting tar.gz archives into %s\n" "$ONNX_MODELS_DIR" + find "$ONNX_MODELS_DIR" -name '*.tar.gz' \ + -execdir sh -c 'BASEDIR=$(basename "{}" .tar.gz) && rm -rf $BASEDIR && mkdir -p $BASEDIR' \; \ + -execdir sh -c 'BASEDIR=$(basename "{}" .tar.gz) && tar --warning=no-unknown-keyword -xvzf "{}" -C $BASEDIR' \; + + echo "Postprocessing of ONNX Model Zoo models:" + + echo "Fix roberta model" + cd "$ONNX_MODELS_DIR/text/machine_comprehension/roberta/model/roberta-sequence-classification-9/roberta-sequence-classification-9" + mkdir -p test_data_set_0 + mv *.pb test_data_set_0/ + + rm -f $MODEL_ZOO_DIR/executing_$ONNX_SHA +} + +function update_onnx_models() { + if test `find $MODEL_ZOO_DIR/executing_$ONNX_SHA -mmin +60 2>/dev/null`;then + rm -rf $ONNX_MODELS_DIR + rm -f $MODEL_ZOO_DIR/executing_$ONNX_SHA + fi + + while [[ -f $MODEL_ZOO_DIR/executing_$ONNX_SHA ]]; + do + echo "Onnx Models update are currently executing - sleeping 5 minutes" + sleep 300 + done + + if [[ ! 
-d $ONNX_MODELS_DIR ]] ; then + touch $MODEL_ZOO_DIR/executing_$ONNX_SHA + trap "rm -f $MODEL_ZOO_DIR/executing_$ONNX_SHA" EXIT INT TERM + echo "The ONNX Model Zoo repository doesn't exist on your filesystem then will be cloned" + git clone https://github.com/onnx/models.git "$ONNX_MODELS_DIR" + cd "$ONNX_MODELS_DIR" + pull_and_postprocess_onnx_model_zoo + else + # Check if ONNX Model Zoo directory consists of proper git repo + export git_remote_url=`git -C $ONNX_MODELS_DIR config --local remote.origin.url 2> /dev/null 2>&1` + printf "ONNX Model Zoo repository exists: %s\n" "$ONNX_MODELS_DIR" + if [[ $git_remote_url = "https://github.com/onnx/models.git" ]]; then + printf "The proper github repository detected: %s\n" "$git_remote_url" + else + echo "The ONNX Model Zoo repository doesn't exist then will be cloned" + git clone https://github.com/onnx/models.git "$ONNX_MODELS_DIR" + fi + fi +} + +function update_msft_models() { + wget https://onnxruntimetestdata.blob.core.windows.net/models/20191107.zip -O "$MSFT_MODELS_DIR.zip" + unzip "$MSFT_MODELS_DIR.zip" -d "$MSFT_MODELS_DIR" && rm "$MSFT_MODELS_DIR.zip" + +} + +function postprocess_msft_models() { + echo "Postprocessing of MSFT models:" + + echo "Fix LSTM_Seq_lens_unpacked" + mv $MSFT_MODELS_DIR/opset9/LSTM_Seq_lens_unpacked/seq_lens_sorted $MSFT_MODELS_DIR/opset9/LSTM_Seq_lens_unpacked/test_data_set_0 + mv $MSFT_MODELS_DIR/opset9/LSTM_Seq_lens_unpacked/seq_lens_unsorted $MSFT_MODELS_DIR/opset9/LSTM_Seq_lens_unpacked/test_data_set_1 +} + +if [[ $ENABLE_ONNX_MODELS_ZOO = false ]] && [[ $ENABLE_MSFT_MODELS = false ]] ; then + echo "Please choose an option to update chosen model: + -o to update ONNX Model ZOO + -m to update MSFT models" + exit 170 +fi + +if [[ $MODELS_DIR = false ]] ; then + printf "Unknown location of the general models directory (onnx model ZOO and MSFT models) + Please specify the location using -d flag" + exit 170 +fi + + +# check if general model zoo directory exists (directory to store ONNX 
model zoo and MSFT models) +if [[ ! -d $MODEL_ZOO_DIR ]] ; then + printf "The general model directory: %s doesn't exist on your filesystem, it will be created \n" "$MODEL_ZOO_DIR" + mkdir -p $MODEL_ZOO_DIR +else + printf "The general model directory: %s found\n" "$MODEL_ZOO_DIR" +fi + +if [[ $ENABLE_ONNX_MODELS_ZOO = true ]] ; then + if [[ $FORCE_MODE = true ]]; then + rm -rf $ONNX_MODELS_DIR + fi + update_onnx_models +fi + +if [[ $ENABLE_MSFT_MODELS = true ]] ; then + if [[ $FORCE_MODE = true ]]; then + rm -rf $MSFT_MODELS_DIR + fi + update_msft_models + postprocess_msft_models +fi diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/models/add_abc.onnx b/runtime/bindings/python/tests_compatibility/test_onnx/models/add_abc.onnx new file mode 100644 index 00000000000..5c2da5dcc0b --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/models/add_abc.onnx @@ -0,0 +1,24 @@ +ngraph ONNXImporter:† + +A +BX add_node1"Add + +X +CY add_node2"Add +test_graphZ +A + + +Z +B + + +Z +C + + +b +Y + + +B \ No newline at end of file diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/models/data/tensor.data b/runtime/bindings/python/tests_compatibility/test_onnx/models/data/tensor.data new file mode 100644 index 0000000000000000000000000000000000000000..5116510eebcfbd3a254c8e0e661dbb88acd086a7 GIT binary patch literal 12 TcmZQzSm40G&|uHN;NSoN4Tl0C literal 0 HcmV?d00001 diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/models/external_data.onnx b/runtime/bindings/python/tests_compatibility/test_onnx/models/external_data.onnx new file mode 100644 index 00000000000..9cd1ae3bb10 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/models/external_data.onnx @@ -0,0 +1,22 @@ +nGraph ONNX Importer:Á +& +data_a +data_b +data_cresult"Meantest_mean_example*,Bdata_cj +locationdata/tensor.datapZ +data_a + + +Z +data_b + + +Z +data_c + + +b +result + + +B \ No newline at end of file diff --git 
a/runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py new file mode 100644 index 00000000000..c850f9d5045 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_backend.py @@ -0,0 +1,528 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging + +import onnx.backend.test +from tests_compatibility import ( + BACKEND_NAME, + skip_rng_tests, + xfail_issue_33488, + xfail_issue_33538, + xfail_issue_33581, + xfail_issue_33589, + xfail_issue_33593, + xfail_issue_33595, + xfail_issue_33596, + xfail_issue_33606, + xfail_issue_33633, + xfail_issue_33651, + xfail_issue_38091, + xfail_issue_38699, + xfail_issue_38701, + xfail_issue_38706, + xfail_issue_38708, + xfail_issue_38710, + xfail_issue_38713, + xfail_issue_38722, + xfail_issue_38724, + xfail_issue_38732, + xfail_issue_38734, + xfail_issue_38735, + xfail_issue_39656, + xfail_issue_39658, + xfail_issue_39659, + xfail_issue_39662, + xfail_issue_44848, + xfail_issue_44851, + xfail_issue_44854, + xfail_issue_44858, + xfail_issue_44956, + xfail_issue_44957, + xfail_issue_44958, + xfail_issue_44965, + xfail_issue_44968, + xfail_issue_45180, + xfail_issue_45344, + xfail_issue_47323, + xfail_issue_47337, + xfail_issue_48052, + xfail_issue_49207, + xfail_issue_49750, + xfail_issue_49752, + xfail_issue_49753, + xfail_issue_49754, + xfail_issue_52463, + xfail_issue_55760, + xfail_issue_58033, + xfail_issue_63033, + xfail_issue_63036, + xfail_issue_63039, + xfail_issue_63043, + xfail_issue_63044, + xfail_issue_63045, + xfail_issue_63136, + xfail_issue_63137, + xfail_issue_63138, +) +from tests_compatibility.test_onnx.utils.onnx_backend import OpenVinoTestBackend + + +def expect_fail(test_case_path, xfail): # type: (str) -> None + """Mark the test as expected to fail.""" + module_name, test_name = test_case_path.split(".") + module = globals().get(module_name) + if 
hasattr(module, test_name): + xfail(getattr(module, test_name)) + else: + logging.getLogger().warning( + "Could not mark test as XFAIL, not found: %s", test_case_path + ) + + +OpenVinoTestBackend.backend_name = BACKEND_NAME + +# This is a pytest magic variable to load extra plugins +# Uncomment the line below to enable the ONNX compatibility report +# pytest_plugins = "onnx.backend.test.report", + +# import all test cases at global scope to make them visible to python.unittest +backend_test = onnx.backend.test.BackendTest(OpenVinoTestBackend, __name__) + +skip_tests_general = [ + # Big model tests (see test_zoo_models.py): + "test_bvlc_alexnet", + "test_densenet121", + "test_inception_v1", + "test_inception_v2", + "test_resnet50", + "test_shufflenet", + "test_squeezenet", + "test_vgg19", + "test_zfnet512", +] + +for test in skip_tests_general: + backend_test.exclude(test) + +# NOTE: ALL backend_test.exclude CALLS MUST BE PERFORMED BEFORE THE CALL TO globals().update + +OnnxBackendNodeModelTest = None +OnnxBackendSimpleModelTest = None +OnnxBackendPyTorchOperatorModelTest = None +OnnxBackendPyTorchConvertedModelTest = None +globals().update(backend_test.enable_report().test_cases) + +tests_expected_to_fail = [ + ( + xfail_issue_49207, + "OnnxBackendNodeModelTest.test_rnn_seq_length_cpu", + "OnnxBackendNodeModelTest.test_simple_rnn_defaults_cpu", + "OnnxBackendNodeModelTest.test_simple_rnn_with_initial_bias_cpu", + "OnnxBackendNodeModelTest.test_gru_defaults_cpu", + "OnnxBackendNodeModelTest.test_gru_seq_length_cpu", + "OnnxBackendNodeModelTest.test_gru_with_initial_bias_cpu", + "OnnxBackendNodeModelTest.test_lstm_defaults_cpu", + "OnnxBackendNodeModelTest.test_lstm_with_initial_bias_cpu", + "OnnxBackendNodeModelTest.test_lstm_with_peepholes_cpu", + ), + ( + xfail_issue_49752, + "OnnxBackendNodeModelTest.test_constant_pad_cpu", + "OnnxBackendNodeModelTest.test_edge_pad_cpu", + "OnnxBackendNodeModelTest.test_reflect_pad_cpu", + ), + ( + xfail_issue_39656, + 
"OnnxBackendNodeModelTest.test_reshape_extended_dims_cpu", + "OnnxBackendNodeModelTest.test_reshape_negative_dim_cpu", + "OnnxBackendNodeModelTest.test_reshape_one_dim_cpu", + "OnnxBackendNodeModelTest.test_reshape_reduced_dims_cpu", + "OnnxBackendNodeModelTest.test_reshape_negative_extended_dims_cpu", + "OnnxBackendNodeModelTest.test_reshape_reordered_all_dims_cpu", + "OnnxBackendNodeModelTest.test_reshape_reordered_last_dims_cpu", + "OnnxBackendNodeModelTest.test_reshape_zero_and_negative_dim_cpu", + "OnnxBackendNodeModelTest.test_reshape_zero_dim_cpu", + ), + ( + xfail_issue_39658, + "OnnxBackendNodeModelTest.test_tile_cpu", + "OnnxBackendNodeModelTest.test_tile_precomputed_cpu", + ), + ( + xfail_issue_39659, + "OnnxBackendNodeModelTest.test_constantofshape_float_ones_cpu", + "OnnxBackendNodeModelTest.test_constantofshape_int_zeros_cpu", + "OnnxBackendNodeModelTest.test_constantofshape_int_shape_zero_cpu", + ), + ( + xfail_issue_45344, + "OnnxBackendNodeModelTest.test_nonmaxsuppression_center_point_box_format_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_flipped_coordinates_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_identical_boxes_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_limit_output_size_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_single_box_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_two_batches_cpu", + "OnnxBackendNodeModelTest.test_nonmaxsuppression_two_classes_cpu", + ), + ( + xfail_issue_39662, + "OnnxBackendNodeModelTest.test_nonmaxsuppression_two_classes_cpu", + "OnnxBackendNodeModelTest.test_scatter_elements_with_negative_indices_cpu", + "OnnxBackendNodeModelTest.test_gather_negative_indices_cpu", + ), + (xfail_issue_49753, "OnnxBackendNodeModelTest.test_slice_default_axes_cpu"), + ( + xfail_issue_49754, + 
"OnnxBackendNodeModelTest.test_top_k_cpu", + "OnnxBackendNodeModelTest.test_top_k_negative_axis_cpu", + "OnnxBackendNodeModelTest.test_top_k_smallest_cpu", + ), + (xfail_issue_33633, "OnnxBackendNodeModelTest.test_maxpool_2d_dilations_cpu"), + ( + xfail_issue_55760, + "OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_example_select_last_index_cpu", + "OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_example_select_last_index_cpu", + "OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_random_select_last_index_cpu", + "OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_random_select_last_index_cpu", + ), + ( + xfail_issue_38091, + "OnnxBackendNodeModelTest.test_gather_negative_indices_cpu", + "OnnxBackendNodeModelTest.test_dynamicquantizelinear_cpu", + "OnnxBackendNodeModelTest.test_dynamicquantizelinear_expanded_cpu", + ), + ( + xfail_issue_52463, + "OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_singleton_broadcast_cpu", + ), + ( + xfail_issue_47323, + "OnnxBackendPyTorchOperatorModelTest.test_operator_add_broadcast_cpu", + "OnnxBackendPyTorchOperatorModelTest.test_operator_addconstant_cpu", + "OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_right_broadcast_cpu", + ), + ( + xfail_issue_38699, + "OnnxBackendSimpleModelTest.test_gradient_of_add_and_mul_cpu", + "OnnxBackendSimpleModelTest.test_gradient_of_add_cpu", + ), + ( + xfail_issue_33596, + "OnnxBackendSimpleModelTest.test_sequence_model5_cpu", + "OnnxBackendSimpleModelTest.test_sequence_model7_cpu", + "OnnxBackendSimpleModelTest.test_sequence_model1_cpu", + "OnnxBackendSimpleModelTest.test_sequence_model3_cpu", + "OnnxBackendSimpleModelTest.test_sequence_model6_cpu", + "OnnxBackendSimpleModelTest.test_sequence_model8_cpu", + "OnnxBackendSimpleModelTest.test_sequence_model4_cpu", + "OnnxBackendSimpleModelTest.test_sequence_model2_cpu", + "OnnxBackendNodeModelTest.test_identity_sequence_cpu", + "OnnxBackendNodeModelTest.test_if_seq_cpu", + ), + ( + 
xfail_issue_38701, + "OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_nochangecase_cpu", + "OnnxBackendSimpleModelTest.test_strnorm_model_nostopwords_nochangecase_cpu", + "OnnxBackendSimpleModelTest.test_strnorm_model_monday_empty_output_cpu", + "OnnxBackendSimpleModelTest.test_strnorm_model_monday_insensintive_upper_twodim_cpu", + "OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_lower_cpu", + "OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_upper_cpu", + "OnnxBackendNodeModelTest.test_strnormalizer_nostopwords_nochangecase_cpu", + "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_nochangecase_cpu", + "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_insensintive_upper_twodim_cpu", + "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_lower_cpu", + "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_empty_output_cpu", + "OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_upper_cpu", + "OnnxBackendNodeModelTest.test_cast_STRING_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_cast_FLOAT_to_STRING_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_STRING_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_STRING_expanded_cpu", + "OnnxBackendNodeModelTest.test_castlike_STRING_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_castlike_STRING_to_FLOAT_expanded_cpu", + ), + ( + xfail_issue_33595, + "OnnxBackendNodeModelTest.test_unique_not_sorted_without_axis_cpu", + "OnnxBackendNodeModelTest.test_unique_sorted_with_negative_axis_cpu", + "OnnxBackendNodeModelTest.test_unique_sorted_with_axis_cpu", + "OnnxBackendNodeModelTest.test_unique_sorted_with_axis_3d_cpu", + "OnnxBackendNodeModelTest.test_unique_sorted_without_axis_cpu", + ), + ( + xfail_issue_33651, + "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu", + "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu", + 
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu", + "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu", + "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_only_bigrams_skip0_cpu", + "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu", + "OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu", + ), + (xfail_issue_38706, "OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu"), + ( + xfail_issue_38708, + "OnnxBackendNodeModelTest.test_slice_default_steps_cpu", + "OnnxBackendNodeModelTest.test_slice_negative_axes_cpu", + "OnnxBackendNodeModelTest.test_slice_neg_steps_cpu", + "OnnxBackendNodeModelTest.test_slice_neg_cpu", + "OnnxBackendNodeModelTest.test_slice_cpu", + "OnnxBackendNodeModelTest.test_slice_end_out_of_bounds_cpu", + "OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu", + ), + ( + xfail_issue_33538, + "OnnxBackendNodeModelTest.test_scan_sum_cpu", + "OnnxBackendNodeModelTest.test_scan9_sum_cpu", + ), + ( + xfail_issue_49750, + "OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_align_corners_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_align_corners_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_scales_nearest_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_asymmetric_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_align_corners_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_align_corners_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu", + 
"OnnxBackendNodeModelTest.test_resize_downsample_scales_nearest_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_sizes_cubic_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_floor_align_corners_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu", + "OnnxBackendNodeModelTest.test_resize_upsample_sizes_cubic_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_cpu", + "OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu", + ), + ( + xfail_issue_33581, + "OnnxBackendNodeModelTest.test_gather_elements_negative_indices_cpu", + ), + ( + xfail_issue_38713, + "OnnxBackendNodeModelTest.test_momentum_cpu", + "OnnxBackendNodeModelTest.test_nesterov_momentum_cpu", + "OnnxBackendNodeModelTest.test_momentum_multiple_cpu", + ), + ( + xfail_issue_47337, + "OnnxBackendNodeModelTest.test_onehot_without_axis_cpu", + "OnnxBackendNodeModelTest.test_onehot_with_negative_axis_cpu", + "OnnxBackendNodeModelTest.test_onehot_with_axis_cpu", + "OnnxBackendNodeModelTest.test_onehot_negative_indices_cpu", + ), + ( + xfail_issue_33488, + "OnnxBackendNodeModelTest.test_maxunpool_export_with_output_shape_cpu", + "OnnxBackendNodeModelTest.test_maxunpool_export_without_output_shape_cpu", + ), + ( + xfail_issue_33589, + "OnnxBackendNodeModelTest.test_isnan_cpu", + "OnnxBackendNodeModelTest.test_isinf_positive_cpu", + "OnnxBackendNodeModelTest.test_isinf_negative_cpu", + "OnnxBackendNodeModelTest.test_isinf_cpu", + ), + ( + xfail_issue_38722, + "OnnxBackendNodeModelTest.test_qlinearmatmul_2D_cpu", + "OnnxBackendNodeModelTest.test_qlinearmatmul_3D_cpu", + ), + 
(xfail_issue_38724, "OnnxBackendNodeModelTest.test_resize_tf_crop_and_resize_cpu"), + ( + xfail_issue_33606, + "OnnxBackendNodeModelTest.test_det_2d_cpu", + "OnnxBackendNodeModelTest.test_det_nd_cpu", + ), + ( + xfail_issue_38732, + "OnnxBackendNodeModelTest.test_convinteger_without_padding_cpu", + "OnnxBackendNodeModelTest.test_convinteger_with_padding_cpu", + "OnnxBackendNodeModelTest.test_basic_convinteger_cpu", + ), + ( + xfail_issue_38734, + "OnnxBackendNodeModelTest.test_adam_multiple_cpu", + "OnnxBackendNodeModelTest.test_adam_cpu", + ), + ( + xfail_issue_38735, + "OnnxBackendNodeModelTest.test_adagrad_multiple_cpu", + "OnnxBackendNodeModelTest.test_adagrad_cpu", + ), + ( + xfail_issue_48052, + "OnnxBackendNodeModelTest.test_training_dropout_cpu", + "OnnxBackendNodeModelTest.test_training_dropout_mask_cpu", + "OnnxBackendNodeModelTest.test_training_dropout_default_cpu", + "OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_cpu", + "OnnxBackendNodeModelTest.test_training_dropout_default_mask_cpu", + "OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_mask_cpu", + ), + ( + xfail_issue_45180, + "OnnxBackendNodeModelTest.test_reduce_sum_do_not_keepdims_example_cpu", + "OnnxBackendNodeModelTest.test_reduce_sum_do_not_keepdims_random_cpu", + "OnnxBackendNodeModelTest.test_reduce_sum_keepdims_example_cpu", + "OnnxBackendNodeModelTest.test_reduce_sum_keepdims_random_cpu", + "OnnxBackendNodeModelTest.test_reduce_sum_negative_axes_keepdims_example_cpu", + ), + ( + xfail_issue_44848, + "OnnxBackendNodeModelTest.test_range_float_type_positive_delta_cpu", + "OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_cpu", + ), + ( + xfail_issue_44851, + "OnnxBackendNodeModelTest.test_expand_dim_changed_cpu", + "OnnxBackendNodeModelTest.test_expand_dim_unchanged_cpu", + "OnnxBackendSimpleModelTest.test_expand_shape_model1_cpu", + "OnnxBackendSimpleModelTest.test_expand_shape_model2_cpu", + "OnnxBackendSimpleModelTest.test_expand_shape_model3_cpu", + 
"OnnxBackendSimpleModelTest.test_expand_shape_model4_cpu", + ), + ( + xfail_issue_44854, + "OnnxBackendNodeModelTest.test_split_variable_parts_1d_cpu", + "OnnxBackendNodeModelTest.test_split_variable_parts_2d_cpu", + "OnnxBackendNodeModelTest.test_split_variable_parts_default_axis_cpu", + ), + ( + xfail_issue_44858, + "OnnxBackendNodeModelTest.test_unsqueeze_axis_0_cpu", + "OnnxBackendNodeModelTest.test_unsqueeze_axis_1_cpu", + "OnnxBackendNodeModelTest.test_unsqueeze_axis_2_cpu", + "OnnxBackendNodeModelTest.test_unsqueeze_negative_axes_cpu", + "OnnxBackendNodeModelTest.test_unsqueeze_three_axes_cpu", + "OnnxBackendNodeModelTest.test_unsqueeze_two_axes_cpu", + "OnnxBackendNodeModelTest.test_unsqueeze_unsorted_axes_cpu", + ), + ( + xfail_issue_44956, + "OnnxBackendNodeModelTest.test_loop11_cpu", + "OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_expanded_cpu", + "OnnxBackendNodeModelTest.test_range_float_type_positive_delta_expanded_cpu", + ), + ( + xfail_issue_44957, + "OnnxBackendNodeModelTest.test_compress_0_cpu", + "OnnxBackendNodeModelTest.test_compress_1_cpu", + "OnnxBackendNodeModelTest.test_compress_default_axis_cpu", + "OnnxBackendNodeModelTest.test_compress_negative_axis_cpu", + "OnnxBackendNodeModelTest.test_nonzero_example_cpu", + ), + (xfail_issue_44958, "OnnxBackendNodeModelTest.test_upsample_nearest_cpu"), + ( + xfail_issue_44965, + "OnnxBackendNodeModelTest.test_loop13_seq_cpu", + "OnnxBackendNodeModelTest.test_sequence_insert_at_back_cpu", + "OnnxBackendNodeModelTest.test_sequence_insert_at_front_cpu", + ), + ( + xfail_issue_44968, + "OnnxBackendNodeModelTest.test_squeeze_cpu", + "OnnxBackendNodeModelTest.test_squeeze_negative_axes_cpu", + ), + ( + xfail_issue_33593, + "OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_strides_cpu", + "OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_pads_cpu", + ), + (xfail_issue_58033, "OnnxBackendNodeModelTest.test_einsum_batch_diagonal_cpu"), + ( + xfail_issue_63033, 
+ "OnnxBackendNodeModelTest.test_batchnorm_epsilon_training_mode_cpu", + "OnnxBackendNodeModelTest.test_batchnorm_example_training_mode_cpu", + ), + (xfail_issue_63036, "OnnxBackendNodeModelTest.test_convtranspose_autopad_same_cpu"), + ( + xfail_issue_63039, + "OnnxBackendNodeModelTest.test_div_uint8_cpu", + "OnnxBackendNodeModelTest.test_mul_uint8_cpu", + "OnnxBackendNodeModelTest.test_sub_uint8_cpu", + ), + ( + xfail_issue_63043, + "OnnxBackendNodeModelTest.test_gru_batchwise_cpu", + "OnnxBackendNodeModelTest.test_lstm_batchwise_cpu", + "OnnxBackendNodeModelTest.test_simple_rnn_batchwise_cpu", + ), + ( + xfail_issue_38710, + "OnnxBackendNodeModelTest.test_reshape_allowzero_reordered_cpu", + ), + ( + xfail_issue_63044, + "OnnxBackendNodeModelTest.test_tril_cpu", + "OnnxBackendNodeModelTest.test_tril_neg_cpu", + "OnnxBackendNodeModelTest.test_tril_one_row_neg_cpu", + "OnnxBackendNodeModelTest.test_tril_out_neg_cpu", + "OnnxBackendNodeModelTest.test_tril_out_pos_cpu", + "OnnxBackendNodeModelTest.test_tril_pos_cpu", + "OnnxBackendNodeModelTest.test_tril_square_cpu", + "OnnxBackendNodeModelTest.test_tril_square_neg_cpu", + "OnnxBackendNodeModelTest.test_tril_zero_cpu", + "OnnxBackendNodeModelTest.test_triu_cpu", + "OnnxBackendNodeModelTest.test_triu_neg_cpu", + "OnnxBackendNodeModelTest.test_triu_one_row_cpu", + "OnnxBackendNodeModelTest.test_triu_out_neg_out_cpu", + "OnnxBackendNodeModelTest.test_triu_out_pos_cpu", + "OnnxBackendNodeModelTest.test_triu_pos_cpu", + "OnnxBackendNodeModelTest.test_triu_square_cpu", + "OnnxBackendNodeModelTest.test_triu_square_neg_cpu", + "OnnxBackendNodeModelTest.test_triu_zero_cpu", + ), + ( + xfail_issue_63045, + "OnnxBackendPyTorchConvertedModelTest.test_MaxPool1d_stride_padding_dilation_cpu", + "OnnxBackendPyTorchConvertedModelTest.test_MaxPool2d_stride_padding_dilation_cpu", + ), + ( + skip_rng_tests, + "OnnxBackendNodeModelTest.test_bernoulli_cpu", + "OnnxBackendNodeModelTest.test_bernoulli_double_cpu", + 
"OnnxBackendNodeModelTest.test_bernoulli_double_expanded_cpu", + "OnnxBackendNodeModelTest.test_bernoulli_expanded_cpu", + "OnnxBackendNodeModelTest.test_bernoulli_seed_cpu", + "OnnxBackendNodeModelTest.test_bernoulli_seed_expanded_cpu", + ), + ( + xfail_issue_63136, + "OnnxBackendNodeModelTest.test_castlike_BFLOAT16_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_castlike_DOUBLE_to_FLOAT16_cpu", + "OnnxBackendNodeModelTest.test_castlike_DOUBLE_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_DOUBLE_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_BFLOAT16_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_DOUBLE_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_FLOAT16_cpu", + ), + ( + xfail_issue_63137, + "OnnxBackendNodeModelTest.test_optional_get_element_cpu", + "OnnxBackendNodeModelTest.test_optional_get_element_sequence_cpu", + "OnnxBackendNodeModelTest.test_optional_has_element_cpu", + "OnnxBackendNodeModelTest.test_optional_has_element_empty_cpu", + ), + ( + xfail_issue_63138, + "OnnxBackendNodeModelTest.test_shape_end_1_cpu", + "OnnxBackendNodeModelTest.test_shape_end_negative_1_cpu", + "OnnxBackendNodeModelTest.test_shape_start_1_cpu", + "OnnxBackendNodeModelTest.test_shape_start_1_end_2_cpu", + "OnnxBackendNodeModelTest.test_shape_start_1_end_negative_1_cpu", + "OnnxBackendNodeModelTest.test_shape_start_negative_1_cpu", + ), +] + +for test_group in tests_expected_to_fail: + for test_case in test_group[1:]: + expect_fail("{}".format(test_case), test_group[0]) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py new file mode 100644 index 00000000000..f36b5cbdaad --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py @@ -0,0 +1,28 @@ +# Copyright (C) 2018-2021 Intel Corporation +# 
SPDX-License-Identifier: Apache-2.0 + +import os + +import numpy as np +import ngraph as ng +from openvino.inference_engine import IECore + +from tests_compatibility.runtime import get_runtime + + +def test_import_onnx_with_external_data(): + model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx") + ie = IECore() + ie_network = ie.read_network(model=model_path) + + ng_function = ng.function_from_cnn(ie_network) + + dtype = np.float32 + value_a = np.array([1.0, 3.0, 5.0], dtype=dtype) + value_b = np.array([3.0, 5.0, 1.0], dtype=dtype) + # third input [5.0, 1.0, 3.0] read from external file + + runtime = get_runtime() + computation = runtime.computation(ng_function) + result = computation(value_a, value_b) + assert np.allclose(result, np.array([3.0, 3.0, 3.0], dtype=dtype)) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_import.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_import.py new file mode 100644 index 00000000000..3ef517bdbe2 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_onnx_import.py @@ -0,0 +1,54 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os + +import numpy as np +import ngraph as ng +import onnx +from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info +from openvino.inference_engine import IECore + +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model + + +def test_import_onnx_function(): + model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx") + ie = IECore() + ie_network = ie.read_network(model=model_path) + + ng_function = ng.function_from_cnn(ie_network) + + dtype = np.float32 + value_a = np.array([1.0], dtype=dtype) + value_b = np.array([2.0], dtype=dtype) + value_c = np.array([3.0], dtype=dtype) + + runtime = get_runtime() + computation = 
runtime.computation(ng_function) + result = computation(value_a, value_b, value_c) + assert np.allclose(result, np.array([6], dtype=dtype)) + + +def test_simple_graph(): + node1 = make_node("Add", ["A", "B"], ["X"], name="add_node1") + node2 = make_node("Add", ["X", "C"], ["Y"], name="add_node2") + graph = make_graph( + [node1, node2], + "test_graph", + [ + make_tensor_value_info("A", onnx.TensorProto.FLOAT, [1]), + make_tensor_value_info("B", onnx.TensorProto.FLOAT, [1]), + make_tensor_value_info("C", onnx.TensorProto.FLOAT, [1]), + ], + [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [1])], + ) + model = make_model(graph, producer_name="ngraph ONNX Importer") + + ng_model_function = import_onnx_model(model) + + runtime = get_runtime() + computation = runtime.computation(ng_model_function) + assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32)) + assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32)) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_batchnorm.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_batchnorm.py new file mode 100644 index 00000000000..09559300b9f --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_batchnorm.py @@ -0,0 +1,84 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx + +from tests_compatibility.test_onnx.utils import run_node + + +def make_batch_norm_node(**node_attributes): + return onnx.helper.make_node( + "BatchNormalization", inputs=["X", "scale", "B", "mean", "var"], outputs=["Y"], **node_attributes + ) + + +def test_batch_norm_test_node(): + data = np.arange(48).reshape((1, 3, 4, 4)).astype(np.float32) + scale = np.ones((3,)).astype(np.float32) # Gamma + bias = np.zeros((3,)).astype(np.float32) # Beta + mean = np.mean(data, axis=(0, 2, 3)) + var = np.var(data, axis=(0, 2, 3)) + + expected_output = np.array( + [ + [ + [ + 
[-1.62694025, -1.41001487, -1.19308949, -0.97616416], + [-0.75923878, -0.54231346, -0.32538807, -0.10846269], + [0.10846269, 0.32538807, 0.54231334, 0.75923872], + [0.9761641, 1.19308949, 1.41001487, 1.62694025], + ], + [ + [-1.62694049, -1.41001511, -1.19308972, -0.97616434], + [-0.7592392, -0.54231358, -0.32538843, -0.10846281], + [0.10846233, 0.32538795, 0.5423131, 0.75923872], + [0.97616386, 1.19308949, 1.41001463, 1.62694025], + ], + [ + [-1.62694025, -1.41001511, -1.19308949, -0.97616434], + [-0.75923872, -0.54231358, -0.32538795, -0.10846233], + [0.10846233, 0.32538795, 0.54231358, 0.7592392], + [0.97616386, 1.19308949, 1.41001511, 1.62694073], + ], + ] + ], + dtype=np.float32, + ) + + node = make_batch_norm_node() + result = run_node(node, [data, scale, bias, mean, var])[0] + assert np.allclose(result, expected_output, rtol=1e-04, atol=1e-08) + + scale = np.broadcast_to(0.1, (3,)).astype(np.float32) # Gamma + bias = np.broadcast_to(1, (3,)).astype(np.float32) # Beta + + expected_output = np.array( + [ + [ + [ + [0.83730596, 0.85899848, 0.88069105, 0.90238357], + [0.92407608, 0.94576865, 0.96746117, 0.98915374], + [1.01084626, 1.03253877, 1.05423129, 1.07592392], + [1.09761643, 1.11930895, 1.14100146, 1.16269398], + ], + [ + [0.83730596, 0.85899854, 0.88069105, 0.90238357], + [0.92407608, 0.94576865, 0.96746117, 0.98915374], + [1.01084626, 1.03253877, 1.05423141, 1.07592392], + [1.09761643, 1.11930895, 1.14100146, 1.16269398], + ], + [ + [0.83730596, 0.85899848, 0.88069105, 0.90238357], + [0.92407614, 0.94576865, 0.96746117, 0.98915374], + [1.01084626, 1.03253877, 1.05423141, 1.07592392], + [1.09761643, 1.11930895, 1.14100146, 1.16269398], + ], + ] + ], + dtype=np.float32, + ) + + node = make_batch_norm_node() + result = run_node(node, [data, scale, bias, mean, var])[0] + assert np.allclose(result, expected_output, rtol=1e-04, atol=1e-08) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_binary.py 
b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_binary.py new file mode 100644 index 00000000000..60d2d2b7382 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_binary.py @@ -0,0 +1,136 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import pytest +from onnx.helper import make_graph, make_model, make_tensor_value_info + +from tests_compatibility.test_onnx.utils import run_model + + +def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **node_attributes): + inputs = [np.array(input_data_left), np.array(input_data_right)] + onnx_node = onnx.helper.make_node(op_type, inputs=["x", "y"], outputs=["z"], **node_attributes) + + input_tensors = [ + make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) + for name, value in zip(onnx_node.input, inputs) + ] + output_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, ()) for name in onnx_node.output] + + graph = make_graph([onnx_node], "compute_graph", input_tensors, output_tensors) + model = make_model(graph, producer_name="ngraph ONNX Importer") + model.opset_import[0].version = opset + return run_model(model, inputs)[0] + + +def test_add_opset4(): + assert np.array_equal(import_and_compute("Add", 1, 2, opset=4), np.array(3, dtype=np.float32)) + + assert np.array_equal(import_and_compute("Add", [1], [2], opset=4), np.array([3], dtype=np.float32)) + + assert np.array_equal( + import_and_compute("Add", [1, 2], [3, 4], opset=4), np.array([4, 6], dtype=np.float32) + ) + + assert np.array_equal( + import_and_compute("Add", [1, 2, 3], [4, 5, 6], opset=4), np.array([5, 7, 9], dtype=np.float32) + ) + + assert np.array_equal( + import_and_compute("Add", [[1, 2, 3], [4, 5, 6]], [7, 8, 9], broadcast=1, opset=4), + np.array([[8, 10, 12], [11, 13, 15]], dtype=np.float32), + ) + + # shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. 
B is a scalar + left_operand = np.ones((2, 3, 4, 5)).astype(np.float32) + assert np.array_equal(import_and_compute("Add", left_operand, 8, broadcast=1, opset=4), left_operand + 8) + + # shape(A) = (2, 3, 4, 5), shape(B) = (5,) + left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) + right_operand = np.random.rand(5).astype(np.float32) + import_and_compute("Add", left_operand, right_operand, broadcast=1, opset=4) + + # shape(A) = (2, 3, 4, 5), shape(B) = (4, 5) + left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) + right_operand = np.random.rand(4, 5).astype(np.float32) + assert np.array_equal( + import_and_compute("Add", left_operand, right_operand, broadcast=1, opset=4), + left_operand + right_operand, + ) + + # shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1 + left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) + right_operand = np.random.rand(3, 4).astype(np.float32) + assert np.array_equal( + import_and_compute("Add", left_operand, right_operand, broadcast=1, axis=1, opset=4), + left_operand + right_operand.reshape(1, 3, 4, 1), + ) + + # shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0 + left_operand = np.ones((2, 3, 4, 5), dtype=np.float32) + right_operand = np.random.rand(2).astype(np.float32) + assert np.array_equal( + import_and_compute("Add", left_operand, right_operand, broadcast=1, axis=0, opset=4), + left_operand + right_operand.reshape(2, 1, 1, 1), + ) + + +@pytest.mark.parametrize( + "left_shape,right_shape", + [ + ((1,), (1,)), + ((256, 256, 3), (3,)), + ((5, 4), (1,)), + ((5, 4), (4,)), + ((15, 3, 5), (3, 5)), + ((15, 3, 5), (15, 1, 5)), + ((15, 3, 5), (3, 1)), + ((8, 1, 6, 1), (7, 1, 5)), + ], +) +def test_add_opset7(left_shape, right_shape): + """Test Add-7 operator, which uses numpy-style broadcasting.""" + left_input = np.ones(left_shape) + right_input = np.ones(right_shape) + assert np.array_equal(import_and_compute("Add", left_input, right_input), left_input + right_input) + + +def test_sub(): + assert 
np.array_equal(import_and_compute("Sub", 20, 1), np.array(19, dtype=np.float32)) + + assert np.array_equal(import_and_compute("Sub", [20], [1]), np.array([19], dtype=np.float32)) + + assert np.array_equal(import_and_compute("Sub", [20, 19], [1, 2]), np.array([19, 17], dtype=np.float32)) + + assert np.array_equal( + import_and_compute("Sub", [[1, 2, 3], [4, 5, 6]], [7, 8, 9], opset=6, broadcast=1), + np.array([[-6, -6, -6], [-3, -3, -3]], dtype=np.float32), + ) + + +def test_mul(): + assert np.array_equal(import_and_compute("Mul", 2, 3), np.array(6, dtype=np.float32)) + + assert np.array_equal(import_and_compute("Mul", [2], [3]), np.array([6], dtype=np.float32)) + + assert np.array_equal(import_and_compute("Mul", [2, 3], [4, 5]), np.array([8, 15], dtype=np.float32)) + + assert np.array_equal( + import_and_compute("Mul", [[1, 2, 3], [4, 5, 6]], [7, 8, 9], opset=6, broadcast=1), + np.array([[7, 16, 27], [28, 40, 54]], dtype=np.float32), + ) + + +def test_div(): + assert np.array_equal(import_and_compute("Div", 6, 3), np.array(2, dtype=np.float32)) + + assert np.array_equal(import_and_compute("Div", [6], [3]), np.array([2], dtype=np.float32)) + + assert np.array_equal(import_and_compute("Div", [6, 8], [3, 2]), np.array([2, 4], dtype=np.float32)) + + assert np.array_equal( + import_and_compute("Div", [[10, 20, 30], [40, 50, 60]], [2, 5, 6], opset=6, broadcast=1), + np.array([[5, 4, 5], [20, 10, 10]], dtype=np.float32), + ) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_convpool.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_convpool.py new file mode 100644 index 00000000000..89c0087f71e --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_convpool.py @@ -0,0 +1,402 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import pytest +from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info +from 
onnx.onnx_cpp2py_export.checker import ValidationError + +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node + + +@pytest.fixture +def ndarray_1x1x4x4(): + return np.array( + [[11, 12, 13, 14], [15, 16, 17, 18], [19, 20, 21, 22], [23, 24, 25, 26]], dtype=np.float32 + ).reshape([1, 1, 4, 4]) + + +def make_onnx_model_for_conv_op(x_shape, weights_shape, transpose=False, **attributes): + output_shape = () # We don't need output shape to be accurate for these tests + + if transpose: + node_op = "ConvTranspose" + else: + node_op = "Conv" + + node = make_node(node_op, ["X", "weight"], ["Y"], name="test_node", **attributes) + graph = make_graph( + [node], + "test_graph", + [ + make_tensor_value_info("X", onnx.TensorProto.FLOAT, x_shape), + make_tensor_value_info("weight", onnx.TensorProto.FLOAT, weights_shape), + ], + [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, output_shape)], + ) + model = make_model(graph, producer_name="ngraph ONNXImporter") + return model + + +def import_and_compute_conv(x, weights, transpose=False, **attributes): + x, weights = np.array(x), np.array(weights) + onnx_model = make_onnx_model_for_conv_op(x.shape, weights.shape, transpose=transpose, **attributes) + ng_model_function = import_onnx_model(onnx_model) + computation = get_runtime().computation(ng_model_function) + return computation(x, weights)[0] + + +def test_2d_conv(): + # x should have shape N(batch) x C x H x W + input_x = np.array( + [ + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + 
dtype=np.float32, + ).reshape(1, 1, 9, 9) + + # filter weights should have shape M x C x kH x kW + input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( + [1, 1, 3, 3] + ) + + # convolution with padding=1 should produce 9 x 9 output: + result = import_and_compute_conv(input_x, input_filter, pads=(1, 1, 1, 1), strides=(1, 1)) + assert np.array_equal( + result, + np.array( + [ + [ + [ + [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], + ] + ] + ], + dtype=np.float32, + ), + ) + + # convolution with padding=0 should produce 7 x 7 output: + result = import_and_compute_conv(input_x, input_filter, pads=(0, 0, 0, 0), strides=(1, 1)) + assert np.array_equal( + result, + np.array( + [ + [ + [ + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + [-20, -20, 20, 20, 0, 0, 0], + ] + ] + ], + dtype=np.float32, + ), + ) + + # convolution with strides=2 should produce 4 x 4 output: + result = import_and_compute_conv(input_x, input_filter, pads=(0, 0, 0, 0), strides=(2, 2)) + assert np.array_equal( + result, + np.array( + [ + [ + [ + [-20.0, 20.0, 0.0, 0.0], + [-20.0, 20.0, 0.0, 0.0], + [-20.0, 20.0, 0.0, 0.0], + [-20.0, 20.0, 0.0, 0.0], + ] + ] + ], + dtype=np.float32, + ), + ) + + # convolution with dilations=2 should produce 5 x 5 output: + result = import_and_compute_conv(input_x, input_filter, dilations=(2, 2)) + assert np.array_equal( + result, + np.array( 
+ [ + [ + [ + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + [0, 0, 20, 20, 0], + ] + ] + ], + dtype=np.float32, + ), + ) + + +def test_3d_conv(): + # x should have shape N(batch) x C x H x W x D + input_x = np.array( + [ + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ).reshape([1, 1, 9, 9, 1]) + input_x = np.broadcast_to(input_x, (1, 1, 9, 9, 4)) + + # filter weights should have shape M x C x kH x kW x kD + input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( + [1, 1, 3, 3, 1] + ) + input_filter = np.broadcast_to(input_filter, (1, 1, 3, 3, 3)) + + # convolution with padding=0 should produce 7 x 7 x 2 output: + result = import_and_compute_conv( + input_x, input_filter, dilations=(1, 1, 1), pads=(0, 0, 0, 0, 0, 0), strides=(1, 1, 1) + ) + + assert np.array_equal( + np.moveaxis(result.squeeze(), (0, 1, 2), (1, 2, 0)), + np.array( + [ + [ + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + ], + [ + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], + [-60.0, -60.0, 60.0, 60.0, 0.0, 0.0, 0.0], 
+ ], + ], + dtype=np.float32, + ), + ) + + +def test_2d_conv_transpose(): + # x should have shape N(batch) x C x H x W + input_x = np.array( + [ + [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0], + [0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ).reshape([1, 1, 9, 9]) + + # filter weights should have shape M x C x kH x kW + input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape( + [1, 1, 3, 3] + ) + + # deconvolution with padding=1 should produce 9 x 9 output: + result = import_and_compute_conv(input_x, input_filter, transpose=True, pads=(1, 1, 1, 1), strides=(1, 1)) + + assert np.array_equal( + result.reshape([9, 9]), + np.array( + [ + [-50.0, -50.0, 100.0, 100.0, -50.0, -50.0, 0.0, 0.0, 0.0], + [-75.0, -75.0, 150.0, 150.0, -75.0, -75.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0], + [-75.0, -75.0, 150.0, 150.0, -75.0, -75.0, 0.0, 0.0, 0.0], + [-50.0, -50.0, 100.0, 100.0, -50.0, -50.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ), + ) + + +def test_pad_opset_1(): + x = np.ones((2, 2), dtype=np.float32) + y = np.pad(x, pad_width=1, mode="constant") + + model = get_node_model("Pad", x, paddings=[1, 1, 1, 1]) + ng_results = run_model(model, [x]) + assert np.array_equal(ng_results, [y]) + + x = np.random.randn(1, 3, 4, 
5).astype(np.float32) + y = np.pad(x, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode="constant") + + model = get_node_model("Pad", x, mode="constant", paddings=[0, 0, 1, 3, 0, 0, 2, 4]) + ng_results = run_model(model, [x]) + assert np.array_equal(ng_results, [y]) + + # incorrect paddings rank + x = np.ones((2, 2), dtype=np.float32) + model = get_node_model("Pad", x, paddings=[0, 1, 1, 3, 1, 2]) + with pytest.raises(RuntimeError): + run_model(model, [x]) + + # no paddings arttribute + model = get_node_model("Pad", x) + with pytest.raises(ValidationError): + import_onnx_model(model) + + +def test_pad_opset_2(): + x = np.ones((2, 2), dtype=np.float32) + y = np.pad(x, pad_width=1, mode="constant") + + model = get_node_model("Pad", x, opset=2, pads=[1, 1, 1, 1]) + ng_results = run_model(model, [x]) + assert np.array_equal(ng_results, [y]) + + x = np.random.randn(1, 3, 4, 5).astype(np.float32) + y = np.pad(x, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode="constant") + + model = get_node_model("Pad", x, opset=2, mode="constant", pads=[0, 0, 1, 3, 0, 0, 2, 4]) + ng_results = run_model(model, [x]) + assert np.array_equal(ng_results, [y]) + + # incorrect pads rank + x = np.ones((2, 2), dtype=np.float32) + model = get_node_model("Pad", x, opset=2, pads=[0, 1, 1, 3, 1, 2]) + with pytest.raises(RuntimeError): + run_model(model, [x]) + + +def test_pad_negative_values_begin(): + x = np.ones((2, 2), dtype=np.float32) + + # Axis 1 begin + model = get_node_model("Pad", x, opset=2, pads=[-1, 0, 0, 0]) + ng_result = run_model(model, [x])[0] + assert np.array_equal(ng_result, np.array([[1, 1]])) + + # Axis 2 begin + model = get_node_model("Pad", x, opset=2, pads=[0, -1, 0, 0]) + ng_result = run_model(model, [x])[0] + assert np.array_equal(ng_result, np.array([[1], [1]])) + + +def test_pad_negative_values_end(): + x = np.ones((2, 2), dtype=np.float32) + + # Axis 1 end + model = get_node_model("Pad", x, opset=2, pads=[0, 0, -1, 0]) + ng_result = run_model(model, [x])[0] + assert 
np.array_equal(ng_result, np.array([[1.0, 1.0]])) + + # Axis 2 end + model = get_node_model("Pad", x, opset=2, pads=[0, 0, 0, -1]) + ng_result = run_model(model, [x])[0] + assert np.array_equal(ng_result, np.array([[1], [1]])) + + +def test_pool_average(ndarray_1x1x4x4): + x = ndarray_1x1x4x4 + node = onnx.helper.make_node( + "AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2) + ) + y = np.array([[13.5, 15.5], [21.5, 23.5]], dtype=np.float32).reshape([1, 1, 2, 2]) + ng_results = run_node(node, [x]) + assert np.array_equal(ng_results, [y]) + + node = onnx.helper.make_node( + "AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2), pads=(1, 1, 1, 1) + ) + y = np.array([[11, 12.5, 14], [17, 18.5, 20], [23, 24.5, 26]], dtype=np.float32).reshape([1, 1, 3, 3]) + ng_results = run_node(node, [x]) + assert np.array_equal(ng_results, [y]) + + +def test_pool_average_3d(ndarray_1x1x4x4): + x = np.broadcast_to(ndarray_1x1x4x4, (1, 1, 4, 4, 4)) + node = onnx.helper.make_node( + "AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2, 2), strides=(2, 2, 2) + ) + y = np.array([[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]], dtype=np.float32).reshape( + [1, 1, 2, 2, 2] + ) + ng_results = run_node(node, [x]) + assert np.array_equal(ng_results, [y]) + + +def test_pool_max(ndarray_1x1x4x4): + node = onnx.helper.make_node("MaxPool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2)) + + x = ndarray_1x1x4x4 + y = np.array([[16, 18], [24, 26]], dtype=np.float32).reshape([1, 1, 2, 2]) + + ng_results = run_node(node, [x]) + assert np.array_equal(ng_results, [y]) + + +def test_pool_global_max(ndarray_1x1x4x4): + node = onnx.helper.make_node("GlobalMaxPool", inputs=["x"], outputs=["y"]) + + x = ndarray_1x1x4x4 + y = np.array([26], dtype=np.float32).reshape([1, 1, 1, 1]) + + ng_results = run_node(node, [x]) + assert np.array_equal(ng_results, [y]) + + +def test_pool_global_average(ndarray_1x1x4x4): + node = 
onnx.helper.make_node("GlobalAveragePool", inputs=["x"], outputs=["y"]) + + x = ndarray_1x1x4x4 + y = np.array([18.5], dtype=np.float32).reshape([1, 1, 1, 1]) + + ng_results = run_node(node, [x]) + assert np.array_equal(ng_results, [y]) + + +def test_pool_global_average_3d(ndarray_1x1x4x4): + x = np.broadcast_to(ndarray_1x1x4x4, (1, 1, 4, 4, 4)) + + node = onnx.helper.make_node("GlobalAveragePool", inputs=["x"], outputs=["y"]) + y = np.array([18.5], dtype=np.float32).reshape([1, 1, 1, 1, 1]) + ng_results = run_node(node, [x]) + assert np.array_equal(ng_results, [y]) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_logical.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_logical.py new file mode 100644 index 00000000000..9d8530a9c5f --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_logical.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import pytest + +from tests_compatibility.test_onnx.utils import run_node + + +@pytest.mark.parametrize( + "onnx_op, numpy_func, data_type", + [ + pytest.param("And", np.logical_and, np.bool), + pytest.param("Or", np.logical_or, np.bool), + pytest.param("Xor", np.logical_xor, np.bool), + pytest.param("Equal", np.equal, np.int32), + pytest.param("Greater", np.greater, np.int32), + pytest.param("Less", np.less, np.int32), + ], +) +def test_logical(onnx_op, numpy_func, data_type): + node = onnx.helper.make_node(onnx_op, inputs=["A", "B"], outputs=["C"], broadcast=1) + + input_a = np.array([[0, 1, -1], [0, 1, -1], [0, 1, -1]]).astype(data_type) + input_b = np.array([[0, 0, 0], [1, 1, 1], [-1, -1, -1]]).astype(data_type) + expected_output = numpy_func(input_a, input_b) + ng_results = run_node(node, [input_a, input_b], opset_version=4) + assert np.array_equal(ng_results, [expected_output]) + + input_a = np.array([[0, 1, -1], [0, 1, -1], [0, 1, -1]]).astype(data_type) + 
input_b = np.array(1).astype(data_type) + expected_output = numpy_func(input_a, input_b) + ng_results = run_node(node, [input_a, input_b], opset_version=4) + assert np.array_equal(ng_results, [expected_output]) + + +def test_logical_not(): + input_data = np.array([[False, True, True], [False, True, False], [False, False, True]]) + expected_output = np.logical_not(input_data) + + node = onnx.helper.make_node("Not", inputs=["X"], outputs=["Y"]) + ng_results = run_node(node, [input_data]) + assert np.array_equal(ng_results, [expected_output]) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_matmul.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_matmul.py new file mode 100644 index 00000000000..e32bd7cce6f --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_matmul.py @@ -0,0 +1,155 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info +import pytest + +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils import import_onnx_model + + +def make_onnx_model_for_matmul_op(input_left, input_right): + output_shape = np.matmul(input_left, input_right).shape + node = make_node("MatMul", ["X", "Y"], ["Z"], name="test_node") + graph = make_graph( + [node], + "test_graph", + [ + make_tensor_value_info("X", onnx.TensorProto.FLOAT, input_left.shape), + make_tensor_value_info("Y", onnx.TensorProto.FLOAT, input_right.shape), + ], + [make_tensor_value_info("Z", onnx.TensorProto.FLOAT, output_shape)], + ) + model = make_model(graph, producer_name="ngraph ONNXImporter") + return model + + +def import_and_compute_matmul(input_left, input_right): + input_data_left = np.array(input_left) + input_data_right = np.array(input_right) + onnx_model = make_onnx_model_for_matmul_op(input_data_left, input_data_right) + transformer = 
get_runtime() + ng_model_function = import_onnx_model(onnx_model) + computation = transformer.computation(ng_model_function) + return computation(input_data_left, input_data_right)[0] + + +def numpy_gemm(input_a, input_b, input_c, alpha=1, beta=1, trans_a=False, trans_b=False, broadcast=False): + input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c) + if trans_a: + input_a = input_a.T + if trans_b: + input_b = input_b.T + + return (alpha * np.dot(input_a, input_b)) + (beta * input_c) + + +def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs): + input_a_for_output = input_a + input_b_for_output = input_b + if kwargs.get("transA"): + input_a_for_output = input_a.T + if kwargs.get("transB"): + input_b_for_output = input_b.T + + output_shape = np.dot(input_a_for_output, input_b_for_output).shape + node = make_node("Gemm", ["A", "B", "C"], ["Y"], name="test_node", **kwargs) + graph = make_graph( + [node], + "test_graph", + [ + make_tensor_value_info("A", onnx.TensorProto.FLOAT, input_a.shape), + make_tensor_value_info("B", onnx.TensorProto.FLOAT, input_b.shape), + make_tensor_value_info("C", onnx.TensorProto.FLOAT, input_c.shape), + ], + [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, output_shape)], + ) + model = make_model(graph, producer_name="ngraph ONNXImporter") + return model + + +def import_and_compute_gemm(input_a, input_b, input_c, **kwargs): + input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c) + + if kwargs.get("trans_a"): + kwargs["transA"] = kwargs["trans_a"] + del kwargs["trans_a"] + + if kwargs.get("trans_b"): + kwargs["transB"] = kwargs["trans_b"] + del kwargs["trans_b"] + + onnx_model = make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs) + transformer = get_runtime() + ng_model_function = import_onnx_model(onnx_model) + computation = transformer.computation(ng_model_function) + return computation(input_a, input_b, input_c)[0] + + +@pytest.mark.parametrize( + 
"data, description", + [ + pytest.param(([1, 2], [1, 3]), "vector and vector 1"), + (([1, 2, 3], [[4], [5], [6]]), "vector and vector 2"), + (([[1, 2, 3]], [1, 2, 3]), "vector and vector 3"), + (([1, 2, 3], [[4, 5], [6, 7], [8, 9]]), "vector and matrix"), + (([[1, 2, 3], [4, 5, 6]], [[7], [8], [9]]), "matrix and vector"), + (([[1, 2], [3, 4]], [[5, 6], [7, 8]]), "matrix and matrix 1"), + (([[1, 2, 3], [4, 5, 6]], [[7, 8], [9, 10], [11, 12]]), "matrix and matrix 2"), + (([[1, 2], [3, 4], [5, 6]], [[7, 8, 9], [10, 11, 12]]), "matrix and matrix 3") + ], +) +def test_op_matmul(data, description): + assert np.allclose(import_and_compute_matmul(*data), np.matmul(*data)) + + +def test_op_matmul_3d(): + # 3d tensor @ 3d tensor + data = ([[[1, 2], [3, 4]], [[1, 2], [3, 4]]], [[[5, 6], [7, 8]], [[5, 6], [7, 8]]]) + assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data)) + + data = (np.ones((5, 2, 3)), (np.ones((5, 3, 2)) + 2)) + assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data)) + + +@pytest.mark.parametrize( + "data, kwargs, description", + [ + pytest.param(([1, 2], [1, 3], [1, 4]), {}, "vectors"), + pytest.param(([1, 2], [1, 3], 1), {}, "vectors and scalar"), + pytest.param(([1, 2], [1, 3], [1]), {}, "vectors and identity vector"), + pytest.param(([1, 2], [1, 3], [1, 4]), {"alpha": 7.0, "beta": 9.0}, + "vectors with alpha and beta"), + pytest.param(([1, 2, 3, 4], [1, 3, 5, 7], [1, 4]), {"alpha": 7.0, "beta": 9.0}, + "longer vectors with alpha and beta") + ], +) +def test_gemm(data, kwargs, description): + assert np.allclose(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) + + +@pytest.mark.parametrize( + "data, kwargs, description", + [ + pytest.param(([1, 2], [1, 3], [1, 4]), {"trans_a": True, "trans_b": True}, + "vectors with trans_a/trans_b"), + pytest.param(([[1, 2], [1, 2]], [[1, 3], [1, 3]], [4, 1]), + {"trans_a": True, "trans_b": True, "alpha": 7.0, "beta": 9.0}, + "matrices and vector with trans_b and 
alpha/beta"), + pytest.param(([[1, 2]], [[1, 3]], 1), {"trans_b": True, "alpha": 7.0, "beta": 9.0}, + "matrices and scalar with trans_b and alpha/beta"), + pytest.param(([[1], [2]], [[1], [3]], 1), {"trans_a": True, "alpha": 7.0, "beta": 9.0}, + "matrices and scalar with trans_a and alpha/beta"), + ], +) +def test_gemm_transpositions(data, kwargs, description): + assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) + + +def test_gemm_flatten(): + # input_a.shape is (4,1) + data = ([[1], [2], [3], [4]], [1, 3, 5, 7], [1, 4]) + kwargs = {"alpha": 7.0, "beta": 9.0, "trans_a": True} + assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py new file mode 100644 index 00000000000..a73eba85b7f --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py @@ -0,0 +1,109 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import pytest + +from tests_compatibility.test_onnx.utils import run_node + + +def import_and_compute(op_type, input_data, **node_attrs): + data_inputs = [np.array(input_data)] + node = onnx.helper.make_node(op_type, inputs=["x"], outputs=["y"], **node_attrs) + return run_node(node, data_inputs).pop() + + +def assert_onnx_import_equals_callable(onnx_op_type, python_function, data, **kwargs): + data = np.array(data, dtype=np.float32) + assert np.allclose(import_and_compute(onnx_op_type, data, **kwargs), python_function(data, **kwargs)) + + +def test_sigmoid(): + def sigmoid(x): + return 1 / (1 + np.exp(-x)) + + assert_onnx_import_equals_callable("Sigmoid", sigmoid, [-2, -1.0, 0.0, 1.0, 2.0]) + assert_onnx_import_equals_callable("Sigmoid", sigmoid, [0.0]) + assert_onnx_import_equals_callable("Sigmoid", sigmoid, [-2, 
-1.0, 0.0, 1.0, 2.0]) + + +def test_tanh(): + assert_onnx_import_equals_callable("Tanh", np.tanh, [-2, -1.0, 0.0, 1.0, 2.0]) + assert_onnx_import_equals_callable("Tanh", np.tanh, [0.0]) + assert_onnx_import_equals_callable("Tanh", np.tanh, [-2, -1.0, 0.0, 1.0, 2.0]) + + +def test_relu(): + def relu(x): + return np.maximum(x, 0) + + assert_onnx_import_equals_callable("Relu", relu, [-2, -1.0, 0.0, 1.0, 2.0]) + assert_onnx_import_equals_callable("Relu", relu, [0.0]) + assert_onnx_import_equals_callable("Relu", relu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1]) + assert_onnx_import_equals_callable("Relu", relu, [[1, 2, 3], [4, 5, 6]]) + assert_onnx_import_equals_callable("Relu", relu, [[-3, -2, -1], [1, 2, 3]]) + + +def test_leaky_relu(): + def leaky_relu(x, alpha=0.01): + return np.maximum(alpha * x, x) + + assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [-2, -1.0, 0.0, 1.0, 2.0], alpha=0.5) + assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [0.0]) + assert_onnx_import_equals_callable( + "LeakyRelu", leaky_relu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], alpha=1.0 + ) + assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [[1, 2, 3], [4, 5, 6]], alpha=0.2) + assert_onnx_import_equals_callable("LeakyRelu", leaky_relu, [[-3, -2, -1], [1, 2, 3]]) + + +@pytest.mark.parametrize( + "x, slope", + [ + ([-2, -1.0, 0.0, 1.0, 2.0], 0.5), + ([0.0], 1), + ([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1), + ([[1, 2, 3], [4, 5, 6]], 0.5), + ([[-3, -2, -1], [1, 2, 3]], 1), + ] +) +def test_parametric_relu(x, slope): + def parametic_relu(x, slope): + return np.where(x < 0, slope * x, x) + + x, slope = np.array(x).astype(np.float32), np.array(slope).astype(np.float32) + expected_output = parametic_relu(x, slope) + node = onnx.helper.make_node("PRelu", inputs=["x", "slope"], outputs=["y"]) + output = run_node(node, [x, slope]).pop() + assert np.allclose(output, expected_output) + + +def test_selu(): + # f(x) = gamma * (alpha * exp(x) - alpha) for x <= 0, y = 
gamma * x for x > 0 + def selu(x, alpha=1.67326319217681884765625, gamma=1.05070102214813232421875): + return np.where(x <= 0, gamma * (alpha * np.exp(x) - alpha), gamma * x) + + assert_onnx_import_equals_callable("Selu", selu, [-2, -1.0, 0.0, 1.0, 2.0]) + assert_onnx_import_equals_callable("Selu", selu, [0.0]) + assert_onnx_import_equals_callable("Selu", selu, [-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1]) + assert_onnx_import_equals_callable("Selu", selu, [[1, 2, 3], [4, 5, 6]]) + assert_onnx_import_equals_callable("Selu", selu, [-2, -1.0, 0.0, 1.0, 2.0], gamma=0.5, alpha=0.5) + + +@pytest.mark.parametrize( + "data, alpha_value", + [ + pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 1.0), + pytest.param([0.0], 1.0), + pytest.param([-0.9, -0.8, -0.7, -0.4, -0.3, -0.2, -0.1], 1.0), + pytest.param([[1, 2, 3], [4, 5, 6]], 1.0), + pytest.param([-2, -1.0, 0.0, 1.0, 2.0], 0.5) + ] +) +def test_elu(data, alpha_value): + # f(x) = alpha * (exp(x) - 1) for x < 0, f(x) = x for x >= 0 + def elu(x, alpha): + return np.where(x < 0, alpha * (np.exp(x) - 1), x) + + assert_onnx_import_equals_callable("Elu", elu, data, alpha=alpha_value) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reduction.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reduction.py new file mode 100644 index 00000000000..892e560fcb8 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reduction.py @@ -0,0 +1,368 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import pytest + +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils import ( + run_node, + import_onnx_model, +) + +reduce_data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) +reduce_axis_parameters = [ + (0,), + (1,), + (2,), + (0, 1), + (0, 2), + (1, 2), + (0, 1, 2) +] + +reduce_operation_parameters_as_attr = [ + ("ReduceMax", 
np.max), + ("ReduceMin", np.min), + ("ReduceMean", np.mean), + ("ReduceProd", np.prod) +] + +reduce_operation_parameters_as_const = [ + ("ReduceSum", np.sum), +] + + +def import_and_compute(op_type, input_data, **node_attrs): + data_inputs = [np.array(input_data)] + node = onnx.helper.make_node(op_type, inputs=["x"], outputs=["y"], **node_attrs) + return run_node(node, data_inputs).pop() + + +def import_and_compute_with_axes_as_const(op_type, data, axes, **node_attrs): + data_input = np.array(data) + axes_input = np.array(axes, dtype=int) + axes_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["const_axes"], + value=onnx.helper.make_tensor( + name="const_axes", + data_type=onnx.TensorProto.INT64, + dims=axes_input.shape, + vals=axes_input.flatten(), + ), + ) + node = onnx.helper.make_node( + op_type, inputs=["x", "const_axes"], outputs=["y"], **node_attrs + ) + graph = onnx.helper.make_graph( + [axes_const_node, node], + "test_graph", + [onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, data_input.shape)], + [onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, ())], + ) + + model = onnx.helper.make_model(graph, producer_name="ngraph ONNX Importer") + model.opset_import[0].version = 13 + ng_model_function = import_onnx_model(model) + runtime = get_runtime() + computation = runtime.computation(ng_model_function) + return computation(data_input)[0] + + +@pytest.mark.parametrize("operation, ref_operation", + reduce_operation_parameters_as_attr + reduce_operation_parameters_as_const) +def test_reduce_operation_keepdims_none_axes(operation, ref_operation): + assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=True), + ref_operation(reduce_data, keepdims=True)) + + +@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_attr) +@pytest.mark.parametrize("axes", reduce_axis_parameters) +def test_reduce_operation_keepdims_with_axes_as_attr(operation, ref_operation, axes): + 
assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=True), + ref_operation(reduce_data, keepdims=True, axis=axes)) + + +@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_const) +@pytest.mark.parametrize("axes", reduce_axis_parameters) +def test_reduce_operation_keepdims_with_axes_as_const(operation, ref_operation, axes): + assert np.array_equal(import_and_compute_with_axes_as_const(operation, reduce_data, axes, keepdims=True), + ref_operation(reduce_data, keepdims=True, axis=axes)) + + +@pytest.mark.parametrize("axes", [ + None, + (0,), + (1,), + (2,), + (0, 1), + (0, 2), + (1, 2), + (0, 1, 2)]) +@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_attr) +def test_reduce_operation_no_keepdims_axes_as_attr(operation, ref_operation, axes): + if axes: + assert np.array_equal(import_and_compute(operation, reduce_data, axes=axes, keepdims=False), + ref_operation(reduce_data, keepdims=False, axis=axes)) + else: + assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=False), + ref_operation(reduce_data, keepdims=False)) + + +@pytest.mark.parametrize("axes", [ + None, + (0,), + (1,), + (2,), + (0, 1), + (0, 2), + (1, 2), + (0, 1, 2)]) +@pytest.mark.parametrize("operation, ref_operation", reduce_operation_parameters_as_const) +def test_reduce_operation_no_keepdims_axes_as_const(operation, ref_operation, axes): + if axes: + assert np.array_equal(import_and_compute_with_axes_as_const(operation, + reduce_data, + axes, + keepdims=False), + ref_operation(reduce_data, keepdims=False, axis=axes)) + else: + assert np.array_equal(import_and_compute(operation, reduce_data, keepdims=False), + ref_operation(reduce_data, keepdims=False)) + + +@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) +def test_reduce_l1(reduction_axes): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(-100, 100, shape).astype(np.float32) + + 
expected = np.sum(np.abs(input_data), keepdims=True, axis=reduction_axes) + node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], axes=reduction_axes) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + expected = np.sum(np.abs(input_data), keepdims=False, axis=reduction_axes) + node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + +def test_reduce_l1_default_axes(): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(-100, 100, shape).astype(np.float32) + + expected = np.sum(np.abs(input_data), keepdims=True) + node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"]) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + expected = np.array(np.sum(np.abs(input_data), keepdims=False)) + node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + +@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) +def test_reduce_l2(reduction_axes): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(-100, 100, shape).astype(np.float32) + + expected = np.sqrt(np.sum(np.square(input_data), keepdims=True, axis=reduction_axes)) + node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], axes=reduction_axes) + raw_result = run_node(node, [input_data]) + ng_result = np.array(raw_result.pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert 
np.allclose(expected, ng_result) + + expected = np.sqrt(np.sum(np.square(input_data), keepdims=False, axis=reduction_axes)) + node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + +def test_reduce_l2_default_axes(): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(-100, 100, shape).astype(np.float32) + + expected = np.sqrt(np.sum(np.square(input_data), keepdims=True)) + node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"]) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + expected = np.array(np.sqrt(np.sum(np.square(input_data), keepdims=False))) + node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + +@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) +def test_reduce_log_sum(reduction_axes): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(0, 1, shape).astype(np.float32) + + expected = np.log(np.sum(input_data, keepdims=True, axis=reduction_axes)) + node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], axes=reduction_axes) + ng_result = run_node(node, [input_data]).pop() + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + expected = np.log(np.sum(input_data, keepdims=False, axis=reduction_axes)) + node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes) + ng_result = run_node(node, [input_data]).pop() + assert np.array_equal(expected.shape, 
ng_result.shape) + assert np.allclose(expected, ng_result) + + +def test_reduce_log_sum_default_axes(): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(0, 1, shape).astype(np.float32) + + expected = np.log(np.sum(input_data, keepdims=True)) + node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"]) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + expected = np.log(np.sum(input_data, keepdims=False)) + node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + +def test_reduce_log_sum_exp(): + def logsumexp(data, axis=None, keepdims=True): + return np.log(np.sum(np.exp(data), axis=axis, keepdims=keepdims)) + + data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) + + assert np.array_equal(import_and_compute("ReduceLogSumExp", data), logsumexp(data, keepdims=True)) + assert np.array_equal( + import_and_compute("ReduceLogSumExp", data, keepdims=0), logsumexp(data, keepdims=False) + ) + + assert np.array_equal( + import_and_compute("ReduceLogSumExp", data, axes=(1,)), logsumexp(data, keepdims=True, axis=(1,)) + ) + assert np.array_equal( + import_and_compute("ReduceLogSumExp", data, axes=(1,), keepdims=0), + logsumexp(data, keepdims=False, axis=(1,)), + ) + + assert np.array_equal( + import_and_compute("ReduceLogSumExp", data, axes=(0, 2)), logsumexp(data, keepdims=True, axis=(0, 2)) + ) + assert np.array_equal( + import_and_compute("ReduceLogSumExp", data, axes=(0, 2), keepdims=0), + logsumexp(data, keepdims=False, axis=(0, 2)), + ) + + assert np.array_equal( + import_and_compute("ReduceLogSumExp", data, axes=(0, 1, 2)), + logsumexp(data, keepdims=True, axis=(0, 1, 2)), + ) + assert 
np.array_equal( + import_and_compute("ReduceLogSumExp", data, axes=(0, 1, 2), keepdims=0), + logsumexp(data, keepdims=False, axis=(0, 1, 2)), + ) + + +@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)]) +def test_reduce_sum_square(reduction_axes): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(-100, 100, shape).astype(np.float32) + + expected = np.sum(np.square(input_data), keepdims=True, axis=reduction_axes) + node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], axes=reduction_axes) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + expected = np.sum(np.square(input_data), keepdims=False, axis=reduction_axes) + node = onnx.helper.make_node( + "ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes + ) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + +def test_reduce_sum_square_default_axes(): + shape = [2, 4, 3, 2] + np.random.seed(133391) + input_data = np.random.uniform(-100, 100, shape).astype(np.float32) + + expected = np.sum(np.square(input_data), keepdims=True) + node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"]) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + expected = np.sum(np.square(input_data), keepdims=False) + node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0) + ng_result = np.array(run_node(node, [input_data]).pop()) + assert np.array_equal(expected.shape, ng_result.shape) + assert np.allclose(expected, ng_result) + + +def test_reduce_argmin(): + def argmin(ndarray, axis, keepdims=False): + res = np.argmin(ndarray, axis=axis) + if keepdims: + res = 
np.expand_dims(res, axis=axis) + return res + + data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) + + assert np.array_equal(import_and_compute("ArgMin", data, axis=0), argmin(data, keepdims=True, axis=0)) + assert np.array_equal( + import_and_compute("ArgMin", data, axis=0, keepdims=0), argmin(data, keepdims=False, axis=0) + ) + assert np.array_equal(import_and_compute("ArgMin", data, axis=1), argmin(data, keepdims=True, axis=1)) + assert np.array_equal( + import_and_compute("ArgMin", data, axis=1, keepdims=0), argmin(data, keepdims=False, axis=1) + ) + assert np.array_equal(import_and_compute("ArgMin", data, axis=2), argmin(data, keepdims=True, axis=2)) + assert np.array_equal( + import_and_compute("ArgMin", data, axis=2, keepdims=0), argmin(data, keepdims=False, axis=2) + ) + + +def test_reduce_argmax(): + def argmax(ndarray, axis, keepdims=False): + res = np.argmax(ndarray, axis=axis) + if keepdims: + res = np.expand_dims(res, axis=axis) + return res + + data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32) + + assert np.array_equal(import_and_compute("ArgMax", data, axis=0), argmax(data, keepdims=True, axis=0)) + assert np.array_equal( + import_and_compute("ArgMax", data, axis=0, keepdims=0), argmax(data, keepdims=False, axis=0) + ) + assert np.array_equal(import_and_compute("ArgMax", data, axis=1), argmax(data, keepdims=True, axis=1)) + assert np.array_equal( + import_and_compute("ArgMax", data, axis=1, keepdims=0), argmax(data, keepdims=False, axis=1) + ) + assert np.array_equal(import_and_compute("ArgMax", data, axis=2), argmax(data, keepdims=True, axis=2)) + assert np.array_equal( + import_and_compute("ArgMax", data, axis=2, keepdims=0), argmax(data, keepdims=False, axis=2) + ) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reshape.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reshape.py new file mode 100644 index 
00000000000..28be8a7a5be --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_reshape.py @@ -0,0 +1,415 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import pytest +from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info + +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils import ( + all_arrays_equal, + get_node_model, + import_onnx_model, + run_model, + run_node, +) +from tests_compatibility import (xfail_issue_35927, + xfail_issue_44854, + xfail_issue_44858, + xfail_issue_44968) + + +def test_reshape(): + input_data = np.arange(2560, dtype=np.int32).reshape([16, 4, 4, 10]) + reshape_node = onnx.helper.make_node( + "Reshape", inputs=["x"], outputs=["y"], shape=(256, 10) + ) + expected_output = input_data.reshape([256, 10]) + + ng_results = run_node(reshape_node, [input_data], opset_version=4) + assert np.array_equal(ng_results, [expected_output]) + + +def test_reshape_opset5(): + original_shape = [2, 3, 4] + test_cases = { + "reordered_dims": np.array([4, 2, 3], dtype=np.int64), + "reduced_dims": np.array([3, 8], dtype=np.int64), + "extended_dims": np.array([3, 2, 2, 2], dtype=np.int64), + "one_dim": np.array([24], dtype=np.int64), + "negative_dim": np.array([6, -1, 2], dtype=np.int64), + } + input_data = np.random.random_sample(original_shape).astype(np.float32) + + for _, shape in test_cases.items(): + const_node = make_node( + "Constant", + inputs=[], + outputs=["const_shape"], + value=onnx.helper.make_tensor( + name="const_tensor", + data_type=onnx.TensorProto.INT64, + dims=shape.shape, + vals=shape.flatten(), + ), + ) + reshape_node = onnx.helper.make_node( + "Reshape", inputs=["data", "const_shape"], outputs=["reshaped"] + ) + + graph = make_graph( + [const_node, reshape_node], + "test_graph", + [make_tensor_value_info("data", onnx.TensorProto.FLOAT, input_data.shape)], + 
[make_tensor_value_info("reshaped", onnx.TensorProto.FLOAT, ())], + ) + + model = make_model(graph, producer_name="ngraph ONNX Importer") + model.opset_import[0].version = 5 + ng_model_function = import_onnx_model(model) + runtime = get_runtime() + computation = runtime.computation(ng_model_function) + ng_results = computation(input_data) + expected_output = np.reshape(input_data, shape) + assert np.array_equal(ng_results[0], expected_output) + + +@pytest.mark.xfail(reason="RuntimeError: Reshape z has dynamic second input!") +def test_reshape_opset5_param_err(): + original_shape = [2, 3, 4] + output_shape = np.array([4, 2, 3], dtype=np.int32) + input_data = np.random.random_sample(original_shape).astype(np.float32) + reshape_node = onnx.helper.make_node("Reshape", inputs=["x", "y"], outputs=["z"]) + ng_result = run_node(reshape_node, [input_data, output_shape], opset_version=5) + assert ng_result[0].shape == output_shape + + +@pytest.mark.parametrize( + "axis,expected_output", + [ + (0, np.arange(120).reshape(1, 120)), + (1, np.arange(120).reshape(2, 60)), + (2, np.arange(120).reshape(6, 20)), + (3, np.arange(120).reshape(24, 5)), + (4, np.arange(120).reshape(120, 1)), + ], +) +def test_flatten(axis, expected_output): + data = np.arange(120, dtype=np.int32).reshape([2, 3, 4, 5]) + node = onnx.helper.make_node("Flatten", inputs=["x"], outputs=["y"], axis=axis) + ng_results = run_node(node, [data]) + assert np.array_equal(ng_results, [expected_output]) + + +def test_flatten_exception(): + data = np.arange(120).reshape([2, 3, 4, 5]) + node = onnx.helper.make_node("Flatten", inputs=["x"], outputs=["y"], axis=5) + + with pytest.raises(RuntimeError): + run_node(node, [data]) + + +def test_transpose(): + data = np.arange(120, dtype=np.int32).reshape([2, 3, 4, 5]) + + node = onnx.helper.make_node("Transpose", inputs=["x"], outputs=["y"]) + expected_output = data.T + ng_results = run_node(node, [data]) + assert np.array_equal(ng_results, [expected_output]) + + node = 
onnx.helper.make_node( + "Transpose", inputs=["x"], outputs=["y"], perm=(3, 1, 0, 2) + ) + expected_output = np.transpose(data, axes=(3, 1, 0, 2)) + ng_results = run_node(node, [data]) + assert np.array_equal(ng_results, [expected_output]) + + +@xfail_issue_35927 +def test_slice_opset1(): + data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + + expected_output = np.array([[5, 6, 7]]) + model = get_node_model("Slice", data, axes=[0, 1], starts=[1, 0], ends=[2, 3]) + ng_results = run_model(model, [data]) + assert np.array_equal(ng_results, [expected_output]) + + expected_output = np.array([[2, 3, 4]]) + model = get_node_model("Slice", data, starts=[0, 1], ends=[-1, 1000]) + ng_results = run_model(model, [data]) + assert np.array_equal(ng_results, [expected_output]) + + data = np.random.randn(20, 10, 5).astype(np.float32) + expected_output = data[0:3, 0:10] + model = get_node_model("Slice", data, axes=[0, 1], starts=[0, 0], ends=[3, 10]) + ng_results = run_model(model, [data]) + assert np.array_equal(ng_results, [expected_output]) + + # default axes + data = np.random.randn(20, 10, 5).astype(np.float32) + expected_output = data[:, :, 3:4] + model = get_node_model("Slice", data, starts=[0, 0, 3], ends=[20, 10, 4]) + ng_results = run_model(model, [data]) + assert np.array_equal(ng_results, [expected_output]) + + # end out of bounds + data = np.random.randn(20, 10, 5).astype(np.float32) + expected_output = data[:, 1:1000] + model = get_node_model("Slice", data, axes=[1], starts=[1], ends=[1000]) + ng_results = run_model(model, [data]) + assert np.array_equal(ng_results, [expected_output]) + + # negative value + data = np.random.randn(20, 10, 5).astype(np.float32) + expected_output = data[:, 0:-1] + model = get_node_model("Slice", data, axes=[1], starts=[0], ends=[-1]) + ng_results = run_model(model, [data]) + assert np.array_equal(ng_results, [expected_output]) + + # start ouf of bounds + data = np.random.randn(20, 10, 5).astype(np.float32) + expected_output = data[:, 
1000:1000] + model = get_node_model("Slice", data, axes=[1], starts=[1000], ends=[1000]) + ng_results = run_model(model, [data]) + assert np.array_equal(ng_results, [expected_output]) + + +def test_concat(): + a = np.array([[1, 2], [3, 4]], dtype=np.int32) + b = np.array([[5, 6]], dtype=np.int32) + + node = onnx.helper.make_node("Concat", inputs=["x"], outputs=["z"], axis=0) + ng_results = run_node(node, [a]) + assert np.array_equal(ng_results, [a]) + + expected_output = np.concatenate((a, b), axis=0) + node = onnx.helper.make_node("Concat", inputs=["x", "y"], outputs=["z"], axis=0) + ng_results = run_node(node, [a, b]) + assert np.array_equal(ng_results, [expected_output]) + + a = np.array([[1, 2], [3, 4]], dtype=np.int32) + b = np.array([[5, 6]], dtype=np.int32).T + expected_output = np.concatenate((a, b), axis=1) + node = onnx.helper.make_node("Concat", inputs=["x", "y"], outputs=["z"], axis=1) + ng_results = run_node(node, [a, b]) + assert np.array_equal(ng_results, [expected_output]) + + test_cases = { + "1d": ([1, 2], [3, 4]), + "2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]), + "3d": ( + [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + [[[9, 10], [11, 12]], [[13, 14], [15, 16]]], + ), + } + + for _, values in test_cases.items(): + values = [np.asarray(v) for v in values] + for i in range(len(values[0].shape)): + in_args = ["value" + str(k) for k in range(len(values))] + node = onnx.helper.make_node( + "Concat", + inputs=list(in_args), + outputs=["output"], + axis=i, + ) + expected_output = np.concatenate(values, i) + ng_results = run_node(node, np.array(values, dtype=np.int32)) + assert np.array_equal(ng_results, [expected_output]) + + +@xfail_issue_44968 +def test_squeeze(): + data = np.arange(6, dtype=np.int32).reshape([1, 2, 3, 1]) + expected_output = data.reshape([2, 3]) + + axes = np.array([0, 3]).astype(np.int64) + node = onnx.helper.make_node("Squeeze", inputs=["x", "axes"], outputs=["y"]) + ng_results = run_node(node, [data, axes]) + assert 
np.array_equal(ng_results, [expected_output]) + + data = np.random.randn(1, 3, 4, 5).astype(np.float32) + expected_output = np.squeeze(data, axis=0) + axes = np.array([0]).astype(np.int64) + node = onnx.helper.make_node("Squeeze", inputs=["x", "axes"], outputs=["y"]) + ng_results = run_node(node, [data, axes]) + assert np.array_equal(ng_results, [expected_output]) + + +@xfail_issue_44858 +def test_unsqueeze(): + data = np.random.randn(3, 4, 5).astype(np.float32) + expected_output = np.expand_dims(data, axis=0) + axes = np.array([0]).astype(np.int64) + node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"]) + ng_results = run_node(node, [data, axes]) + assert np.array_equal(ng_results, [expected_output]) + + expected_output = np.reshape(data, [1, 3, 4, 5, 1]) + axes = np.array([0, 4]).astype(np.int64) + node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"]) + ng_results = run_node(node, [data, axes]) + assert np.array_equal(ng_results, [expected_output]) + + expected_output = np.reshape(data, [1, 3, 1, 4, 5]) + axes = np.array([0, 2]).astype(np.int64) + node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"]) + ng_results = run_node(node, [data, axes]) + assert np.array_equal(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "node, expected_output", + [ + # Split into 2 equal parts along axis=0 + ( + onnx.helper.make_node("Split", inputs=["x"], outputs=["y", "z"], axis=0), + [ + np.array([[0, 1, 2, 3]], dtype=np.int32), + np.array([[4, 5, 6, 7]], dtype=np.int32), + ], + ), + # Default, split along axis=0 into 2 equal parts + ( + onnx.helper.make_node("Split", inputs=["x"], outputs=["y", "z"]), + [ + np.array([[0, 1, 2, 3]], dtype=np.int32), + np.array([[4, 5, 6, 7]], dtype=np.int32), + ], + ), + # Split into 2 equal parts along axis=1 + ( + onnx.helper.make_node("Split", inputs=["x"], outputs=["a", "b"], axis=1), + [ + np.array([[0, 1], [4, 5]], dtype=np.int32), + np.array([[2, 3], 
[6, 7]], dtype=np.int32), + ], + ), + # Split into 4 equal parts along axis=1 + ( + onnx.helper.make_node( + "Split", inputs=["x"], outputs=["a", "b", "c", "d"], axis=1 + ), + [ + np.array([[0], [4]], dtype=np.int32), + np.array([[1], [5]], dtype=np.int32), + np.array([[2], [6]], dtype=np.int32), + np.array([[3], [7]], dtype=np.int32), + ], + ), + ], +) +def test_split_2d(node, expected_output): + data = np.arange(8, dtype=np.int32).reshape(2, 4) + ng_results = run_node(node, [data]) + assert all_arrays_equal(ng_results, expected_output) + + +@xfail_issue_44854 +def test_split_2d_splits_input(): + data = np.arange(8, dtype=np.int32).reshape(2, 4) + splits = np.array([3, 1]).astype(np.int64) + node = onnx.helper.make_node( + "Split", inputs=["x", "splits"], outputs=["a", "b"], axis=1 + ) + expected_outputs = [ + np.array([[0, 1, 2], [4, 5, 6]], dtype=np.int32), + np.array([[3], [7]], dtype=np.int32), + ] + ng_results = run_node(node, [data, splits]) + assert all_arrays_equal(ng_results, expected_outputs) + + +@xfail_issue_44854 +def test_split_1d(): + # 1D + data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32) + + node = onnx.helper.make_node("Split", inputs=["input"], outputs=["z", "w"], axis=0) + expected_outputs = [ + np.array([1.0, 2.0, 3.0]).astype(np.float32), + np.array([4.0, 5.0, 6.0]).astype(np.float32), + ] + ng_results = run_node(node, [data]) + assert all_arrays_equal(ng_results, expected_outputs) + + splits = np.array([2, 3, 1]).astype(np.int64) + node = onnx.helper.make_node( + "Split", inputs=["input", "splits"], outputs=["y", "z", "w"], axis=0 + ) + expected_outputs = [ + np.array([1.0, 2.0]).astype(np.float32), + np.array([3.0, 4.0, 5.0]).astype(np.float32), + np.array([6.0]).astype(np.float32), + ] + ng_results = run_node(node, [data, splits]) + assert all_arrays_equal(ng_results, expected_outputs) + + # Default values + data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32) + + node = onnx.helper.make_node("Split", 
inputs=["input"], outputs=["y", "z", "w"]) + expected_outputs = [ + np.array([1.0, 2.0]).astype(np.float32), + np.array([3.0, 4.0]).astype(np.float32), + np.array([5.0, 6.0]).astype(np.float32), + ] + ng_results = run_node(node, [data]) + assert all_arrays_equal(ng_results, expected_outputs) + + splits = np.array([2, 4]).astype(np.int64) + node = onnx.helper.make_node( + "Split", inputs=["input", "splits"], outputs=["y", "z"], split=[2, 4] + ) + expected_outputs = [ + np.array([1.0, 2.0]).astype(np.float32), + np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32), + ] + ng_results = run_node(node, [data, splits]) + assert all_arrays_equal(ng_results, expected_outputs) + + +def test_depth_to_space(): + b, c, h, w = shape = (2, 8, 3, 3) + blocksize = 2 + data = np.random.random_sample(shape).astype(np.float32) + tmp = np.reshape(data, [b, blocksize, blocksize, c // (blocksize ** 2), h, w]) + tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) + expected_output = np.reshape( + tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize] + ) + + node = onnx.helper.make_node( + "DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blocksize + ) + ng_results = run_node(node, [data]) + assert np.array_equal(ng_results, [expected_output]) + + # (1, 4, 2, 3) input tensor + data = np.array( + [ + [ + [[0, 1, 2], [3, 4, 5]], + [[6, 7, 8], [9, 10, 11]], + [[12, 13, 14], [15, 16, 17]], + [[18, 19, 20], [21, 22, 23]], + ] + ] + ).astype(np.float32) + # (1, 1, 4, 6) output tensor + expected_output = np.array( + [ + [ + [ + [0, 6, 1, 7, 2, 8], + [12, 18, 13, 19, 14, 20], + [3, 9, 4, 10, 5, 11], + [15, 21, 16, 22, 17, 23], + ] + ] + ] + ).astype(np.float32) + + ng_results = run_node(node, [data]) + assert np.array_equal(ng_results, [expected_output]) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py new file mode 100644 index 00000000000..d5bd4cd79e5 --- /dev/null +++ 
b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py @@ -0,0 +1,485 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import onnx.mapping +import pytest +from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info + +from ngraph.exceptions import NgraphTypeError +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([-4, 0, 5, -10], dtype=np.float32), + np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]], dtype=np.float32), + np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]], dtype=np.float32), + ], +) +def test_abs(input_data): + expected_output = np.abs(input_data) + node = onnx.helper.make_node("Abs", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.array_equal(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([4, 0, 5, 10]), + np.array([[4, 0, 5, 10], [4, 0, 5, 10]]), + np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]]), + ], +) +def test_sqrt(input_data): + input_data = input_data.astype(np.float32) + expected_output = np.sqrt(input_data) + node = onnx.helper.make_node("Sqrt", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.allclose(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([4, 0, 5, 10]), + np.array([[4, 0, 5, 10], [4, 0, 5, 10]]), + np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]]), + ], +) +def test_exp(input_data): + input_data = input_data.astype(np.float32) + expected_output = np.exp(input_data) + node = onnx.helper.make_node("Exp", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.allclose(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([4, 2, 5, 10]), 
+ np.array([[4, 1, 5, 10], [4, 2, 5, 10]]), + np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]]), + ], +) +def test_log(input_data): + input_data = input_data.astype(np.float32) + expected_output = np.log(input_data) + node = onnx.helper.make_node("Log", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.allclose(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([-4, 0, 5, -10], dtype=np.float32), + np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]], dtype=np.float32), + np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]], dtype=np.float32), + ], +) +def test_neg(input_data): + expected_output = np.negative(input_data) + node = onnx.helper.make_node("Neg", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.array_equal(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([-4.2, 0.43, 5.99, -10.01]), + np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), + np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]) / 6, + ], +) +def test_floor(input_data): + input_data = input_data.astype(np.float32) + expected_output = np.floor(input_data) + node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.array_equal(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([-4.2, 0, 5.99, -10.01]), + np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), + np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]) / 6, + ], +) +def test_ceil(input_data): + input_data = input_data.astype(np.float32) + expected_output = np.ceil(input_data) + node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.array_equal(ng_results, [expected_output]) + + +@pytest.mark.parametrize( + "min_value, max_value", + [(np.finfo(np.float32).min, np.finfo(np.float32).max), (-0.5, 0.5), (0.0, 
np.finfo(np.float32).max)], +) +def test_clip(min_value, max_value): + np.random.seed(133391) + input_data = np.float32(-100.0) + np.random.randn(3, 4, 5).astype(np.float32) * np.float32(200.0) + model = get_node_model("Clip", input_data, opset=10, min=float(min_value), max=float(max_value)) + result = run_model(model, [input_data]) + expected = np.clip(input_data, min_value, max_value) + assert np.allclose(result, [expected]) + + +def test_clip_default(): + np.random.seed(133391) + input_data = -100.0 + np.random.randn(3, 4, 5).astype(np.float32) * 200.0 + + model = get_node_model("Clip", input_data, opset=10, min=0.0) + result = run_model(model, [input_data]) + expected = np.clip(input_data, np.float32(0.0), np.finfo(np.float32).max) + assert np.allclose(result, [expected]) + + model = get_node_model("Clip", input_data, opset=10, max=0.0) + result = run_model(model, [input_data]) + expected = np.clip(input_data, np.finfo(np.float32).min, np.float32(0.0)) + assert np.allclose(result, [expected]) + + +@pytest.mark.parametrize( + "input_data", + [ + np.array([-4.2, 1, 5.99, -10.01]), + np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), + np.array([[[1, 2], [-3, 4]], [[1, -2], [3, 4]]]) / 6, + ], +) +def test_reciprocal(input_data): + input_data = input_data.astype(np.float32) + expected_output = np.reciprocal(input_data) + node = onnx.helper.make_node("Reciprocal", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [input_data]) + assert np.allclose(ng_results, [expected_output]) + + +@pytest.mark.parametrize("axis, dim1, dim2", [(0, 1, 60), (1, 3, 20), (2, 12, 5)]) +def test_hardmax(axis, dim1, dim2): + def hardmax_2d(data): + return np.eye(data.shape[1], dtype=data.dtype)[np.argmax(data, axis=1)] + + np.random.seed(133391) + data = np.random.rand(3, 4, 5).astype(np.float32) + expected = hardmax_2d(data.reshape(dim1, dim2)).reshape(3, 4, 5) + node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=axis) + ng_results = 
run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + +def test_hardmax_special_cases(): + def hardmax_2d(data): + return np.eye(data.shape[1], dtype=data.dtype)[np.argmax(data, axis=1)] + + np.random.seed(133391) + data = np.random.rand(3, 4, 5).astype(np.float32) + + # default axis=1 + expected = hardmax_2d(data.reshape(3, 20)).reshape(3, 4, 5) + node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + expected = hardmax_2d(data.reshape(12, 5)).reshape(3, 4, 5) + node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=-1) + ng_results = run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + with pytest.raises(RuntimeError): + node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=3) + ng_results = run_node(node, [data], opset_version=12) + + # For multiple occurrences of the maximal values, the first occurrence is selected + # for one-hot output + data = np.array([[3, 3, 3, 1]]).astype(np.float32) + expected = np.array([[1, 0, 0, 0]]).astype(np.float32) + node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + +def test_hardsigmoid(): + def hardsigmoid(data, alpha=0.2, beta=0.5): + return np.clip(alpha * data + beta, 0, 1) + + np.random.seed(133391) + alpha = np.random.rand() + beta = np.random.rand() + data = np.random.rand(3, 4, 5).astype(np.float32) + + expected = hardsigmoid(data, alpha, beta) + node = onnx.helper.make_node("HardSigmoid", inputs=["x"], outputs=["y"], alpha=alpha, beta=beta) + ng_results = run_node(node, [data]) + assert np.allclose(ng_results, [expected]) + + expected = hardsigmoid(data) + node = onnx.helper.make_node("HardSigmoid", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [data]) + 
assert np.allclose(ng_results, [expected]) + + +def test_logsoftmax(): + def logsoftmax_2d(x): + max_x = np.max(x, axis=1).reshape((-1, 1)) + exp_x = np.exp(x - max_x) + return x - max_x - np.log(np.sum(exp_x, axis=1).reshape((-1, 1))) + + np.random.seed(133391) + data = np.random.randn(3, 4, 5).astype(np.float32) + + node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=0) + expected = logsoftmax_2d(data.reshape(1, 60)).reshape(3, 4, 5) + ng_results = run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=1) + expected = logsoftmax_2d(data.reshape(3, 20)).reshape(3, 4, 5) + ng_results = run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + # default axis is 1 + node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"]) + ng_results = run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=2) + expected = logsoftmax_2d(data.reshape(12, 5)).reshape(3, 4, 5) + ng_results = run_node(node, [data], opset_version=12) + assert np.allclose(ng_results, [expected]) + + with pytest.raises(RuntimeError): + node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=3) + ng_results = run_node(node, [data], opset_version=12) + + +def test_softplus(): + def softplus(x): + return np.where(x < 20, np.log(np.exp(x) + 1), x) + + np.random.seed(133391) + data = np.random.randn(3, 4, 5).astype(np.float32) + + node = onnx.helper.make_node("Softplus", inputs=["x"], outputs=["y"]) + expected = softplus(data) + ng_results = run_node(node, [data]) + assert np.allclose(ng_results, [expected]) + + +def test_softsign(): + def softsign(x): + return x / (1 + np.abs(x)) + + np.random.seed(133391) + data = np.random.randn(3, 4, 5).astype(np.float32) + + node = 
onnx.helper.make_node("Softsign", inputs=["x"], outputs=["y"]) + expected = softsign(data) + ng_results = run_node(node, [data]) + assert np.allclose(ng_results, [expected]) + + +def test_identity(): + np.random.seed(133391) + shape = [2, 4] + input_data = np.random.randn(*shape).astype(np.float32) + + identity_node = make_node("Identity", inputs=["x"], outputs=["y"]) + ng_results = run_node(identity_node, [input_data]) + assert np.array_equal(ng_results, [input_data]) + + node1 = make_node("Add", inputs=["A", "B"], outputs=["add1"], name="add_node1") + node2 = make_node("Identity", inputs=["add1"], outputs=["identity1"], name="identity_node1") + node3 = make_node("Abs", inputs=["identity1"], outputs=["Y"], name="abs_node1") + + graph = make_graph( + [node1, node2, node3], + "test_graph", + [ + make_tensor_value_info("A", onnx.TensorProto.FLOAT, shape), + make_tensor_value_info("B", onnx.TensorProto.FLOAT, shape), + ], + [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, shape)], + ) + model = make_model(graph, producer_name="ngraph ONNX Importer") + ng_model_function = import_onnx_model(model) + runtime = get_runtime() + computation = runtime.computation(ng_model_function) + ng_results = computation(input_data, input_data) + expected_result = np.abs(input_data + input_data) + + assert np.array_equal(ng_results[0], expected_result) + + +@pytest.mark.parametrize("val_type, input_data", [(np.dtype(bool), np.zeros((2, 2), dtype=int))]) +def test_cast_to_bool(val_type, input_data): + expected = np.array(input_data, dtype=val_type) + + model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) + result = run_model(model, [input_data]) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "val_type, range_start, range_end, in_dtype", + [ + (np.dtype(np.float32), -8, 8, np.dtype(np.int32)), + (np.dtype(np.float64), -16383, 16383, np.dtype(np.int64)), + ], +) +def test_cast_to_float(val_type, range_start, 
range_end, in_dtype): + np.random.seed(133391) + input_data = np.random.randint(range_start, range_end, size=(2, 2), dtype=in_dtype) + expected = np.array(input_data, dtype=val_type) + + model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) + result = run_model(model, [input_data]) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "val_type", [np.dtype(np.int8), + np.dtype(np.int16), + np.dtype(np.int32), + np.dtype(np.int64)] +) +def test_cast_to_int(val_type): + np.random.seed(133391) + input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16) + expected = np.array(input_data, dtype=val_type) + + model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) + result = run_model(model, [input_data]) + assert np.allclose(result, expected) + + +@pytest.mark.parametrize( + "val_type", [np.dtype(np.uint8), np.dtype(np.uint16), np.dtype(np.uint32), np.dtype(np.uint64)] +) +def test_cast_to_uint(val_type): + np.random.seed(133391) + input_data = np.ceil(np.random.rand(2, 3, 4) * 16) + expected = np.array(input_data, dtype=val_type) + + model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) + result = run_model(model, [input_data]) + assert np.allclose(result, expected) + + +def test_cast_errors(): + from onnx.onnx_cpp2py_export.checker import ValidationError + + np.random.seed(133391) + input_data = np.ceil(np.random.rand(2, 3, 4) * 16) + + # missing 'to' attribute + node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"]) + input_tensors = [ + make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) + for name, value in zip(node.input, [input_data]) + ] + output_tensors = [ + make_tensor_value_info(node.output[0], onnx.TensorProto.FLOAT16, input_data.shape) + ] # type: ignore + + graph = make_graph([node], "compute_graph", input_tensors, output_tensors) + model = make_model(graph, 
producer_name="NgraphBackend") + with pytest.raises(ValidationError): + import_onnx_model(model) + + # unsupported data type representation + node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"], to=1.2345) + input_tensors = [ + make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) + for name, value in zip(node.input, [input_data]) + ] + output_tensors = [ + make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape) + ] # type: ignore + + graph = make_graph([node], "compute_graph", input_tensors, output_tensors) + model = make_model(graph, producer_name="NgraphBackend") + with pytest.raises(ValidationError): + import_onnx_model(model) + + # unsupported input tensor data type: + node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"], to=onnx.TensorProto.INT32) + input_tensors = [ + make_tensor_value_info(name, onnx.TensorProto.COMPLEX64, value.shape) + for name, value in zip(node.input, [input_data]) + ] + output_tensors = [ + make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape) + ] # type: ignore + + graph = make_graph([node], "compute_graph", input_tensors, output_tensors) + model = make_model(graph, producer_name="NgraphBackend") + with pytest.raises((RuntimeError, NgraphTypeError)): + import_onnx_model(model) + + # unsupported output tensor data type: + node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"], to=onnx.TensorProto.COMPLEX128) + input_tensors = [ + make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) + for name, value in zip(node.input, [input_data]) + ] + output_tensors = [ + make_tensor_value_info(node.output[0], onnx.TensorProto.COMPLEX128, input_data.shape) + ] # type: ignore + + graph = make_graph([node], "compute_graph", input_tensors, output_tensors) + model = make_model(graph, producer_name="NgraphBackend") + with pytest.raises(RuntimeError): + import_onnx_model(model) + + +@pytest.mark.parametrize("value_type", + 
[pytest.param(np.float64), + pytest.param(np.float32)]) +def test_constant(value_type): + values = np.random.randn(5, 5).astype(value_type) + node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["values"], + value=onnx.helper.make_tensor( + name="const_tensor", + data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(value_type)], + dims=values.shape, + vals=values.flatten(), + ), + ) + + ng_results = run_node(node, []) + assert np.allclose(ng_results, [values]) + + +# See https://github.com/onnx/onnx/issues/1190 +@pytest.mark.xfail(reason="ONNX#1190 numpy.float16 not supported by ONNX make_node", strict=True) +def test_constant_err(): + values = np.random.randn(5, 5).astype(np.float16) + node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["values"], + value=onnx.helper.make_tensor( + name="const_tensor", + data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(np.float16)], + dims=values.shape, + vals=values.flatten(), + ), + ) + + ng_results = run_node(node, []) + assert np.allclose(ng_results, [values]) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_variadic.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_variadic.py new file mode 100644 index 00000000000..a91b3157de8 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_ops_variadic.py @@ -0,0 +1,43 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from functools import reduce + +import numpy as np +import onnx +import pytest + +from tests_compatibility.test_onnx.utils import run_node + + +@pytest.mark.parametrize( + "onnx_op,numpy_func", [("Sum", np.add), ("Min", np.minimum), ("Max", np.maximum)] +) +def test_variadic(onnx_op, numpy_func): + data = [ + np.array([1, 2, 3], dtype=np.int32), + np.array([4, 5, 6], dtype=np.int32), + np.array([7, 8, 9], dtype=np.int32), + ] + node = onnx.helper.make_node( + onnx_op, inputs=["data_0", "data_1", "data_2"], outputs=["y"] + ) 
+ expected_output = reduce(numpy_func, data) + + ng_results = run_node(node, data) + assert np.array_equal(ng_results, [expected_output]) + + +def test_mean(): + data = [ + np.array([1, 2, 3], dtype=np.int32), + np.array([4, 5, 6], dtype=np.int32), + np.array([7, 8, 9], dtype=np.int32), + ] + node = onnx.helper.make_node( + "Mean", inputs=["data_0", "data_1", "data_2"], outputs=["y"] + ) + expected_output = reduce(np.add, data) / len(data) + + ng_results = run_node(node, data) + assert np.array_equal(ng_results, [expected_output]) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py b/runtime/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py new file mode 100644 index 00000000000..6f7f90b2970 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/test_zoo_models.py @@ -0,0 +1,197 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import tests_compatibility +from operator import itemgetter +from pathlib import Path +from typing import Sequence, Any +import numpy as np + +from tests_compatibility.test_onnx.utils import OpenVinoOnnxBackend +from tests_compatibility.test_onnx.utils.model_importer import ModelImportRunner + +from tests_compatibility import ( + xfail_issue_38701, + xfail_issue_45457, + xfail_issue_37957, + xfail_issue_38084, + xfail_issue_39669, + xfail_issue_38726, + xfail_issue_37973, + xfail_issue_47430, + xfail_issue_47495, + xfail_issue_48145, + xfail_issue_48190, + xfail_issue_58676, + xfail_issue_63643, + xfail_issue_onnx_models_140) + +MODELS_ROOT_DIR = tests_compatibility.MODEL_ZOO_DIR + +def yolov3_post_processing(outputs : Sequence[Any]) -> Sequence[Any]: + concat_out_index = 2 + # remove all elements with value -1 from yolonms_layer_1/concat_2:0 output + concat_out = outputs[concat_out_index][outputs[concat_out_index] != -1] + concat_out = np.expand_dims(concat_out, axis=0) + outputs[concat_out_index] = concat_out + return outputs + +def 
tinyyolov3_post_processing(outputs : Sequence[Any]) -> Sequence[Any]: + concat_out_index = 2 + # remove all elements with value -1 from yolonms_layer_1:1 output + concat_out = outputs[concat_out_index][outputs[concat_out_index] != -1] + concat_out = concat_out.reshape((outputs[concat_out_index].shape[0], -1, 3)) + outputs[concat_out_index] = concat_out + return outputs + +post_processing = { + "yolov3" : {"post_processing" : yolov3_post_processing}, + "tinyyolov3" : {"post_processing" : tinyyolov3_post_processing}, + "tiny-yolov3-11": {"post_processing": tinyyolov3_post_processing}, +} + +tolerance_map = { + "arcface_lresnet100e_opset8": {"atol": 0.001, "rtol": 0.001}, + "fp16_inception_v1": {"atol": 0.001, "rtol": 0.001}, + "mobilenet_opset7": {"atol": 0.001, "rtol": 0.001}, + "resnet50_v2_opset7": {"atol": 0.001, "rtol": 0.001}, + "test_mobilenetv2-1.0": {"atol": 0.001, "rtol": 0.001}, + "test_resnet101v2": {"atol": 0.001, "rtol": 0.001}, + "test_resnet18v2": {"atol": 0.001, "rtol": 0.001}, + "test_resnet34v2": {"atol": 0.001, "rtol": 0.001}, + "test_resnet50v2": {"atol": 0.001, "rtol": 0.001}, + "mosaic": {"atol": 0.001, "rtol": 0.001}, + "pointilism": {"atol": 0.001, "rtol": 0.001}, + "rain_princess": {"atol": 0.001, "rtol": 0.001}, + "udnie": {"atol": 0.001, "rtol": 0.001}, + "candy": {"atol": 0.003, "rtol": 0.003}, + "densenet-3": {"atol": 1e-7, "rtol": 0.0011}, + "arcfaceresnet100-8": {"atol": 0.001, "rtol": 0.001}, + "mobilenetv2-7": {"atol": 0.001, "rtol": 0.001}, + "resnet101-v1-7": {"atol": 0.001, "rtol": 0.001}, + "resnet101-v2-7": {"atol": 0.001, "rtol": 0.001}, + "resnet152-v1-7": {"atol": 1e-7, "rtol": 0.003}, + "resnet152-v2-7": {"atol": 0.001, "rtol": 0.001}, + "resnet18-v1-7": {"atol": 0.001, "rtol": 0.001}, + "resnet18-v2-7": {"atol": 0.001, "rtol": 0.001}, + "resnet34-v2-7": {"atol": 0.001, "rtol": 0.001}, + "vgg16-7": {"atol": 0.001, "rtol": 0.001}, + "vgg19-bn-7": {"atol": 0.001, "rtol": 0.001}, + "tinyyolov2-7": {"atol": 0.001, "rtol": 
0.001}, + "tinyyolov2-8": {"atol": 0.001, "rtol": 0.001}, + "candy-8": {"atol": 0.001, "rtol": 0.001}, + "candy-9": {"atol": 0.007, "rtol": 0.001}, + "mosaic-8": {"atol": 0.003, "rtol": 0.001}, + "mosaic-9": {"atol": 0.001, "rtol": 0.001}, + "pointilism-8": {"atol": 0.001, "rtol": 0.001}, + "pointilism-9": {"atol": 0.001, "rtol": 0.001}, + "rain-princess-8": {"atol": 0.001, "rtol": 0.001}, + "rain-princess-9": {"atol": 0.001, "rtol": 0.001}, + "udnie-8": {"atol": 0.001, "rtol": 0.001}, + "udnie-9": {"atol": 0.001, "rtol": 0.001}, + "mxnet_arcface": {"atol": 1.5e-5, "rtol": 0.001}, + "resnet100": {"atol": 1.5e-5, "rtol": 0.001}, + "densenet121": {"atol": 1e-7, "rtol": 0.0011}, + "resnet152v1": {"atol": 1e-7, "rtol": 0.003}, + "test_shufflenetv2": {"atol": 1e-05, "rtol": 0.001}, + "tiny_yolov2": {"atol": 1e-05, "rtol": 0.001}, + "mobilenetv2-1": {"atol": 1e-04, "rtol": 0.001}, + "resnet101v1": {"atol": 1e-04, "rtol": 0.001}, + "resnet101v2": {"atol": 1e-06, "rtol": 0.001}, + "resnet152v2": {"atol": 1e-05, "rtol": 0.001}, + "resnet18v2": {"atol": 1e-05, "rtol": 0.001}, + "resnet34v2": {"atol": 1e-05, "rtol": 0.001}, + "vgg16": {"atol": 1e-05, "rtol": 0.001}, + "vgg19-bn": {"atol": 1e-05, "rtol": 0.001}, + "test_tiny_yolov2": {"atol": 1e-05, "rtol": 0.001}, + "test_resnet152v2": {"atol": 1e-04, "rtol": 0.001}, + "test_mobilenetv2-1": {"atol": 1e-04, "rtol": 0.001}, + "yolov3": {"atol": 0.001, "rtol": 0.001}, + "yolov4": {"atol": 1e-04, "rtol": 0.001}, + "tinyyolov3": {"atol": 1e-04, "rtol": 0.001}, + "tiny-yolov3-11": {"atol": 1e-04, "rtol": 0.001}, + "GPT2": {"atol": 5e-06, "rtol": 0.01}, + "GPT-2-LM-HEAD": {"atol": 4e-06}, + "test_retinanet_resnet101": {"atol": 1.3e-06}, +} + +zoo_models = [] +# rglob doesn't work for symlinks, so models have to be physically somwhere inside "MODELS_ROOT_DIR" +for path in Path(MODELS_ROOT_DIR).rglob("*.onnx"): + mdir = path.parent + file_name = path.name + if path.is_file() and not file_name.startswith("."): + model = {"model_name": 
path, "model_file": file_name, "dir": mdir} + basedir = mdir.stem + if basedir in tolerance_map: + # updated model looks now: + # {"model_name": path, "model_file": file, "dir": mdir, "atol": ..., "rtol": ...} + model.update(tolerance_map[basedir]) + if basedir in post_processing: + model.update(post_processing[basedir]) + zoo_models.append(model) + +if len(zoo_models) > 0: + zoo_models = sorted(zoo_models, key=itemgetter("model_name")) + + # Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones. + OpenVinoOnnxBackend.backend_name = tests_compatibility.BACKEND_NAME + + # import all test cases at global scope to make them visible to pytest + backend_test = ModelImportRunner(OpenVinoOnnxBackend, zoo_models, __name__, MODELS_ROOT_DIR) + test_cases = backend_test.test_cases["OnnxBackendModelImportTest"] + # flake8: noqa: E501 + if tests_compatibility.MODEL_ZOO_XFAIL: + import_xfail_list = [ + # ONNX Model Zoo + (xfail_issue_38701, "test_onnx_model_zoo_text_machine_comprehension_bidirectional_attention_flow_model_bidaf_9_bidaf_bidaf_cpu"), + (xfail_issue_38726, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_decoder_with_lm_head_12_t5_decoder_with_lm_head_cpu"), + + # Model MSFT + (xfail_issue_37957, "test_MSFT_opset10_mask_rcnn_keras_mask_rcnn_keras_cpu"), + ] + for test_case in import_xfail_list: + xfail, test_name = test_case + xfail(getattr(test_cases, test_name)) + + del test_cases + + test_cases = backend_test.test_cases["OnnxBackendModelExecutionTest"] + if tests_compatibility.MODEL_ZOO_XFAIL: + execution_xfail_list = [ + # ONNX Model Zoo + (xfail_issue_39669, "test_onnx_model_zoo_text_machine_comprehension_t5_model_t5_encoder_12_t5_encoder_cpu"), + (xfail_issue_38084, "test_onnx_model_zoo_vision_object_detection_segmentation_mask_rcnn_model_MaskRCNN_10_mask_rcnn_R_50_FPN_1x_cpu"), + (xfail_issue_38084, 
"test_onnx_model_zoo_vision_object_detection_segmentation_faster_rcnn_model_FasterRCNN_10_faster_rcnn_R_50_FPN_1x_cpu"), + (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet50_11_fcn_resnet50_11_model_cpu"), + (xfail_issue_47430, "test_onnx_model_zoo_vision_object_detection_segmentation_fcn_model_fcn_resnet101_11_fcn_resnet101_11_model_cpu"), + (xfail_issue_48145, "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_8_download_sample_8_bertsquad8_cpu"), + (xfail_issue_48190, "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_roberta_base_11_cpu"), + (xfail_issue_onnx_models_140, "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_ResNet101_DUC_HDC_cpu"), + (xfail_issue_63643, "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_ssd_mobilenet_v1_cpu"), + + # Model MSFT + (xfail_issue_37973, "test_MSFT_opset7_tf_inception_v2_model_cpu"), + (xfail_issue_37973, "test_MSFT_opset8_tf_inception_v2_model_cpu"), + (xfail_issue_37973, "test_MSFT_opset9_tf_inception_v2_model_cpu"), + (xfail_issue_37973, "test_MSFT_opset11_tf_inception_v2_model_cpu"), + (xfail_issue_37973, "test_MSFT_opset10_tf_inception_v2_model_cpu"), + + (xfail_issue_58676, "test_MSFT_opset7_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"), + (xfail_issue_58676, "test_MSFT_opset8_fp16_tiny_yolov2_onnxzoo_winmlperf_tiny_yolov2_cpu"), + + (xfail_issue_38084, "test_MSFT_opset10_mask_rcnn_mask_rcnn_R_50_FPN_1x_cpu"), + (xfail_issue_38084, "test_MSFT_opset10_faster_rcnn_faster_rcnn_R_50_FPN_1x_cpu"), + + (xfail_issue_39669, "test_MSFT_opset9_cgan_cgan_cpu"), + (xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"), + (xfail_issue_45457, "test_MSFT_opset10_mlperf_ssd_resnet34_1200_ssd_resnet34_mAP_20.2_cpu"), + (xfail_issue_63643, 
"test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"), + ] + for test_case in import_xfail_list + execution_xfail_list: + xfail, test_name = test_case + xfail(getattr(test_cases, test_name)) + + del test_cases + + globals().update(backend_test.enable_report().test_cases) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/utils/__init__.py b/runtime/bindings/python/tests_compatibility/test_onnx/utils/__init__.py new file mode 100644 index 00000000000..691092a87e7 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/utils/__init__.py @@ -0,0 +1,87 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from string import ascii_uppercase +from typing import Any, Dict, Iterable, List, Optional, Text + +import numpy as np +import onnx +from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info + +import tests_compatibility +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils.onnx_backend import OpenVinoOnnxBackend +from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model + + +def run_node(onnx_node, data_inputs, **kwargs): + # type: (onnx.NodeProto, List[np.ndarray], Dict[Text, Any]) -> List[np.ndarray] + """ + Convert ONNX node to ngraph node and perform computation on input data. + + :param onnx_node: ONNX NodeProto describing a computation node + :param data_inputs: list of numpy ndarrays with input data + :return: list of numpy ndarrays with computed output + """ + OpenVinoOnnxBackend.backend_name = tests_compatibility.BACKEND_NAME + return OpenVinoOnnxBackend.run_node(onnx_node, data_inputs, **kwargs) + + +def run_model(onnx_model, data_inputs): + # type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray] + """ + Convert ONNX model to an ngraph model and perform computation on input data. 
+ + :param onnx_model: ONNX ModelProto describing an ONNX model + :param data_inputs: list of numpy ndarrays with input data + :return: list of numpy ndarrays with computed output + """ + ng_model_function = import_onnx_model(onnx_model) + runtime = get_runtime() + computation = runtime.computation(ng_model_function) + return computation(*data_inputs) + + +def get_node_model(op_type, *input_data, opset=1, num_outputs=1, **node_attributes): + # type: (str, *Any, Optional[int], Optional[int], **Any) -> onnx.ModelProto + """Generate model with single requested node. + + Input and output Tensor data type is the same. + + :param op_type: The ONNX node operation. + :param input_data: Optional list of input arguments for node. + :param opset: The ONNX operation set version to use. Default to 4. + :param num_outputs: The number of node outputs. + :param node_attributes: Optional dictionary of node attributes. + :return: Generated model with single node for requested ONNX operation. + """ + node_inputs = [np.array(data) for data in input_data] + num_inputs = len(node_inputs) + node_input_names = [ascii_uppercase[idx] for idx in range(num_inputs)] + node_output_names = [ascii_uppercase[num_inputs + idx] for idx in range(num_outputs)] + onnx_node = make_node(op_type, node_input_names, node_output_names, **node_attributes) + + input_tensors = [ + make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape) + for name, value in zip(onnx_node.input, node_inputs) + ] + output_tensors = [ + make_tensor_value_info(name, onnx.TensorProto.FLOAT, ()) for name in onnx_node.output + ] # type: ignore + + graph = make_graph([onnx_node], "compute_graph", input_tensors, output_tensors) + model = make_model(graph, producer_name="Ngraph ONNX Importer") + model.opset_import[0].version = opset + return model + + +def all_arrays_equal(first_list, second_list): + # type: (Iterable[np.ndarray], Iterable[np.ndarray]) -> bool + """ + Check that all numpy ndarrays in `first_list` are equal to 
all numpy ndarrays in `second_list`. + + :param first_list: iterable containing numpy ndarray objects + :param second_list: another iterable containing numpy ndarray objects + :return: True if all ndarrays are equal, otherwise False + """ + return all(map(lambda pair: np.array_equal(*pair), zip(first_list, second_list))) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/utils/model_importer.py b/runtime/bindings/python/tests_compatibility/test_onnx/utils/model_importer.py new file mode 100644 index 00000000000..03dedada9d0 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/utils/model_importer.py @@ -0,0 +1,149 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +import onnx.backend.test +import unittest + +from collections import defaultdict, namedtuple +from onnx import numpy_helper, NodeProto, ModelProto +from onnx.backend.base import Backend, BackendRep +from onnx.backend.test.case.test_case import TestCase as OnnxTestCase +from onnx.backend.test.runner import TestItem +from pathlib import Path +from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model +from typing import Any, Dict, List, Optional, Pattern, Set, Text, Type, Union, Callable, Sequence + + +# add post-processing function as part of test data +ExtOnnxTestCase = namedtuple("TestCaseExt", OnnxTestCase._fields + ("post_processing",)) + + +class ModelImportRunner(onnx.backend.test.BackendTest): + def __init__( + self, + backend: Type[Backend], + models: List[Dict[str, Path]], + parent_module: Optional[str] = None, + data_root: Optional[Path] = "", + ) -> None: + self.backend = backend + self._parent_module = parent_module + self._include_patterns = set() # type: Set[Pattern[Text]] + self._exclude_patterns = set() # type: Set[Pattern[Text]] + self._test_items = defaultdict(dict) # type: Dict[Text, Dict[Text, TestItem]] + self._xfail_patterns = set() # type: 
Set[Pattern[Text]] + + for model in models: + test_name = "test{}".format(model["model_name"]) \ + .replace(str(data_root), "") \ + .replace(".onnx", "") \ + .replace("/", "_") \ + .replace("\\", "_") \ + .replace("-", "_") + + test_case = ExtOnnxTestCase( + name=test_name, + url=None, + model_name=model["model_name"], + model_dir=model["dir"], + model=model["model_file"], + data_sets=None, + kind="OnnxBackendRealModelTest", + rtol=model.get("rtol", 0.001), + atol=model.get("atol", 1e-07), + post_processing=model.get("post_processing", None) + ) + self._add_model_import_test(test_case) + self._add_model_execution_test(test_case) + + @staticmethod + def _load_onnx_model(model_dir: Path, filename: Path) -> ModelProto: + if model_dir is None: + raise unittest.SkipTest("Model directory not provided") + + return onnx.load(model_dir / filename) + + def _add_model_import_test(self, model_test: ExtOnnxTestCase) -> None: + # The model is loaded at runtime; note that it might + # never be loaded if the test is skipped + model_marker = [None] # type: List[Optional[Union[ModelProto, NodeProto]]] + + def run_import(test_self: Any, device: Text) -> None: + model = ModelImportRunner._load_onnx_model(model_test.model_dir, model_test.model) + model_marker[0] = model + assert import_onnx_model(model) + + self._add_test("ModelImport", model_test.name, run_import, model_marker) + + @classmethod + def _execute_npz_data( + cls, model_dir: str, prepared_model: BackendRep, result_rtol: float, result_atol: float, + post_processing: Callable[[Sequence[Any]], Sequence[Any]] = None + ) -> int: + executed_tests = 0 + for test_data_npz in model_dir.glob("test_data_*.npz"): + test_data = np.load(test_data_npz, encoding="bytes") + inputs = list(test_data["inputs"]) + outputs = list(prepared_model.run(inputs)) + ref_outputs = test_data["outputs"] + if post_processing is not None: + outputs = post_processing(outputs) + cls.assert_similar_outputs(ref_outputs, outputs, result_rtol, result_atol) + 
executed_tests = executed_tests + 1 + return executed_tests + + @classmethod + def _execute_pb_data( + cls, model_dir: str, prepared_model: BackendRep, result_rtol: float, result_atol: float, + post_processing: Callable[[Sequence[Any]], Sequence[Any]] = None + ) -> int: + executed_tests = 0 + for test_data_dir in model_dir.glob("test_data_set*"): + inputs = [] + inputs_num = len(list(test_data_dir.glob("input_*.pb"))) + for i in range(inputs_num): + input_file = Path(test_data_dir) / "input_{}.pb".format(i) + tensor = onnx.TensorProto() + with open(input_file, "rb") as f: + tensor.ParseFromString(f.read()) + inputs.append(numpy_helper.to_array(tensor)) + ref_outputs = [] + ref_outputs_num = len(list(test_data_dir.glob("output_*.pb"))) + for i in range(ref_outputs_num): + output_file = Path(test_data_dir) / "output_{}.pb".format(i) + tensor = onnx.TensorProto() + with open(output_file, "rb") as f: + tensor.ParseFromString(f.read()) + ref_outputs.append(numpy_helper.to_array(tensor)) + if(len(inputs) == 0): + continue + outputs = list(prepared_model.run(inputs)) + if post_processing is not None: + outputs = post_processing(outputs) + cls.assert_similar_outputs(ref_outputs, outputs, result_rtol, result_atol) + executed_tests = executed_tests + 1 + return executed_tests + + def _add_model_execution_test(self, model_test: ExtOnnxTestCase) -> None: + # The model is loaded at runtime; note that it might + # never be loaded if the test is skipped + model_marker = [None] # type: List[Optional[Union[ModelProto, NodeProto]]] + + def run_execution(test_self: Any, device: Text) -> None: + model = ModelImportRunner._load_onnx_model(model_test.model_dir, model_test.model) + model_marker[0] = model + prepared_model = self.backend.prepare(model, device) + assert prepared_model is not None + executed_tests = ModelImportRunner._execute_npz_data( + model_test.model_dir, prepared_model, model_test.rtol, model_test.atol, + model_test.post_processing + ) + + executed_tests = 
executed_tests + ModelImportRunner._execute_pb_data( + model_test.model_dir, prepared_model, model_test.rtol, model_test.atol, + model_test.post_processing + ) + assert executed_tests > 0, "This model has no test data" + self._add_test("ModelExecution", model_test.name, run_execution, model_marker) diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_backend.py b/runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_backend.py new file mode 100644 index 00000000000..33f78926d88 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_backend.py @@ -0,0 +1,135 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +ONNX Backend implementation. + +See ONNX documentation for details: +https://github.com/onnx/onnx/blob/master/docs/Implementing%20an%20ONNX%20backend.md +""" + +from typing import Any, Dict, List, Optional, Sequence, Text, Tuple + +import numpy +import onnx +from onnx.backend.base import Backend, BackendRep +from onnx.helper import make_graph, make_model, make_tensor_value_info + +from ngraph.impl import Function +from tests_compatibility.runtime import get_runtime +from tests_compatibility.test_onnx.utils.onnx_helpers import import_onnx_model, np_dtype_to_tensor_type + + +class OpenVinoOnnxBackendRep(BackendRep): + def __init__(self, ng_model_function, device="CPU"): # type: (List[Function], str) -> None + super().__init__() + self.device = device + self.ng_model_function = ng_model_function + self.runtime = get_runtime() + self.computation = self.runtime.computation(ng_model_function) + + def run(self, inputs, **kwargs): # type: (Any, **Any) -> Tuple[Any, ...] + """Run computation on model.""" + return self.computation(*inputs) + + +class OpenVinoOnnxBackend(Backend): + @classmethod + def is_compatible( + cls, + model, # type: onnx.ModelProto + device="CPU", # type: Text + **kwargs # type: Any + ): # type: (...) 
-> bool + # Return whether the model is compatible with the backend. + try: + import_onnx_model(model) + return True + except Exception: + return False + + @classmethod + def prepare( + cls, + onnx_model, # type: onnx.ModelProto + device="CPU", # type: Text + **kwargs # type: Any + ): # type: (...) -> OpenVinoOnnxBackendRep + super().prepare(onnx_model, device, **kwargs) + ng_model_function = import_onnx_model(onnx_model) + return OpenVinoOnnxBackendRep(ng_model_function, device) + + @classmethod + def run_model( + cls, + model, # type: onnx.ModelProto + inputs, # type: Any + device="CPU", # type: Text + **kwargs # type: Any + ): # type: (...) -> Tuple[Any, ...] + return cls.prepare(model, device, **kwargs).run(inputs) + + @classmethod + def run_node( + cls, + node, # type: onnx.NodeProto + inputs, # type: Any + device="CPU", # type: Text + outputs_info=None, # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]] + **kwargs # type: Dict[Text, Any] + ): # type: (...) -> Optional[Tuple[Any, ...]] + """Prepare and run a computation on an ONNX node.""" + # default values for input/output tensors + input_tensor_types = [np_dtype_to_tensor_type(node_input.dtype) for node_input in inputs] + output_tensor_types = [onnx.TensorProto.FLOAT for idx in range(len(node.output))] + output_tensor_shapes = [()] # type: List[Tuple[int, ...]] + + if outputs_info is not None: + output_tensor_types = [ + np_dtype_to_tensor_type(dtype) for (dtype, shape) in outputs_info + ] + output_tensor_shapes = [shape for (dtype, shape) in outputs_info] + + input_tensors = [ + make_tensor_value_info(name, tensor_type, value.shape) + for name, value, tensor_type in zip(node.input, inputs, input_tensor_types) + ] + output_tensors = [ + make_tensor_value_info(name, tensor_type, shape) + for name, shape, tensor_type in zip( + node.output, output_tensor_shapes, output_tensor_types + ) + ] + + graph = make_graph([node], "compute_graph", input_tensors, output_tensors) + model = make_model(graph, 
producer_name="OpenVinoOnnxBackend") + if "opset_version" in kwargs: + model.opset_import[0].version = kwargs["opset_version"] + return cls.prepare(model, device).run(inputs) + + @classmethod + def supports_device(cls, device): # type: (Text) -> bool + """Check whether the backend is compiled with particular device support. + + In particular it's used in the testing suite. + """ + return device != "CUDA" + + +class OpenVinoTestBackend(OpenVinoOnnxBackend): + @classmethod + def is_compatible( + cls, + model, # type: onnx.ModelProto + device="CPU", # type: Text + **kwargs # type: Any + ): # type: (...) -> bool + # Return whether the model is compatible with the backend. + import_onnx_model(model) + return True + + +prepare = OpenVinoOnnxBackend.prepare +run_model = OpenVinoOnnxBackend.run_model +run_node = OpenVinoOnnxBackend.run_node +supports_device = OpenVinoOnnxBackend.supports_device diff --git a/runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_helpers.py b/runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_helpers.py new file mode 100644 index 00000000000..6e4a9d99f23 --- /dev/null +++ b/runtime/bindings/python/tests_compatibility/test_onnx/utils/onnx_helpers.py @@ -0,0 +1,30 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import onnx +from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE +from openvino.inference_engine import IECore + +import ngraph as ng +from ngraph.impl import Function + + +def np_dtype_to_tensor_type(data_type: np.dtype) -> int: + """Return TensorProto type for provided numpy dtype. + + :param data_type: Numpy data type object. + :return: TensorProto.DataType enum value for corresponding type. 
+ """ + return NP_TYPE_TO_TENSOR_TYPE[data_type] + + +def import_onnx_model(model: onnx.ModelProto) -> Function: + onnx.checker.check_model(model) + model_byte_string = model.SerializeToString() + + ie = IECore() + ie_network = ie.read_network(model=model_byte_string, weights=b"", init_from_buffer=True) + + ng_function = ng.function_from_cnn(ie_network) + return ng_function diff --git a/runtime/bindings/python/tox.ini b/runtime/bindings/python/tox.ini index c34d286ca33..c1a9b7eaafa 100644 --- a/runtime/bindings/python/tox.ini +++ b/runtime/bindings/python/tox.ini @@ -16,16 +16,17 @@ passenv = https_proxy commands= {envbindir}/python setup.py bdist_wheel - {envbindir}/pip install --no-index --pre --find-links=dist/ ngraph-core + {envbindir}/pip install --no-index --pre --find-links=dist/ openvino flake8 {posargs:src/ setup.py} - flake8 --ignore=D100,D101,D102,D103,D104,D105,D107,W503 tests/ # ignore lack of docs in tests + flake8 --ignore=D100,D101,D102,D103,D104,D105,D107,W503 tests/ tests_compatibility/ # ignore lack of docs in tests mypy --config-file=tox.ini {posargs:src/} pytest --backend={env:NGRAPH_BACKEND} tests -v -k 'not _cuda' --ignore=tests/test_onnx/test_zoo_models.py --ignore=tests/test_inference_engine + pytest --backend={env:NGRAPH_BACKEND} tests_compatibility -v -k 'not _cuda' --ignore=tests_compatibility/test_onnx/test_zoo_models.py [testenv:zoo_models] commands= {envbindir}/python setup.py bdist_wheel - {envbindir}/pip install --no-index --pre --find-links=dist/ ngraph-core + {envbindir}/pip install --no-index --pre --find-links=dist/ openvino pytest --backend={env:NGRAPH_BACKEND} tests/test_onnx/test_zoo_models.py -v -n 4 --forked -k 'not _cuda' --model_zoo_xfail [testenv:devenv] diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index f1967825dc1..2229800cd86 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -19,6 +19,7 @@ ie_shellcheck_process(DIRECTORY "${OpenVINO_SOURCE_DIR}" 
"${OpenVINO_SOURCE_DIR}/scripts/install_dependencies/install_NEO_OCL_driver.sh" "${OpenVINO_SOURCE_DIR}/scripts/install_dependencies/install_openvino_dependencies.sh" "${OpenVINO_SOURCE_DIR}/runtime/bindings/python/tests/test_onnx/model_zoo_preprocess.sh" + "${OpenVINO_SOURCE_DIR}/runtime/bindings/python/tests_compatibility/test_onnx/model_zoo_preprocess.sh" ) #