From 53e699eaba4c81be1b40fd706b7b69ce6e51dbc9 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Wed, 18 Jan 2023 15:16:57 +0100 Subject: [PATCH] Add PyTorch Frontend (#15069) * WIP * update input validation * upsample_nearest2d and upsample_bilinear2d support * support leaky_relu add test for inplace relu * update tests, add handler for ListConstruct * Do not create extra outputs in main body * add positive case with non-default value * update testing * update test, handle non constant size and scale * remove ie_device * add aten::group_norm support * refactoring * Enable aten::reshape_as operator and add layer test * more tests * Fix typo in test * Resolve conflicts * fix code style * expand init version * expand_as and tests * add transposed convolutions support * add tests * initial support pad * add circular * update for differenced in rang * cleanup * refactor * more tests * apply review comments * Add split+listunpack transformation * Add split+getitem transformation * Add test cases * fix typo * Minor fixes * Apply suggestions from code review Co-authored-by: Maxim Vafin * Apply suggestions from code review * Small fix * Support converting models without freezing * support BoolTensor and masked_fill * add support aten::rsqrt and test for sqrt * add cumsum and type_as * support clamp * support more matrix operations * add tests * Add aten::adaptive_avg_pool3d and layer test * Change to rank * fix code style in utils.hpp * Update src/frontends/pytorch/src/op_table.cpp Co-authored-by: Sergey Lyalin * fix code style * add tests * add xfail * remove unnecessary broadcast * Changes required by style formater * aten::_convolution_mode * Changes requested by a reviewer * remove code duplication * add aten::unbind transformation * full, zeros and ones * Support getattr list and unrolling nested ifs * Remove line change * Enable back freezing in layer tests * Add aten::norm operator and layer test * Small fix in layer test * add aten::roll * add empty line * Typo fix * 
fix style * fix style v2 * add pytorch frontend to wheel * Support all types of numeric norms * add check for dynamic shapes * remove random change * merge statements * add min and max ops support * aten::max and aten::min * move axes range creation to utils * add transformation for tuple results, update tests * fix copyright * aten::var * add test and translation for numel * ignore aten::clone * Add layer test for aten::add operator * Fix typo * Remove redundant import * Add parameter name in forward method * fix code style * apply review comments * Add size+slice+listunpack transform * Add append listunpack transformation * Register transformation * aten::where * update realization * Fix issue with getitem * Fix getitem * Add layer test for aten::view operator * Add tests for listunpack * add test for aten::div * fix style * update aten::adaptive_max_pool2d * fix style * add aten::floor_divide * aten::addmm support alpha and beta with different dtype * nonzero * Change test name * update test cases to include other dtypes * aten::arange * prim::max transformation for ListConstruct * rename op * generalize conv2d implementation for conv1d and conv3d * aten::unsqueeze_ and tests for aten::unsqueeze (#70) * add aten::le, aten::ge and tests for other tensor comparision ops (#74) * add support trigonometry ops (#73) * support aten::upsample_bicubic2d, aten::ceil, aten::floor (#72) Co-authored-by: Maxim Vafin * extend and add tests for avg_pool and max_pool * extend tests and constant filling ops * fix as_tensor and full ops * aten::repeat * fix code style * aten::im2col (#61) * aten::im2col * remove debug prints, add number of elements check * fix failed tests * move helper function * use split * Update src/frontends/pytorch/src/op/im2col.cpp Co-authored-by: Maxim Vafin * fix code style Co-authored-by: Maxim Vafin * Update src/frontends/pytorch/src/utils.cpp Co-authored-by: Maxim Vafin * fix code style * revert removeinf floordiv, add floor_divide file * Fix merge 
issue * reduce code duplication * refactor * Add len operator with layer test * update clamp to support mixed precision and add support torch.long for constants * aten::selu * add trunc mode to div * add else statement * Add test case to layer test * Fix submodules (#88) * update test file * fix namings * execute in fp64 and convert back to initial precision * Revert set_output_size to master. Small fix in If validate * Fix build and code style * fix failed tests * Add torchvision::nms operator and layer test * Change requested by a reviewer * Remove div test * convert constants to input type * Mark some cases in div tests as xfail (#93) * Small refactoring (#94) * Small refactoring * Fix type * Fix python codestyle * Incremental fix code style (#95) * Fix style (#96) * Fix copyright * Fix code style * Branch clean up (#97) * Optimize includes and force opset10 (#98) * Optimize includes * Force opset10 in pt fe * Fix codestyle (#99) * Fix style * Fix clang codestyle * Fix cerr with debug log * Update src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp * Add pytorch dependency only if pytorch frontend is enabled * Update src/bindings/python/src/pyopenvino/CMakeLists.txt * Add layer tests to precommit (#100) * Add layer tests to precommit * Remove accidentally added files * Apply code style on layer tests * batch norm tests and fixes * move default weight and bias to else block * reduce code duplication * Changes requested by a reviewer * Changes requested by a reviewer * Remove dependency from pytorch in pyopenvino (#102) * Remove dependency from pytorch when fe is disabled * Change docstring * Remove pytorch FE dependency from pyopenvino * Apply codestyle (#107) * Apply codestyle * Remove commented line * Apply suggestions from code review Co-authored-by: Roman Kazantsev * Fix mock FE test (#108) * Fix mock PE test (#111) * Revert changes in StridedSlice (#114) * Small refactoring (#116) * Small refactoring * Fix codestyle * Apply suggestions from code 
review Co-authored-by: Roman Kazantsev * Apply suggestions from code review * Update src/frontends/pytorch/src/op/group_norm.cpp * Fix cmake copyright define (#117) * Update src/frontends/pytorch/src/op/arange.cpp * Apply suggestions from code review * Update build configs (#120) * FIx build configs * Update type cast in full.cpp * Apply review feedback (#121) * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Roman Kazantsev * Fix issue after master merge (#122) * Fix issue after master merge * Fix build Co-authored-by: eaidova Co-authored-by: bszmelcz Co-authored-by: Sergey Lyalin Co-authored-by: sikorsl1 Co-authored-by: Leonard Sikorski Co-authored-by: Mateusz Co-authored-by: Roman Kazantsev --- .ci/azure/linux.yml | 7 + .ci/azure/linux_conditional_compilation.yml | 1 + .ci/azure/linux_cuda.yml | 1 + .ci/azure/linux_onnxruntime.yml | 1 + .ci/azure/windows_conditional_compilation.yml | 2 + .ci/openvino-onnx/Dockerfile | 1 + cmake/coverage.cmake | 7 + cmake/features.cmake | 1 + cmake/packaging/debian.cmake | 13 + cmake/packaging/rpm.cmake | 9 + cmake/templates/OpenVINOConfig.cmake.in | 9 + .../src/openvino/frontend/pytorch/__init__.py | 21 + .../src/openvino/frontend/pytorch/decoder.py | 319 +++++++++++ .../python/src/pyopenvino/CMakeLists.txt | 8 +- .../src/pyopenvino/frontend/decoder.cpp | 15 + .../src/pyopenvino/frontend/decoder.hpp | 18 + .../src/pyopenvino/frontend/frontend.cpp | 17 +- .../frontend/pytorch/CMakeLists.txt | 6 + .../pyopenvino/frontend/pytorch/decoder.cpp | 33 ++ .../pyopenvino/frontend/pytorch/decoder.hpp | 106 ++++ .../pyopenvino/frontend/pytorch/py_module.cpp | 13 + .../src/pyopenvino/graph/ops/constant.cpp | 6 + .../python/src/pyopenvino/pyopenvino.cpp | 2 + .../python/src/pyopenvino/utils/utils.cpp | 9 + .../python/src/pyopenvino/utils/utils.hpp | 9 +- .../mock_py_frontend/src/mock_py_frontend.cpp | 8 +- .../test_frontend/test_frontendmanager.py | 2 +- src/bindings/python/wheel/setup.py | 7 + 
.../openvino/op/util/framework_node.hpp | 4 + src/core/src/node.cpp | 2 +- src/core/src/op/if.cpp | 7 +- src/core/src/op/interpolate.cpp | 2 +- src/core/src/op/scatter_elements_update.cpp | 3 +- src/core/src/op/swish.cpp | 5 +- src/core/src/op/util/framework_node.cpp | 3 +- src/frontends/CMakeLists.txt | 4 + .../include/openvino/frontend/decoder.hpp | 48 ++ .../openvino/frontend/node_context.hpp | 13 + src/frontends/common/src/manager.cpp | 3 + src/frontends/pytorch/CMakeLists.txt | 5 + .../openvino/frontend/pytorch/decoder.hpp | 125 +++++ .../openvino/frontend/pytorch/frontend.hpp | 65 +++ .../frontend/pytorch/node_context.hpp | 153 ++++++ .../openvino/frontend/pytorch/visibility.hpp | 20 + src/frontends/pytorch/src/CMakeLists.txt | 9 + src/frontends/pytorch/src/frontend.cpp | 135 +++++ src/frontends/pytorch/src/input_model.hpp | 25 + src/frontends/pytorch/src/node_context.cpp | 136 +++++ .../pytorch/src/op/adaptive_avg_pool3d.cpp | 38 ++ .../pytorch/src/op/adaptive_max_pool2d.cpp | 24 + src/frontends/pytorch/src/op/add.cpp | 26 + src/frontends/pytorch/src/op/addcmul.cpp | 28 + src/frontends/pytorch/src/op/addmm.cpp | 31 ++ src/frontends/pytorch/src/op/arange.cpp | 80 +++ src/frontends/pytorch/src/op/as_tensor.cpp | 39 ++ src/frontends/pytorch/src/op/avg_poolnd.cpp | 50 ++ src/frontends/pytorch/src/op/batch_norm.cpp | 57 ++ src/frontends/pytorch/src/op/clamp.cpp | 32 ++ src/frontends/pytorch/src/op/constant.cpp | 21 + src/frontends/pytorch/src/op/convnd.cpp | 62 +++ src/frontends/pytorch/src/op/convolution.cpp | 84 +++ .../pytorch/src/op/convolution_mode.cpp | 60 ++ src/frontends/pytorch/src/op/dim.cpp | 25 + src/frontends/pytorch/src/op/div.cpp | 38 ++ src/frontends/pytorch/src/op/elu.cpp | 23 + src/frontends/pytorch/src/op/embedding.cpp | 28 + src/frontends/pytorch/src/op/expand.cpp | 43 ++ src/frontends/pytorch/src/op/flatten.cpp | 64 +++ src/frontends/pytorch/src/op/floor_divide.cpp | 24 + src/frontends/pytorch/src/op/floordiv.cpp | 23 + 
src/frontends/pytorch/src/op/full.cpp | 154 ++++++ src/frontends/pytorch/src/op/gelu.cpp | 25 + src/frontends/pytorch/src/op/get_attr.cpp | 24 + src/frontends/pytorch/src/op/group_norm.cpp | 49 ++ src/frontends/pytorch/src/op/hardtanh.cpp | 29 + src/frontends/pytorch/src/op/if.cpp | 152 ++++++ src/frontends/pytorch/src/op/im2col.cpp | 96 ++++ src/frontends/pytorch/src/op/int.cpp | 21 + src/frontends/pytorch/src/op/layer_norm.cpp | 36 ++ src/frontends/pytorch/src/op/len.cpp | 28 + src/frontends/pytorch/src/op/linear.cpp | 24 + .../pytorch/src/op/list_construct.cpp | 40 ++ src/frontends/pytorch/src/op/loop.cpp | 72 +++ src/frontends/pytorch/src/op/masked_fill.cpp | 28 + src/frontends/pytorch/src/op/max_poolnd.cpp | 33 ++ src/frontends/pytorch/src/op/mean.cpp | 26 + src/frontends/pytorch/src/op/min_max.cpp | 76 +++ src/frontends/pytorch/src/op/neg.cpp | 24 + src/frontends/pytorch/src/op/nms.cpp | 40 ++ src/frontends/pytorch/src/op/nonzero.cpp | 24 + src/frontends/pytorch/src/op/norm.cpp | 52 ++ src/frontends/pytorch/src/op/numel.cpp | 21 + src/frontends/pytorch/src/op/pad.cpp | 111 ++++ src/frontends/pytorch/src/op/reciprocal.cpp | 25 + src/frontends/pytorch/src/op/relu6.cpp | 22 + src/frontends/pytorch/src/op/repeat.cpp | 28 + src/frontends/pytorch/src/op/reshape.cpp | 41 ++ src/frontends/pytorch/src/op/reshape_as.cpp | 24 + src/frontends/pytorch/src/op/roll.cpp | 37 ++ src/frontends/pytorch/src/op/rsqrt.cpp | 25 + src/frontends/pytorch/src/op/rsub.cpp | 27 + src/frontends/pytorch/src/op/select.cpp | 35 ++ src/frontends/pytorch/src/op/selu.cpp | 28 + src/frontends/pytorch/src/op/size.cpp | 27 + src/frontends/pytorch/src/op/slice.cpp | 75 +++ src/frontends/pytorch/src/op/softmax.cpp | 23 + src/frontends/pytorch/src/op/square.cpp | 23 + src/frontends/pytorch/src/op/squeeze.cpp | 26 + src/frontends/pytorch/src/op/sub.cpp | 29 + src/frontends/pytorch/src/op/sum.cpp | 35 ++ src/frontends/pytorch/src/op/to.cpp | 67 +++ src/frontends/pytorch/src/op/transpose.cpp | 47 ++ 
.../pytorch/src/op/tuple_construct.cpp | 25 + src/frontends/pytorch/src/op/upsample.cpp | 68 +++ src/frontends/pytorch/src/op/var.cpp | 68 +++ src/frontends/pytorch/src/op/view.cpp | 45 ++ src/frontends/pytorch/src/op/where.cpp | 26 + src/frontends/pytorch/src/op_table.cpp | 276 ++++++++++ src/frontends/pytorch/src/op_table.hpp | 18 + .../pytorch/src/pt_framework_node.hpp | 77 +++ src/frontends/pytorch/src/pytorch.cpp | 20 + src/frontends/pytorch/src/transforms.cpp | 383 +++++++++++++ src/frontends/pytorch/src/transforms.hpp | 22 + .../append_list_unpack_replacer.cpp | 97 ++++ .../append_list_unpack_replacer.hpp | 24 + .../src/transforms/aten_cat_replacer.cpp | 77 +++ .../src/transforms/aten_cat_replacer.hpp | 25 + .../src/transforms/aten_getitem_replacer.cpp | 108 ++++ .../src/transforms/aten_getitem_replacer.hpp | 24 + .../max_prim_list_construct_replacer.cpp | 57 ++ .../max_prim_list_construct_replacer.hpp | 24 + .../transforms/prim_list_unpack_replacer.cpp | 191 +++++++ .../transforms/prim_list_unpack_replacer.hpp | 24 + .../prim_tuple_construct_replacer.cpp | 44 ++ .../prim_tuple_construct_replacer.hpp | 24 + src/frontends/pytorch/src/utils.cpp | 455 ++++++++++++++++ src/frontends/pytorch/src/utils.hpp | 97 ++++ .../test_builtin_extensions/CMakeLists.txt | 5 + .../subgraphs_dumper/CMakeLists.txt | 5 + tests/layer_tests/pytorch_tests/conftest.py | 12 + .../pytorch_tests/pytorch_layer_test_class.py | 157 ++++++ .../pytorch_tests/test_adaptive_avg_pool3d.py | 37 ++ .../test_adaptive_max_pool_2d.py | 51 ++ tests/layer_tests/pytorch_tests/test_add.py | 38 ++ .../layer_tests/pytorch_tests/test_addcmul.py | 51 ++ tests/layer_tests/pytorch_tests/test_addmm.py | 49 ++ .../layer_tests/pytorch_tests/test_arange.py | 113 ++++ .../pytorch_tests/test_batch_norm.py | 48 ++ tests/layer_tests/pytorch_tests/test_ceil.py | 33 ++ tests/layer_tests/pytorch_tests/test_clamp.py | 98 ++++ tests/layer_tests/pytorch_tests/test_clone.py | 29 + .../pytorch_tests/test_comparision.py | 59 
++ .../layer_tests/pytorch_tests/test_convnd.py | 164 ++++++ .../pytorch_tests/test_convolution.py | 231 ++++++++ .../pytorch_tests/test_convolution_mode.py | 138 +++++ .../layer_tests/pytorch_tests/test_cumsum.py | 33 ++ tests/layer_tests/pytorch_tests/test_div.py | 78 +++ tests/layer_tests/pytorch_tests/test_exp.py | 29 + .../layer_tests/pytorch_tests/test_expand.py | 70 +++ tests/layer_tests/pytorch_tests/test_floor.py | 33 ++ .../pytorch_tests/test_floor_divide.py | 45 ++ tests/layer_tests/pytorch_tests/test_full.py | 511 ++++++++++++++++++ .../pytorch_tests/test_group_norm.py | 58 ++ .../layer_tests/pytorch_tests/test_im2col.py | 45 ++ .../pytorch_tests/test_leaky_relu.py | 36 ++ tests/layer_tests/pytorch_tests/test_len.py | 51 ++ .../pytorch_tests/test_listunpack.py | 126 +++++ .../pytorch_tests/test_masked_fill.py | 57 ++ .../layer_tests/pytorch_tests/test_min_max.py | 138 +++++ tests/layer_tests/pytorch_tests/test_mm.py | 86 +++ tests/layer_tests/pytorch_tests/test_nms.py | 39 ++ .../layer_tests/pytorch_tests/test_nonzero.py | 49 ++ tests/layer_tests/pytorch_tests/test_norm.py | 39 ++ tests/layer_tests/pytorch_tests/test_numel.py | 37 ++ tests/layer_tests/pytorch_tests/test_pad.py | 110 ++++ .../layer_tests/pytorch_tests/test_permute.py | 33 ++ .../layer_tests/pytorch_tests/test_pooling.py | 158 ++++++ tests/layer_tests/pytorch_tests/test_pow.py | 44 ++ tests/layer_tests/pytorch_tests/test_relu.py | 34 ++ .../layer_tests/pytorch_tests/test_repeat.py | 33 ++ .../layer_tests/pytorch_tests/test_reshape.py | 42 ++ .../pytorch_tests/test_reshape_as.py | 33 ++ tests/layer_tests/pytorch_tests/test_roll.py | 41 ++ tests/layer_tests/pytorch_tests/test_rsqrt.py | 29 + .../layer_tests/pytorch_tests/test_select.py | 37 ++ tests/layer_tests/pytorch_tests/test_selu.py | 34 ++ tests/layer_tests/pytorch_tests/test_silu.py | 32 ++ .../layer_tests/pytorch_tests/test_softmax.py | 34 ++ tests/layer_tests/pytorch_tests/test_split.py | 76 +++ 
tests/layer_tests/pytorch_tests/test_sqrt.py | 29 + .../layer_tests/pytorch_tests/test_squeeze.py | 35 ++ .../pytorch_tests/test_strided_const.py | 34 ++ tests/layer_tests/pytorch_tests/test_sum.py | 40 ++ tests/layer_tests/pytorch_tests/test_to.py | 93 ++++ .../pytorch_tests/test_trigonometry.py | 65 +++ .../layer_tests/pytorch_tests/test_type_as.py | 33 ++ .../layer_tests/pytorch_tests/test_unbind.py | 35 ++ .../pytorch_tests/test_unsqueeze.py | 44 ++ .../pytorch_tests/test_upsample.py | 55 ++ tests/layer_tests/pytorch_tests/test_var.py | 52 ++ tests/layer_tests/pytorch_tests/test_view.py | 61 +++ tests/layer_tests/pytorch_tests/test_where.py | 59 ++ tests/layer_tests/requirements.txt | 1 + 203 files changed, 10707 insertions(+), 23 deletions(-) create mode 100644 src/bindings/python/src/openvino/frontend/pytorch/__init__.py create mode 100644 src/bindings/python/src/openvino/frontend/pytorch/decoder.py create mode 100644 src/bindings/python/src/pyopenvino/frontend/decoder.cpp create mode 100644 src/bindings/python/src/pyopenvino/frontend/decoder.hpp create mode 100644 src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt create mode 100644 src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp create mode 100644 src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp create mode 100644 src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp create mode 100644 src/frontends/common/include/openvino/frontend/decoder.hpp create mode 100644 src/frontends/pytorch/CMakeLists.txt create mode 100644 src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp create mode 100644 src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp create mode 100644 src/frontends/pytorch/include/openvino/frontend/pytorch/node_context.hpp create mode 100644 src/frontends/pytorch/include/openvino/frontend/pytorch/visibility.hpp create mode 100644 src/frontends/pytorch/src/CMakeLists.txt create mode 100644 
src/frontends/pytorch/src/frontend.cpp create mode 100644 src/frontends/pytorch/src/input_model.hpp create mode 100644 src/frontends/pytorch/src/node_context.cpp create mode 100644 src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp create mode 100644 src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp create mode 100644 src/frontends/pytorch/src/op/add.cpp create mode 100644 src/frontends/pytorch/src/op/addcmul.cpp create mode 100644 src/frontends/pytorch/src/op/addmm.cpp create mode 100644 src/frontends/pytorch/src/op/arange.cpp create mode 100644 src/frontends/pytorch/src/op/as_tensor.cpp create mode 100644 src/frontends/pytorch/src/op/avg_poolnd.cpp create mode 100644 src/frontends/pytorch/src/op/batch_norm.cpp create mode 100644 src/frontends/pytorch/src/op/clamp.cpp create mode 100644 src/frontends/pytorch/src/op/constant.cpp create mode 100644 src/frontends/pytorch/src/op/convnd.cpp create mode 100644 src/frontends/pytorch/src/op/convolution.cpp create mode 100644 src/frontends/pytorch/src/op/convolution_mode.cpp create mode 100644 src/frontends/pytorch/src/op/dim.cpp create mode 100644 src/frontends/pytorch/src/op/div.cpp create mode 100644 src/frontends/pytorch/src/op/elu.cpp create mode 100644 src/frontends/pytorch/src/op/embedding.cpp create mode 100644 src/frontends/pytorch/src/op/expand.cpp create mode 100644 src/frontends/pytorch/src/op/flatten.cpp create mode 100644 src/frontends/pytorch/src/op/floor_divide.cpp create mode 100644 src/frontends/pytorch/src/op/floordiv.cpp create mode 100644 src/frontends/pytorch/src/op/full.cpp create mode 100644 src/frontends/pytorch/src/op/gelu.cpp create mode 100644 src/frontends/pytorch/src/op/get_attr.cpp create mode 100644 src/frontends/pytorch/src/op/group_norm.cpp create mode 100644 src/frontends/pytorch/src/op/hardtanh.cpp create mode 100644 src/frontends/pytorch/src/op/if.cpp create mode 100644 src/frontends/pytorch/src/op/im2col.cpp create mode 100644 src/frontends/pytorch/src/op/int.cpp create mode 
100644 src/frontends/pytorch/src/op/layer_norm.cpp create mode 100644 src/frontends/pytorch/src/op/len.cpp create mode 100644 src/frontends/pytorch/src/op/linear.cpp create mode 100644 src/frontends/pytorch/src/op/list_construct.cpp create mode 100644 src/frontends/pytorch/src/op/loop.cpp create mode 100644 src/frontends/pytorch/src/op/masked_fill.cpp create mode 100644 src/frontends/pytorch/src/op/max_poolnd.cpp create mode 100644 src/frontends/pytorch/src/op/mean.cpp create mode 100644 src/frontends/pytorch/src/op/min_max.cpp create mode 100644 src/frontends/pytorch/src/op/neg.cpp create mode 100644 src/frontends/pytorch/src/op/nms.cpp create mode 100644 src/frontends/pytorch/src/op/nonzero.cpp create mode 100644 src/frontends/pytorch/src/op/norm.cpp create mode 100644 src/frontends/pytorch/src/op/numel.cpp create mode 100644 src/frontends/pytorch/src/op/pad.cpp create mode 100644 src/frontends/pytorch/src/op/reciprocal.cpp create mode 100644 src/frontends/pytorch/src/op/relu6.cpp create mode 100644 src/frontends/pytorch/src/op/repeat.cpp create mode 100644 src/frontends/pytorch/src/op/reshape.cpp create mode 100644 src/frontends/pytorch/src/op/reshape_as.cpp create mode 100644 src/frontends/pytorch/src/op/roll.cpp create mode 100644 src/frontends/pytorch/src/op/rsqrt.cpp create mode 100644 src/frontends/pytorch/src/op/rsub.cpp create mode 100644 src/frontends/pytorch/src/op/select.cpp create mode 100644 src/frontends/pytorch/src/op/selu.cpp create mode 100644 src/frontends/pytorch/src/op/size.cpp create mode 100644 src/frontends/pytorch/src/op/slice.cpp create mode 100644 src/frontends/pytorch/src/op/softmax.cpp create mode 100644 src/frontends/pytorch/src/op/square.cpp create mode 100644 src/frontends/pytorch/src/op/squeeze.cpp create mode 100644 src/frontends/pytorch/src/op/sub.cpp create mode 100644 src/frontends/pytorch/src/op/sum.cpp create mode 100644 src/frontends/pytorch/src/op/to.cpp create mode 100644 src/frontends/pytorch/src/op/transpose.cpp create 
mode 100644 src/frontends/pytorch/src/op/tuple_construct.cpp create mode 100644 src/frontends/pytorch/src/op/upsample.cpp create mode 100644 src/frontends/pytorch/src/op/var.cpp create mode 100644 src/frontends/pytorch/src/op/view.cpp create mode 100644 src/frontends/pytorch/src/op/where.cpp create mode 100644 src/frontends/pytorch/src/op_table.cpp create mode 100644 src/frontends/pytorch/src/op_table.hpp create mode 100644 src/frontends/pytorch/src/pt_framework_node.hpp create mode 100644 src/frontends/pytorch/src/pytorch.cpp create mode 100644 src/frontends/pytorch/src/transforms.cpp create mode 100644 src/frontends/pytorch/src/transforms.hpp create mode 100644 src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp create mode 100644 src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp create mode 100644 src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp create mode 100644 src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp create mode 100644 src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp create mode 100644 src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp create mode 100644 src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.cpp create mode 100644 src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.hpp create mode 100644 src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp create mode 100644 src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp create mode 100644 src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.cpp create mode 100644 src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.hpp create mode 100644 src/frontends/pytorch/src/utils.cpp create mode 100644 src/frontends/pytorch/src/utils.hpp create mode 100644 tests/layer_tests/pytorch_tests/conftest.py create mode 100644 tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py create mode 100644 
tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py create mode 100644 tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py create mode 100644 tests/layer_tests/pytorch_tests/test_add.py create mode 100644 tests/layer_tests/pytorch_tests/test_addcmul.py create mode 100644 tests/layer_tests/pytorch_tests/test_addmm.py create mode 100644 tests/layer_tests/pytorch_tests/test_arange.py create mode 100644 tests/layer_tests/pytorch_tests/test_batch_norm.py create mode 100644 tests/layer_tests/pytorch_tests/test_ceil.py create mode 100644 tests/layer_tests/pytorch_tests/test_clamp.py create mode 100644 tests/layer_tests/pytorch_tests/test_clone.py create mode 100644 tests/layer_tests/pytorch_tests/test_comparision.py create mode 100644 tests/layer_tests/pytorch_tests/test_convnd.py create mode 100644 tests/layer_tests/pytorch_tests/test_convolution.py create mode 100644 tests/layer_tests/pytorch_tests/test_convolution_mode.py create mode 100644 tests/layer_tests/pytorch_tests/test_cumsum.py create mode 100644 tests/layer_tests/pytorch_tests/test_div.py create mode 100644 tests/layer_tests/pytorch_tests/test_exp.py create mode 100644 tests/layer_tests/pytorch_tests/test_expand.py create mode 100644 tests/layer_tests/pytorch_tests/test_floor.py create mode 100644 tests/layer_tests/pytorch_tests/test_floor_divide.py create mode 100644 tests/layer_tests/pytorch_tests/test_full.py create mode 100644 tests/layer_tests/pytorch_tests/test_group_norm.py create mode 100644 tests/layer_tests/pytorch_tests/test_im2col.py create mode 100644 tests/layer_tests/pytorch_tests/test_leaky_relu.py create mode 100644 tests/layer_tests/pytorch_tests/test_len.py create mode 100644 tests/layer_tests/pytorch_tests/test_listunpack.py create mode 100644 tests/layer_tests/pytorch_tests/test_masked_fill.py create mode 100644 tests/layer_tests/pytorch_tests/test_min_max.py create mode 100644 tests/layer_tests/pytorch_tests/test_mm.py create mode 100644 
tests/layer_tests/pytorch_tests/test_nms.py create mode 100644 tests/layer_tests/pytorch_tests/test_nonzero.py create mode 100644 tests/layer_tests/pytorch_tests/test_norm.py create mode 100644 tests/layer_tests/pytorch_tests/test_numel.py create mode 100644 tests/layer_tests/pytorch_tests/test_pad.py create mode 100644 tests/layer_tests/pytorch_tests/test_permute.py create mode 100644 tests/layer_tests/pytorch_tests/test_pooling.py create mode 100644 tests/layer_tests/pytorch_tests/test_pow.py create mode 100644 tests/layer_tests/pytorch_tests/test_relu.py create mode 100644 tests/layer_tests/pytorch_tests/test_repeat.py create mode 100644 tests/layer_tests/pytorch_tests/test_reshape.py create mode 100644 tests/layer_tests/pytorch_tests/test_reshape_as.py create mode 100644 tests/layer_tests/pytorch_tests/test_roll.py create mode 100644 tests/layer_tests/pytorch_tests/test_rsqrt.py create mode 100644 tests/layer_tests/pytorch_tests/test_select.py create mode 100644 tests/layer_tests/pytorch_tests/test_selu.py create mode 100644 tests/layer_tests/pytorch_tests/test_silu.py create mode 100644 tests/layer_tests/pytorch_tests/test_softmax.py create mode 100644 tests/layer_tests/pytorch_tests/test_split.py create mode 100644 tests/layer_tests/pytorch_tests/test_sqrt.py create mode 100644 tests/layer_tests/pytorch_tests/test_squeeze.py create mode 100644 tests/layer_tests/pytorch_tests/test_strided_const.py create mode 100644 tests/layer_tests/pytorch_tests/test_sum.py create mode 100644 tests/layer_tests/pytorch_tests/test_to.py create mode 100644 tests/layer_tests/pytorch_tests/test_trigonometry.py create mode 100644 tests/layer_tests/pytorch_tests/test_type_as.py create mode 100644 tests/layer_tests/pytorch_tests/test_unbind.py create mode 100644 tests/layer_tests/pytorch_tests/test_unsqueeze.py create mode 100644 tests/layer_tests/pytorch_tests/test_upsample.py create mode 100644 tests/layer_tests/pytorch_tests/test_var.py create mode 100644 
tests/layer_tests/pytorch_tests/test_view.py create mode 100644 tests/layer_tests/pytorch_tests/test_where.py diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 6883fd5161a..05bef2e8d53 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -462,6 +462,13 @@ jobs: WORKSPACE: $(INSTALL_DIR) displayName: 'Samples Smoke Tests' + - script: | + python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt + export PYTHONPATH=$(REPO_DIR)/tools/mo/:$(LAYER_TESTS_DIR):$PYTHONPATH + export TEST_DEVICE=CPU + $(RUN_PREFIX) python3 -m pytest $(LAYER_TESTS_DIR)/pytorch_tests/ -m precommit --junitxml=$(INSTALL_TEST_DIR)/TEST-pytorch.xmlTEST + displayName: 'PyTorch Layer Tests' + - script: | python3 -m pip install -r $(LAYER_TESTS_DIR)/requirements.txt export PYTHONPATH=$(REPO_DIR)/tools/mo/:$(LAYER_TESTS_DIR):$PYTHONPATH diff --git a/.ci/azure/linux_conditional_compilation.yml b/.ci/azure/linux_conditional_compilation.yml index 30372cc89e5..c7777643daf 100644 --- a/.ci/azure/linux_conditional_compilation.yml +++ b/.ci/azure/linux_conditional_compilation.yml @@ -124,6 +124,7 @@ jobs: -DENABLE_TEMPLATE=OFF -DENABLE_OV_ONNX_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF + -DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OV_TF_FRONTEND=OFF -S $(REPO_DIR) -B $(BUILD_DIR) diff --git a/.ci/azure/linux_cuda.yml b/.ci/azure/linux_cuda.yml index 878436eecd1..b8d8b99a9b3 100644 --- a/.ci/azure/linux_cuda.yml +++ b/.ci/azure/linux_cuda.yml @@ -135,6 +135,7 @@ jobs: -DENABLE_INTEL_GNA=OFF \ -DENABLE_OV_TF_FRONTEND=OFF \ -DENABLE_OV_PADDLE_FRONTEND=OFF \ + -DENABLE_OV_PYTORCH_FRONTEND=OFF \ -DENABLE_OV_ONNX_FRONTEND=OFF \ -DENABLE_PYTHON=OFF \ -DENABLE_TESTS=ON \ diff --git a/.ci/azure/linux_onnxruntime.yml b/.ci/azure/linux_onnxruntime.yml index b5d2207a389..86eb80a03b2 100644 --- a/.ci/azure/linux_onnxruntime.yml +++ b/.ci/azure/linux_onnxruntime.yml @@ -115,6 +115,7 @@ jobs: -DENABLE_COMPILE_TOOL=OFF -DENABLE_OV_TF_FRONTEND=OFF -DENABLE_OV_PADDLE_FRONTEND=OFF + 
-DENABLE_OV_PYTORCH_FRONTEND=OFF -DENABLE_OPENVINO_DEBUG=OFF -S $(REPO_DIR) -B $(BUILD_DIR) diff --git a/.ci/azure/windows_conditional_compilation.yml b/.ci/azure/windows_conditional_compilation.yml index ef3db5142a1..f8734d508c8 100644 --- a/.ci/azure/windows_conditional_compilation.yml +++ b/.ci/azure/windows_conditional_compilation.yml @@ -130,6 +130,7 @@ jobs: -DENABLE_TESTS=OFF ^ -DENABLE_OV_ONNX_FRONTEND=OFF ^ -DENABLE_OV_PADDLE_FRONTEND=OFF ^ + -DENABLE_OV_PYTORCH_FRONTEND=OFF ^ -DENABLE_OV_TF_FRONTEND=OFF ^ $(REPO_DIR) workingDirectory: $(BUILD_DIR) @@ -175,6 +176,7 @@ jobs: -DENABLE_TESTS=OFF ^ -DENABLE_OV_ONNX_FRONTEND=OFF ^ -DENABLE_OV_PADDLE_FRONTEND=OFF ^ + -DENABLE_OV_PYTORCH_FRONTEND=OFF ^ -DENABLE_OV_TF_FRONTEND=OFF ^ $(REPO_DIR) workingDirectory: $(BUILD_DIR_2) diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index 0c6e071977f..445ca6d9e34 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -61,6 +61,7 @@ RUN cmake .. \ -DENABLE_PROFILING_ITT=OFF \ -DENABLE_SAMPLES=OFF \ -DENABLE_OV_PADDLE_FRONTEND=OFF \ + -DENABLE_OV_PYTORCH_FRONTEND=OFF \ -DENABLE_OV_TF_FRONTEND=OFF \ -DENABLE_OPENVINO_DEBUG=OFF \ -DCMAKE_INSTALL_PREFIX=/openvino/dist diff --git a/cmake/coverage.cmake b/cmake/coverage.cmake index 0537a8cf30e..590e93489eb 100644 --- a/cmake/coverage.cmake +++ b/cmake/coverage.cmake @@ -136,6 +136,13 @@ if(ENABLE_OV_PADDLE_FRONTEND) PREFIX "${OV_COVERAGE_BASE_DIRECTORY}") endif() +if(ENABLE_OV_PYTORCH_FRONTEND) + ov_coverage_extract(INPUT "openvino" OUTPUT "pytorch_frontend" + PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/frontends/pytorch/*") + ov_coverage_genhtml(INFO_FILE "pytorch_frontend" + PREFIX "${OV_COVERAGE_BASE_DIRECTORY}") +endif() + if(ENABLE_OV_TF_FRONTEND) ov_coverage_extract(INPUT "openvino" OUTPUT "tf_frontend" PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/src/frontends/tensorflow/*") diff --git a/cmake/features.cmake b/cmake/features.cmake index e6f6504baee..727f0cbba1a 100644 --- 
a/cmake/features.cmake +++ b/cmake/features.cmake @@ -151,6 +151,7 @@ ie_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at run find_host_package(PythonInterp 3 QUIET) ie_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ${PYTHONINTERP_FOUND}) ie_option(ENABLE_OV_PADDLE_FRONTEND "Enable PaddlePaddle FrontEnd" ON) +ie_option(ENABLE_OV_PYTORCH_FRONTEND "Enable PyTorch FrontEnd" ON) ie_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON) ie_dependent_option(ENABLE_SYSTEM_PROTOBUF "Use system protobuf" OFF "ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_TF_FRONTEND;BUILD_SHARED_LIBS" OFF) diff --git a/cmake/packaging/debian.cmake b/cmake/packaging/debian.cmake index 4e7312bdf20..dea5d149cf1 100644 --- a/cmake/packaging/debian.cmake +++ b/cmake/packaging/debian.cmake @@ -261,6 +261,19 @@ macro(ov_cpack_settings) set(paddle_copyright "generic") endif() + if(ENABLE_OV_PYTORCH_FRONTEND) + set(CPACK_COMPONENT_PYTORCH_DESCRIPTION "OpenVINO PyTorch Frontend") + set(CPACK_COMPONENT_PYTORCH_DEPENDS "${OV_CPACK_COMP_CORE}") + set(CPACK_DEBIAN_PYTORCH_PACKAGE_NAME "libopenvino-pytorch-frontend-${cpack_name_ver}") + # since we PYTORCH FE is linkable target, we need to call ldconfig (i.e. 
`def_triggers`) + set(CPACK_DEBIAN_PYTORCH_PACKAGE_CONTROL_EXTRA "${def_postinst};${def_postrm};${def_triggers}") + ov_debian_add_lintian_suppression(pytorch + # we have different package name strategy; it suggests libopenvino-pytorch-frontend202230 + "package-name-doesnt-match-sonames") + list(APPEND frontends pytorch) + set(pytorch_copyright "generic") + endif() + # # core_dev: depends on core and frontends (since frontends don't want to provide its own dev packages) # diff --git a/cmake/packaging/rpm.cmake b/cmake/packaging/rpm.cmake index eb9ed9c3d28..5c1b29f605e 100644 --- a/cmake/packaging/rpm.cmake +++ b/cmake/packaging/rpm.cmake @@ -226,6 +226,15 @@ macro(ov_cpack_settings) set(paddle_copyright "generic") endif() + if(ENABLE_OV_PYTORCH_FRONTEND) + set(CPACK_COMPONENT_PYTORCH_DESCRIPTION "OpenVINO PyTorch Frontend") + set(CPACK_RPM_PYTORCH_PACKAGE_NAME "libopenvino-pytorch-frontend-${cpack_name_ver}") + set(CPACK_RPM_PYTORCH_POST_INSTALL_SCRIPT_FILE "${def_triggers}") + set(CPACK_RPM_PYTORCH_POST_UNINSTALL_SCRIPT_FILE "${def_triggers}") + _ov_add_package(frontend_packages pytorch) + set(pytorch_copyright "generic") + endif() + # # core_dev: depends on core and frontends (since frontends don't want to provide its own dev packages) # diff --git a/cmake/templates/OpenVINOConfig.cmake.in b/cmake/templates/OpenVINOConfig.cmake.in index b9f2ae6fe95..023704f6267 100644 --- a/cmake/templates/OpenVINOConfig.cmake.in +++ b/cmake/templates/OpenVINOConfig.cmake.in @@ -12,6 +12,7 @@ # * `Runtime`: OpenVINO C++ and C Core & Inference Runtime, frontend common # * `ONNX`: OpenVINO ONNX frontend # * `Paddle`: OpenVINO Paddle frontend +# * `PyTorch`: OpenVINO PyTorch frontend # * `TensorFlow`: OpenVINO TensorFlow frontend # # If no components are specified, `Runtime` component is provided: @@ -41,6 +42,9 @@ # `openvino::frontend::paddle` # Paddle FrontEnd target (optional) # +# `openvino::frontend::pytorch` +# PyTorch FrontEnd target (optional) +# # 
`openvino::frontend::tensorflow` # TensorFlow FrontEnd target (optional) # @@ -61,6 +65,9 @@ # `OpenVINO_Frontend_Paddle_FOUND` # OpenVINO Paddle frontend is available # +# `OpenVINO_Frontend_PyTorch_FOUND` +# OpenVINO PyTorch frontend is available +# # `OpenVINO_Frontend_TensorFlow_FOUND` # OpenVINO TensorFlow frontend is available # @@ -293,11 +300,13 @@ set(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND @ENABLE_OV_ONNX_FRONTEND@) set(${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND @ENABLE_OV_PADDLE_FRONTEND@) set(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND @ENABLE_OV_TF_FRONTEND@) set(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND @ENABLE_OV_IR_FRONTEND@) +set(${CMAKE_FIND_PACKAGE_NAME}_PyTorch_FOUND @ENABLE_OV_PYTORCH_FRONTEND@) set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_ONNX_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND}) set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_Paddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND}) set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_TensorFlow_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND}) set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_IR_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND}) +set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_PyTorch_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_PyTorch_FOUND}) # if no components specified, only Runtime is provided if(NOT ${CMAKE_FIND_PACKAGE_NAME}_FIND_COMPONENTS) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/__init__.py b/src/bindings/python/src/openvino/frontend/pytorch/__init__.py new file mode 100644 index 00000000000..78cfb7a0182 --- /dev/null +++ b/src/bindings/python/src/openvino/frontend/pytorch/__init__.py @@ -0,0 +1,21 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +""" +Package: openvino +Low level wrappers for the FrontEnd C++ API. 
+""" + +# flake8: noqa + +from openvino.utils import add_openvino_libs_to_path + +add_openvino_libs_to_path() + + +try: + from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder + from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType +except ImportError as err: + raise ImportError("OpenVINO PyTorch frontend is not available, please make sure the frontend is built." + "{}".format(err)) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/decoder.py new file mode 100644 index 00000000000..df04e6ea495 --- /dev/null +++ b/src/bindings/python/src/openvino/frontend/pytorch/decoder.py @@ -0,0 +1,319 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# flake8: noqa +# mypy: ignore-errors + +from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder +from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType +from openvino.runtime import op, PartialShape, Type as OVType, OVAny, Shape + +import warnings +import torch + + +def get_type_from_py_type(value): + if isinstance(value, float): + return OVType.f32 + if isinstance(value, int): + return OVType.i32 + if isinstance(value, bool): + return OVType.boolean + return OVType.dynamic + + +def ivalue_to_constant(ivalue): + ov_type = get_type_from_py_type(ivalue) + if ov_type.is_static(): + return op.Constant(ov_type, Shape([]), [ivalue]).outputs() + + if isinstance(ivalue, list): + assert len(ivalue) > 0, "Can't deduce type for empty list" + ov_type = get_type_from_py_type(ivalue[0]) + assert ov_type.is_static(), "Can't deduce type for list" + return op.Constant(ov_type, Shape([len(ivalue)]), ivalue).outputs() + + if ivalue.type() in pt_to_ov_type_map: + try: + ovshape = PartialShape(ivalue.size()) + ovtype = pt_to_ov_type_map[ivalue.type()] + ov_const = op.Constant(ovtype, ovshape.get_shape(), 
ivalue.data_ptr()) + except Exception: + # old variant that makes a slow data copying + warnings.warn("[ WARNING ] Constant wasn't able to convert from data_ptr.") + nvalues = ivalue.numpy() + ovtype = np_to_ov_type_map[str(nvalues.dtype)] + ovshape = PartialShape(nvalues.shape) + ov_const = op.Constant(ovtype, ovshape.get_shape(), nvalues.flatten().tolist()) + return ov_const.outputs() + + +def get_value_from_getattr(getattr_node, self_module): + assert getattr_node.kind() == "prim::GetAttr", "Got node of kind not equal to prim::GetAttr" + # GetAttr nodes can be nested + stack = [] + while getattr_node.kind() == "prim::GetAttr": + stack.append(getattr_node) + inputs = list(getattr_node.inputs()) + if len(inputs) == 0: + break + getattr_node = inputs[0].node() + module = self_module + while len(stack) > 0: + node = stack.pop() + assert (hasattr(module, node.s("name"))) + module = getattr(module, node.s("name")) + return module + + +pt_to_ov_type_map = { + "float": OVType.f32, + "int": OVType.i32, + "torch.float32": OVType.f32, + "torch.int32": OVType.i32, + "torch.bool": OVType.boolean, + "torch.int64": OVType.i64, + "torch.FloatTensor": OVType.f32, + "torch.IntTensor": OVType.i32, + "torch.LongTensor": OVType.i64, + "torch.BoolTensor": OVType.boolean, +} + +pt_to_py_type_map = { + "float": "float", + "int": "int", + "torch.float32": "float", + "torch.int32": "int", + "torch.int64": "int", + "torch.bool": "bool", +} + +np_to_ov_type_map = { + "float32": OVType.f32, + "int32": OVType.i32, +} + + +class TorchScriptPythonDecoder (Decoder): + def __init__(self, pt_module, graph_element=None): + Decoder.__init__(self) + # We store every decoder created by this decoder so that all them are not deleted until the first decoder is deleted + self.m_decoders = [] + if graph_element is None: + assert hasattr(pt_module, "inlined_graph"), "graph_element must have inlined_graph" + self.graph_element = pt_module.inlined_graph + else: + self.graph_element = graph_element + 
self.pt_module = pt_module + + def inputs(self): + return [x.unique() for x in self.graph_element.inputs()] + + def get_input(self, index): + return self.inputs()[index] + + def get_input_shape(self, index): + raw_input = self._raw_input(index) + return self.get_shape_for_value(raw_input) + + def get_input_type(self, index): + raw_input = self._raw_input(index) + return self.get_type_for_value(raw_input) + + def get_output_shape(self, index): + output = self._raw_output(index) + return self.get_shape_for_value(output) + + def get_output_type(self, index): + output = self._raw_output(index) + return self.get_type_for_value(output) + + def _get_known_type_for_value(self, pt_type): + """Returns known/unknown types wrapped as OVAny.""" + # Check for simple scalar types first + if pt_type is None: + return OVAny(OVType.dynamic) + # TODO: Don't use str, use native types + if str(pt_type) in pt_to_ov_type_map: + return OVAny(pt_to_ov_type_map[str(pt_type)]) + elif pt_type.__class__ is torch.TensorType: + # Tensor type, parse element type + return OVAny(DecoderType.Tensor(self._get_known_type_for_value(pt_type.dtype()))) + elif pt_type.__class__ is torch.ListType: + element_type = pt_type.getElementType() + return OVAny(DecoderType.List(self._get_known_type_for_value(element_type))) + else: + # Not yet recognized + return OVAny(OVType.dynamic) + + def get_shape_for_value(self, value): + if value.isCompleteTensor(): + ps = PartialShape(value.type().sizes()) + return ps + else: + # TODO: Recognize types that we can represent as a nested constructs with objects from DecoderType + # If recognized, return scalar instead of dynamic. Scalar means a single value of that custom type. 
+ # See get_type_for_value for reference + pass + return PartialShape.dynamic() + + def get_type_for_value(self, value): + full_type = self._get_known_type_for_value(value.type()) + return full_type + + def get_input_transpose_order(self, index): + raw_input = self._raw_input(index) + if raw_input.type() is not None and raw_input.type().kind() == "TensorType": + strides = raw_input.type().strides() + if strides is not None: + return [s[0] for s in sorted(enumerate(strides), key=lambda x:x[1], reverse=True)] + return [] + + def get_output_transpose_order(self, index): + output = self._raw_output(index) + if output.type() is not None and output.type().kind() == "TensorType": + strides = output.type().strides() + if strides is not None: + return [s[0] for s in sorted(enumerate(strides), key=lambda x:x[1], reverse=True)] + return [] + + def get_subgraph_size(self): + return len(self.get_subgraphs()) if hasattr(self.graph_element, "blocks") else 1 + + def visit_subgraph(self, node_visitor): + # make sure topological order is satisfied + for node in self.graph_element.nodes(): + decoder = TorchScriptPythonDecoder(self.pt_module, node) + self.m_decoders.append(decoder) + node_visitor(decoder) + + def get_subgraphs(self): + return list(self.graph_element.blocks()) + + def get_subgraph_decoder(self, index): + decoder = TorchScriptPythonDecoder(self.pt_module, self.get_subgraphs()[index]) + self.m_decoders.append(decoder) + return decoder + + def get_op_type(self): + return self.graph_element.kind() + + def get_schema(self): + return self.graph_element.schema() + + def outputs(self): + return [x.unique() for x in self.graph_element.outputs()] + + def _raw_outputs(self): + return list(self.graph_element.outputs()) + + def _raw_output(self, index): + return self._raw_outputs()[index] + + def _raw_inputs(self): + return list(self.graph_element.inputs()) + + def _raw_input(self, index): + return self._raw_inputs()[index] + + def num_of_outputs(self): + return len(self.outputs()) 
+ + def output(self, index): + return self.outputs()[index] + + def mark_node(self, node): + return node + + def try_decode_get_attr(self): + pt_value = get_value_from_getattr(self.graph_element, self.pt_module) + assert pt_value is not None, "Couldn't retrieve value from prim::GetAttr" + if not isinstance(pt_value, torch.jit.ScriptModule) or isinstance(pt_value, torch.jit.TracedModule): + return ivalue_to_constant(pt_value) + else: + return [] + + def as_constant(self): + if not self.get_op_type() == "prim::Constant": + return None + pt_value = self._raw_output(0) + + pt_type_class = pt_value.type().__class__ + if pt_type_class is torch.TensorType: + return self.as_constant_tensor(pt_value) + if pt_type_class is torch.ListType: + return self.as_constant_list(pt_value) + if str(pt_value.type()) in ["torch.int32", "int"]: + return op.Constant(OVType.i32, Shape([]), [pt_value.toIValue()]).outputs() + if str(pt_value.type()) in ["torch.float", "torch.FloatType", "float"]: + return op.Constant(OVType.f32, Shape([]), [pt_value.toIValue()]).outputs() + if str(pt_value.type()) in ["torch.bool", "bool"]: + return op.Constant(OVType.boolean, Shape([]), [pt_value.toIValue()]).outputs() + + return None + + def as_string(self): + if not self.get_op_type() == "prim::Constant": + return None + pt_value = self._raw_output(0) + + if str(pt_value.type()) in ["torch.StringType", "str"]: + return pt_value.toIValue() + return None + + def as_constant_tensor(self, pt_value): + ivalue = pt_value.toIValue() + if pt_value.isCompleteTensor(): + try: + ivalue = ivalue.to(memory_format=torch.contiguous_format).detach().cpu() + except Exception: + warnings.warn("[ WARNING ] Tensor couldn't detach") + if str(pt_value.type().dtype()) in pt_to_ov_type_map: + # Constant interpretation doesn't respect new-full type of PT + # It recognizes only tensors, and give lists as 1D tensors, and scalars as Tensor scalars + # So only tensor-type constants are supported + ovshape = 
PartialShape(pt_value.type().sizes()) + ovtype = pt_to_ov_type_map[str(pt_value.type().dtype())] + + # TODO: try-except here is a temporary WA for issues with data_ptr that we currently cannot predict; provide better solution + try: + # this is only possible with adding a new ctor for Constant Python binding + # TODO Check strides and pass them somehow + values = ivalue.data_ptr() + ov_const = op.Constant(ovtype, ovshape.get_shape(), values) + except Exception: + # old variant that makes a slow data copying + warnings.warn("[ WARNING ] Constant wasn't able to convert from data_ptr.") + values = ivalue.flatten().tolist() + ov_const = op.Constant(ovtype, ovshape.get_shape(), values) + return ov_const.outputs() + else: + return ivalue_to_constant(ivalue) + return None + + def as_constant_list(self, pt_value): + # For now it is treat a list as a 1D tensor; it is required by converters to avoid need to massively + # rewrite them in that part where constant attributes are queried + pt_element_type = str(pt_value.type().getElementType()) + ivalue = pt_value.toIValue() + is_known_type = pt_element_type in pt_to_ov_type_map + + if is_known_type: + ovtype = pt_to_ov_type_map[pt_element_type] + ovshape = PartialShape([len(ivalue)]) + ov_const = op.Constant(ovtype, ovshape.get_shape(), ivalue) + return ov_const.outputs() + + def input_is_none(self, index): + if index >= len(self.inputs()) or self._raw_input(index) is None: + return True + else: + r_input = self._raw_input(index) + if str(r_input.type()) in ["torch.NoneType", "NoneType"]: + return True + else: + in_node = r_input.node() + if in_node.kind() == "prim::GetAttr": + pt_value = get_value_from_getattr(in_node, self.pt_module) + return pt_value is None + return False diff --git a/src/bindings/python/src/pyopenvino/CMakeLists.txt b/src/bindings/python/src/pyopenvino/CMakeLists.txt index c5a777722a0..f3432b088d1 100644 --- a/src/bindings/python/src/pyopenvino/CMakeLists.txt +++ 
b/src/bindings/python/src/pyopenvino/CMakeLists.txt @@ -61,10 +61,14 @@ if(TARGET openvino::frontend::paddle) add_subdirectory(frontend/paddle) endif() +if(TARGET openvino::frontend::pytorch) + add_subdirectory(frontend/pytorch) +endif() + # create target -file(GLOB_RECURSE SOURCES core/*.cpp graph/*.cpp frontend/*.cpp utils/*cpp pyopenvino.cpp) -list(FILTER SOURCES EXCLUDE REGEX frontend/onnx|tensorflow|paddle/* ) +file(GLOB_RECURSE SOURCES core/*.cpp graph/*.cpp frontend/*.cpp utils/*.cpp pyopenvino.cpp) +list(FILTER SOURCES EXCLUDE REGEX frontend/onnx|tensorflow|paddle|pytorch/* ) pybind11_add_module(${PROJECT_NAME} MODULE NO_EXTRAS ${SOURCES}) diff --git a/src/bindings/python/src/pyopenvino/frontend/decoder.cpp b/src/bindings/python/src/pyopenvino/frontend/decoder.cpp new file mode 100644 index 00000000000..b0560e36113 --- /dev/null +++ b/src/bindings/python/src/pyopenvino/frontend/decoder.cpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "decoder.hpp" + +#include "openvino/frontend/decoder.hpp" + +namespace py = pybind11; + +using namespace ov::frontend; + +void regclass_frontend_IDecoder(py::module m) { + py::class_>(m, "_IDecoder"); +} diff --git a/src/bindings/python/src/pyopenvino/frontend/decoder.hpp b/src/bindings/python/src/pyopenvino/frontend/decoder.hpp new file mode 100644 index 00000000000..541c0081779 --- /dev/null +++ b/src/bindings/python/src/pyopenvino/frontend/decoder.hpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/frontend/decoder.hpp" + +namespace py = pybind11; + +class PyIDecoder : public ov::frontend::IDecoder { +public: + using IDecoder::IDecoder; // Inherit constructors +}; + +void regclass_frontend_IDecoder(py::module m); diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp index 
d169ea2ec42..ace191ae92b 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp @@ -25,16 +25,21 @@ void regclass_frontend_FrontEnd(py::module m) { fem.def( "load", - [](FrontEnd& self, const py::object& path) { - std::string model_path = Common::utils::convert_path_to_string(path); - return self.load(model_path); + [](FrontEnd& self, const py::object& py_obj) { + try { + std::string model_path = Common::utils::convert_path_to_string(py_obj); + return self.load(model_path); + } catch (...) { + // Extended for one argument only for this time + return self.load({Common::utils::py_object_to_any(py_obj)}); + } }, py::arg("path"), R"( - Loads an input model by specified model file path. + Loads an input model. - :param path: Main model file path. - :type path: Union[str, pathlib.Path] + :param path: Object describing the model. It can be path to model file. + :type path: Any :return: Loaded input model. :rtype: openvino.frontend.InputModel )"); diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt b/src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt new file mode 100644 index 00000000000..6f7669ad21b --- /dev/null +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/CMakeLists.txt @@ -0,0 +1,6 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +include(${pyopenvino_SOURCE_DIR}/frontend/frontend_module.cmake) +frontend_module(py_pytorch_frontend pytorch ${OV_CPACK_COMP_PYTHON_OPENVINO}_${pyversion}) diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp new file mode 100644 index 00000000000..71b69a23562 --- /dev/null +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include 
+#include +#include + +#include "decoder.hpp" + +#include "openvino/frontend/decoder.hpp" + +namespace py = pybind11; + +using namespace ov::frontend; +using ov::Any; + + +void regclass_frontend_pytorch_decoder(py::module m) { + py::class_>(m, "_FrontEndPytorchDecoder") + .def(py::init<>()); + + auto type_module = m.def_submodule("_Type"); + + // Register classes for TorchScript type system + py::class_(type_module, "Tensor"). + def(py::init()); + py::class_(type_module, "List"). + def(py::init()); + py::class_(type_module, "Str"). + def(py::init<>()); +} diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp new file mode 100644 index 00000000000..21b597d8ed0 --- /dev/null +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp @@ -0,0 +1,106 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/frontend/pytorch/decoder.hpp" + +namespace py = pybind11; + +/// Trampoline class to support inheritence from TorchDecoder in Python +class PyDecoder : public ov::frontend::pytorch::TorchDecoder { + using ov::frontend::pytorch::TorchDecoder::TorchDecoder; + + ov::Any const_input(size_t index) const override { + PYBIND11_OVERRIDE_PURE(ov::Any, TorchDecoder, const_input, index); + } + + size_t input(size_t index) const override { + PYBIND11_OVERRIDE_PURE(size_t, TorchDecoder, get_input, index); + } + + const std::vector& inputs() const override { + PYBIND11_OVERRIDE_PURE(const std::vector&, TorchDecoder, inputs); + } + + ov::PartialShape get_input_shape(size_t index) const override { + PYBIND11_OVERRIDE_PURE(ov::PartialShape, TorchDecoder, get_input_shape, index); + } + + ov::Any get_input_type(size_t index) const override { + PYBIND11_OVERRIDE_PURE(ov::Any, TorchDecoder, get_input_type, index); + } + + const std::vector& get_input_transpose_order(size_t index) const override { + 
PYBIND11_OVERRIDE_PURE(const std::vector&, TorchDecoder, get_input_transpose_order, index); + } + + const std::vector& get_output_transpose_order(size_t index) const override { + PYBIND11_OVERRIDE_PURE(const std::vector&, TorchDecoder, get_output_transpose_order, index); + } + + ov::PartialShape get_output_shape(size_t index) const override { + PYBIND11_OVERRIDE_PURE(ov::PartialShape, TorchDecoder, get_output_shape, index); + } + + ov::Any get_output_type(size_t index) const override { + PYBIND11_OVERRIDE_PURE(ov::Any, TorchDecoder, get_output_type, index); + } + + bool input_is_none(size_t index) const override { + PYBIND11_OVERRIDE_PURE(bool, TorchDecoder, input_is_none, index); + } + + ov::OutputVector try_decode_get_attr() const override { + PYBIND11_OVERRIDE_PURE(ov::OutputVector, TorchDecoder, try_decode_get_attr); + } + + ov::OutputVector as_constant() const override { + PYBIND11_OVERRIDE_PURE(ov::OutputVector, TorchDecoder, as_constant); + } + + const std::string& as_string() const override { + PYBIND11_OVERRIDE_PURE(const std::string&, TorchDecoder, as_string); + } + + const std::string& get_op_type() const override { + PYBIND11_OVERRIDE_PURE(const std::string&, TorchDecoder, get_op_type); + } + + const std::string& get_schema() const override { + PYBIND11_OVERRIDE_PURE(const std::string&, TorchDecoder, get_schema); + } + + size_t num_of_outputs() const override { + PYBIND11_OVERRIDE_PURE(size_t, TorchDecoder, num_of_outputs); + } + + const std::vector& outputs() const override { + PYBIND11_OVERRIDE_PURE(const std::vector&, TorchDecoder, outputs); + } + + size_t output(size_t index) const override { + PYBIND11_OVERRIDE_PURE(size_t, TorchDecoder, output, index); + } + + std::shared_ptr mark_node(std::shared_ptr ov_node) const override { + PYBIND11_OVERRIDE_PURE(std::shared_ptr, TorchDecoder, mark_node, ov_node); + } + + size_t get_subgraph_size() const override { + PYBIND11_OVERRIDE_PURE(size_t, TorchDecoder, get_subgraph_size); + } + + void 
visit_subgraph(std::function)> node_visitor) const override { + PYBIND11_OVERRIDE_PURE(void, TorchDecoder, visit_subgraph, node_visitor); + } + + std::shared_ptr get_subgraph_decoder(size_t index) const override { + PYBIND11_OVERRIDE_PURE(std::shared_ptr, TorchDecoder, get_subgraph_decoder, index); + } +}; + +void regclass_frontend_pytorch_decoder(py::module m); diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp new file mode 100644 index 00000000000..185f91ef59f --- /dev/null +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/py_module.cpp @@ -0,0 +1,13 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "decoder.hpp" + +namespace py = pybind11; + +PYBIND11_MODULE(py_pytorch_frontend, m) { + regclass_frontend_pytorch_decoder(m); +} diff --git a/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp b/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp index afb4aa36ce0..2e558242351 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp @@ -80,6 +80,12 @@ void regclass_graph_op_Constant(py::module m) { constant.def(py::init&>()); constant.def(py::init&>()); constant.def(py::init&>()); + constant.def(py::init([](const ov::element::Type& et, const ov::Shape& sh, int64_t p) { + // restore pointer from integer + // TODO: Align on bit width + void* pp = reinterpret_cast(p); + return std::make_shared(et, sh, pp); + })); constant.def("get_value_strings", &ov::op::v0::Constant::get_value_strings); diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index 968bafa5ecf..a229f9eaa7d 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -34,6 +34,7 @@ #include "pyopenvino/core/tensor.hpp" #include 
"pyopenvino/core/variable_state.hpp" #include "pyopenvino/core/version.hpp" +#include "pyopenvino/frontend/decoder.hpp" #include "pyopenvino/frontend/extension.hpp" #include "pyopenvino/frontend/frontend.hpp" #include "pyopenvino/frontend/input_model.hpp" @@ -235,6 +236,7 @@ PYBIND11_MODULE(_pyopenvino, m) { regclass_frontend_FrontEnd(m); regclass_frontend_InputModel(m); regclass_frontend_NodeContext(m); + regclass_frontend_IDecoder(m); // frontend extensions regclass_frontend_TelemetryExtension(m); diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index a729ccc2537..7891650d6f9 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -12,6 +12,7 @@ #include #include "Python.h" +#include "openvino/frontend/decoder.hpp" namespace Common { namespace utils { @@ -233,6 +234,14 @@ ov::Any py_object_to_any(const py::object& py_obj) { return py::cast(py_obj); } else if (py::isinstance(py_obj)) { return py::cast(py_obj); + // FrontEnd Decoder + } else if (py::isinstance(py_obj)) { + return py::cast>(py_obj); + // Custom FrontEnd Types + } else if (py::isinstance(py_obj)) { + return py::cast(py_obj); + } else if (py::isinstance(py_obj)) { + return py::cast(py_obj); // If there is no match fallback to py::object } else if (py::isinstance(py_obj)) { return py_obj; diff --git a/src/bindings/python/src/pyopenvino/utils/utils.hpp b/src/bindings/python/src/pyopenvino/utils/utils.hpp index 4f602dc7efb..cbc3640affb 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.hpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.hpp @@ -5,15 +5,16 @@ #pragma once #include -#include -#include -#include + +#include "openvino/core/any.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/runtime/properties.hpp" namespace py = pybind11; namespace Common { namespace utils { - py::object from_ov_any(const ov::Any &any); + py::object 
from_ov_any(const ov::Any& any); std::map properties_to_any_map(const std::map& properties); diff --git a/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp b/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp index bc3d59501b5..7116839f20f 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp +++ b/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp @@ -365,8 +365,12 @@ InputModel::Ptr FrontEndMockPy::load_impl(const std::vector& params) co m_telemetry->send_error("load_impl_error"); m_telemetry->send_stack_trace("mock_stack_trace"); } - if (!params.empty() && params[0].is()) { - m_stat.m_load_paths.push_back(params[0].as()); + if (!params.empty()) { + if (params[0].is()) { + m_stat.m_load_paths.push_back(params[0].as()); + } else { + throw ov::Exception("Only path is supported."); + } } return std::make_shared(); diff --git a/src/bindings/python/tests/test_frontend/test_frontendmanager.py b/src/bindings/python/tests/test_frontend/test_frontendmanager.py index 7c76cc2680d..ddc82f97688 100644 --- a/src/bindings/python/tests/test_frontend/test_frontendmanager.py +++ b/src/bindings/python/tests/test_frontend/test_frontendmanager.py @@ -100,7 +100,7 @@ def test_load_wrong_path(): assert fe is not None with pytest.raises(RuntimeError) as e: fe.load(TestClass()) - assert "Path: 'test class' does not exist. Please provide valid model's path either as a string, bytes or pathlib.Path" in str(e.value) + assert "Only path is supported." 
in str(e.value) @mock_needed diff --git a/src/bindings/python/wheel/setup.py b/src/bindings/python/wheel/setup.py index a36ed6b9ad3..37ec1df3a45 100644 --- a/src/bindings/python/wheel/setup.py +++ b/src/bindings/python/wheel/setup.py @@ -119,6 +119,13 @@ LIB_INSTALL_CFG = { "rpath": LIBS_RPATH, "binary_dir": OPENVINO_BUILD_DIR, }, + "pytorch_libs": { + "name": "pytorch", + "prefix": "libs.pytorch", + "install_dir": OV_RUNTIME_LIBS_DIR, + "rpath": LIBS_RPATH, + "binary_dir": OPENVINO_BUILD_DIR, + }, "onnx_libs": { "name": "onnx", "prefix": "libs.onnx", diff --git a/src/core/include/openvino/op/util/framework_node.hpp b/src/core/include/openvino/op/util/framework_node.hpp index a2186b4f0ed..57a6be7a3a3 100644 --- a/src/core/include/openvino/op/util/framework_node.hpp +++ b/src/core/include/openvino/op/util/framework_node.hpp @@ -58,6 +58,10 @@ public: return m_attrs.at(key); } + attrs_t::const_iterator find(const std::string& key) const { + return m_attrs.find(key); + } + bool operator==(const FrameworkNodeAttrs& other) const { return m_type_name == other.m_type_name && m_opset_name == other.m_opset_name && m_attrs == other.m_attrs; } diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index d1f2b7db07c..51db5b6fe13 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ -446,7 +446,7 @@ std::set> ov::Node::get_output_target_inputs(size_t i) const } ov::descriptor::Tensor& ov::Node::get_output_tensor(size_t i) const { - NGRAPH_CHECK(i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor(size_t i)"); + NGRAPH_CHECK(i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor(size_t i) for node ", *this); return m_outputs[i].get_tensor(); } diff --git a/src/core/src/op/if.cpp b/src/core/src/op/if.cpp index 01d45ee1736..7b5ffbe0b59 100644 --- a/src/core/src/op/if.cpp +++ b/src/core/src/op/if.cpp @@ -152,14 +152,17 @@ void ov::op::v8::If::validate_and_infer_types() { auto else_node_result = 
m_bodies[ELSE_BODY_INDEX]->get_results().at(else_desc->m_body_value_index)->input_value(0); + element::Type merged_type; NODE_VALIDATION_CHECK(this, - then_node_result.get_element_type() == else_node_result.get_element_type(), + element::Type::merge(merged_type, + then_node_result.get_element_type(), + else_node_result.get_element_type()), "type of then_body output is not equal type of else_body output"); // shape inference for output and associated with it body outputs auto partial_shape = resolve_shape(then_node_result.get_partial_shape(), else_node_result.get_partial_shape()); - set_output_type(output_index, then_node_result.get_element_type(), partial_shape); + set_output_type(output_index, merged_type, partial_shape); } } } diff --git a/src/core/src/op/interpolate.cpp b/src/core/src/op/interpolate.cpp index fff2004b9a2..3396f8af7fe 100644 --- a/src/core/src/op/interpolate.cpp +++ b/src/core/src/op/interpolate.cpp @@ -194,7 +194,7 @@ void op::v4::Interpolate::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, input_et == element::f32 || input_et == element::f16 || input_et == element::i8 || input_et == element::bf16 || input_et == element::u8 || input_et == element::i64 || - input_et == element::i32, + input_et == element::i32 || input_et == element::dynamic, "Input element type must be f32, f16, bf16, i8, u8, i64, i32"); element::Type sizes_et = get_input_element_type(1); diff --git a/src/core/src/op/scatter_elements_update.cpp b/src/core/src/op/scatter_elements_update.cpp index ffcc063862d..ff24d4a6048 100644 --- a/src/core/src/op/scatter_elements_update.cpp +++ b/src/core/src/op/scatter_elements_update.cpp @@ -42,8 +42,9 @@ void op::v3::ScatterElementsUpdate::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, axis_et.is_integral(), "Axis element type must be integral_number, but is: ", axis_et); + element::Type merged_type; NODE_VALIDATION_CHECK(this, - data_et == updates_et, + element::Type::merge(merged_type, data_et, updates_et), "Data type 
and updates type are required to be the same. ", "Got: ", data_et, diff --git a/src/core/src/op/swish.cpp b/src/core/src/op/swish.cpp index 8c0ee365f40..f21faeb2182 100644 --- a/src/core/src/op/swish.cpp +++ b/src/core/src/op/swish.cpp @@ -36,10 +36,11 @@ void op::v4::Swish::validate_and_infer_types() { "Swish must have 1 or 2 inputs, but it has: ", inputs_count); + auto in_type = get_input_element_type(0); NODE_VALIDATION_CHECK(this, - get_input_element_type(0).is_real(), + in_type.is_dynamic() || in_type.is_real(), "Swish input tensor must be floating point type(", - get_input_element_type(0), + in_type, ")."); if (inputs_count == 2) { diff --git a/src/core/src/op/util/framework_node.cpp b/src/core/src/op/util/framework_node.cpp index 4927edb8a2a..2b4881cc7e8 100644 --- a/src/core/src/op/util/framework_node.cpp +++ b/src/core/src/op/util/framework_node.cpp @@ -157,7 +157,8 @@ void ov::op::util::FrameworkNode::validate_and_infer_types() { reset_output_shape_to_dynamic = true; } else { NODE_VALIDATION_CHECK(this, - m_inputs_desc[i] == std::make_tuple(input_pshape, input_type), + std::get<0>(m_inputs_desc[i]).compatible(input_pshape) && + std::get<1>(m_inputs_desc[i]).compatible(input_type), get_error_message()); } } diff --git a/src/frontends/CMakeLists.txt b/src/frontends/CMakeLists.txt index 6f64b2a8c94..a200ea27af0 100644 --- a/src/frontends/CMakeLists.txt +++ b/src/frontends/CMakeLists.txt @@ -16,6 +16,10 @@ if(ENABLE_OV_PADDLE_FRONTEND) add_subdirectory(paddle) endif() +if(ENABLE_OV_PYTORCH_FRONTEND) + add_subdirectory(pytorch) +endif() + if(ENABLE_OV_IR_FRONTEND) add_subdirectory(ir) endif() diff --git a/src/frontends/common/include/openvino/frontend/decoder.hpp b/src/frontends/common/include/openvino/frontend/decoder.hpp new file mode 100644 index 00000000000..8ad30b9e0d1 --- /dev/null +++ b/src/frontends/common/include/openvino/frontend/decoder.hpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + 
+#pragma once + +#include "openvino/core/any.hpp" + +namespace ov { +namespace frontend { + +// Extendable type system which reflects Framework data types +// Type nestings are built with the help of ov::Any +namespace type { + +struct Tensor { + Tensor() = default; + explicit Tensor(const Any& _element_type) : element_type(_element_type) {} + Any element_type; +}; + +struct Tuple; + +struct List { + List() = default; + + // Specifies list of elements of element_type type, all elements have the same given type + explicit List(const Any& _element_type) : element_type(_element_type) {} + Any element_type; +}; + +struct Str {}; + +struct Optional; +struct Dict; +struct NamedTuple; +struct Union; + +} // namespace type + +/// Plays a role of node, block and module decoder +class IDecoder { +public: + virtual ~IDecoder() = default; +}; + +} // namespace frontend +} // namespace ov diff --git a/src/frontends/common/include/openvino/frontend/node_context.hpp b/src/frontends/common/include/openvino/frontend/node_context.hpp index 0d2fed9cd9d..cf6ea7b0eeb 100644 --- a/src/frontends/common/include/openvino/frontend/node_context.hpp +++ b/src/frontends/common/include/openvino/frontend/node_context.hpp @@ -16,6 +16,7 @@ namespace frontend { class FRONTEND_API NodeContext { public: + // TODO: Why this ctor is explicit when get_op_type is virtual so m_op_type looks to be a custom implementation explicit NodeContext(const std::string& op_type) : m_op_type(op_type) {} virtual ~NodeContext() = default; @@ -87,6 +88,18 @@ public: /// \brief Returns node attribute by name as ov::Any. 
virtual ov::Any get_attribute_as_any(const std::string& name) const = 0; + /// \brief Returns the number of sub-graphs that can be enumerated with get_subgraph + virtual size_t get_subgraph_size() const { + FRONT_END_NOT_IMPLEMENTED(get_subgraph_size); + } + + /// \brief Returns subgraph converted on demand by the first access + /// If there is no query for specific sub-graph it shouldn't be converted + /// idx should be in range 0..get_subgraph_size()-1 + virtual std::shared_ptr get_subgraph(int idx) const { + FRONT_END_NOT_IMPLEMENTED(get_subgraph); + } + private: virtual ov::Any apply_additional_conversion_rules(const ov::Any& data, const std::type_info& type_info) const { return data; diff --git a/src/frontends/common/src/manager.cpp b/src/frontends/common/src/manager.cpp index e4f51b24467..7a9dceecf06 100644 --- a/src/frontends/common/src/manager.cpp +++ b/src/frontends/common/src/manager.cpp @@ -9,6 +9,7 @@ #include "openvino/frontend/exception.hpp" #include "openvino/util/env_util.hpp" +#include "openvino/util/log.hpp" #include "plugin_loader.hpp" #include "utils.hpp" @@ -49,6 +50,7 @@ public: {"onnx", "onnx"}, {"tf", "tensorflow"}, {"paddle", "paddle"}, + {"pytorch", "pytorch"}, }; auto it = predefined_frontends.find(framework); std::lock_guard guard(m_loading_mutex); @@ -79,6 +81,7 @@ public: std::lock_guard guard(m_loading_mutex); for (auto& plugin_info : m_plugins) { if (!plugin_info.load()) { + OPENVINO_DEBUG << "Frontend load failed: " << plugin_info.m_file_path << "\n"; continue; } names.push_back(plugin_info.get_creator().m_name); diff --git a/src/frontends/pytorch/CMakeLists.txt b/src/frontends/pytorch/CMakeLists.txt new file mode 100644 index 00000000000..8efe7e3e892 --- /dev/null +++ b/src/frontends/pytorch/CMakeLists.txt @@ -0,0 +1,5 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +add_subdirectory(src) diff --git a/src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp 
b/src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp new file mode 100644 index 00000000000..50ce539d182 --- /dev/null +++ b/src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp @@ -0,0 +1,125 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include "openvino/core/any.hpp" +#include "openvino/core/node.hpp" +#include "openvino/core/node_output.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/frontend/decoder.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +/// Plays a role of node, block and module decoder (kind of temporary fat API) +class TorchDecoder : public IDecoder { +public: + // Do not search for input in tensor map; try to access it as a constant of specified type T and return its value + // Using Any here is an easy way to avoid template definition, returned object is supposed to be of one of the + // fundamental types like int, float etc. + virtual Any const_input(size_t index) const = 0; + + // Using size_t for input/output unuque ids are in sync with torch code, see def in + // torch/include/torch/csrc/jit/ir/ir.h, Value::unique_ + + // TODO: set of input and output methods are not aligned; also they are not aligned with the rest of FEs + + // Input tensor id + virtual size_t input(size_t index) const = 0; + + virtual const std::vector& inputs() const = 0; + + // ------------------------------ + // TODO: physically inputs and outputs refer to PT Values so shape/type is not a property of input/output + // Do we need a separate Decoder for Tensor to request properties of it instead of having an impression + // that inputs/outputs have types and shapes? 
+ + // Return shape if inputs has torch::Tensor type in the original model, otherwise returns the shape [] of a scalar + virtual PartialShape get_input_shape(size_t index) const = 0; + + // Return element::Type when it the original type can be represented, otherwise returns PT-sepcific data type object + // (see custom_type.hpp) + virtual Any get_input_type(size_t index) const = 0; + + // TODO: Consider deleting this method, probably it doesn't make sence outside Torch JIT execution + virtual const std::vector& get_input_transpose_order(size_t index) const = 0; + + // TODO: Consider deleting this method, probably it doesn't make sence outside Torch JIT execution + virtual const std::vector& get_output_transpose_order(size_t index) const = 0; + + // Return shape if inputs has torch::Tensor type in the original model, otherwise returns the shape [] of a scalar + virtual PartialShape get_output_shape(size_t index) const = 0; + + // Return element::Type when it the original type can be represented, otherwise returns PT-sepcific data type object + // (see custom_type.hpp) + virtual Any get_output_type(size_t index) const = 0; + // ------------------------------ + + // TODO: required? can be implemented in the context of a single node? + virtual bool input_is_none(size_t index) const = 0; + + virtual OutputVector try_decode_get_attr() const = 0; + + // Work for natural constant nodes, e.g. for prim::Constant; don't know other nodes kinds that fit + // TODO: why OutputVector instead of just single output? + virtual OutputVector as_constant() const = 0; + + // Get string from constant. Work for natural constant nodes, e.g. 
for prim::Constant; don't know other nodes kinds + // that fit + virtual const std::string& as_string() const = 0; + + // Returns PT node kind as a string mnemonics for native type uint32_t Symbol in Torch + // Decide whether we need an equivalent member for integer representation (in this case a map is required to + // understand what it means) + virtual const std::string& get_op_type() const = 0; + + // Returns PT node schema as a string + virtual const std::string& get_schema() const = 0; + + // TODO: use canonical name output_size + virtual size_t num_of_outputs() const = 0; + + // Return a vector of output IDs + virtual const std::vector& outputs() const = 0; + + // Return a vector of output IDs + virtual size_t output(size_t index) const = 0; + + // Embed mapping to/from the original node representation from/to node passed as a parameter + // the representation of this mapping is specific for particular decored type and may be NOP + // returns the same node as syntactically convenient way to make nested sentences in code + virtual std::shared_ptr mark_node(std::shared_ptr ov_node) const = 0; + + // Call mark_node for each node from the vector + void mark_nodes(std::vector> ov_nodes) const { + for (auto& ov_node : ov_nodes) { + mark_node(ov_node); + } + } + + // Syntactic sugar around mark_node -- just calls it for corresponding node for the passed output port + Output mark_output(Output ov_output) const { + mark_node(ov_output.get_node_shared_ptr()); + return ov_output; + } + + /// \brief Returns the number of sub-graphs that can be enumerated with get_subgraph + virtual size_t get_subgraph_size() const = 0; + + /// \brief Returns subgraph converted on demand by the first access + /// If there is no query for specific sub-graph it shouldn't be converted + // node_visitor is a function that will be fed by nodes in subgraph for all nodes in graph + virtual void visit_subgraph(std::function)> node_visitor) const = 0; + + /// Probably this toghether with 
immediate nodes visitor is a replacement for visit_subgraphs with an index + virtual std::shared_ptr get_subgraph_decoder(size_t index) const = 0; +}; + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp b/src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp new file mode 100644 index 00000000000..0dc1ec1fb53 --- /dev/null +++ b/src/frontends/pytorch/include/openvino/frontend/pytorch/frontend.hpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/frontend.hpp" +#include "openvino/frontend/pytorch/visibility.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +class PYTORCH_API FrontEnd : public ov::frontend::FrontEnd { +public: + using Ptr = std::shared_ptr; + + /// \brief Completely convert and normalize entire Model, throws if it is not possible + /// \param model Input model + /// \return fully converted OV Model + std::shared_ptr convert(const ov::frontend::InputModel::Ptr& model) const override; + + /// \brief Completely convert the remaining, not converted part of a Model. + /// \param partiallyConverted partially converted OV Model + void convert(const std::shared_ptr& partiallyConverted) const override; + + /// \brief Convert only those parts of the model that can be converted leaving others + /// as-is. Converted parts are not normalized by additional transformations; normalize + /// function or another form of convert function should be called to finalize the + /// conversion process. + /// \param model Input model + /// \return partially converted OV Model + std::shared_ptr convert_partially(const InputModel::Ptr& model) const override; + + /// \brief Convert operations with one-to-one mapping with decoding nodes. 
+ /// Each decoding node is an OV node representing a single FW operation node with + /// all attributes represented in FW-independent way. + /// \param model Input model + /// \return OV Model after decoding + std::shared_ptr decode(const InputModel::Ptr& model) const override; + + /// \brief Runs normalization passes on Model that was loaded with partial conversion + /// \param Model partially converted OV Model + void normalize(const std::shared_ptr& model) const override; + + /// \brief Gets name of this FrontEnd. Can be used by clients + /// if frontend is selected automatically by FrontEndManager::load_by_model + /// \return Paddle frontend name. + std::string get_name() const override { + return "pytorch"; + } + + /// \brief Register base extension in the FrontEnd + /// \param extension base extension + void add_extension(const std::shared_ptr& extension) override; + +protected: + bool supported_impl(const std::vector& variants) const override; + + ov::frontend::InputModel::Ptr load_impl(const std::vector& variants) const override; +}; + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/include/openvino/frontend/pytorch/node_context.hpp b/src/frontends/pytorch/include/openvino/frontend/pytorch/node_context.hpp new file mode 100644 index 00000000000..006b69684df --- /dev/null +++ b/src/frontends/pytorch/include/openvino/frontend/pytorch/node_context.hpp @@ -0,0 +1,153 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/node_context.hpp" +#include "openvino/frontend/pytorch/decoder.hpp" +#include "openvino/util/log.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +typedef std::unordered_map> TensorMap; + +class NodeContext : public frontend::NodeContext { +public: + NodeContext(std::shared_ptr decoder, + TensorMap* tensor_map, + ParameterVector* 
external_parameters, + const TensorMap& ext_tensor_map) + : // TODO: why the following ctor is explicit? + frontend::NodeContext(decoder->get_op_type()), + m_decoder(decoder), + m_tensor_map(tensor_map), + m_ext_tensor_map(ext_tensor_map), + m_external_parameters(external_parameters) {} + + // Do not search for input in tensor map; try to access it as a constant of specified type T and return its value + template + T const_input(size_t index) const; + + size_t get_input_size() const override { + return m_decoder->inputs().size(); + }; + + // Search for input in tensor map and return an output port for already converted op + // TODO: int due to base class uses it, but naturally it should be size_t for PT + Output get_input(int index) const override { + FRONT_END_GENERAL_CHECK(!m_decoder->input_is_none(index), "Input is none with index: ", index); + auto input = m_decoder->input(index); + FRONT_END_GENERAL_CHECK(m_tensor_map->count(input), "No tensor corresponding input: ", input, " exist."); + return m_tensor_map->at(input); + } + + // TODO: upstream to base class + OutputVector inputs() const { + OutputVector res; + for (size_t input : m_decoder->inputs()) { + FRONT_END_GENERAL_CHECK(m_tensor_map->count(input), "No tensor corresponding index: ", input, " exist."); + res.push_back(m_tensor_map->at(input)); + } + return res; + } + + bool input_is_none(size_t index) const { + return m_decoder->input_is_none(index); + } + + // Convert the resulting value of this node to ov Constant; works correctly only for nodes that produce + // constant value, naturally for prim::Constant + OutputVector as_constant() const { + return m_decoder->as_constant(); + } + + /* + TODO: Should be uncommented when explicit NodeContext ctor won't require passing op_type + const std::string& get_op_type() const override { + return m_decoder->get_op_type(); + } + */ + + std::string get_schema() const { + return m_decoder->get_schema(); + } + + size_t num_of_outputs() const { + return 
m_decoder->num_of_outputs(); + } + + std::vector outputs() const { + return m_decoder->outputs(); + } + + std::shared_ptr mark_node(std::shared_ptr ov_node) const { + return m_decoder->mark_node(ov_node); + } + + void mark_nodes(std::vector> ov_nodes) const { + return m_decoder->mark_nodes(ov_nodes); + } + + Output mark_output(Output ov_output) const { + return m_decoder->mark_node(ov_output.get_node_shared_ptr()); + } + + Any get_attribute_as_any(const std::string&) const override { + throw std::runtime_error( + "There is no any named attributes in PyTorch node, query by attribute name is not implemented"); + } + + void mutate_input(size_t index, Output ov_output) { + FRONT_END_GENERAL_CHECK(!m_decoder->input_is_none(index), "Input is none with index: ", index); + auto input = m_decoder->input(index); + FRONT_END_GENERAL_CHECK(m_tensor_map->count(input), "No tensor corresponding input: ", input, " exist."); + m_tensor_map->at(input).get_tensor().set_names({std::to_string(input) + "_"}); + // TODO: find out why this doesn't work + ov_output.get_tensor().add_names({std::to_string(input)}); + (*m_tensor_map)[input] = ov_output; + m_mutated_tensors.insert(input); + } + + std::set get_mutated_tensors() const { + return m_mutated_tensors; + } + + std::shared_ptr get_decoder() const { + return m_decoder; + } + + void add_tensor_to_context(size_t index, Output ov_output) { + if (m_tensor_map->count(index)) { + OPENVINO_DEBUG << "[ WARNING ] Current context has tensor. 
Rewriting.\n"; + } + ov_output.get_tensor().add_names({std::to_string(index)}); + (*m_tensor_map)[index] = ov_output; + } + + Output get_tensor_from_model(size_t index) const { + if (m_tensor_map->find(index) != m_tensor_map->end()) { + return m_tensor_map->at(index); + } else { + return Output(); + } + } + + Output get_tensor_from_model_or_create_input(size_t index); + Output get_input_from_visible_context(size_t index) const; + std::shared_ptr convert_subgraph(size_t index); + +private: + std::shared_ptr m_decoder; + std::set m_mutated_tensors; + TensorMap* m_tensor_map; + const TensorMap& m_ext_tensor_map; + ParameterVector* m_external_parameters; +}; + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/include/openvino/frontend/pytorch/visibility.hpp b/src/frontends/pytorch/include/openvino/frontend/pytorch/visibility.hpp new file mode 100644 index 00000000000..afcf51969b2 --- /dev/null +++ b/src/frontends/pytorch/include/openvino/frontend/pytorch/visibility.hpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/visibility.hpp" + +#ifdef OPENVINO_STATIC_LIBRARY +# define PYTORCH_API +# define PYTORCH_C_API +#else +# ifdef openvino_pytorch_frontend_EXPORTS +# define PYTORCH_API OPENVINO_CORE_EXPORTS +# define PYTORCH_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS +# else +# define PYTORCH_API OPENVINO_CORE_IMPORTS +# define PYTORCH_C_API OPENVINO_EXTERN_C OPENVINO_CORE_IMPORTS +# endif // openvino_pytorch_frontend_EXPORTS +#endif // OPENVINO_STATIC_LIBRARY diff --git a/src/frontends/pytorch/src/CMakeLists.txt b/src/frontends/pytorch/src/CMakeLists.txt new file mode 100644 index 00000000000..c0f432cf94a --- /dev/null +++ b/src/frontends/pytorch/src/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +ov_add_frontend(NAME pytorch + 
LINKABLE_FRONTEND + SHUTDOWN_PROTOBUF + FILEDESCRIPTION "FrontEnd to load and convert TorchScript models from PyTorch" + LINK_LIBRARIES openvino::util openvino::runtime::dev) \ No newline at end of file diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp new file mode 100644 index 00000000000..45e82d9ce74 --- /dev/null +++ b/src/frontends/pytorch/src/frontend.cpp @@ -0,0 +1,135 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/frontend.hpp" + +#include "input_model.hpp" +#include "openvino/op/util/multi_subgraph_base.hpp" +#include "openvino/pass/constant_folding.hpp" +#include "openvino/util/log.hpp" +#include "pt_framework_node.hpp" +#include "transformations/control_flow/unroll_if.hpp" +#include "transforms.hpp" +#include "transforms/append_list_unpack_replacer.hpp" +#include "transforms/aten_cat_replacer.hpp" +#include "transforms/aten_getitem_replacer.hpp" +#include "transforms/max_prim_list_construct_replacer.hpp" +#include "transforms/prim_list_unpack_replacer.hpp" +#include "transforms/prim_tuple_construct_replacer.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +namespace { +std::set get_unconverted_types_from_model(const std::shared_ptr& model) { + std::set unconverted_ops_types; + for (const auto& node : model->get_ordered_ops()) { + if (const auto& fw_node = ov::as_type_ptr(node)) { + auto op_type = fw_node->get_decoder()->get_op_type(); + unconverted_ops_types.insert(op_type); + } + if (const auto& fw_node = ov::as_type_ptr(node)) { + for (int i = 0; i < fw_node->get_internal_subgraphs_size(); i++) { + auto internal_types = get_unconverted_types_from_model(fw_node->get_function(i)); + unconverted_ops_types.insert(internal_types.begin(), internal_types.end()); + } + } + } + return unconverted_ops_types; +} +} // namespace + +std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const { + auto 
converted_model = convert_partially(model); + normalize(converted_model); + std::set unconverted_ops_types = get_unconverted_types_from_model(converted_model); + std::stringstream ops_str; + for (auto&& op_type : unconverted_ops_types) { + ops_str << op_type << '\n'; + } + FRONT_END_OP_CONVERSION_CHECK(unconverted_ops_types.size() == 0, + "Model wasn't fully converted. Unconverted operation types:\n" + ops_str.str()); + return converted_model; +} + +void FrontEnd::convert(const std::shared_ptr& partiallyConverted) const { + FRONT_END_NOT_IMPLEMENTED(convert); +} + +std::shared_ptr FrontEnd::convert_partially(const ov::frontend::InputModel::Ptr& model) const { + try { + auto pytorch_model = std::dynamic_pointer_cast(model); + auto model = convert_pytorch_model(pytorch_model->m_model); + + return model; + } catch (const std::runtime_error& e) { + std::cerr << "[ ERROR ] Unexpected error while converting pytorch model: " << e.what() << '\n'; + std::cerr << "Rethrowing. Misleading error message from pybind11 may come next. TODO."; + throw; + } +} + +std::shared_ptr FrontEnd::decode(const InputModel::Ptr& model) const { + FRONT_END_NOT_IMPLEMENTED(decode); +} + +void FrontEnd::normalize(const std::shared_ptr& model) const { + ov::pass::Manager manager; + + manager.register_pass(); + manager.register_pass(); + // Have to run UnrollIf second time, because conditions are defined outside of nested If (ticket 98155) + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + + manager.run_passes(model); + + apply_pytorch_conversion_transforms(model); + + // Usually if nn.Module.forward is given as a source model for conversion, there is the first Parameter + // that represents original `self` argument in forward(self, ...). 
`self` shouldn't play any role in model + // inference if model is completelly frozed and all methods are inlined. So we check if it doesn't have any + // consumers in the finally converted model and remove this parameter. This parameter should have index 0. + if (model->get_parameters().size() > 0) { + auto self = model->get_parameters()[0]; + if (self->output(0).get_target_inputs().empty()) { + // There is no consumers: safe to remove + OPENVINO_DEBUG << "[ WARNING ] Removing parameter[0] in converted Pytorch model, because it is never used " + "and treated as `self`\n"; + model->remove_parameter(self); + } else { + OPENVINO_DEBUG << "[ WARNING ] Couldn't remove parameter[0] in converted PyTorch model\n"; + } + } +} + +void FrontEnd::add_extension(const std::shared_ptr& extension) { + FRONT_END_NOT_IMPLEMENTED(add_extension); +} + +bool FrontEnd::supported_impl(const std::vector& variants) const { + return false; +} + +ov::frontend::InputModel::Ptr FrontEnd::load_impl(const std::vector& variants) const { + FRONT_END_GENERAL_CHECK(variants.size() == 1, + "PyTorch Frontend supports exactly one parameter in model representation, got ", + std::to_string(variants.size()), + " instead."); + auto decoder = variants[0].as>(); + auto tdecoder = std::dynamic_pointer_cast(decoder); + FRONT_END_GENERAL_CHECK(tdecoder, "Couldn't cast ov::Any to TorchDecoder"); + return std::make_shared(tdecoder); +} + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/input_model.hpp b/src/frontends/pytorch/src/input_model.hpp new file mode 100644 index 00000000000..f2574e4eceb --- /dev/null +++ b/src/frontends/pytorch/src/input_model.hpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/pytorch/decoder.hpp" +#include "openvino/frontend/pytorch/frontend.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +class 
InputModel : public ov::frontend::InputModel { + friend class FrontEnd; + std::shared_ptr m_model; + +public: + explicit InputModel(std::shared_ptr model) : m_model(model) {} + // TODO: pass telemetry extension to this ctor +}; + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/node_context.cpp b/src/frontends/pytorch/src/node_context.cpp new file mode 100644 index 00000000000..7083db438ec --- /dev/null +++ b/src/frontends/pytorch/src/node_context.cpp @@ -0,0 +1,136 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" + +#include "openvino/frontend/exception.hpp" +#include "openvino/frontend/pytorch/decoder.hpp" +#include "openvino/opsets/opset10.hpp" +#include "openvino/util/log.hpp" +#include "pt_framework_node.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +Output NodeContext::get_tensor_from_model_or_create_input(size_t index) { + if (m_tensor_map->find(index) != m_tensor_map->end()) { + return m_tensor_map->at(index); + } else { + // nested subgraphs case + auto parameter = std::make_shared(element::dynamic, PartialShape::dynamic()); + parameter->get_output_tensor(0).add_names({std::to_string(index)}); + (*m_tensor_map)[index] = parameter; + m_external_parameters->push_back(parameter); + OPENVINO_DEBUG << "Nested case, created: " << parameter << '\n'; + return parameter; + } +} + +Output NodeContext::get_input_from_visible_context(size_t index) const { + FRONT_END_GENERAL_CHECK(index < get_input_size(), "Index is lower then number of inputs."); + auto input_tensor = get_input(static_cast(index)); + auto input_node = input_tensor.get_node_shared_ptr(); + if (std::dynamic_pointer_cast(input_node)) { + // We need to look into external context for inputs that would be feed into this parameter + auto name = input_node->get_output_tensor(0).get_any_name(); + size_t tensor_idx = 
(size_t)std::stoll(name); + if (m_ext_tensor_map.count(tensor_idx)) { + input_tensor = m_ext_tensor_map.at(tensor_idx); + } + } + return input_tensor; +} + +std::shared_ptr NodeContext::convert_subgraph(size_t index) { + auto subgraph_decoder = m_decoder->get_subgraph_decoder(index); + + // Extend external context with internal tensors except Parameter nodes, because internal Parameters are created to + // link internal context with external + TensorMap ext_map(m_ext_tensor_map); + // map::insert does not update elements if their key is already in map; so if we have real tensors in outter scope + // we will not add Parameters we creeated in inner scope. + ext_map.insert(m_tensor_map->begin(), m_tensor_map->end()); + + auto model = convert_pytorch_model(subgraph_decoder, ext_map); + // Remove unused parameters, they could be created as inputs to the parts of graph that weren't + // used for generating output. + for (auto i = subgraph_decoder->inputs().size(); i < model->get_parameters().size(); i++) { + auto parameter = model->get_parameters()[i]; + if (parameter->output(0).get_target_inputs().empty()) { + // There is no consumers: safe to remove + OPENVINO_DEBUG << "Removing parameter " << parameter + << " in converted Pytorch model, because it is never used\n"; + model->remove_parameter(parameter); + } + } + return model; +} + +namespace { +std::shared_ptr get_constant_at_input(const NodeContext& ctx, size_t index) { + FRONT_END_GENERAL_CHECK(!ctx.input_is_none(index), "Input with index: ", index, " is none."); + auto input_node = ctx.get_input_from_visible_context(index).get_node_shared_ptr(); + auto input = std::dynamic_pointer_cast(input_node); + FRONT_END_GENERAL_CHECK(input, "Input with index ", index, " cannot be interpreted as Constant: ", input_node); + return input; +} +} // namespace + +template <> +std::vector NodeContext::const_input>(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector(); +} + +template <> +ngraph::Strides 
NodeContext::const_input(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector(); +} + +template <> +ngraph::CoordinateDiff NodeContext::const_input(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector(); +} + +template <> +ngraph::Shape NodeContext::const_input(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector(); +} + +template <> +int64_t NodeContext::const_input(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector()[0]; +} + +template <> +bool NodeContext::const_input(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector()[0]; +} + +template <> +double NodeContext::const_input(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector()[0]; +} + +template <> +float NodeContext::const_input(size_t index) const { + return get_constant_at_input(*this, index)->cast_vector()[0]; +} + +template <> +std::string NodeContext::const_input(size_t index) const { + FRONT_END_GENERAL_CHECK(!input_is_none(index), "Input with index: ", index, " is none."); + auto input_node = get_input_from_visible_context(index).get_node_shared_ptr(); + auto input = std::dynamic_pointer_cast(input_node); + FRONT_END_GENERAL_CHECK(input, + "Input node with index ", + index, + " cannot be interpreted as FrameworkNode with string constant: ", + input_node); + return input->get_decoder()->as_string(); +} + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp b/src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp new file mode 100644 index 00000000000..45d87537aa0 --- /dev/null +++ b/src/frontends/pytorch/src/op/adaptive_avg_pool3d.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + 
+namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_adaptive_avg_pool3d(NodeContext& context) { + auto const_tile_params = context.mark_node(opset10::Constant::create(element::i32, Shape{5}, {1, 1, 1, 1, 1})); + auto const_0 = context.mark_node(opset10::Constant::create(element::i32, Shape{1}, {0})); + auto const_1 = context.mark_node(opset10::Constant::create(element::i32, Shape{1}, {1})); + auto const_neg_3 = context.mark_node(opset10::Constant::create(element::i32, Shape{1}, {-3})); + + auto input_tensor = context.get_input(0); + auto given_shape = context.get_input(1); + + auto input_shape = context.mark_node(std::make_shared(input_tensor, element::i32)); + auto shape_begin = + context.mark_node(std::make_shared(input_shape, const_0, const_neg_3, const_1, const_0)); + auto output_shape = context.mark_node(std::make_shared(OutputVector{shape_begin, given_shape}, 0)); + + auto tile = context.mark_node(std::make_shared(input_tensor, const_tile_params)); + auto adaptive_avg_pool = context.mark_node(std::make_shared(tile, given_shape)); + auto reshape = context.mark_node(std::make_shared(adaptive_avg_pool, output_shape, false)); + + return {reshape}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp b/src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp new file mode 100644 index 00000000000..906822c2866 --- /dev/null +++ b/src/frontends/pytorch/src/op/adaptive_max_pool2d.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_adaptive_max_pool2d(NodeContext& context) { + auto x = context.get_input(0); + auto y 
= context.get_input(1); + auto adaptive_max_pool = context.mark_node(std::make_shared(x, y, ov::element::i32)); + return {adaptive_max_pool->output(0), adaptive_max_pool->output(1)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/add.cpp b/src/frontends/pytorch/src/op/add.cpp new file mode 100644 index 00000000000..cf58c3e61d6 --- /dev/null +++ b/src/frontends/pytorch/src/op/add.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_add(NodeContext& context) { + auto rhs = context.get_input(1); + if (!context.input_is_none(2)) { + auto converted_alpha = std::make_shared(context.get_input(2), rhs); + rhs = std::make_shared(converted_alpha, rhs); + } + return {context.mark_node(std::make_shared(context.get_input(0), rhs))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/addcmul.cpp b/src/frontends/pytorch/src/op/addcmul.cpp new file mode 100644 index 00000000000..b94e67b7b38 --- /dev/null +++ b/src/frontends/pytorch/src/op/addcmul.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_addcmul(NodeContext& context) { + const auto eltwise_mult = std::make_shared(context.get_input(1), context.get_input(2)); + const auto value = context.get_input(3); + const auto converted_value = 
std::make_shared(value, context.get_input(1)); + const auto scalar_mult = std::make_shared(eltwise_mult, converted_value); + context.mark_nodes({eltwise_mult, converted_value, scalar_mult}); + return {context.mark_node(std::make_shared(context.get_input(0), scalar_mult))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/addmm.cpp b/src/frontends/pytorch/src/op/addmm.cpp new file mode 100644 index 00000000000..eb9652fe093 --- /dev/null +++ b/src/frontends/pytorch/src/op/addmm.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_addmm(NodeContext& context) { + auto input = context.get_input(0); + auto m1 = context.get_input(1); + auto m2 = context.get_input(2); + auto beta = context.get_input(3); + auto alpha = context.get_input(4); + auto beta_converted = context.mark_node(std::make_shared(beta, input)); + auto mm = context.mark_node(std::make_shared(m1, m2)); + auto alpha_converted = context.mark_node(std::make_shared(alpha, mm)); + auto input_beta = context.mark_node(std::make_shared(input, beta_converted)); + auto mm_alpha = context.mark_node(std::make_shared(mm, alpha_converted)); + return {context.mark_node(std::make_shared(input_beta, mm_alpha))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/arange.cpp b/src/frontends/pytorch/src/op/arange.cpp new file mode 100644 index 00000000000..af4547c2e1f --- /dev/null +++ b/src/frontends/pytorch/src/op/arange.cpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include 
"openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_arange(NodeContext& context) { + auto zero = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {0})); + auto one = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {1})); + auto dtype = element::f32; + bool dtype_applied = false; + auto num_inputs = context.get_input_size(); + ov::Output end; + ov::Output out_tensor; + ov::Output start = zero; + ov::Output step = one; + + // aten::arange(Scalar end, tensor out) + if (num_inputs == 2) { + end = context.get_input(0); + out_tensor = context.input_is_none(1) ? end : context.get_input(1); + } + // aten::arange(Scalar start, Scalar end, Scalar step, Tensor out) + if (num_inputs == 4) { + start = context.get_input(0); + end = context.get_input(1); + step = context.get_input(2); + out_tensor = context.input_is_none(3) ? 
end : context.get_input(3); + } + // aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + if (num_inputs == 5) { + end = context.get_input(0); + out_tensor = end; + if (!context.input_is_none(1)) { + dtype = convert_dtype(context.const_input(1)); + dtype_applied = true; + } + } + // aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory) + if (num_inputs == 6) { + start = context.get_input(0); + end = context.get_input(1); + out_tensor = end; + if (!context.input_is_none(2)) { + dtype = convert_dtype(context.const_input(2)); + dtype_applied = true; + } + } + // aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory) + if (num_inputs == 7) { + start = context.get_input(0); + end = context.get_input(1); + step = context.get_input(2); + out_tensor = end; + if (!context.input_is_none(3)) { + dtype = convert_dtype(context.const_input(3)); + dtype_applied = true; + } + } + auto r_end = context.mark_node(std::make_shared(end, dtype)); + auto r_start = context.mark_node(std::make_shared(start, dtype)); + auto r_step = context.mark_node(std::make_shared(step, dtype)); + auto range = context.mark_node(std::make_shared(r_start, r_end, r_step, dtype)); + if (!dtype_applied) { + range = context.mark_node(std::make_shared(range, out_tensor)); + } + return {range}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/as_tensor.cpp b/src/frontends/pytorch/src/op/as_tensor.cpp new file mode 100644 index 00000000000..fb8b0d7b2f2 --- /dev/null +++ b/src/frontends/pytorch/src/op/as_tensor.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "pt_framework_node.hpp" +#include "utils.hpp" + +namespace ov { 
+namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_as_tensor(NodeContext& context) { + auto dtype = element::f32; + Output cast; + if (!context.input_is_none(1)) { + auto dtype_ext_node = context.get_input_from_visible_context(1).get_node_shared_ptr(); + auto dtype_fw_node = std::dynamic_pointer_cast(dtype_ext_node); + if (dtype_fw_node && dtype_fw_node->get_op_type() == "prim::dtype") { + auto type_input = dtype_fw_node->input_value(0); + return {context.mark_node(std::make_shared(context.get_input(0), type_input))}; + } + if (auto dtype_const = std::dynamic_pointer_cast(dtype_ext_node)) { + auto pt_type = dtype_const->cast_vector()[0]; + dtype = convert_dtype(pt_type); + } + } + cast = context.mark_node(std::make_shared(context.get_input(0), dtype)); + + // Input with index 2 is device, we skip this input + return {cast}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/avg_poolnd.cpp b/src/frontends/pytorch/src/op/avg_poolnd.cpp new file mode 100644 index 00000000000..29cc471eeb1 --- /dev/null +++ b/src/frontends/pytorch/src/op/avg_poolnd.cpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_avg_poolnd(NodeContext& context) { + auto input = context.get_input(0); + auto kernel = context.const_input(1); + auto strides = context.const_input(2); + auto pads = context.const_input(3); // pytorch supports only symmetric padding + auto rounding_type = context.const_input(4) ? 
ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR; + auto count_include_pad = context.const_input(5); + FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(6), + "Translation for aten::avg_pool2d do not support divisor_override input."); + // Although ov::AvgPool provides exclude_pad=false, + // The corner case of Average Pooling with ceil_mode on + // PyTorch allows sliding window go off bound, which leads to this accommodation. + // More detail on https://github.com/pytorch/pytorch/issues/57178 + if (count_include_pad) { + auto zero = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {0})); + auto zero_i32 = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {0})); + auto shape = context.mark_node(std::make_shared(input, element::i32)); + auto rank = context.mark_node(std::make_shared(shape, element::i32)); + auto pad_values = context.get_input(3); + auto pads_len = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {pads.size()})); + auto pads_diff = context.mark_node(std::make_shared(rank, pads_len)); + auto pads_remaining = context.mark_node(std::make_shared(zero_i32, pads_diff)); + auto padding = context.mark_node( + std::make_shared(NodeVector{pads_remaining, pad_values.get_node_shared_ptr()}, 0)); + input = + context.mark_node(std::make_shared(input, padding, padding, zero, ov::op::PadMode::CONSTANT)); + pads = Shape(pads.size(), 0); + } + + return {context.mark_node( + std::make_shared(input, strides, pads, pads, kernel, !count_include_pad, rounding_type))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/batch_norm.cpp b/src/frontends/pytorch/src/op/batch_norm.cpp new file mode 100644 index 00000000000..5ac034d4947 --- /dev/null +++ b/src/frontends/pytorch/src/op/batch_norm.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + 
+#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +namespace { +Output broadcast_const_to_channel_dim(NodeContext& context, Output input, Output value) { + auto input_shape = context.mark_node(std::make_shared(input)); + auto zero_i = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + auto one_i = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {1})); + auto channel_dim = context.mark_node(std::make_shared(input_shape, one_i, zero_i)); + auto channel_dim_exp = context.mark_node(std::make_shared(channel_dim, zero_i)); + return context.mark_node(std::make_shared(value, channel_dim_exp)); +} +} // namespace + +OutputVector translate_batch_norm(NodeContext& context) { + // Schema: aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, + // bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor + auto input = context.get_input(0); + Output weight; + Output bias; + if (!context.input_is_none(1)) { + weight = context.get_input(1); + } else { + auto one_f = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {1})); + weight = broadcast_const_to_channel_dim(context, input, one_f); + } + if (!context.input_is_none(2)) { + bias = context.get_input(2); + } else { + auto zero_f = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {0})); + bias = broadcast_const_to_channel_dim(context, input, zero_f); + } + // index 3 running_mean and index 4 running_var can be none for training case only, check that not training before + auto training = context.const_input(5); + FRONT_END_OP_CONVERSION_CHECK(!training, "Translation for aten::batch_norm do not support training mode."); + auto running_mean = context.get_input(3); + auto running_var = context.get_input(4); + // Index with index 6 is momentum, it is 
used only in training mode + auto epsilon = context.const_input(7); + return {context.mark_node( + std::make_shared(input, weight, bias, running_mean, running_var, epsilon))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/clamp.cpp b/src/frontends/pytorch/src/op/clamp.cpp new file mode 100644 index 00000000000..090b24bbfc0 --- /dev/null +++ b/src/frontends/pytorch/src/op/clamp.cpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_clamp(NodeContext& context) { + auto x = context.get_input(0); + if (!context.input_is_none(1)) { + auto min_clip = context.get_input(1); + min_clip = context.mark_node(std::make_shared(min_clip, x)); + x = context.mark_node(std::make_shared(x, min_clip)); + } + if (!context.input_is_none(2)) { + auto max_clip = context.get_input(2); + max_clip = context.mark_node(std::make_shared(max_clip, x)); + x = context.mark_node(std::make_shared(x, max_clip)); + } + return {x}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/constant.cpp b/src/frontends/pytorch/src/op/constant.cpp new file mode 100644 index 00000000000..3dcc2d83d81 --- /dev/null +++ b/src/frontends/pytorch/src/op/constant.cpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_constant(NodeContext& context) { 
+ return context.as_constant(); +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/convnd.cpp b/src/frontends/pytorch/src/op/convnd.cpp new file mode 100644 index 00000000000..b1ef52f3c96 --- /dev/null +++ b/src/frontends/pytorch/src/op/convnd.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_convnd(NodeContext& context) { + auto strides = context.const_input(3); + // In torch pads at beginning are same as at end + auto pads = CoordinateDiff(strides.size(), 0); + auto pad_type = ov::op::PadType::EXPLICIT; + try { + auto pad_mode = context.const_input(4); + pad_type = convert_pad(pad_mode); + } catch (ov::frontend::GeneralFailure) { + pads = context.const_input(4); + } + auto dilations = context.const_input(5); + auto groups = context.const_input(6); + + std::shared_ptr conv; + if (groups == 1) { + conv = std::make_shared(context.get_input(0), + context.get_input(1), + strides, + pads, + pads, + dilations, + pad_type); + } else { + conv = std::make_shared( + context.get_input(0), + reshape_kernel_for_group(context, context.get_input(0), context.get_input(1), groups), + strides, + pads, + pads, + dilations, + pad_type); + } + if (!context.input_is_none(2)) { + auto bias = context.get_input(2); + auto bias_rank = bias.get_partial_shape().rank(); + if (bias_rank == 1) { + bias = reshape_conv_bias(context, bias, conv); + } + conv = context.mark_node(std::make_shared(conv, bias)); + } + + return {conv}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/convolution.cpp 
b/src/frontends/pytorch/src/op/convolution.cpp new file mode 100644 index 00000000000..9edd5ad6ded --- /dev/null +++ b/src/frontends/pytorch/src/op/convolution.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_convolution(NodeContext& context) { + // Schema: aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] + // dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool + // cudnn_enabled, bool allow_tf32) -> Tensor + + auto strides = context.const_input(3); + auto pads = context.const_input(4); + auto dilations = context.const_input(5); + bool transposed = context.const_input(6); + auto output_padding = context.const_input(7); + auto groups = context.const_input(8); + + std::shared_ptr conv; + if (groups == 1) { + if (!transposed) { + conv = context.mark_node(std::make_shared(context.get_input(0), + context.get_input(1), + strides, + pads, + pads, + dilations)); + } else { + conv = context.mark_node(std::make_shared(context.get_input(0), + context.get_input(1), + strides, + pads, + pads, + dilations, + ov::op::PadType::EXPLICIT, + output_padding)); + } + } else { + if (!transposed) { + conv = context.mark_node(std::make_shared( + context.get_input(0), + context.mark_output( + reshape_kernel_for_group(context, context.get_input(0), context.get_input(1), groups)), + strides, + pads, + pads, + dilations)); + } else { + conv = context.mark_node(std::make_shared( + context.get_input(0), + context.mark_output( + reshape_kernel_for_group(context, context.get_input(0), context.get_input(1), groups)), + strides, + pads, + pads, + dilations, + ov::op::PadType::EXPLICIT, + output_padding)); + } + } + if 
(!context.input_is_none(2)) { + auto bias = context.get_input(2); + auto bias_rank = bias.get_partial_shape().rank(); + if (bias_rank == 1) { + bias = reshape_conv_bias(context, bias, conv); + } + + conv = context.mark_node(std::make_shared(conv, bias)); + } + + return {context.mark_output(conv)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/convolution_mode.cpp b/src/frontends/pytorch/src/op/convolution_mode.cpp new file mode 100644 index 00000000000..c670782a2cc --- /dev/null +++ b/src/frontends/pytorch/src/op/convolution_mode.cpp @@ -0,0 +1,60 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_convolution_mode(NodeContext& context) { + // Schema: aten::_convolution_mode(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, str padding, int[] + // dilation, int groups) -> Tensor + auto strides = context.const_input(3); + auto pad_mode = context.const_input(4); + auto dilations = context.const_input(5); + auto groups = context.const_input(6); + auto pad_const = CoordinateDiff(strides.size(), 0); + + auto auto_pad_mode = convert_pad(pad_mode); + + std::shared_ptr conv; + if (groups == 1) { + conv = context.mark_node(std::make_shared(context.get_input(0), + context.get_input(1), + strides, + pad_const, + pad_const, + dilations, + auto_pad_mode)); + } else { + conv = context.mark_node(std::make_shared( + context.get_input(0), + context.mark_output(reshape_kernel_for_group(context, context.get_input(0), context.get_input(1), groups)), + strides, + pad_const, + pad_const, + dilations, + auto_pad_mode)); + } + + if (!context.input_is_none(2)) { + auto bias = context.get_input(2); + auto bias_rank = bias.get_partial_shape().rank(); + if (bias_rank == 1) { + bias = reshape_conv_bias(context, bias, conv); + } + + conv = context.mark_node(std::make_shared(conv, bias)); + } + return {context.mark_output(conv)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/dim.cpp b/src/frontends/pytorch/src/op/dim.cpp new file mode 100644 index 00000000000..599661d2486 --- /dev/null +++ b/src/frontends/pytorch/src/op/dim.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_dim(NodeContext& context) { + auto shape = std::make_shared(context.get_input(0), element::i32); + auto rank = std::make_shared(shape, element::i32); + auto squeeze = std::make_shared(rank); + context.mark_nodes({shape, rank, 
squeeze}); + return squeeze->outputs(); +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/div.cpp b/src/frontends/pytorch/src/op/div.cpp new file mode 100644 index 00000000000..0c5b3943511 --- /dev/null +++ b/src/frontends/pytorch/src/op/div.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_div(NodeContext& context) { + auto x = context.get_input(0); + auto y = context.get_input(1); + auto res = context.mark_node(std::make_shared(x, y, true)); + if (!context.input_is_none(2)) { + auto rounding_mode = context.const_input(2); + if (rounding_mode == "floor") { + res = context.mark_node(std::make_shared(res)); + } else if (rounding_mode == "trunc") { + const auto convert = context.mark_node(std::make_shared(res, element::i64)); + res = context.mark_node(std::make_shared(convert, x)); + } else { + FRONT_END_OP_CONVERSION_CHECK(false, + "Openvino Pytorch Frontend doesn't support rounding mode ", + rounding_mode, + " for aten::div"); + } + } + return {res}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/elu.cpp b/src/frontends/pytorch/src/op/elu.cpp new file mode 100644 index 00000000000..0ad621ba856 --- /dev/null +++ b/src/frontends/pytorch/src/op/elu.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector 
translate_elu(NodeContext& context) { + auto x = context.get_input(0); + auto alpha = context.const_input(1); + return {context.mark_node(std::make_shared(x, alpha))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/embedding.cpp b/src/frontends/pytorch/src/op/embedding.cpp new file mode 100644 index 00000000000..f7b5cc33dd1 --- /dev/null +++ b/src/frontends/pytorch/src/op/embedding.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_embedding(NodeContext& context) { + auto data = context.get_input(0); + auto indices = context.get_input(1); + // TODO: find out the meaning of input idx 2 + FRONT_END_OP_CONVERSION_CHECK( + context.const_input(3) == false && context.const_input(4) == false, + "Only False is supported on inputs with indexes 3 and 4 for aten::embedding translation"); + auto axis_0 = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + return {context.mark_node(std::make_shared(data, indices, axis_0))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/expand.cpp b/src/frontends/pytorch/src/op/expand.cpp new file mode 100644 index 00000000000..9f1f8599b56 --- /dev/null +++ b/src/frontends/pytorch/src/op/expand.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +namespace { 
+OutputVector base_expand(NodeContext& context, ov::Output x, ov::Output sizes) { + auto one = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {1})); + auto sizes_shape = context.mark_node(std::make_shared(sizes, element::i32)); + auto neg_one = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {-1})); + auto neg_ones = context.mark_node(std::make_shared(neg_one, sizes_shape)); + auto ones = context.mark_node(std::make_shared(one, sizes_shape)); + auto neg_sizes = context.mark_node(std::make_shared(sizes, neg_ones)); + auto shape = context.mark_node(std::make_shared(neg_sizes, ones, sizes)); + return {std::make_shared(x, shape, ov::op::BroadcastType::BIDIRECTIONAL)}; +}; +} // namespace + +OutputVector translate_expand(NodeContext& context) { + auto x = context.get_input(0); + auto sizes = context.get_input(1); + return base_expand(context, x, sizes); +}; + +OutputVector translate_expand_as(NodeContext& context) { + auto x = context.get_input(0); + auto y = context.get_input(1); + auto sizes = context.mark_node(std::make_shared(y, element::i32)); + return base_expand(context, x, sizes); +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/flatten.cpp b/src/frontends/pytorch/src/op/flatten.cpp new file mode 100644 index 00000000000..f8b4ee1d20b --- /dev/null +++ b/src/frontends/pytorch/src/op/flatten.cpp @@ -0,0 +1,64 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_flatten(NodeContext& context) { + auto start_dim = context.const_input(1); + auto end_dim = context.const_input(2); + + auto shape = std::make_shared(context.get_input(0), element::i32); + auto rank_ = 
std::make_shared(shape, element::i32); + auto rank = std::make_shared(rank_); + // Use opset::If for dim normalization + auto start_dim_node = context.get_input(1); + auto end_dim_node = context.get_input(2); + if (start_dim < 0) { + start_dim_node = std::make_shared(rank, start_dim_node); + } + if (end_dim < 0) { + end_dim_node = std::make_shared(rank, end_dim_node); + } + auto delta = std::make_shared(end_dim_node, start_dim_node); + auto rank_delta = std::make_shared(rank, delta); + auto true_const0 = opset10::Constant::create(element::boolean, Shape{}, {1}); + auto zeros_loop = std::make_shared(rank_delta, true_const0); + auto true_const = opset10::Constant::create(element::boolean, Shape{}, {1}); + auto result_true = std::make_shared(true_const); + auto zero_const = opset10::Constant::create(element::i32, Shape{1}, {0}); + auto result_zero = std::make_shared(zero_const); + auto f = std::make_shared(ResultVector{result_true, result_zero}, ParameterVector{}); + zeros_loop->set_function(f); + zeros_loop->set_special_body_ports({-1, 0}); + auto zeros = zeros_loop->get_concatenated_slices(result_zero, 0, 1, 1, -1, 0); + auto neg_1_const = opset10::Constant::create(element::i32, Shape{1}, {-1}); + auto axis_0 = opset10::Constant::create(element::i32, Shape{1}, {0}); + auto start_dim_node_ = std::make_shared(start_dim_node, axis_0); + auto new_shape = std::make_shared(zeros, start_dim_node_, neg_1_const, axis_0); + + context.mark_nodes({shape, + rank_, + rank, + delta, + rank_delta, + true_const0, + zeros_loop, + neg_1_const, + axis_0, + start_dim_node_, + new_shape}); + + return {context.mark_node(std::make_shared(context.get_input(0), new_shape, true))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/floor_divide.cpp b/src/frontends/pytorch/src/op/floor_divide.cpp new file mode 100644 index 00000000000..8ed455e9f36 --- /dev/null +++ 
b/src/frontends/pytorch/src/op/floor_divide.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_floor_divide(NodeContext& context) { + auto x = context.get_input(0); + auto y = context.get_input(1); + auto div = context.mark_node(std::make_shared(x, y, true)); + return {context.mark_node(std::make_shared(div))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/floordiv.cpp b/src/frontends/pytorch/src/op/floordiv.cpp new file mode 100644 index 00000000000..f878efe1736 --- /dev/null +++ b/src/frontends/pytorch/src/op/floordiv.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_floordiv(NodeContext& context) { + auto x = context.get_input(0); + auto y = context.get_input(1); + return {context.mark_node(std::make_shared(x, y, true))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/full.cpp b/src/frontends/pytorch/src/op/full.cpp new file mode 100644 index 00000000000..e45b3a611a5 --- /dev/null +++ b/src/frontends/pytorch/src/op/full.cpp @@ -0,0 +1,154 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace 
frontend { +namespace pytorch { +namespace op { + +namespace { +ov::Output base_translate_full(NodeContext& context, ov::Output sizes, ov::Output value) { + return context.mark_node(std::make_shared(value, sizes)); +} + +ov::Output base_translate_full_with_convert(NodeContext& context, + ov::Output sizes, + ov::Output value, + size_t dtype_id) { + auto filled_tensor = base_translate_full(context, sizes, value); + if (!context.input_is_none(dtype_id)) { + auto dtype = convert_dtype(context.const_input(dtype_id)); + filled_tensor = context.mark_node(std::make_shared(filled_tensor, dtype)); + } + return filled_tensor; +} + +ov::Output base_translate_full_with_convertlike(NodeContext& context, + ov::Output sizes, + ov::Output value, + ov::Output out) { + auto filled_tensor = base_translate_full(context, sizes, value); + return context.mark_node(std::make_shared(filled_tensor, out)); +} +} // namespace + +OutputVector translate_full(NodeContext& context) { + auto sizes = context.get_input(0); + auto value = context.get_input(1); + auto num_inputs = context.get_input_size(); + if (num_inputs < 6) { + int out_id = num_inputs == 3 ? 2 : 3; + if (!context.input_is_none(static_cast(out_id))) { + auto out = context.get_input(out_id); + return {base_translate_full_with_convertlike(context, sizes, value, out)}; + } + return {base_translate_full(context, sizes, value)}; + } + size_t dtype_id = num_inputs == 6 ? 2 : 3; + return {base_translate_full_with_convert(context, sizes, value, dtype_id)}; +}; + +OutputVector translate_full_like(NodeContext& context) { + auto input = context.get_input(0); + auto value = context.get_input(1); + auto sizes = context.mark_node(std::make_shared(input)); + if (context.get_input_size() == 7) { + return {base_translate_full_with_convert(context, sizes, value, 2)}; + } + auto out = context.input_is_none(3) ? 
input : context.get_input(3); + return {base_translate_full_with_convertlike(context, sizes, value, out)}; +}; + +OutputVector translate_new_full(NodeContext& context) { + auto input = context.get_input(0); + auto sizes = context.get_input(1); + auto value = context.get_input(2); + if (context.get_input_size() == 7 && !context.input_is_none(3)) { + return {base_translate_full_with_convert(context, sizes, value, 3)}; + } + return {base_translate_full_with_convertlike(context, sizes, value, input)}; +}; + +OutputVector translate_zeros(NodeContext& context) { + auto sizes = context.get_input(0); + auto value = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {0})); + auto num_inputs = context.get_input_size(); + if (num_inputs < 5) { + int out_id = num_inputs == 2 ? 1 : 2; + if (!context.input_is_none(static_cast(out_id))) { + auto out = context.get_input(out_id); + return {base_translate_full_with_convertlike(context, sizes, value, out)}; + } + return {base_translate_full(context, sizes, value)}; + } + size_t dtype_id = num_inputs == 5 ? 1 : 2; + return {base_translate_full_with_convert(context, sizes, value, dtype_id)}; +}; + +OutputVector translate_zeros_like(NodeContext& context) { + auto input = context.get_input(0); + auto value = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {0})); + auto sizes = context.mark_node(std::make_shared(input)); + if (context.get_input_size() == 6) { + return {base_translate_full_with_convert(context, sizes, value, 1)}; + } + auto out = context.input_is_none(2) ? 
input : context.get_input(2); + return {base_translate_full_with_convertlike(context, sizes, value, out)}; +}; + +OutputVector translate_new_zeros(NodeContext& context) { + auto input = context.get_input(0); + auto sizes = context.get_input(1); + auto value = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {0})); + if (context.get_input_size() == 6 && !context.input_is_none(2)) { + return {base_translate_full_with_convert(context, sizes, value, 2)}; + } + return {base_translate_full_with_convertlike(context, sizes, value, input)}; +}; + +OutputVector translate_ones(NodeContext& context) { + auto sizes = context.get_input(0); + auto value = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {1})); + auto num_inputs = context.get_input_size(); + if (num_inputs < 5) { + int out_id = num_inputs == 2 ? 1 : 2; + if (!context.input_is_none(static_cast(out_id))) { + auto out = context.get_input(out_id); + return {base_translate_full_with_convertlike(context, sizes, value, out)}; + } + return {base_translate_full(context, sizes, value)}; + } + size_t dtype_id = num_inputs == 5 ? 1 : 2; + return {base_translate_full_with_convert(context, sizes, value, dtype_id)}; +}; + +OutputVector translate_ones_like(NodeContext& context) { + auto input = context.get_input(0); + auto value = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {1})); + auto sizes = context.mark_node(std::make_shared(input)); + if (context.get_input_size() == 6) { + return {base_translate_full_with_convert(context, sizes, value, 1)}; + } + auto out = context.input_is_none(2) ? 
input : context.get_input(2); + return {base_translate_full_with_convertlike(context, sizes, value, out)}; +}; + +OutputVector translate_new_ones(NodeContext& context) { + auto input = context.get_input(0); + auto sizes = context.get_input(1); + auto value = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {1})); + if (context.get_input_size() == 6 && !context.input_is_none(2)) { + return {base_translate_full_with_convert(context, sizes, value, 2)}; + } + return {base_translate_full_with_convertlike(context, sizes, value, input)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/gelu.cpp b/src/frontends/pytorch/src/op/gelu.cpp new file mode 100644 index 00000000000..8f84fab7022 --- /dev/null +++ b/src/frontends/pytorch/src/op/gelu.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_gelu(NodeContext& context) { + auto x = context.get_input(0); + auto approximate = context.const_input(1); + // TODO: Add support for "tanh" approximate + FRONT_END_OP_CONVERSION_CHECK(approximate == "none", "Unsupported approximate for Gelu: ", approximate); + return {context.mark_node(std::make_shared(x))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/get_attr.cpp b/src/frontends/pytorch/src/op/get_attr.cpp new file mode 100644 index 00000000000..3575a5210a8 --- /dev/null +++ b/src/frontends/pytorch/src/op/get_attr.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include 
"openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "pt_framework_node.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_get_attr(NodeContext& context) { + auto res = context.get_decoder()->try_decode_get_attr(); + FRONT_END_OP_CONVERSION_CHECK(res.size() > 0, "GetAttr must have at least one output."); + return res; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/group_norm.cpp b/src/frontends/pytorch/src/op/group_norm.cpp new file mode 100644 index 00000000000..aaeb962af05 --- /dev/null +++ b/src/frontends/pytorch/src/op/group_norm.cpp @@ -0,0 +1,49 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_group_norm(NodeContext& context) { + auto data = context.get_input(0); + auto num_groups = context.const_input(1); + // input 2 - weights and input 3 - bias are optional without default value, we handle them later + auto eps = static_cast(context.const_input(4)); + auto input_shape = context.mark_node(std::make_shared(data, element::i64)); + auto scalar_one = context.mark_node(opset10::Constant::create(element::i64, {}, {1})); + auto shape = context.mark_node( + std::make_shared(element::i64, Shape({3}), std::vector{0, num_groups, -1})); + auto reshaped_input = context.mark_node(std::make_shared(data, shape, true)); + auto reduction_axes = + context.mark_node(opset10::Constant::create(element::i64, Shape({1}), std::vector(1, 2))); + auto reshaped_norm = context.mark_node( + std::make_shared(reshaped_input, reduction_axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT)); + auto 
norm = context.mark_node(std::make_shared(reshaped_norm, input_shape, true)); + auto input_rank2d = context.mark_node(std::make_shared(input_shape, element::i64)); + auto input_rank = context.mark_node(std::make_shared(input_rank2d)); + auto skip_last = context.mark_node(std::make_shared(input_rank, scalar_one)); + auto axes = context.mark_node(std::make_shared(scalar_one, skip_last, scalar_one, element::i64)); + if (!context.input_is_none(2)) { + auto weights = context.get_input(2); + weights = context.mark_node(std::make_shared(weights, axes)); + norm = context.mark_node(std::make_shared(norm, weights)); + } + if (!context.input_is_none(3)) { + auto bias = context.get_input(3); + bias = context.mark_node(std::make_shared(bias, axes)); + norm = context.mark_node(std::make_shared(norm, bias)); + } + return {norm}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/hardtanh.cpp b/src/frontends/pytorch/src/op/hardtanh.cpp new file mode 100644 index 00000000000..0cd46948f67 --- /dev/null +++ b/src/frontends/pytorch/src/op/hardtanh.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_hardtanh(NodeContext& context) { + float min = -1; + float max = 1; + if (!context.input_is_none(1)) { + min = context.const_input(1); + } + if (!context.input_is_none(2)) { + max = context.const_input(2); + } + return {context.mark_node(std::make_shared(context.get_input(0), min, max))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/if.cpp b/src/frontends/pytorch/src/op/if.cpp new file 
mode 100644 index 00000000000..ac4970cc75a --- /dev/null +++ b/src/frontends/pytorch/src/op/if.cpp @@ -0,0 +1,152 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "openvino/util/log.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_if(NodeContext& context) { + auto if_node = std::make_shared(context.get_input(0)); + context.mark_node(if_node); + auto decoder = context.get_decoder(); + FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 2, "If must have 2 subgraphs."); + + auto then_decoder = decoder->get_subgraph_decoder(0); + auto then_body = context.convert_subgraph(0); + if_node->set_then_body(then_body); + auto then_inputs = then_decoder->inputs(); + + auto else_decoder = decoder->get_subgraph_decoder(1); + auto else_body = context.convert_subgraph(1); + if_node->set_else_body(else_body); + auto else_inputs = else_decoder->inputs(); + + std::set input_idxs; + input_idxs.insert(then_inputs.begin(), then_inputs.end()); + input_idxs.insert(else_inputs.begin(), else_inputs.end()); + + std::map inputs_map; + std::map outputs_map; + for (const auto& param : then_body->get_parameters()) { + auto name = param->get_output_tensor(0).get_any_name(); + size_t input_idx = (size_t)std::stoll(name); + FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(input_idx) == 0, + "More than one then_body input with same tensor name: ", + input_idx, + "; existing: ", + inputs_map.at(input_idx)[0], + " adding: ", + param); + inputs_map[input_idx] = {param, nullptr}; + } + for (const auto& param : else_body->get_parameters()) { + auto name = param->get_output_tensor(0).get_any_name(); + size_t input_idx = (size_t)std::stoll(name); + if (inputs_map.count(input_idx)) { + inputs_map[input_idx][1] = param; + } else { + inputs_map[input_idx] = {nullptr, param}; 
+ } + } + OutputVector res; + const auto num_outs = context.num_of_outputs(); + const auto then_results = then_body->get_results(); + const auto else_results = else_body->get_results(); + FRONT_END_OP_CONVERSION_CHECK(then_results.size() >= num_outs && else_results.size() >= num_outs, + "Else or then body have less outputs than prim::If requires."); + for (int i = 0; i < num_outs; i++) { + res.push_back(if_node->set_output(then_results[i], else_results[i])); + } + // Each body can have mutated outputs that are not included into pytorch node outputs. + std::map> extra_then_body_results; + std::map> extra_else_body_results; + std::set extra_output_idxs; + for (int i = num_outs; i < then_results.size(); i++) { + const auto result = then_results[i]; + const auto name = result->input(0).get_tensor().get_any_name(); + size_t output_idx = (size_t)std::stoll(name); + FRONT_END_OP_CONVERSION_CHECK(extra_then_body_results.count(output_idx) == 0, + "More than one then_body output with same tensor name: ", + output_idx, + "; existing: ", + extra_then_body_results.at(output_idx), + " adding: ", + result); + extra_then_body_results[output_idx] = result; + extra_output_idxs.insert(output_idx); + } + for (int i = num_outs; i < else_results.size(); i++) { + const auto result = else_results[i]; + const auto name = result->input(0).get_tensor().get_any_name(); + size_t output_idx = (size_t)std::stoll(name); + FRONT_END_OP_CONVERSION_CHECK(extra_else_body_results.count(output_idx) == 0, + "More than one else_body output with same tensor name: ", + output_idx, + "; existing: ", + extra_else_body_results.at(output_idx), + " adding: ", + result); + extra_else_body_results[output_idx] = result; + extra_output_idxs.insert(output_idx); + } + // Each extra output may not have same extra output in the other body, so we need to create Parameter->Result + // pattern in the body. 
+ for (const auto& output_idx : extra_output_idxs) { + if (!extra_then_body_results.count(output_idx)) { + // Need to add Parameter->Result construction in then body + auto new_parameter = std::make_shared(element::dynamic, PartialShape::dynamic()); + new_parameter->get_output_tensor(0).add_names({std::to_string(output_idx)}); + auto new_result = std::make_shared(new_parameter); + then_body->add_parameters({new_parameter}); + then_body->add_results({new_result}); + then_body->validate_nodes_and_infer_types(); + FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in else body"); + inputs_map[output_idx][0] = new_parameter; + extra_then_body_results[output_idx] = new_result; + OPENVINO_DEBUG << "Modified then body: " << if_node << '\n'; + } else if (!extra_else_body_results.count(output_idx)) { + // Need to add Parameter->Result construction in else body + auto new_parameter = std::make_shared(element::dynamic, PartialShape::dynamic()); + new_parameter->get_output_tensor(0).add_names({std::to_string(output_idx)}); + auto new_result = std::make_shared(new_parameter); + else_body->add_parameters({new_parameter}); + else_body->add_results({new_result}); + else_body->validate_nodes_and_infer_types(); + FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in then body"); + inputs_map[output_idx][1] = new_parameter; + extra_else_body_results[output_idx] = new_result; + OPENVINO_DEBUG << "Modified else body: " << if_node << '\n'; + } + } + // Create prim::If inputs and outputs + for (const auto& input : inputs_map) { + if (!input_idxs.count(input.first)) { + auto external_output = context.get_tensor_from_model_or_create_input(input.first); + if_node->set_input(external_output, input.second[0], input.second[1]); + } else { + auto external_output = context.get_tensor_from_model(input.first); + if (external_output.get_node()) { + if_node->set_input(external_output, input.second[0], input.second[1]); + } + } + } + for (const 
auto& output_idx : extra_output_idxs) { + context.add_tensor_to_context( + output_idx, + if_node->set_output(extra_then_body_results.at(output_idx), extra_else_body_results.at(output_idx))); + } + if_node->validate_and_infer_types(); + return res; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/im2col.cpp b/src/frontends/pytorch/src/op/im2col.cpp new file mode 100644 index 00000000000..2b9b00b9ed0 --- /dev/null +++ b/src/frontends/pytorch/src/op/im2col.cpp @@ -0,0 +1,96 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +namespace { +std::shared_ptr get_im2col_indices_along_dim(NodeContext& context, + ov::Output input_d, + int64_t kernel_size_d, + int64_t dilation_d, + int64_t padding_d, + int64_t stride_d) { + auto zero = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + auto minus_one = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {-1})); + auto kernel_size = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {kernel_size_d})); + auto padding_2 = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {padding_d * 2})); + auto stride = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {stride_d})); + auto input_d_squeezed = context.mark_node(std::make_shared(input_d, zero)); + auto blocks_d = context.mark_node(std::make_shared(input_d_squeezed, padding_2)); + auto subtrahend = + context.mark_node(opset10::Constant::create(element::i64, Shape{}, {dilation_d * (kernel_size_d - 1)})); + blocks_d = context.mark_node(std::make_shared(blocks_d, subtrahend)); + auto blocks_d_indices = context.mark_node(std::make_shared(zero, 
blocks_d, stride, element::i64)); + blocks_d_indices = context.mark_node(std::make_shared(blocks_d_indices, zero)); + std::vector rng; + for (int64_t i = 0; i < kernel_size_d * dilation_d; i += dilation_d) { + rng.push_back(i); + } + + auto kernel_grid = context.mark_node(opset10::Constant::create(element::i64, Shape{rng.size()}, rng)); + auto kernel_mask = context.mark_node(std::make_shared(kernel_grid, minus_one)); + return context.mark_node(std::make_shared(blocks_d_indices, kernel_mask)); +} +} // namespace + +OutputVector translate_im2col(NodeContext& context) { + auto input = context.get_input(0); + auto kernel_size = context.const_input>(1); + FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "kernel size should contains 2 elements"); + auto dilation = context.const_input>(2); + FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "dilation should contains 2 elements"); + auto padding = context.const_input>(3); + FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "padding should contains 2 elements"); + auto stride = context.const_input>(4); + FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "stride should contains 2 elements"); + auto zero = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + auto input_shape = context.mark_node(std::make_shared(input)); + auto zero_f = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {0})); + auto minus_one = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {-1})); + auto two = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {2})); + auto four = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {4})); + auto input_shape_split = context.mark_node(std::make_shared(input_shape, zero, 4)); + auto input_b = input_shape_split->output(0); + auto input_c = input_shape_split->output(1); + auto input_h = input_shape_split->output(2); + auto input_w = input_shape_split->output(3); + auto stride_h = stride[0]; + auto 
stride_w = stride[1]; + auto padding_h = padding[0]; + auto padding_w = padding[1]; + auto dilation_h = dilation[0]; + auto dilation_w = dilation[1]; + auto kernel_h = kernel_size[0]; + auto kernel_w = kernel_size[1]; + auto blocks_row_indices = get_im2col_indices_along_dim(context, input_h, kernel_h, dilation_h, padding_h, stride_h); + auto blocks_col_indices = get_im2col_indices_along_dim(context, input_w, kernel_w, dilation_w, padding_w, stride_w); + auto kernel_window = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {kernel_h * kernel_w})); + auto input_c_squeezed = context.mark_node(std::make_shared(input_c, zero)); + auto channel_unfolded = context.mark_node(std::make_shared(input_c_squeezed, kernel_window)); + auto channel_unfolded_unsqueezed = context.mark_node(std::make_shared(channel_unfolded, zero)); + auto output_shape = context.mark_node( + std::make_shared(OutputVector{input_b, channel_unfolded_unsqueezed, minus_one}, 0)); + auto pads = context.mark_node( + opset10::Constant::create(element::i64, Shape{4}, std::vector{0, 0, padding_h, padding_w})); + auto padded_input = + context.mark_node(std::make_shared(input, pads, pads, zero_f, ov::op::PadMode::CONSTANT)); + auto output = context.mark_node(std::make_shared(padded_input, blocks_row_indices, two)); + output = context.mark_node(std::make_shared(output, blocks_col_indices, four)); + auto permutation_dims = + context.mark_node(opset10::Constant::create(element::i64, Shape{6}, std::vector{0, 1, 2, 4, 3, 5})); + output = context.mark_node(std::make_shared(output, permutation_dims)); + return {context.mark_node(std::make_shared(output, output_shape, false))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/int.cpp b/src/frontends/pytorch/src/op/int.cpp new file mode 100644 index 00000000000..f49bc30bf76 --- /dev/null +++ b/src/frontends/pytorch/src/op/int.cpp @@ -0,0 
+1,21 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_int(NodeContext& context) { + return {context.mark_node(std::make_shared(context.get_input(0), element::i64))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/layer_norm.cpp b/src/frontends/pytorch/src/op/layer_norm.cpp new file mode 100644 index 00000000000..16b77450790 --- /dev/null +++ b/src/frontends/pytorch/src/op/layer_norm.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_layer_norm(NodeContext& context) { + auto eps = context.const_input(4); + auto normalized_shape = context.const_input(1); + FRONT_END_OP_CONVERSION_CHECK(normalized_shape.size() == 1, + "Translation for aten::layer_norm supports only single normalized_shape value, " + "which means normalizing over the last dimension."); + // TODO: support any dimention + auto axes = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {-1})); + auto out_node = context.mark_node( + std::make_shared(context.get_input(0), axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT)); + if (!context.input_is_none(2)) { + out_node = context.mark_node(std::make_shared(out_node, context.get_input(2))); + } + if (!context.input_is_none(3)) { + out_node = context.mark_node(std::make_shared(out_node, context.get_input(3))); + } + return {out_node}; +}; + +} // namespace op +} // namespace pytorch +} // 
namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/len.cpp b/src/frontends/pytorch/src/op/len.cpp new file mode 100644 index 00000000000..ce38f93c83d --- /dev/null +++ b/src/frontends/pytorch/src/op/len.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_len(NodeContext& context) { + auto const_0 = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {0})); + auto const_1 = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {1})); + auto input = context.get_input(0); + auto input_shape = context.mark_node(std::make_shared(input, element::i64)); + + auto slice = context.mark_node(std::make_shared(input_shape, const_0, const_1, const_1)); + auto squeeze = std::make_shared(slice, const_0); + return {context.mark_node(squeeze)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/linear.cpp b/src/frontends/pytorch/src/op/linear.cpp new file mode 100644 index 00000000000..956f1d00646 --- /dev/null +++ b/src/frontends/pytorch/src/op/linear.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_linear(NodeContext& context) { + auto x = context.get_input(0); + auto y = context.get_input(1); + auto matmul = std::make_shared(x, y, false, true); + return {context.mark_output(make_optional_bias(matmul, context, 2))}; +}; + +} // 
namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/list_construct.cpp b/src/frontends/pytorch/src/op/list_construct.cpp new file mode 100644 index 00000000000..56aec34f54c --- /dev/null +++ b/src/frontends/pytorch/src/op/list_construct.cpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_list_construct(NodeContext& context) { + // Process the case when prim::ListConstruct has all inputs constant + ov::OutputVector consts; + for (int i = 0; i < context.get_input_size(); i++) { + auto input = context.get_input_from_visible_context(i); + auto c_node = std::dynamic_pointer_cast(input.get_node_shared_ptr()); + FRONT_END_OP_CONVERSION_CHECK(c_node, "Translation for prim::ListConstruct support only constant inputs"); + if (c_node->get_shape().size() == 0) { + c_node = std::make_shared(c_node->get_element_type(), Shape{1}, c_node->get_data_ptr()); + } + consts.push_back(c_node); + } + auto list_construct = std::make_shared(consts, 0); + if (list_construct->has_evaluate()) { + OutputVector replacements(list_construct->get_output_size()); + + if (list_construct->constant_fold(replacements, list_construct->input_values())) { + return replacements; + } + } + return {context.mark_output(list_construct)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/loop.cpp b/src/frontends/pytorch/src/op/loop.cpp new file mode 100644 index 00000000000..aa64a3be0f3 --- /dev/null +++ b/src/frontends/pytorch/src/op/loop.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2023 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_loop(NodeContext& context) { + auto loop = std::make_shared(context.get_input(0), context.get_input(1)); + auto decoder = context.get_decoder(); + FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, "Loop must have 1 subgraph."); + auto subgraph_decoder = decoder->get_subgraph_decoder(0); + auto body = context.convert_subgraph(0); + loop->set_function(body); + opset10::Loop::SpecialBodyPorts spec_ports{0, 0}; + loop->set_special_body_ports(spec_ports); + + auto inputs = subgraph_decoder->inputs(); + std::set input_idxs(inputs.begin(), inputs.end()); + std::map inputs_map; + + auto body_parameters = body->get_parameters(); + // #0 parameter is counter + for (int i = 1; i < body_parameters.size(); i++) { + auto param = body_parameters[i]; + auto name = param->get_output_tensor(0).get_any_name(); + size_t input_idx = (size_t)std::stoll(name); + if (inputs_map.count(input_idx)) { + inputs_map[input_idx] = {param}; + } else { + inputs_map[input_idx].push_back(param); + } + } + for (const auto& input : inputs_map) { + if (!input_idxs.count(input.first)) { + auto external_output = context.get_tensor_from_model_or_create_input(input.first); + loop->set_invariant_inputs(external_output, input.second); + } else { + auto external_output = context.get_tensor_from_model(input.first); + if (external_output.get_node()) { + loop->set_invariant_inputs(external_output, input.second); + } + } + } + // TODO: Connect back edges (merged inputs) + auto body_results = body->get_results(); + FRONT_END_OP_CONVERSION_CHECK(body_results.size() > 0, "At least one output from loop is required - condition."); + std::set output_idxs; + // 0 output is condition, do not need to connect it + for (int i = 1; i < 
body_results.size(); i++) { + auto result = body_results[i]; + auto name = result->input(0).get_tensor().get_any_name(); + size_t out_idx = (size_t)std::stoll(name); + FRONT_END_OP_CONVERSION_CHECK(output_idxs.count(out_idx) == 0, + "More then one body output with same tensor name."); + output_idxs.insert(out_idx); + context.add_tensor_to_context(out_idx, loop->get_iter_value(result, -1)); + } + loop->validate_and_infer_types(); + return {context.mark_node(loop)->outputs()}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/masked_fill.cpp b/src/frontends/pytorch/src/op/masked_fill.cpp new file mode 100644 index 00000000000..5ac8141941e --- /dev/null +++ b/src/frontends/pytorch/src/op/masked_fill.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_masked_fill(NodeContext& context) { + auto data = context.get_input(0); + auto mask = context.get_input(1); + auto value = context.const_input(2); + auto data_shape = context.mark_node(std::make_shared(data)); + auto value_const = context.mark_node(opset10::Constant::create(element::f32, Shape({}), {value})); + auto broadcasted_value = context.mark_node(std::make_shared(value_const, data_shape)); + auto bool_mask = context.mark_node(std::make_shared(mask, element::boolean)); + return {context.mark_node(std::make_shared(bool_mask, broadcasted_value, data))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/max_poolnd.cpp b/src/frontends/pytorch/src/op/max_poolnd.cpp new file mode 100644 index 00000000000..b12b10b03a4 --- 
/dev/null +++ b/src/frontends/pytorch/src/op/max_poolnd.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_max_poolnd(NodeContext& context) { + auto kernel = context.const_input(1); + auto strides = context.const_input(2); + auto pads = context.const_input(3); // pytorch supports only symmetric paddings + auto dilations = context.const_input(4); + auto rounding_type = context.const_input(5) ? ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR; + + return {context.mark_node(std::make_shared(context.get_input(0), + strides, + dilations, + pads, + pads, + kernel, + rounding_type))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/mean.cpp b/src/frontends/pytorch/src/op/mean.cpp new file mode 100644 index 00000000000..cf9d7973599 --- /dev/null +++ b/src/frontends/pytorch/src/op/mean.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_mean(NodeContext& context) { + auto x = context.get_input(0); + auto y = context.get_input(1); + auto keep_dims = context.const_input(2); + FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(3), + "Only False is supported for input with index 3 for aten::mean"); + return {context.mark_node(std::make_shared(x, y, keep_dims))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git 
a/src/frontends/pytorch/src/op/min_max.cpp b/src/frontends/pytorch/src/op/min_max.cpp new file mode 100644 index 00000000000..76457032071 --- /dev/null +++ b/src/frontends/pytorch/src/op/min_max.cpp @@ -0,0 +1,76 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_max(NodeContext& context) { + // torch.max (same for torch.min) actually has two interfaces smashed together: + // torch.max(x, dim, keepdim) and torch.max(x, y) + auto x = context.get_input(0); + // torch.max(input) + if (context.input_is_none(1) & context.input_is_none(2)) { + auto axes = get_axes_range(context, 0); + return {context.mark_node(std::make_shared(x, axes, false))}; + } + // torch.max(input, other) + if (context.input_is_none(2)) { + auto y = context.get_input(1); + return {context.mark_node(std::make_shared(x, y))}; + } + // torch.max(input, dim, keepdim), returns values and indicies + auto axes_node = context.get_input(1); + auto axis_const = context.const_input(1); + auto keepdims = context.const_input(2); + auto values = context.mark_node(std::make_shared(x, axes_node, keepdims)); + auto k = context.mark_node(std::make_shared(element::i64, Shape{}, 1)); + auto topk = + std::make_shared(x, k, axis_const, opset10::TopK::Mode::MAX, opset10::TopK::SortType::NONE); + auto indicies = context.mark_node(std::make_shared(topk->output(1), element::i64)); + if (!keepdims) { + indicies = std::make_shared(indicies, axes_node); + } + return {values, indicies}; +}; + +OutputVector translate_min(NodeContext& context) { + // torch.min (same for torch.max) actually has two interfaces smashed together: + // torch.min(x, dim, keepdim) and torch.min(x, y) + auto x = context.get_input(0); + // torch.min(input) + if (context.input_is_none(1) & 
context.input_is_none(2)) { + auto axes = get_axes_range(context, 0); + return {context.mark_node(std::make_shared(x, axes, false))}; + } + // torch.min(input, other) + if (context.input_is_none(2)) { + auto y = context.get_input(1); + return {context.mark_node(std::make_shared(x, y))}; + } + // torch.min(input, dim, keepdim), returns values and indicies + auto axes_node = context.get_input(1); + auto axis_const = context.const_input(1); + auto keepdims = context.const_input(2); + auto values = context.mark_node(std::make_shared(x, axes_node, keepdims)); + auto k = context.mark_node(std::make_shared(element::i64, Shape{}, 1)); + auto topk = + std::make_shared(x, k, axis_const, opset10::TopK::Mode::MIN, opset10::TopK::SortType::NONE); + auto indicies = context.mark_node(std::make_shared(topk->output(1), element::i64)); + + if (!keepdims) { + indicies = std::make_shared(indicies, axes_node); + } + return {values, indicies}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/neg.cpp b/src/frontends/pytorch/src/op/neg.cpp new file mode 100644 index 00000000000..428aaec0e84 --- /dev/null +++ b/src/frontends/pytorch/src/op/neg.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_neg(NodeContext& context) { + auto x = context.get_input(0); + auto const_neg_1 = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {-1})); + auto cast = context.mark_node(std::make_shared(const_neg_1, x)); + return {context.mark_node(std::make_shared(x, cast))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git 
a/src/frontends/pytorch/src/op/nms.cpp b/src/frontends/pytorch/src/op/nms.cpp new file mode 100644 index 00000000000..4fc611b10e5 --- /dev/null +++ b/src/frontends/pytorch/src/op/nms.cpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset9.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_nms(NodeContext& context) { + auto const_0 = context.mark_node(opset9::Constant::create(element::i64, Shape{}, {0})); + auto const_1 = context.mark_node(opset9::Constant::create(element::i64, Shape{}, {1})); + auto const_2 = context.mark_node(opset9::Constant::create(element::i64, Shape{1}, {2})); + // the shape that is required by PyTorch operator differs from the shape required in OpenVino + auto boxes_shape = context.mark_node(opset9::Constant::create(element::i64, Shape{3}, {1, -1, 4})); + + auto boxes = context.mark_node(std::make_shared(context.get_input(0), boxes_shape, false)); + // Unsqueeze operator is also used to align shapes required by PyTorch and OpenVino + auto axis_01 = context.mark_node(opset9::Constant::create(element::i64, Shape{2}, {0, 1})); + auto scores = context.mark_node(std::make_shared(context.get_input(1), axis_01)); + auto max_output_per_class = + context.mark_node(opset9::Constant::create(element::i64, Shape{1}, {std::numeric_limits::max()})); + auto iou_threshold = context.get_input(2); + + auto nms_out = context.mark_node( + std::make_shared(boxes, scores, max_output_per_class, iou_threshold)); + auto select = context.mark_node(std::make_shared(nms_out, const_2, const_1)); + auto squeeze = std::make_shared(select, const_1); + + return {context.mark_node(squeeze)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/nonzero.cpp 
b/src/frontends/pytorch/src/op/nonzero.cpp new file mode 100644 index 00000000000..b5f70cbfc2a --- /dev/null +++ b/src/frontends/pytorch/src/op/nonzero.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_nonzero(NodeContext& context) { + auto cond = context.get_input(0); + auto non_zero = context.mark_node(std::make_shared(cond)); + auto input_order = context.mark_node(opset10::Constant::create(element::i64, Shape{2}, {1, 0})); + return {context.mark_node(std::make_shared(non_zero, input_order))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/norm.cpp b/src/frontends/pytorch/src/op/norm.cpp new file mode 100644 index 00000000000..7162c3708e2 --- /dev/null +++ b/src/frontends/pytorch/src/op/norm.cpp @@ -0,0 +1,52 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_norm(NodeContext& context) { + auto input_tensor = context.get_input(0); + auto p = context.const_input(1); + auto dim = context.get_input(2); + auto keep_dim = context.const_input(3); + + OutputVector res; + + if (p == 1) { + auto reduce_l1 = context.mark_node(std::make_shared(input_tensor, dim, keep_dim)); + res.push_back(reduce_l1); + } else if (p == 2) { + auto reduce_l2 = context.mark_node(std::make_shared(input_tensor, dim, keep_dim)); + res.push_back(reduce_l2); + } else if (p == std::numeric_limits::infinity()) { + auto abs = 
context.mark_node(std::make_shared(input_tensor)); + auto max = context.mark_node(std::make_shared(abs, dim, keep_dim)); + res.push_back(max); + } else if (p == -std::numeric_limits::infinity()) { + auto abs = context.mark_node(std::make_shared(input_tensor)); + auto min = context.mark_node(std::make_shared(abs, dim, keep_dim)); + res.push_back(min); + } else { + auto const_p = context.mark_node(opset10::Constant::create(element::f64, Shape{1}, {p})); + auto const_p_inv = context.mark_node(opset10::Constant::create(element::f64, Shape{1}, {1.0 / p})); + auto abs = context.mark_node(std::make_shared(input_tensor)); + auto pow = context.mark_node(std::make_shared(abs, const_p)); + auto sum = context.mark_node(std::make_shared(pow, dim, keep_dim)); + auto pow_inv = context.mark_node(std::make_shared(sum, const_p_inv)); + res.push_back(pow_inv); + } + + return res; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/numel.cpp b/src/frontends/pytorch/src/op/numel.cpp new file mode 100644 index 00000000000..0dc77b749a7 --- /dev/null +++ b/src/frontends/pytorch/src/op/numel.cpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_numel(NodeContext& context) { + return {numel(context, 0)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/pad.cpp b/src/frontends/pytorch/src/op/pad.cpp new file mode 100644 index 00000000000..3f437deb8b6 --- /dev/null +++ b/src/frontends/pytorch/src/op/pad.cpp @@ -0,0 +1,111 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: 
Apache-2.0 +// + +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_pad(NodeContext& context) { + auto data = context.get_input(0); + auto paddings = context.const_input>(1); + std::string mode = "constant"; + auto shape = context.mark_node(std::make_shared(data, element::i32)); + auto rank = context.mark_node(std::make_shared(shape, element::i32)); + auto reduced_rank = context.mark_node(std::make_shared(rank)); + auto zero = context.mark_node(opset10::Constant::create(element::i32, Shape{}, {0})); + auto zero_f = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {0})); + auto pad_size_half = paddings.size() / 2; + std::vector pad_b(pad_size_half, 0); + std::vector pad_e(pad_size_half, 0); + for (int i = 0; i < pad_size_half; i++) { + pad_b[i] = paddings[paddings.size() - 2 - 2 * i]; + pad_e[i] = paddings[paddings.size() - 1 - 2 * i]; + } + auto pads_begin_short = context.mark_node(opset10::Constant::create(element::i32, Shape{pad_size_half}, pad_b)); + auto pads_end_short = context.mark_node(opset10::Constant::create(element::i32, Shape{pad_size_half}, pad_e)); + auto pads_short_len = context.mark_node(opset10::Constant::create(element::i32, Shape{1}, {pad_size_half})); + auto pads_diff = context.mark_node(std::make_shared(rank, pads_short_len)); + auto pads_remaining = context.mark_node(std::make_shared(zero, pads_diff)); + auto pads_begins = + context.mark_node(std::make_shared(NodeVector{pads_remaining, pads_begin_short}, 0)); + auto pads_ends = + context.mark_node(std::make_shared(NodeVector{pads_remaining, pads_end_short}, 0)); + if (!context.input_is_none(2)) { + mode = context.const_input(2); + } + if (mode == "circular") { + int64_t pad_l; + int64_t pad_r; + auto pad_last_id = paddings.size(); + auto cur = 
data.get_node_shared_ptr(); + auto step = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {1})); + for (auto i = 0; i < pad_size_half; i++) { + ov::NodeVector tensors; + pad_r = paddings[pad_last_id - (2 * i + 1)]; + pad_l = paddings[pad_last_id - (2 * i + 2)]; + auto axes = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {2 + i})); + if (pad_l > 0) { + auto start = + context.mark_node(context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {-pad_l}))); + auto end = context.mark_node(std::make_shared( + shape, + context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {2 + i})), + context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {0})))); + + auto left = context.mark_node(std::make_shared(cur, start, end, step, axes)); + tensors.push_back(left); + } + if (pad_l < 0 || pad_r < 0) { + auto start = context.mark_node( + context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {pad_l < 0 ? -pad_l : 0}))); + auto end = context.mark_node( + context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {pad_r < 0 ? 
pad_r : 0}))); + auto middle = context.mark_node(std::make_shared(cur, start, end, step, axes)); + tensors.push_back(middle); + } else { + tensors.push_back(cur); + } + if (pad_r > 0) { + auto start = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {0})); + auto end = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {pad_r})); + auto right = context.mark_node(std::make_shared(cur, start, end, step, axes)); + tensors.push_back(right); + } + if (tensors.size()) { + cur = context.mark_node(std::make_shared(tensors, 2 + i)); + } + } + return {cur}; + } + if (mode == "constant") { + if (!context.input_is_none(3)) { + auto pad_value = context.get_input(3); + return {context.mark_node( + std::make_shared(data, pads_begins, pads_ends, pad_value, ov::op::PadMode::CONSTANT))}; + } + return {context.mark_node( + std::make_shared(data, pads_begins, pads_ends, zero_f, ov::op::PadMode::CONSTANT))}; + } + if (mode == "reflect") { + return {context.mark_node( + std::make_shared(data, pads_begins, pads_ends, zero_f, ov::op::PadMode::REFLECT))}; + } + if (mode == "replicate") { + return {context.mark_node( + std::make_shared(data, pads_begins, pads_ends, zero_f, ov::op::PadMode::EDGE))}; + } + + FRONT_END_OP_CONVERSION_CHECK(false, "aten::pad conversion doesn't support [ " + mode + " ] padding mode"); +} + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/reciprocal.cpp b/src/frontends/pytorch/src/op/reciprocal.cpp new file mode 100644 index 00000000000..3e38a7e9728 --- /dev/null +++ b/src/frontends/pytorch/src/op/reciprocal.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + 
+OutputVector translate_reciprocal(NodeContext& context) { + auto x = context.get_input(0); + auto const_neg_1 = opset10::Constant::create(element::i32, Shape{}, {-1}); + auto cast = std::make_shared(const_neg_1, x); + auto power = std::make_shared(x, cast); + return {context.mark_node(power)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/relu6.cpp b/src/frontends/pytorch/src/op/relu6.cpp new file mode 100644 index 00000000000..77cbf619ec0 --- /dev/null +++ b/src/frontends/pytorch/src/op/relu6.cpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_relu6(NodeContext& context) { + auto x = context.get_input(0); + return {context.mark_node(std::make_shared(x, 0., 6.))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/repeat.cpp b/src/frontends/pytorch/src/op/repeat.cpp new file mode 100644 index 00000000000..8941ae6e490 --- /dev/null +++ b/src/frontends/pytorch/src/op/repeat.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_repeat(NodeContext& context) { + auto x = context.get_input(0); + auto repeats = context.get_input(1); + auto one = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {1})); + auto sizes_shape = context.mark_node(std::make_shared(repeats, element::i64)); 
+ auto expand_shape = context.mark_node(std::make_shared(one, sizes_shape)); + auto expanded_input = + context.mark_node(std::make_shared(x, expand_shape, ov::op::BroadcastType::BIDIRECTIONAL)); + return {context.mark_node(std::make_shared(expanded_input, repeats))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/reshape.cpp b/src/frontends/pytorch/src/op/reshape.cpp new file mode 100644 index 00000000000..2da4cba5f19 --- /dev/null +++ b/src/frontends/pytorch/src/op/reshape.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "pt_framework_node.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_reshape(NodeContext& context) { + auto shape_node = context.get_input(1).get_node(); + auto shape_node_fw_node = dynamic_cast(shape_node); + std::shared_ptr reshape; + // TODO: move this to transform stage + if (shape_node_fw_node && shape_node_fw_node->get_decoder()->get_op_type() == "prim::ListConstruct") { + OutputVector inputs; + auto axis_0 = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + for (auto& input : shape_node->inputs()) { + auto rank = input.get_partial_shape().rank(); + FRONT_END_OP_CONVERSION_CHECK(rank.is_dynamic() || rank.get_length() == 0, "Rank must be 0"); + auto unsqueeze = context.mark_node(std::make_shared(input.get_source_output(), axis_0)); + inputs.push_back(unsqueeze); + } + auto concat = context.mark_node(std::make_shared(inputs, 0)); + reshape = context.mark_node(std::make_shared(context.get_input(0), concat, false)); + } else { + reshape = + context.mark_node(std::make_shared(context.get_input(0), context.get_input(1), false)); + } + return {reshape}; +}; + +} // 
namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/reshape_as.cpp b/src/frontends/pytorch/src/op/reshape_as.cpp new file mode 100644 index 00000000000..145c5dd0621 --- /dev/null +++ b/src/frontends/pytorch/src/op/reshape_as.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_reshape_as(NodeContext& context) { + auto input_tensor = context.get_input(0); + auto shape_tesnor = context.get_input(1); + auto desired_shape = context.mark_node(std::make_shared(shape_tesnor)); + return {context.mark_node(std::make_shared(input_tensor, desired_shape, false))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/roll.cpp b/src/frontends/pytorch/src/op/roll.cpp new file mode 100644 index 00000000000..a3e76d94292 --- /dev/null +++ b/src/frontends/pytorch/src/op/roll.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_roll(NodeContext& context) { + const auto data = context.get_input(0); + const auto shifts = context.get_input(1); + const auto axes = context.get_input(2); + const auto shifts_pshape = shifts.get_partial_shape(); + const auto axes_pshape = axes.get_partial_shape(); + const auto match_dims = axes_pshape.compatible(shifts_pshape); + if (!match_dims) { + const auto const_minus_1 = opset10::Constant::create(element::i32, Shape{1}, {-1}); 
+ const auto axis_0 = opset10::Constant::create(element::i32, Shape{1}, {0}); + const auto flat = std::make_shared(data, const_minus_1, false); + const auto roll = std::make_shared(flat, shifts, axis_0); + const auto shape_of_data = std::make_shared(data); + const auto reshape = std::make_shared(roll, shape_of_data, false); + context.mark_nodes({const_minus_1, flat, roll, shape_of_data, reshape}); + return {reshape}; + } + return {context.mark_node(std::make_shared(data, shifts, axes))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/rsqrt.cpp b/src/frontends/pytorch/src/op/rsqrt.cpp new file mode 100644 index 00000000000..3b6db63b6d5 --- /dev/null +++ b/src/frontends/pytorch/src/op/rsqrt.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_rsqrt(NodeContext& context) { + auto data = context.get_input(0); + auto input_shape = context.mark_node(std::make_shared(data)); + auto one_const = context.mark_node(opset10::Constant::create(element::f32, Shape({}), {1})); + auto sqrt_data = context.mark_node(std::make_shared(data)); + return {context.mark_node(std::make_shared(one_const, sqrt_data))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/rsub.cpp b/src/frontends/pytorch/src/op/rsub.cpp new file mode 100644 index 00000000000..6d4f031b46e --- /dev/null +++ b/src/frontends/pytorch/src/op/rsub.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include 
"openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_rsub(NodeContext& context) { + auto self = context.get_input(0); + auto other = context.get_input(1); + auto alpha = context.get_input(2); + // reverse aten::sub other - self * alpha + auto alpha_casted = context.mark_node(std::make_shared(alpha, self)); + auto alpha_mul = context.mark_node(std::make_shared(self, alpha_casted)); + return {context.mark_node(std::make_shared(other, alpha_mul))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/select.cpp b/src/frontends/pytorch/src/op/select.cpp new file mode 100644 index 00000000000..85bab739a88 --- /dev/null +++ b/src/frontends/pytorch/src/op/select.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_select(NodeContext& context) { + auto const_1 = context.mark_node(opset10::Constant::create(element::i32, Shape{1}, {1})); + auto const_minus_1 = context.mark_node(opset10::Constant::create(element::i32, Shape{1}, {-1})); + auto const_0 = context.mark_node(opset10::Constant::create(element::i32, Shape{1}, {0})); + auto input_tensor = context.get_input(0); + auto dim = context.mark_node(std::make_shared(context.get_input(1), const_1, false)); + auto start = context.mark_node(std::make_shared(context.get_input(2), const_1, false)); + + auto less = context.mark_node(std::make_shared(start, const_0)); + auto const_1_signed = context.mark_node(std::make_shared(less, const_minus_1, const_1)); + auto stop = context.mark_node(std::make_shared(start, const_1_signed)); + + auto slice_node 
= + context.mark_node(std::make_shared(input_tensor, start, stop, const_1_signed, dim)); + + return {context.mark_node(std::make_shared(slice_node, dim))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/selu.cpp b/src/frontends/pytorch/src/op/selu.cpp new file mode 100644 index 00000000000..14db79941a8 --- /dev/null +++ b/src/frontends/pytorch/src/op/selu.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_selu(NodeContext& context) { + auto x = context.get_input(0); + auto alpha = + context.mark_node(opset10::Constant::create(element::f64, Shape{}, {1.6732632423543772848170429916717})); + auto lambda = + context.mark_node(opset10::Constant::create(element::f64, Shape{}, {1.0507009873554804934193349852946})); + alpha = context.mark_node(std::make_shared(alpha, x)); + lambda = context.mark_node(std::make_shared(lambda, x)); + return {context.mark_node(std::make_shared(x, alpha, lambda))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/size.cpp b/src/frontends/pytorch/src/op/size.cpp new file mode 100644 index 00000000000..732d7be28b8 --- /dev/null +++ b/src/frontends/pytorch/src/op/size.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_size(NodeContext& context) { + auto shape = 
context.mark_node(std::make_shared(context.get_input(0), element::i32)); + if (context.input_is_none(1)) { + return shape->outputs(); + } else { + auto axis_0 = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + return {context.mark_node(std::make_shared(shape, context.get_input(1), axis_0))}; + } +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/slice.cpp b/src/frontends/pytorch/src/op/slice.cpp new file mode 100644 index 00000000000..334c458d0c5 --- /dev/null +++ b/src/frontends/pytorch/src/op/slice.cpp @@ -0,0 +1,75 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_slice(NodeContext& context) { + // aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> (t[]) + // aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? 
end=None, int step=1) -> (Tensor(a)) + ov::Output dim; + int start_idx; + int end_idx; + int step_idx; + auto axis_0 = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + if (context.get_input_size() == 5) { + dim = context.get_input(1); + if (dim.get_partial_shape().rank().is_dynamic() || dim.get_partial_shape().rank().get_length() == 0) { + dim = context.mark_node(std::make_shared(dim, axis_0)); + } + start_idx = 2; + end_idx = 3; + step_idx = 4; + } else if (context.get_input_size() == 4) { + start_idx = 1; + end_idx = 2; + step_idx = 3; + dim = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {0})); + } else { + FRONT_END_OP_CONVERSION_CHECK(false, "Slice must have either 4 or 5 inputs."); + } + // TODO: support default start/end with negative step + ov::Output start; + if (!context.input_is_none(start_idx)) { + start = context.get_input(start_idx); + if (start.get_partial_shape().rank().is_dynamic() || start.get_partial_shape().rank().get_length() == 0) { + start = context.mark_node(std::make_shared(start, axis_0)); + } + } else { + start = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {0})); + } + + ov::Output end; + if (!context.input_is_none(end_idx)) { + end = context.get_input(end_idx); + if (end.get_partial_shape().rank().is_dynamic() || end.get_partial_shape().rank().get_length() == 0) { + end = context.mark_node(std::make_shared(end, axis_0)); + } + } else { + end = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {INT_MAX})); + } + ov::Output step; + if (!context.input_is_none(step_idx)) { + step = context.get_input(step_idx); + if (step.get_partial_shape().rank().is_dynamic() || step.get_partial_shape().rank().get_length() == 0) { + step = context.mark_node(std::make_shared(step, axis_0)); + } + } else { + step = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {1})); + } + return {context.mark_node(std::make_shared(context.get_input(0), start, end, 
step, dim))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/softmax.cpp b/src/frontends/pytorch/src/op/softmax.cpp new file mode 100644 index 00000000000..a17919505f5 --- /dev/null +++ b/src/frontends/pytorch/src/op/softmax.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_softmax(NodeContext& context) { + auto x = context.get_input(0); + auto axis = context.const_input(1); + return {context.mark_node(std::make_shared(x, axis))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/square.cpp b/src/frontends/pytorch/src/op/square.cpp new file mode 100644 index 00000000000..02165227c34 --- /dev/null +++ b/src/frontends/pytorch/src/op/square.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_square(NodeContext& context) { + auto input_0 = context.get_input(0); + auto const_2 = context.mark_node(opset10::Constant::create(input_0.get_element_type(), Shape{1}, {2})); + return {context.mark_node(std::make_shared(input_0, const_2))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/squeeze.cpp b/src/frontends/pytorch/src/op/squeeze.cpp new file mode 100644 index 
00000000000..f26c9171cf1 --- /dev/null +++ b/src/frontends/pytorch/src/op/squeeze.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_squeeze(NodeContext& context) { + auto inputs = context.inputs(); + FRONT_END_OP_CONVERSION_CHECK(inputs.size() >= 1, "Operation has no inputs."); + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(0), "Input should not be None."); + if (inputs.size() == 1 || context.input_is_none(1)) { + return {context.mark_node(std::make_shared(inputs[0]))}; + } + return {context.mark_node(std::make_shared(inputs[0], inputs[1]))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/sub.cpp b/src/frontends/pytorch/src/op/sub.cpp new file mode 100644 index 00000000000..a7fa86663bd --- /dev/null +++ b/src/frontends/pytorch/src/op/sub.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_sub(NodeContext& context) { + auto x = context.get_input(0); + auto y = context.get_input(1); + // default alpha is 1 so no need to multiply if alpha is not provided + if (!context.input_is_none(2)) { + auto alpha = context.get_input(2); + auto casted_alpha = context.mark_node(std::make_shared(alpha, y)); + y = context.mark_node(std::make_shared(casted_alpha, y)); + } + return {context.mark_node(std::make_shared(x, y))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git 
a/src/frontends/pytorch/src/op/sum.cpp b/src/frontends/pytorch/src/op/sum.cpp new file mode 100644 index 00000000000..49fa4d1d61c --- /dev/null +++ b/src/frontends/pytorch/src/op/sum.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_sum(NodeContext& context) { + bool keep_dims = false; + ov::Output axes; + Output cast; + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(0), "Operation should have at least 1 input"); + auto data = context.get_input(0); + if (context.input_is_none(1)) { + axes = get_axes_range(context, 0); + } else { + axes = context.get_input(1); + } + if (!context.input_is_none(2)) { + keep_dims = context.const_input(2); + } + + return {context.mark_node(std::make_shared(data, axes, keep_dims))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/to.cpp b/src/frontends/pytorch/src/op/to.cpp new file mode 100644 index 00000000000..7e413749ceb --- /dev/null +++ b/src/frontends/pytorch/src/op/to.cpp @@ -0,0 +1,67 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "pt_framework_node.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_to(NodeContext& context) { + int dtype_idx; + int non_blocking_idx; + int copy_idx; + int memory_format_idx; + if (context.get_input_size() == 5) { + // aten::to.dtype(Tensor(a) self, int dtype, bool non_blocking=False, bool copy=False, int? 
memory_format=None) + // -> (Tensor(a)) + dtype_idx = 1; + non_blocking_idx = 2; + copy_idx = 3; + memory_format_idx = 4; + } else if (context.get_input_size() == 6) { + // aten::to.device(Tensor(a) self, Device device, int dtype, bool non_blocking=False, bool copy=False, int? + // memory_format=None) -> (Tensor(a)). + // Input with index 1 is device we skip that input. + dtype_idx = 2; + non_blocking_idx = 3; + copy_idx = 4; + memory_format_idx = 5; + } else { + FRONT_END_OP_CONVERSION_CHECK(false, "Unknown aten::to format"); + } + // We ignore both non_blocking and copy inputs since non_blocking argument is used + // in Pytorch during training to overlap data transfer from CPU to GPU which does + // not have a use case in OV. To copy or not to copy inputs should not be set + // on the frontend level since it can produce unexpected beviour in the later + // stages. (e.g. transformations passes) + + // memory_format sets the desired memory format of returned Tensor. + // memory format should not be set on the frontend level + FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(memory_format_idx), + "aten::to translation do not support memory_format attribute"); + auto dtype_ext_node = context.get_input_from_visible_context(dtype_idx).get_node_shared_ptr(); + auto dtype_fw_node = std::dynamic_pointer_cast(dtype_ext_node); + Output cast; + if (dtype_fw_node && dtype_fw_node->get_op_type() == "prim::dtype") { + auto type_input = dtype_fw_node->input_value(0); + cast = context.mark_node(std::make_shared(context.get_input(0), type_input)); + } else if (const auto dtype_const = std::dynamic_pointer_cast(dtype_ext_node)) { + auto pt_type = dtype_const->cast_vector()[0]; + auto dtype = convert_dtype(pt_type); + cast = context.mark_node(std::make_shared(context.get_input(0), dtype)); + } else { + cast = context.mark_node(std::make_shared(context.get_input(0), context.get_input(1))); + } + return {cast}; +} + +} // namespace op +} // namespace pytorch +} // namespace 
frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/transpose.cpp b/src/frontends/pytorch/src/op/transpose.cpp new file mode 100644 index 00000000000..b230c9219a3 --- /dev/null +++ b/src/frontends/pytorch/src/op/transpose.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_transpose(NodeContext& context) { + auto dim0 = context.const_input(1); + auto dim1 = context.const_input(2); + auto shape = std::make_shared(context.get_input(0), element::i32); + auto rank_ = std::make_shared(shape, element::i32); + auto rank = std::make_shared(rank_); + // Use opset::If for dim normalization + auto dim0_node = context.get_input(1); + auto dim1_node = context.get_input(2); + if (dim0 < 0) { + dim0_node = std::make_shared(rank, dim0_node); + } + if (dim1 < 0) { + dim1_node = std::make_shared(rank, dim1_node); + } + auto start = opset10::Constant::create(element::i32, {}, {0}); + auto step = opset10::Constant::create(element::i32, {}, {1}); + auto range = std::make_shared(start, rank, step, element::i32); + + auto axis_0 = opset10::Constant::create(element::i64, Shape{}, {0}); + auto dim0_node_ = std::make_shared(dim0_node, axis_0); + auto dim1_node_ = std::make_shared(dim1_node, axis_0); + auto indices = std::make_shared(OutputVector{dim0_node_, dim1_node_}, 0); + auto updates = std::make_shared(OutputVector{dim1_node_, dim0_node_}, 0); + auto scatter = std::make_shared(range, indices, updates, axis_0); + context.mark_nodes( + {shape, rank_, rank, start, step, range, axis_0, dim0_node_, dim1_node_, indices, updates, scatter}); + + return {context.mark_node(std::make_shared(context.get_input(0), scatter))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend 
+} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/tuple_construct.cpp b/src/frontends/pytorch/src/op/tuple_construct.cpp new file mode 100644 index 00000000000..3aed1c4323d --- /dev/null +++ b/src/frontends/pytorch/src/op/tuple_construct.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_tuple_construct(NodeContext& context) { + auto n_inputs = context.get_input_size(); + FRONT_END_OP_CONVERSION_CHECK( + n_inputs == 1, + "prim::TupleConstruct conversion doesn't support cases when the number of inputs is not one."); + return {context.get_input(0)}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/upsample.cpp b/src/frontends/pytorch/src/op/upsample.cpp new file mode 100644 index 00000000000..ca326405e6d --- /dev/null +++ b/src/frontends/pytorch/src/op/upsample.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +namespace { +OutputVector base_translate_upsample2d(NodeContext& context, opset10::Interpolate::InterpolateMode interpolate_mode) { + auto data = context.get_input(0); + std::vector pad{0}; + auto size_mode = opset10::Interpolate::ShapeCalcMode::SIZES; + bool align_corners = false; + int scale_id = 2; + if (interpolate_mode != opset10::Interpolate::InterpolateMode::NEAREST) { + scale_id = 3; + if (!context.input_is_none(2)) { + align_corners = context.const_input(2); + } + } + auto target_axes = 
std::make_shared(element::i32, Shape{2}, std::vector({2, 3})); + auto scales = + context.mark_node(std::make_shared(element::f32, Shape{2}, std::vector({1, 1}))); + auto output_sizes = + context.mark_node(std::make_shared(element::i32, Shape{2}, std::vector({1, 1}))); + if (context.input_is_none(1)) { + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(scale_id), "Scale or Output size should be provided"); + auto spatial_scales = context.get_input(scale_id); + + size_mode = opset10::Interpolate::ShapeCalcMode::SCALES; + scales = context.mark_node(std::make_shared(spatial_scales, scales)); + } else { + auto out_sizes = context.get_input(1); + output_sizes = context.mark_node(std::make_shared(out_sizes, output_sizes)); + } + auto attrs = opset10::Interpolate::InterpolateAttrs(interpolate_mode, size_mode, pad, pad); + attrs.coordinate_transformation_mode = opset10::Interpolate::CoordinateTransformMode::ASYMMETRIC; + attrs.nearest_mode = opset10::Interpolate::NearestMode::FLOOR; + if (attrs.mode != opset10::Interpolate::InterpolateMode::NEAREST) { + if (align_corners) { + attrs.coordinate_transformation_mode = opset10::Interpolate::CoordinateTransformMode::ALIGN_CORNERS; + } + } + return {context.mark_node(std::make_shared(data, output_sizes, scales, target_axes, attrs))}; +}; +} // namespace + +OutputVector translate_upsample_bilinear2d(NodeContext& context) { + return base_translate_upsample2d(context, opset10::Interpolate::InterpolateMode::LINEAR_ONNX); +}; + +OutputVector translate_upsample_nearest2d(NodeContext& context) { + return base_translate_upsample2d(context, opset10::Interpolate::InterpolateMode::NEAREST); +}; + +OutputVector translate_upsample_bicubic2d(NodeContext& context) { + return base_translate_upsample2d(context, opset10::Interpolate::InterpolateMode::CUBIC); +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/var.cpp 
b/src/frontends/pytorch/src/op/var.cpp new file mode 100644 index 00000000000..7c6e16cad83 --- /dev/null +++ b/src/frontends/pytorch/src/op/var.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_var(NodeContext& context) { + auto data = context.get_input(0); + bool unbiased = true; + bool keepdims = false; + auto num_elements = numel(context, 0); + bool keepdim_mean; + std::shared_ptr mean; + ov::Output axes; + if (context.inputs().size() == 2) { + // aten::var(input, unbiased) + axes = context.mark_node(get_axes_range(context, 0)); + unbiased = context.const_input(1); + mean = context.mark_node(std::make_shared(data, axes, keepdims)); + keepdim_mean = keepdims; + } else { + // aten::var(input, dim, unbiased:bool=None, keepdim:bool=None) + if (!context.input_is_none(2)) { + unbiased = context.const_input(2); + } + if (!context.input_is_none(3)) { + keepdims = context.const_input(3); + } + if (context.input_is_none(1)) { + axes = context.mark_node(get_axes_range(context, 0)); + mean = context.mark_node(std::make_shared(data, axes, keepdims)); + } else { + axes = context.get_input(1); + mean = context.mark_node(std::make_shared(data, axes, true)); + auto reduced_dims = context.mark_node(std::make_shared(data)); + auto zero = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + reduced_dims = context.mark_node(std::make_shared(reduced_dims, axes, zero)); + num_elements = context.mark_node(std::make_shared(reduced_dims, zero, false)); + } + keepdim_mean = context.input_is_none(1) ? 
false : keepdims; + } + auto sub_v = context.mark_node(std::make_shared(data, mean)); + auto sqr_sub = context.mark_node(std::make_shared(sub_v, sub_v)); + auto var = context.mark_node(std::make_shared(sqr_sub, axes, keepdim_mean)); + // if unbiased=true Bessel’s correction will be used + // Correct bias in calculating variance, by dividing it over (N - 1) instead on N + if (unbiased) { + num_elements = context.mark_node(std::make_shared(num_elements, data)); + auto one = context.mark_node(opset10::Constant::create(element::f32, Shape{}, {1})); + one = context.mark_node(std::make_shared(one, data)); + auto mul = context.mark_node(std::make_shared(var, num_elements)); + auto n_minus_one = context.mark_node(std::make_shared(num_elements, one)); + var = context.mark_node(std::make_shared(mul, n_minus_one)); + } + return {var}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/view.cpp b/src/frontends/pytorch/src/op/view.cpp new file mode 100644 index 00000000000..46a01d229a6 --- /dev/null +++ b/src/frontends/pytorch/src/op/view.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "pt_framework_node.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_view(NodeContext& context) { + auto shape_node = context.get_input(1).get_node(); + auto shape_node_fw_node = dynamic_cast(shape_node); + std::shared_ptr reshape; + // TODO: move this to transform stage + if (shape_node_fw_node && shape_node_fw_node->get_decoder()->get_op_type() == "prim::ListConstruct") { + OutputVector inputs; + auto axis_0 = context.mark_node(opset10::Constant::create(element::i64, Shape{}, {0})); + for (auto& input : shape_node->inputs()) { + auto rank 
= input.get_partial_shape().rank(); + FRONT_END_OP_CONVERSION_CHECK(rank.is_dynamic() || rank.get_length() == 0, "Rank must be 0"); + auto unsqueeze = context.mark_node(std::make_shared(input.get_source_output(), axis_0)); + inputs.push_back(unsqueeze); + } + auto concat = context.mark_node(std::make_shared(inputs, 0)); + reshape = context.mark_node(std::make_shared(context.get_input(0), concat, false)); + // TODO: fix rt_info + // auto list_set = shape_node_fw_node->get_rt_info()["pt_node"].as>(); + // reshape->get_rt_info()["pt_node"].as>().insert(list_set.begin(), + // list_set.end()); + } else { + reshape = + context.mark_node(std::make_shared(context.get_input(0), context.get_input(1), false)); + } + return {reshape}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/where.cpp b/src/frontends/pytorch/src/op/where.cpp new file mode 100644 index 00000000000..9cdf161bedc --- /dev/null +++ b/src/frontends/pytorch/src/op/where.cpp @@ -0,0 +1,26 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +OutputVector translate_where(NodeContext& context) { + auto cond = context.get_input(0); + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(1), "aten::where(cond) unsupported"); + auto bool_cond = context.mark_node(std::make_shared(cond, element::boolean)); + auto x = context.get_input(1); + auto y = context.get_input(2); + return {context.mark_node(std::make_shared(bool_cond, x, y))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp new file mode 100644 index 
00000000000..846a37aeb03 --- /dev/null +++ b/src/frontends/pytorch/src/op_table.cpp @@ -0,0 +1,276 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "op_table.hpp" + +#include "openvino/opsets/opset10.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +#define OP_CONVERTER(op) OutputVector op(NodeContext& node) + +OP_CONVERTER(translate_adaptive_avg_pool3d); +OP_CONVERTER(translate_adaptive_max_pool2d); +OP_CONVERTER(translate_add); +OP_CONVERTER(translate_addcmul); +OP_CONVERTER(translate_addmm); +OP_CONVERTER(translate_arange); +OP_CONVERTER(translate_as_tensor); +OP_CONVERTER(translate_avg_poolnd); +OP_CONVERTER(translate_batch_norm); +OP_CONVERTER(translate_clamp); +OP_CONVERTER(translate_constant); +OP_CONVERTER(translate_convnd); +OP_CONVERTER(translate_convolution); +OP_CONVERTER(translate_convolution_mode); +OP_CONVERTER(translate_dim); +OP_CONVERTER(translate_div); +OP_CONVERTER(translate_elu); +OP_CONVERTER(translate_expand); +OP_CONVERTER(translate_expand_as); +OP_CONVERTER(translate_embedding); +OP_CONVERTER(translate_flatten); +OP_CONVERTER(translate_floordiv); +OP_CONVERTER(translate_floor_divide); +OP_CONVERTER(translate_full); +OP_CONVERTER(translate_full_like); +OP_CONVERTER(translate_gelu); +OP_CONVERTER(translate_get_attr); +OP_CONVERTER(translate_group_norm); +OP_CONVERTER(translate_hardtanh); +OP_CONVERTER(translate_if); +OP_CONVERTER(translate_im2col); +OP_CONVERTER(translate_int); +OP_CONVERTER(translate_layer_norm); +OP_CONVERTER(translate_len); +OP_CONVERTER(translate_linear); +OP_CONVERTER(translate_list_construct); +OP_CONVERTER(translate_loop); +OP_CONVERTER(translate_max_poolnd); +OP_CONVERTER(translate_max); +OP_CONVERTER(translate_masked_fill); +OP_CONVERTER(translate_mean); +OP_CONVERTER(translate_min); +OP_CONVERTER(translate_neg); +OP_CONVERTER(translate_nonzero); +OP_CONVERTER(translate_norm); 
+OP_CONVERTER(translate_new_full); +OP_CONVERTER(translate_new_ones); +OP_CONVERTER(translate_new_zeros); +OP_CONVERTER(translate_nms); +OP_CONVERTER(translate_numel); +OP_CONVERTER(translate_ones); +OP_CONVERTER(translate_ones_like); +OP_CONVERTER(translate_pad); +OP_CONVERTER(translate_reciprocal); +OP_CONVERTER(translate_relu6); +OP_CONVERTER(translate_repeat); +OP_CONVERTER(translate_reshape); +OP_CONVERTER(translate_reshape_as); +OP_CONVERTER(translate_rsub); +OP_CONVERTER(translate_roll); +OP_CONVERTER(translate_rsqrt); +OP_CONVERTER(translate_select); +OP_CONVERTER(translate_selu); +OP_CONVERTER(translate_size); +OP_CONVERTER(translate_slice); +OP_CONVERTER(translate_softmax); +OP_CONVERTER(translate_square); +OP_CONVERTER(translate_squeeze); +OP_CONVERTER(translate_sub); +OP_CONVERTER(translate_sum); +OP_CONVERTER(translate_to); +OP_CONVERTER(translate_transpose); +OP_CONVERTER(translate_tuple_construct); +OP_CONVERTER(translate_upsample_bicubic2d); +OP_CONVERTER(translate_upsample_bilinear2d); +OP_CONVERTER(translate_upsample_nearest2d); +OP_CONVERTER(translate_var); +OP_CONVERTER(translate_view); +OP_CONVERTER(translate_where); +OP_CONVERTER(translate_zeros); +OP_CONVERTER(translate_zeros_like); + +} // namespace op + +const std::map get_supported_ops() { + return { + {"aten::__not__", op::translate_1to1_match_1_inputs}, + {"aten::_convolution", op::translate_convolution}, + {"aten::_convolution_mode", op::translate_convolution_mode}, + {"aten::abs", op::translate_1to1_match_1_inputs}, + {"aten::acos", op::translate_1to1_match_1_inputs}, + {"aten::acos_", op::inplace_op>}, + {"aten::acosh", op::translate_1to1_match_1_inputs}, + {"aten::acosh_", op::inplace_op>}, + {"aten::adaptive_avg_pool2d", op::translate_1to1_match_2_inputs}, + {"aten::adaptive_avg_pool3d", op::translate_adaptive_avg_pool3d}, + {"aten::adaptive_max_pool2d", op::translate_adaptive_max_pool2d}, + {"aten::add", op::translate_add}, + {"aten::add_", op::inplace_op}, + {"aten::addcmul", 
op::translate_addcmul}, + {"aten::addmm", op::translate_addmm}, + {"aten::arange", op::translate_arange}, + {"aten::asin", op::translate_1to1_match_1_inputs}, + {"aten::asin_", op::inplace_op>}, + {"aten::asinh", op::translate_1to1_match_1_inputs}, + {"aten::asinh_", op::inplace_op>}, + {"aten::as_tensor", op::translate_as_tensor}, + {"aten::atan", op::translate_1to1_match_1_inputs}, + {"aten::atan_", op::inplace_op>}, + {"aten::atanh", op::translate_1to1_match_1_inputs}, + {"aten::atanh_", op::inplace_op>}, + {"aten::avg_pool1d", op::translate_avg_poolnd}, + {"aten::avg_pool2d", op::translate_avg_poolnd}, + {"aten::avg_pool3d", op::translate_avg_poolnd}, + {"aten::batch_norm", op::translate_batch_norm}, + // {"aten::cat", done as transformation}, + {"aten::clamp", op::translate_clamp}, + {"aten::clamp_min", op::translate_1to1_match_2_inputs}, + {"aten::clamp_max", op::translate_1to1_match_2_inputs}, + {"aten::ceil", op::translate_1to1_match_1_inputs}, + {"aten::ceil_", op::inplace_op>}, + {"aten::clone", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd + {"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail, + // we assume all tensors are contiguous + {"aten::conv1d", op::translate_convnd}, + {"aten::conv2d", op::translate_convnd}, + {"aten::conv3d", op::translate_convnd}, + {"aten::convolution", op::translate_convolution}, + {"aten::cos", op::translate_1to1_match_1_inputs}, + {"aten::cos_", op::inplace_op>}, + {"aten::cosh", op::translate_1to1_match_1_inputs}, + {"aten::cosh_", op::inplace_op>}, + {"aten::cumsum", op::translate_1to1_match_2_inputs}, + {"aten::dim", op::translate_dim}, + {"aten::div", op::translate_div}, + {"aten::div_", op::inplace_op}, + {"aten::dropout", op::skip_node}, + {"aten::dropout_", op::skip_node}, + {"aten::elu", op::translate_elu}, + {"aten::embedding", op::translate_embedding}, + {"aten::eq", op::translate_1to1_match_2_inputs}, + {"aten::exp", 
op::translate_1to1_match_1_inputs}, + {"aten::expand", op::translate_expand}, + {"aten::expand_as", op::translate_expand_as}, + {"aten::flatten", op::translate_flatten}, + {"aten::floor", op::translate_1to1_match_1_inputs}, + {"aten::floor_", op::inplace_op>}, + {"aten::floordiv", op::translate_floordiv}, + {"aten::floor_divide", op::translate_floor_divide}, + {"aten::full", op::translate_full}, + {"aten::full_like", op::translate_full_like}, + {"aten::gelu", op::translate_gelu}, + {"aten::group_norm", op::translate_group_norm}, + {"aten::ge", op::translate_1to1_match_2_inputs}, + {"aten::gt", op::translate_1to1_match_2_inputs}, + {"aten::hardsigmoid", op::translate_1to1_match_1_inputs}, + {"aten::hardswish", op::translate_1to1_match_1_inputs}, + {"aten::hardswish_", op::inplace_op>}, + {"aten::hardtanh", op::translate_hardtanh}, + {"aten::hardtanh_", op::inplace_op}, + {"aten::Int", op::translate_int}, + {"aten::im2col", op::translate_im2col}, + {"aten::is_grad_enabled", op::return_false_scalar}, + {"aten::layer_norm", op::translate_layer_norm}, + {"aten::leaky_relu", op::translate_1to1_match_2_inputs}, + {"aten::leaky_relu_", op::inplace_op>}, + {"aten::len", op::translate_len}, + {"aten::linear", op::translate_linear}, + {"aten::le", op::translate_1to1_match_2_inputs}, + {"aten::lt", op::translate_1to1_match_2_inputs}, + {"aten::matmul", op::translate_1to1_match_2_inputs}, + {"aten::masked_fill", op::translate_masked_fill}, + {"aten::masked_fill_", op::inplace_op}, + {"aten::max_pool1d", op::translate_max_poolnd}, + {"aten::max_pool2d", op::translate_max_poolnd}, + {"aten::max_pool3d", op::translate_max_poolnd}, + {"aten::max", op::translate_max}, + {"aten::mean", op::translate_mean}, + {"aten::min", op::translate_min}, + {"aten::mm", op::translate_1to1_match_2_inputs}, + {"aten::bmm", op::translate_1to1_match_2_inputs}, + {"aten::matmul", op::translate_1to1_match_2_inputs}, + {"aten::mul", op::translate_1to1_match_2_inputs}, + {"aten::mul_", op::inplace_op>}, + 
{"aten::ne", op::translate_1to1_match_2_inputs}, + {"aten::neg", op::translate_neg}, + {"aten::norm", op::translate_norm}, + {"aten::nonzero", op::translate_nonzero}, + {"aten::numel", op::translate_numel}, + {"aten::new_full", op::translate_new_full}, + {"aten::new_ones", op::translate_new_ones}, + {"aten::new_zeros", op::translate_new_zeros}, + {"aten::ones", op::translate_ones}, + {"aten::ones_like", op::translate_ones_like}, + {"aten::pad", op::translate_pad}, + {"aten::permute", op::translate_1to1_match_2_inputs}, + {"aten::pow", op::translate_1to1_match_2_inputs}, + {"aten::reciprocal", op::translate_reciprocal}, + {"aten::relu", op::translate_1to1_match_1_inputs}, + {"aten::relu_", op::inplace_op>}, + {"aten::relu6", op::translate_relu6}, + {"aten::repeat", op::translate_repeat}, + {"aten::reshape", op::translate_reshape}, + {"aten::reshape_as", op::translate_reshape_as}, + {"aten::rsub", op::translate_rsub}, + {"aten::roll", op::translate_roll}, + {"aten::rsqrt", op::translate_rsqrt}, + {"aten::select", op::translate_select}, + {"aten::selu", op::translate_selu}, + {"aten::selu_", op::inplace_op}, + {"aten::sigmoid", op::translate_1to1_match_1_inputs}, + {"aten::silu", op::translate_1to1_match_1_inputs}, + {"aten::silu_", op::inplace_op>}, + {"aten::sin", op::translate_1to1_match_1_inputs}, + {"aten::sin_", op::inplace_op>}, + {"aten::sinh", op::translate_1to1_match_1_inputs}, + {"aten::sinh_", op::inplace_op>}, + {"aten::size", op::translate_size}, + {"aten::slice", op::translate_slice}, + {"aten::softmax", op::translate_softmax}, + {"aten::sqrt", op::translate_1to1_match_1_inputs}, + {"aten::square", op::translate_square}, + {"aten::squeeze", op::translate_squeeze}, + {"aten::sub", op::translate_sub}, + {"aten::sum", op::translate_sum}, + {"aten::tan", op::translate_1to1_match_1_inputs}, + {"aten::tan_", op::inplace_op>}, + {"aten::tanh", op::translate_1to1_match_1_inputs}, + {"aten::tanh_", op::inplace_op>}, + {"aten::tensor", op::translate_as_tensor}, + 
{"aten::type_as", + op::translate_1to1_match_2_inputs}, // TODO: overflow semantics is different + {"aten::to", op::translate_to}, + {"aten::transpose", op::translate_transpose}, + {"aten::unsqueeze", op::translate_1to1_match_2_inputs}, + {"aten::unsqueeze_", op::inplace_op>}, + {"aten::upsample_bicubic2d", op::translate_upsample_bicubic2d}, + {"aten::upsample_bilinear2d", op::translate_upsample_bilinear2d}, + {"aten::upsample_nearest2d", op::translate_upsample_nearest2d}, + {"aten::var", op::translate_var}, + {"aten::view", op::translate_view}, + {"aten::where", op::translate_where}, + {"aten::zeros", op::translate_zeros}, + {"aten::zeros_like", op::translate_zeros_like}, + {"prim::Constant", op::translate_constant}, + {"prim::GetAttr", op::translate_get_attr}, + {"prim::If", op::translate_if}, + {"prim::is_cuda", op::return_false_scalar}, + {"prim::ListConstruct", op::translate_list_construct}, + {"prim::Loop", op::translate_loop}, + {"prim::NumToTensor", op::skip_node}, // In openvino we already store number as tensor with shape [] + {"prim::requires_grad", op::return_false_scalar}, + {"prim::TupleConstruct", op::translate_tuple_construct}, + {"torchvision::nms", op::translate_nms}, + }; +}; + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op_table.hpp b/src/frontends/pytorch/src/op_table.hpp new file mode 100644 index 00000000000..7344e65fbb2 --- /dev/null +++ b/src/frontends/pytorch/src/op_table.hpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/pytorch/node_context.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +using CreatorFunction = std::function; + +const std::map get_supported_ops(); + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/pt_framework_node.hpp b/src/frontends/pytorch/src/pt_framework_node.hpp new file 
mode 100644 index 00000000000..ac254d19f9b --- /dev/null +++ b/src/frontends/pytorch/src/pt_framework_node.hpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/framework_node.hpp" +#include "utils.hpp" + +#pragma once + +namespace ov { +namespace frontend { +namespace pytorch { +class PtFrameworkNode : public ov::op::util::FrameworkNode { +public: + OPENVINO_OP("PtFrameworkNode", "util", ::ov::op::util::FrameworkNode); + + PtFrameworkNode(const std::shared_ptr& decoder, const OutputVector& inputs, size_t output_size) + : ov::op::util::FrameworkNode(inputs, output_size, decoder->get_subgraph_size()), + m_decoder(decoder) { + ov::op::util::FrameworkNodeAttrs attrs; + attrs.set_type_name("PTFrameworkNode"); + attrs["PtTypeName"] = m_decoder->get_op_type(); + attrs["PtSchema"] = m_decoder->get_schema(); + set_attrs(attrs); + + // Set output shapes and types if recognized + for (size_t i = 0; i < output_size; ++i) { + PartialShape ps; + // TODO: Try to decode PT type as a custom type + auto type = element::dynamic; + if (i < decoder->num_of_outputs()) { + try { + ps = m_decoder->get_output_shape(i); + } catch (...) 
{ + // nothing, means the info cannot be queried and remains unknown + } + } + // TODO: Set custom `type` via special API + set_output_type(i, type, ps); + } + } + + PtFrameworkNode(const std::shared_ptr& decoder, const OutputVector& inputs) + : PtFrameworkNode(decoder, inputs, decoder->num_of_outputs()) {} + + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override { + auto op = std::make_shared(m_decoder, inputs, get_output_size()); + + for (auto body_index = 0; body_index < m_bodies.size(); ++body_index) { + op->set_function(body_index, clone_model(*get_function(body_index))); + for (const auto& m_input_descr : m_input_descriptions[body_index]) { + op->m_input_descriptions[body_index].push_back(m_input_descr->copy()); + } + for (const auto& m_output_descr : m_output_descriptions[body_index]) { + op->m_output_descriptions[body_index].push_back(m_output_descr->copy()); + } + } + op->validate_and_infer_types(); + + return op; + } + + std::string get_op_type() const { + return m_decoder->get_op_type(); + } + + TorchDecoder* get_decoder() const { + return m_decoder.get(); + } + +private: + std::shared_ptr m_decoder; +}; + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/pytorch.cpp b/src/frontends/pytorch/src/pytorch.cpp new file mode 100644 index 00000000000..fddfd299d85 --- /dev/null +++ b/src/frontends/pytorch/src/pytorch.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/manager.hpp" +#include "openvino/frontend/pytorch/frontend.hpp" +#include "openvino/frontend/pytorch/visibility.hpp" + +PYTORCH_C_API ov::frontend::FrontEndVersion GetAPIVersion() { + return OV_FRONTEND_API_VERSION; +} + +PYTORCH_C_API void* GetFrontEndData() { + auto res = new ov::frontend::FrontEndPluginInfo(); + res->m_name = "pytorch"; + res->m_creator = []() { + return std::make_shared(); + }; + return res; +} diff --git 
a/src/frontends/pytorch/src/transforms.cpp b/src/frontends/pytorch/src/transforms.cpp new file mode 100644 index 00000000000..d59c24304d4 --- /dev/null +++ b/src/frontends/pytorch/src/transforms.cpp @@ -0,0 +1,383 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transforms.hpp" + +#include +#include + +#include "openvino/frontend/pytorch/decoder.hpp" +#include "openvino/op/util/framework_node.hpp" +#include "openvino/opsets/opset10.hpp" +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +using ov::op::util::FrameworkNode; +using ov::pass::MatcherPass; +using ov::pass::pattern::any_input; +using ov::pass::pattern::Matcher; +using ov::pass::pattern::wrap_type; +using std::make_shared; +using std::shared_ptr; + +const type::List* is_list(const descriptor::Tensor& tensor) { + // TODO: Use special API to get custom type detalization + return nullptr; +} + +std::tuple is_list_of_tensors(const descriptor::Tensor& tensor) { + // TODO: Use special API to get custom type detalization + Any custom_type; + if (custom_type.empty()) { + return std::make_tuple(false, Any()); + } + + if (!custom_type.is()) { + return std::make_tuple(false, custom_type); + } + + Any element_type = custom_type.as().element_type; + + if (!element_type.is()) { + return std::make_tuple(false, custom_type); + } + + return std::make_tuple(true, custom_type); +} + +std::shared_ptr make_list_pack(const OutputVector& inputs, Any output_type, const PartialShape& shape) { + auto list_pack = make_shared(inputs, 1); // 6 inputs -- 1 output + if (output_type.empty()) { + throw std::runtime_error("Attemt to call make_list_pack with empty output_type"); + } + // TODO: Use special API to set custom type detalization + ov::op::util::FrameworkNodeAttrs attrs; + attrs.set_type_name("PTFE::ListPack"); + list_pack->set_attrs(attrs); + 
list_pack->validate_and_infer_types(); + return list_pack; +} + +std::shared_ptr cast_internal_node(std::shared_ptr node, const std::string& type) { + auto fw_node = std::dynamic_pointer_cast(node); + if (!fw_node) { + return nullptr; + } + if (fw_node->get_attrs().find("PtTypeName") != fw_node->get_attrs().end()) { + // This is FW node, not PT FW internal node, don't mix them + return nullptr; + } + if (fw_node->get_attrs().get_type_name() != type) { + return nullptr; + } + + return fw_node; +} + +class ListConstructPass : public MatcherPass { +public: + OPENVINO_RTTI("PytorchFrontendListConstructPass", "0"); + ListConstructPass() { + auto convert = wrap_type(); + + ov::matcher_pass_callback callback = [](Matcher& m) { + auto node = cast_fw_node(m.get_match_root(), "prim::ListConstruct"); + if (!node) + return false; + const descriptor::Tensor& list_output = node->output(0).get_tensor(); + + auto custom_types = is_list_of_tensors(list_output); + + if (!std::get<0>(custom_types)) { + return false; + } + + auto custom_type = std::get<1>(custom_types); + if (custom_type.empty()) { + throw std::runtime_error("Custom element type is empty"); + } + + // Replace a single ListConstruct with 6 constant tensors: + // - beginnings of tensor elements of type i32 and shape [0] + // - endings of tensor elements of type i32 and shape [0] + // - beginnnigs of shape dimensions of type i32 and shape [0] + // - endings of tensor elements of type i32 and shape [0] + // - shape dimensions of type i32 and shape [0] + // - tensor elements flattened of type i32 (any type) and shape [0] + // Type of elements for the latest tensor is not really known at the moment + // Even worse, it can be dynamic and differ among elements + // So for now we are selecting any type, say f32 + + // Make one i32 constant for all 6 inputs + auto empty_const = opset10::Constant::create(element::i32, {0}, {}); + OutputVector inputs(6, empty_const); + + auto list_pack = make_list_pack(inputs, custom_type, 
node->get_output_partial_shape(0)); + replace_node(node, list_pack); + + return true; + }; + + auto m = make_shared(convert, "PytorchFrontendListConstructPass"); + this->register_matcher(m, callback); + } +}; + +class DecomposeListParameters : public pass::ModelPass { +public: + bool run_on_model(const std::shared_ptr& model) override { + // Search for Parameter with List[Tensor] types + + ParameterVector parameters = model->get_parameters(); + ParameterVector new_parameters; // collect decomposed parameters + for (size_t i = 0; i < parameters.size(); ++i) { + auto parameter = parameters[i]; + + auto custom_types = is_list_of_tensors(parameter->get_output_tensor(0)); + + if (std::get<0>(custom_types)) { + // Decompose each parameter that represetns the list of tensors to 6 inputs + // Element type of the parameter that represents tensor elements is unknown (leave it dynamic) + // Keep original parameters in the model, just detach it from the network -- to avoid parameters + // renumbering for unchanged parameters + // TODO: Reorganize parameters handling (second level of parameters interpretation) + + OutputVector inputs_for_list_pack; + + // for tensors offsets and shapes + for (size_t i = 0; i < 5; ++i) { + auto new_parameter = + make_shared(element::i32, PartialShape{Dimension::dynamic()}); + new_parameters.push_back(new_parameter); + inputs_for_list_pack.push_back(new_parameter); + // TODO: add links via RT info between original parameter and new ones + } + + // for tensor elements + auto new_parameter = + make_shared(element::dynamic, PartialShape{Dimension::dynamic()}); + new_parameters.push_back(new_parameter); + inputs_for_list_pack.push_back(new_parameter); + + auto list_pack = make_list_pack(inputs_for_list_pack, + std::get<1>(custom_types), + parameter->get_output_partial_shape(0)); + replace_node(parameter, list_pack); + + model->remove_parameter({parameter}); + } + } + + model->add_parameters(new_parameters); + + return true; + } +}; + +class 
DecomposeGetItem : public MatcherPass { +public: + OPENVINO_RTTI("PytorchFrontendDecomposeGetItem", "0"); + DecomposeGetItem() { + auto begins = any_input(); + auto ends = any_input(); + auto shape_begins = any_input(); + auto shape_ends = any_input(); + auto shape_dims = any_input(); + auto tensor_elements = any_input(); + auto list_pack = + wrap_type({begins, ends, shape_begins, shape_ends, shape_dims, tensor_elements}); + auto index = any_input(); + auto get_item = wrap_type({list_pack, index}); + + ov::matcher_pass_callback callback = [=](Matcher& m) { + auto matches = m.get_pattern_map(); + + auto get_item_node = cast_fw_node(matches.at(get_item), "aten::__getitem__"); + if (!get_item_node) + return false; + + auto list_pack_node = cast_internal_node(matches.at(list_pack), "PTFE::ListPack"); + if (!list_pack_node) + return false; + + auto zero = opset10::Constant::create(element::i32, {1}, {0}); + auto one = opset10::Constant::create(element::i32, {1}, {1}); + auto mask = std::vector{0}; + + // Prepare index to be 1D tensor to have predictable ranks after Gather for StridedSlice + auto index_1D = make_shared(matches.at(index), one, false); + + // Slice out region with elements relevant to required item from tensor_elements based on begins and ends + auto elements = + make_shared(matches.at(tensor_elements), + make_shared(matches.at(begins), index_1D, zero), + make_shared(matches.at(ends), index_1D, zero), + // TODO: add strides + mask, + mask); + + // Get region of shape dimensions that belongs to the selected item + auto shape = make_shared( + matches.at(shape_dims), + make_shared(matches.at(shape_begins), index_1D, zero), + make_shared(matches.at(shape_ends), index_1D, zero), + // TODO: add strides + mask, + mask); + + // Reshape elements to have a given shape -- this is our result + auto item = make_shared(elements, shape, false); + + replace_node(get_item_node, item); + + return true; + }; + + auto m = make_shared(get_item, 
"PytorchFrontendDecomposeGetItem"); + this->register_matcher(m, callback); + } +}; + +class DecomposeAppend : public MatcherPass { +public: + OPENVINO_RTTI("PytorchFrontendDecomposeAppend", "0"); + DecomposeAppend() { + auto begins = any_input(); + auto ends = any_input(); + auto shape_begins = any_input(); + auto shape_ends = any_input(); + auto shape_dims = any_input(); + auto elements = any_input(); + auto list_pack = wrap_type({begins, ends, shape_begins, shape_ends, shape_dims, elements}); + auto item = any_input(); + auto append = wrap_type({list_pack, item}); + + ov::matcher_pass_callback callback = [=](Matcher& m) { + // TODO: replace by values whenever possible + auto matches = m.get_pattern_map(); + + auto append_node = cast_fw_node(matches.at(append), "aten::append"); + if (!append_node) + return false; + + auto list_pack_node = cast_internal_node(matches.at(list_pack), "PTFE::ListPack"); + if (!list_pack_node) + return false; + + auto custom_types = is_list_of_tensors(append_node->get_output_tensor(0)); + + if (!std::get<0>(custom_types)) { + return false; + } + + auto custom_type = std::get<1>(custom_types); + + // Appending new shape dimensions and producing adjusted versions of shape_begins and shape_ends + auto shape = make_shared(matches.at(item), element::i32); + auto cur_shape_dims_size = make_shared(matches.at(shape_dims), element::i32); + auto new_shape_begins = + make_shared(NodeVector{matches.at(shape_begins), cur_shape_dims_size}, 0); + auto new_shape_dims = make_shared(NodeVector{matches.at(shape_dims), shape}, 0); + auto new_shape_dims_size = make_shared(new_shape_dims, element::i32); + auto new_shape_ends = + make_shared(NodeVector{matches.at(shape_ends), new_shape_dims_size}, 0); + + // Appending new elements after flattening to existing elements + + auto item_flatten = make_shared(matches.at(item), + opset10::Constant::create(element::i32, {1}, {-1}), + false); + auto new_begins = make_shared( + NodeVector{matches.at(begins), 
make_shared(matches.at(elements), element::i32)}, + 0); + + auto initial_elements_const = std::dynamic_pointer_cast(matches.at(elements)); + // New elements content depends on whether we appending to an empty list or not + auto new_elements = + (initial_elements_const && shape_size(initial_elements_const->get_output_shape(0)) == 0) + ? shared_ptr(item_flatten) + : // empty initial list -- just take appended elements as a new content for the list; derive type + // from that tensor + shared_ptr(make_shared(NodeVector{matches.at(elements), item_flatten}, + 0)); // existing list, just concat + + auto new_ends = make_shared( + NodeVector{matches.at(ends), make_shared(new_elements, element::i32)}, + 0); + + auto new_list_pack = + make_list_pack({new_begins, new_ends, new_shape_begins, new_shape_ends, new_shape_dims, new_elements}, + std::get<1>(custom_types), + append_node->get_output_partial_shape(0)); + + replace_node(append_node, new_list_pack); + + return true; + }; + + auto m = make_shared(append, "PytorchFrontendDecomposeAppend"); + this->register_matcher(m, callback); + } +}; + +class DecomposeListResults : public pass::ModelPass { +public: + bool run_on_model(const std::shared_ptr& model) override { + // Search for Parameter with List[Tensor] types + + bool at_least_one_decomposed = false; + + ResultVector results = + model->get_results(); // make a copy, leter results in the model are going to be modified + + for (size_t i = 0; i < results.size(); ++i) { + auto result = results[i]; + auto custom_types = is_list_of_tensors(result->get_input_tensor(0)); + + auto list_pack = cast_internal_node(result->get_input_node_shared_ptr(0), "PTFE::ListPack"); + + if (std::get<0>(custom_types) && list_pack) { + // Replace a single result with 6 results, per each input of parent list_pack + + auto inputs = list_pack->inputs(); + for (auto input : inputs) { + model->add_results({make_shared(input.get_source_output())}); + // TODO: Keep tracking between original and new Results 
+ } + + model->remove_result(result); + at_least_one_decomposed = true; + } + } + + return at_least_one_decomposed; + } +}; + +void apply_pytorch_conversion_transforms(std::shared_ptr model) { + // TODO: We have issues with List transformations, temporary disabled + return; + + pass::Manager manager; + manager.register_pass(); + + auto matchers = manager.register_pass(); + matchers->add_matcher(); + matchers->add_matcher(); + matchers->add_matcher(); + + manager.register_pass(); + manager.register_pass(); + + manager.run_passes(model); +} + +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/transforms.hpp b/src/frontends/pytorch/src/transforms.hpp new file mode 100644 index 00000000000..713386fe9a1 --- /dev/null +++ b/src/frontends/pytorch/src/transforms.hpp @@ -0,0 +1,22 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/pytorch/decoder.hpp" +#include "openvino/frontend/pytorch/frontend.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +// Apply all transformations for finalize PT model conversion to OV +// This transformaitons cannot be implemented during on-the-fly 1:n translation logic so they are applied in separate +// round Input model is a partiall converted model with PT FW internal ops and FW Nodes, the result of the first round +// of translation. 
+void apply_pytorch_conversion_transforms(std::shared_ptr model); + +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp new file mode 100644 index 00000000000..3fdf42d6207 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "append_list_unpack_replacer.hpp" + +#include +#include + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/util/framework_node.hpp" +#include "openvino/pass/pattern/matcher.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +AppendListUnpackReplacer::AppendListUnpackReplacer() { + auto list_unpack = ov::pass::pattern::wrap_type(); + + ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) { + auto list_unpack = cast_fw_node(m.get_match_root(), "prim::ListUnpack"); + if (!list_unpack) + return false; + + OutputVector tmp_inputs; + NodeVector rt_copy_from{list_unpack}; + auto input_node = list_unpack->input_value(0).get_node_shared_ptr(); + + // Optional aten::__getitem__ node. 
+ auto getitem_node = cast_fw_node(input_node, "aten::__getitem__"); + if (getitem_node) { + rt_copy_from.push_back(getitem_node); + input_node = getitem_node->input(0).get_source_output().get_node_shared_ptr(); + } + + while (auto append_node = cast_fw_node(input_node, "aten::append")) { + rt_copy_from.push_back(append_node); + tmp_inputs.push_back(append_node->input(1).get_source_output()); + input_node = append_node->input(0).get_source_output().get_node_shared_ptr(); + } + OutputVector inputs; + auto list_construct_node = cast_fw_node(input_node, "prim::ListConstruct"); + if (!list_construct_node) { + return false; + } + rt_copy_from.push_back(list_construct_node); + for (auto& input : list_construct_node->inputs()) { + inputs.push_back(input.get_source_output()); + } + + inputs.insert(inputs.end(), tmp_inputs.rbegin(), tmp_inputs.rend()); + if (getitem_node) { + // If aten::__getitem__, expect inputs to be equivalent of pytorch Tensor[][]. + // Tensor selected by aten::__getitem__ index needs to be splitted in axis 0. + auto getitem_index_ptr = getitem_node->input_value(1).get_node_shared_ptr(); + auto getitem_index_const = std::dynamic_pointer_cast(getitem_index_ptr); + auto index_val = getitem_index_const->cast_vector(); + auto index = 0; + if (index_val[0] >= 0) { + index = index_val[0]; + } else { + index = inputs.size() + index_val[0]; + } + auto axis_0 = opset10::Constant::create(element::i64, Shape{}, {0}); + auto split = std::make_shared(inputs[index], axis_0, list_unpack->get_output_size()); + NodeVector to_copy_rt{axis_0, split}; + OutputVector res; + for (auto output : split->outputs()) { + auto squeeze = std::make_shared(output, axis_0); + to_copy_rt.push_back(squeeze); + res.push_back(squeeze); + } + copy_runtime_info(rt_copy_from, to_copy_rt); + replace_node(list_unpack, res); + return true; + } else { + // Without aten::__getitem__, expect inputs to be equivalent od pytorch Tensor[]. + // Return all inputs. 
+ replace_node(list_unpack, inputs); + return true; + } + return false; + }; + + auto m = std::make_shared(list_unpack, + "ov::frontend::pytorch::pass::AppendListUnpackReplacer"); + this->register_matcher(m, callback); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp new file mode 100644 index 00000000000..e509f2ff973 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +class AppendListUnpackReplacer : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::frontend::pytorch::pass::AppendListUnpackReplacer"); + AppendListUnpackReplacer(); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp new file mode 100644 index 00000000000..fceab43e846 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "aten_cat_replacer.hpp" + +#include +#include + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/util/framework_node.hpp" +#include "openvino/pass/pattern/matcher.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +// aten::cat needs a special handling since it takes a Tensor[] as input. 
We set the inputs of ListConstruct as the +// inputs of cat. +// +// Pytorch IR: OV model: +// %a %b %c %dim %a %b %c +// \ | / | \ | / +// prim::ListConstruct prim::Constant Concat[axis=%dim] +// \ / +// aten::cat +AtenCatToConcat::AtenCatToConcat() { + auto aten_cat = ov::pass::pattern::wrap_type(); + + ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) { + auto cat = cast_fw_node(m.get_match_root(), "aten::cat"); + if (!cat) + return false; + + auto axis_node = cat->input(1).get_source_output().get_node_shared_ptr(); + auto axis_const = std::dynamic_pointer_cast(axis_node); + if (!axis_const) + return false; + auto axis = axis_const->cast_vector(); + if (axis.size() != 1) + return false; + + OutputVector tmp_inputs; + NodeVector rt_copy_from{cat}; + std::shared_ptr input_node = cat->input(0).get_source_output().get_node_shared_ptr(); + while (const auto& input_fw_node = cast_fw_node(input_node, "aten::append")) { + rt_copy_from.push_back(input_fw_node); + tmp_inputs.push_back(input_fw_node->input(1).get_source_output()); + input_node = input_fw_node->input(0).get_source_output().get_node_shared_ptr(); + } + auto list_construct = cast_fw_node(input_node, "prim::ListConstruct"); + if (!list_construct) + return false; + rt_copy_from.push_back(list_construct); + OutputVector inputs; + for (auto& input : list_construct->inputs()) { + inputs.push_back(input.get_source_output()); + } + inputs.insert(inputs.end(), tmp_inputs.rbegin(), tmp_inputs.rend()); + auto result = std::make_shared(inputs, axis[0]); + copy_runtime_info(rt_copy_from, result); + replace_node(cat, result); + + return true; + }; + + auto m = std::make_shared(aten_cat, "ov::frontend::pytorch::pass::AtenCatToConcat"); + this->register_matcher(m, callback); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp 
b/src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp new file mode 100644 index 00000000000..d1439ca70e9 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/aten_cat_replacer.hpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +// This transformation replaces pattern prim::ListConstruct->aten::append{none or many}->aten::cat +class AtenCatToConcat : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenCatToConcat"); + AtenCatToConcat(); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp new file mode 100644 index 00000000000..85a6f9aa2cb --- /dev/null +++ b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp @@ -0,0 +1,108 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "aten_getitem_replacer.hpp" + +#include +#include + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/util/framework_node.hpp" +#include "openvino/pass/pattern/matcher.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "pt_framework_node.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +AtenGetItemReplacer::AtenGetItemReplacer() { + auto getitem = ov::pass::pattern::wrap_type(); + + ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) { + auto getitem = cast_fw_node(m.get_match_root(), "aten::__getitem__"); + if (!getitem) + return false; + + auto input_node = getitem->input_value(0).get_node_shared_ptr(); + if (auto torch_split = 
cast_fw_node(input_node, "aten::split")) { + auto rank = torch_split->input(1).get_partial_shape().rank(); + if (rank.is_dynamic()) { + return false; + } + if (rank.get_length() == 0) { + // Based on slice_size and output index select size. + // Constants required by transformation. + auto const_1 = opset10::Constant::create(element::i64, Shape{1}, {1}); + auto const_1_0d = opset10::Constant::create(element::i64, Shape{}, {1}); + auto const_0 = opset10::Constant::create(element::i64, Shape{1}, {0}); + auto const_0_0d = opset10::Constant::create(element::i64, Shape{}, {0}); + + // Load and convert op inputs. + auto input = torch_split->get_input_source_output(0); + auto split_size = torch_split->get_input_source_output(1); + auto split_size_1d = std::make_shared(split_size, const_0); + auto axis = torch_split->get_input_source_output(2); + auto axis_1d = std::make_shared(axis, const_0); + auto getitem_idx = getitem->input(1).get_source_output(); + + // Calculate number of splits based on input shape and split_size. + auto shape = std::make_shared(input); + auto len_to_split = std::make_shared(shape, axis, const_0); + // Convert to f64 from int to calculate reminder - last chunk can be smaller if Shape in given axis is + // not equally divisible. + auto len_to_split_float = std::make_shared(len_to_split, element::f64); + auto split_size_1d_float = std::make_shared(split_size_1d, element::f64); + auto out_div = std::make_shared(len_to_split_float, split_size_1d_float); + auto out_num = std::make_shared(out_div); + auto out_num_0d = std::make_shared(out_num, const_0); + + // Use Range and Gather to convert negative getitem indexes into positive due problems with indexing + // with -1. + auto possible_out_idx = + std::make_shared(const_0_0d, out_num_0d, const_1_0d, split_size.get_element_type()); + auto always_positive_out_idx = + std::make_shared(possible_out_idx, getitem_idx, const_0); + + // Use Slice to get only split output selected by getitem idx. 
Couldn't use VariadicSplit due to + // problems with dynamic inputs. + auto split_slice_start = std::make_shared(always_positive_out_idx, split_size_1d); + auto split_slice_end = std::make_shared(split_slice_start, split_size_1d); + auto split = + std::make_shared(input, split_slice_start, split_slice_end, const_1, axis_1d); + copy_runtime_info({getitem, input_node}, split); + replace_node(getitem, split); + } else { + auto getitem_index_ptr = getitem->input_value(1).get_node_shared_ptr(); + auto getitem_index_const = std::dynamic_pointer_cast(getitem_index_ptr); + auto index_val = getitem_index_const->cast_vector(); + auto split = std::make_shared(torch_split->get_input_source_output(0), + torch_split->get_input_source_output(2), + torch_split->get_input_source_output(1)); + auto index = 0; + if (index_val[0] >= 0) { + index = index_val[0]; + } else { + index = split->outputs().size() + index_val[0]; + } + OutputVector res{split->outputs()[index]}; + copy_runtime_info({getitem, input_node}, split); + replace_node(getitem, res); + } + return true; + } + + return false; + }; + + auto m = std::make_shared(getitem, "ov::frontend::pytorch::pass::AtenGetItemReplacer"); + this->register_matcher(m, callback); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp new file mode 100644 index 00000000000..18a945b8806 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +class AtenGetItemReplacer : public ov::pass::MatcherPass { +public: + 
OPENVINO_RTTI("ov::frontend::pytorch::pass::AtenGetItemReplacer"); + AtenGetItemReplacer(); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.cpp b/src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.cpp new file mode 100644 index 00000000000..70cc4214f08 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "max_prim_list_construct_replacer.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/util/framework_node.hpp" +#include "openvino/pass/pattern/matcher.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +MaxPrimListConstructReplacer::MaxPrimListConstructReplacer() { + auto max_op = ov::pass::pattern::wrap_type(); + + ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) { + auto max_op = cast_fw_node(m.get_match_root(), "prim::max"); + if (!max_op) { + return false; + } + auto input_node = max_op->input_value(0).get_node_shared_ptr(); + auto num_inputs = max_op->inputs().size(); + auto input = concat_list_construct(input_node); + if (num_inputs == 1) { + auto start = std::make_shared(element::i32, Shape{}, 0); + auto step = std::make_shared(element::i32, Shape{}, 1); + auto shape = std::make_shared(input, element::i32); + auto rank = std::make_shared(shape, element::i32); + auto reduced_rank = std::make_shared(rank); + auto axes = std::make_shared(start, reduced_rank, step, element::i32); + auto reduce_max = std::make_shared(input, axes); + copy_runtime_info({max_op, input_node}, reduce_max); + replace_node(max_op, reduce_max); + return true; + } + auto second_input_node = max_op->input_value(1).get_node_shared_ptr(); + 
auto second_input = concat_list_construct(second_input_node); + auto maximum_op = std::make_shared(input, second_input); + copy_runtime_info({max_op, input_node, second_input_node}, maximum_op); + replace_node(max_op, maximum_op); + return true; + }; + + auto m = std::make_shared(max_op, + "ov::frontend::pytorch::pass::MaxPrimListConstructReplacer"); + this->register_matcher(m, callback); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.hpp b/src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.hpp new file mode 100644 index 00000000000..913ba096870 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/max_prim_list_construct_replacer.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +class MaxPrimListConstructReplacer : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::frontend::pytorch::pass::MaxPrimListConstructReplacer"); + MaxPrimListConstructReplacer(); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp new file mode 100644 index 00000000000..0f0baf0c66e --- /dev/null +++ b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp @@ -0,0 +1,191 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "prim_list_unpack_replacer.hpp" + +#include +#include + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/util/framework_node.hpp" +#include 
"openvino/pass/pattern/matcher.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +PrimListUnpackReplacer::PrimListUnpackReplacer() { + auto list_unpack = ov::pass::pattern::wrap_type(); + + ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) { + auto list_unpack = cast_fw_node(m.get_match_root(), "prim::ListUnpack"); + if (!list_unpack) + return false; + + auto input_node = list_unpack->input_value(0).get_node_shared_ptr(); + if (auto torch_split = cast_fw_node(input_node, "aten::split")) { + auto rank = torch_split->input(1).get_partial_shape().rank(); + if (rank.is_dynamic()) { + return false; + } + if (rank.get_length() == 0) { + // Create split_lenghts tensor from split_size int, + // allow for last chunk to be smaller if data is not equally divisible. + auto split_size = torch_split->get_input_source_output(1); + // Using number of ListUnpack outputs. + auto num_out_m_1 = opset10::Constant::create(split_size.get_element_type(), + Shape{1}, + {list_unpack->get_output_size() - 1}); + auto const_neg_1 = opset10::Constant::create(split_size.get_element_type(), Shape{1}, {-1}); + auto split_lenghts_m_1 = std::make_shared(split_size, num_out_m_1); + NodeVector concat_inputs{split_lenghts_m_1, const_neg_1}; + auto split_lenghts = std::make_shared(concat_inputs, 0); + auto split = std::make_shared(torch_split->get_input_source_output(0), + torch_split->get_input_source_output(2), + split_lenghts); + copy_runtime_info({list_unpack, input_node}, split); + replace_node(list_unpack, split); + } else { + auto split = std::make_shared(torch_split->get_input_source_output(0), + torch_split->get_input_source_output(2), + torch_split->get_input_source_output(1)); + copy_runtime_info({list_unpack, input_node}, split); + replace_node(list_unpack, split); + } + + return true; + } + + if (auto split_with_sizes = cast_fw_node(input_node, 
"aten::split_with_sizes")) { + auto split = std::make_shared(split_with_sizes->get_input_source_output(0), + split_with_sizes->get_input_source_output(2), + split_with_sizes->get_input_source_output(1)); + + copy_runtime_info({list_unpack, input_node}, split); + replace_node(list_unpack, split); + + return true; + } + + if (auto chunk = cast_fw_node(input_node, "aten::chunk")) { + // Using number of ListUnpack outputs instead of 1st input to chunk. + // TODO: confirm it works for all cases + auto split = std::make_shared(chunk->get_input_source_output(0), + chunk->get_input_source_output(2), + list_unpack->get_output_size()); + + copy_runtime_info({list_unpack, input_node}, split); + replace_node(list_unpack, split); + + return true; + } + + if (auto unbind = cast_fw_node(input_node, "aten::unbind")) { + const auto input = unbind->get_input_source_output(0); + const auto axis = unbind->get_input_source_output(1); + const auto num_splits = list_unpack->get_output_size(); + auto split = std::make_shared(input, axis, num_splits); + NodeVector to_copy_rt{split}; + OutputVector outputs; + for (auto output : split->outputs()) { + const auto squeeze = std::make_shared(output, axis); + outputs.push_back(squeeze); + to_copy_rt.push_back(squeeze); + } + copy_runtime_info({list_unpack, input_node}, to_copy_rt); + replace_node(list_unpack, outputs); + + return true; + } + if (auto where = cast_fw_node(input_node, "aten::where")) { + const auto input = where->get_input_source_output(0); + auto non_zero = std::make_shared(input); + auto axis = opset10::Constant::create(element::i64, Shape{}, {0}); + const auto num_splits = list_unpack->get_output_size(); + auto split = std::make_shared(non_zero, axis, num_splits); + NodeVector to_copy_rt{split}; + OutputVector outputs; + for (auto output : split->outputs()) { + const auto squeeze = std::make_shared(output, axis); + outputs.push_back(squeeze); + to_copy_rt.push_back(squeeze); + } + copy_runtime_info({list_unpack, input_node}, 
to_copy_rt); + replace_node(list_unpack, outputs); + + return true; + } + if (auto nonzero_numpy = cast_fw_node(input_node, "aten::nonzero_numpy")) { + const auto input = nonzero_numpy->get_input_source_output(0); + auto non_zero = std::make_shared(input); + auto axis = opset10::Constant::create(element::i64, Shape{}, {0}); + const auto num_splits = list_unpack->get_output_size(); + auto split = std::make_shared(non_zero, axis, num_splits); + NodeVector to_copy_rt{split}; + OutputVector outputs; + for (auto output : split->outputs()) { + const auto squeeze = std::make_shared(output, axis); + outputs.push_back(squeeze); + to_copy_rt.push_back(squeeze); + } + copy_runtime_info({list_unpack, input_node}, to_copy_rt); + replace_node(list_unpack, outputs); + + return true; + } + if (auto shape_of = std::dynamic_pointer_cast(input_node)) { + // case aten::size as input + // Number of ListUnpack outputs should be equal to rank of input shape. + auto axis_0 = opset10::Constant::create(element::i64, Shape{}, {0}); + auto split = std::make_shared(shape_of, axis_0, list_unpack->get_output_size()); + + NodeVector to_copy_rt{axis_0, split}; + OutputVector res; + for (auto output : split->outputs()) { + auto squeeze = std::make_shared(output, axis_0); + to_copy_rt.push_back(squeeze); + res.push_back(squeeze); + } + + copy_runtime_info({list_unpack, input_node}, to_copy_rt); + replace_node(list_unpack, res); + + return true; + } + + if (auto slice = std::dynamic_pointer_cast(input_node)) { + // case aten::slice as input + // Number of ListUnpack outputs should be equal to rank of input shape. 
+ auto axis_0 = opset10::Constant::create(element::i64, Shape{}, {0}); + auto split = std::make_shared(slice, axis_0, list_unpack->get_output_size()); + + NodeVector to_copy_rt{axis_0, split}; + OutputVector res; + for (auto output : split->outputs()) { + auto squeeze = std::make_shared(output, axis_0); + to_copy_rt.push_back(squeeze); + res.push_back(squeeze); + } + + copy_runtime_info({list_unpack, input_node}, to_copy_rt); + replace_node(list_unpack, res); + + return true; + } + + return false; + }; + + auto m = std::make_shared(list_unpack, + "ov::frontend::pytorch::pass::PrimListUnpackReplacer"); + this->register_matcher(m, callback); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp new file mode 100644 index 00000000000..356c03506f5 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +class PrimListUnpackReplacer : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ov::frontend::pytorch::pass::PrimListUnpackReplacer"); + PrimListUnpackReplacer(); +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.cpp b/src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.cpp new file mode 100644 index 00000000000..4bf1a92696f --- /dev/null +++ b/src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.cpp @@ -0,0 +1,44 @@ +// Copyright (C) 2018-2023 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include "prim_tuple_construct_replacer.hpp" + +#include +#include +#include + +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +bool DecomposeTupleResults::run_on_model(const std::shared_ptr& model) { + bool at_least_one_decomposed = false; + + ResultVector results = model->get_results(); + + for (size_t i = 0; i < results.size(); ++i) { + auto result = results[i]; + auto input_node = result->get_input_node_shared_ptr(0); + auto tuple_construct = cast_fw_node(input_node, "prim::TupleConstruct"); + if (!tuple_construct) { + continue; + } + auto inputs = input_node->inputs(); + for (auto input : inputs) { + model->add_results({std::make_shared(input.get_source_output())}); + } + + model->remove_result(result); + at_least_one_decomposed = true; + } + + return at_least_one_decomposed; +}; +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.hpp new file mode 100644 index 00000000000..926142b6481 --- /dev/null +++ b/src/frontends/pytorch/src/transforms/prim_tuple_construct_replacer.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "openvino/pass/pass.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace pass { + +class DecomposeTupleResults : public ov::pass::ModelPass { +public: + OPENVINO_RTTI("ov::frontend::pytorch::pass::DecomposeTupleResults"); + bool run_on_model(const std::shared_ptr& model) override; +}; + +} // namespace pass +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp new file mode 100644 index 
00000000000..5388229630f --- /dev/null +++ b/src/frontends/pytorch/src/utils.cpp @@ -0,0 +1,455 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "utils.hpp" + +#include "op_table.hpp" +#include "openvino/frontend/pytorch/decoder.hpp" +#include "openvino/util/log.hpp" +#include "pt_framework_node.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { + +Output make_optional_bias(const Output& base_op, + const NodeContext& context, + size_t bias_input_idx, + const std::vector& unsqueeze_dims) { + using std::make_shared; + + if (!context.input_is_none(bias_input_idx)) { + auto bias = context.get_input(bias_input_idx); + if (!unsqueeze_dims.empty()) { + auto indices = opset10::Constant::create(element::i32, {unsqueeze_dims.size()}, unsqueeze_dims); + context.mark_node(indices); + bias = make_shared(bias, indices); + context.mark_output(bias); + } + return make_shared(context.mark_output(base_op), bias); + } else { + return base_op; + } +} + +Output reshape_conv_bias(NodeContext& context, Output bias, Output conv) { + auto conv_shape = context.mark_node(std::make_shared(conv)); + auto conv_rank = context.mark_node(std::make_shared(conv_shape)); + auto one_const = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {1})); + auto two_const = context.mark_node(opset10::Constant::create(element::i64, Shape{1}, {2})); + auto tail_shape_rank = context.mark_node(std::make_shared(conv_rank, two_const)); + auto tail_shape = context.mark_node(std::make_shared(one_const, tail_shape_rank)); + auto channels_dim = context.mark_node(std::make_shared(bias)); + auto new_shape = + context.mark_node(std::make_shared(OutputVector{one_const, channels_dim, tail_shape}, 0)); + + return context.mark_node(std::make_shared(bias, new_shape, false)); +} + +std::shared_ptr get_rank_node(const Output& node) { + auto shape = std::make_shared(node); + return std::make_shared(shape); +} + +Output 
reshape_kernel_for_group(const NodeContext& context, + const Output& input, + const Output& kernel, + int64_t groups) { + using std::make_shared; + + auto in_shape = std::make_shared(input); + auto c_in_idx = opset10::Constant::create(element::i64, Shape{}, {1}); + auto axis_0 = opset10::Constant::create(element::i64, Shape{}, {0}); + auto in_shape_1 = make_shared(in_shape, c_in_idx, axis_0); + auto in_shape_1_uns = make_shared(in_shape_1, axis_0); + auto groups_const = opset10::Constant::create(element::i64, Shape{1}, {groups}); + auto c_in_value = make_shared(in_shape_1_uns, groups_const); + + auto kernel_shape = std::make_shared(kernel); + auto c_out_idx = opset10::Constant::create(element::i64, Shape{}, {0}); + auto kernel_shape_0 = make_shared(kernel_shape, c_out_idx, axis_0); + auto kernel_shape_0_uns = make_shared(kernel_shape_0, axis_0); + auto c_out_value = make_shared(kernel_shape_0_uns, groups_const); + + auto start = opset10::Constant::create(element::i64, Shape{1}, {2}); + auto stop = opset10::Constant::create(element::i64, Shape{1}, {std::numeric_limits::max()}); + auto step = opset10::Constant::create(element::i64, Shape{1}, {1}); + auto remaining_shape = make_shared(kernel_shape, start, stop, step); + + auto new_kernel_shape = + make_shared(OutputVector{groups_const, c_out_value, c_in_value, remaining_shape}, 0); + context.mark_nodes({in_shape, + c_in_idx, + axis_0, + in_shape_1, + in_shape_1_uns, + groups_const, + c_in_value, + kernel_shape, + c_out_idx, + kernel_shape_0, + kernel_shape_0_uns, + c_out_value, + start, + stop, + step, + remaining_shape, + new_kernel_shape}); + return make_shared(kernel, new_kernel_shape, false); +} + +std::shared_ptr get_axes_range(NodeContext& context, size_t input_id) { + auto x = context.get_input(input_id); + auto start = std::make_shared(element::i32, Shape{}, 0); + auto step = std::make_shared(element::i32, Shape{}, 1); + auto shape = context.mark_node(std::make_shared(x, element::i32)); + auto rank = 
context.mark_node(std::make_shared(shape, element::i32)); + auto reduced_rank = context.mark_node(std::make_shared(rank)); + return context.mark_node(std::make_shared(start, reduced_rank, step, element::i32)); +}; + +std::shared_ptr numel(NodeContext& context, size_t input_id) { + auto x = context.get_input(input_id); + auto input_shape = context.mark_node(std::make_shared(x)); + auto axes = context.mark_node(opset10::Constant::create(element::i64, Shape({1}), {0})); + return context.mark_node(std::make_shared(input_shape, axes, false)); +}; + +namespace { +const std::unordered_map TORCH_TO_OV_TYPE{{0, element::u8}, + {1, element::i8}, + {2, element::i16}, + {3, element::i32}, + {4, element::i64}, + {5, element::f16}, + {6, element::f32}, + {7, element::f64}, + {11, element::boolean}}; + +const std::unordered_map TORCH_AUTO_PAD_TO_OV{{"valid", ov::op::PadType::VALID}, + {"same", ov::op::PadType::SAME_UPPER}}; +} // namespace + +ov::element::Type convert_dtype(int64_t pt_type) { + FRONT_END_OP_CONVERSION_CHECK(TORCH_TO_OV_TYPE.count(pt_type), "Unknown type: ", pt_type); + return TORCH_TO_OV_TYPE.at(pt_type); +}; + +ov::op::PadType convert_pad(const std::string& pt_pad) { + FRONT_END_OP_CONVERSION_CHECK(TORCH_AUTO_PAD_TO_OV.count(pt_pad), "Unknown pad: ", pt_pad); + return TORCH_AUTO_PAD_TO_OV.at(pt_pad); +}; + +std::shared_ptr concat_list_construct(std::shared_ptr input) { + if (auto list_construct = cast_fw_node(input, "prim::ListConstruct")) { + auto list_inputs = list_construct->input_values(); + OutputVector node_vector; + auto zero = opset10::Constant::create(element::i32, Shape{}, {0}); + for (size_t i = 0; i < list_inputs.size(); i++) { + auto node = concat_list_construct(list_inputs[i].get_node_shared_ptr()); + auto unsqueezed_node = std::make_shared(node, zero); + node_vector.push_back(unsqueezed_node); + } + return std::make_shared(node_vector, 0); + } + return input; +} + +OutputVector make_framework_node(NodeContext* context) { + auto schema = 
context->get_schema(); + // TODO: properly process schema to get the actual position of mutable input + // Hack. Can indicate mutable inputs, but can it be reliable? + if (schema.find('!') != std::string::npos) { + // We create additional output for such nodes. It contains new tensor that represents input that was changed. + auto fw_node = + std::make_shared(context->get_decoder(), context->inputs(), context->num_of_outputs() + 1); + fw_node->set_friendly_name(context->get_op_type()); + auto outputs = fw_node->outputs(); + // Usually mutated input index is 0, because it is usually "self" input, so we need to replace this tensor with + // output we created. + context->mutate_input(0, outputs.back()); + OPENVINO_DEBUG << "Created node with mutated 0 input. Schema: " << schema << '\n'; + context->mark_node(fw_node); + // For simplification we do not expect such operations to have extra bodies + FRONT_END_OP_CONVERSION_CHECK(context->get_decoder()->get_subgraph_size() == 0, + "Mutable operation has subgraphs."); + return outputs; + } + + // Pay attention to subgraphs that may appear in the node + auto fw_node = + std::make_shared(context->get_decoder(), context->inputs(), context->num_of_outputs()); + fw_node->set_friendly_name(context->get_op_type()); + + std::map inputs_map; + std::map extra_outputs_map; + std::set input_idxs; // initial inputs + // We need to remember initial inputs to be able to find extra inputs to body that were created to propagate + // external context + int num_body_outs = 0; + for (size_t i = 0; i < context->get_decoder()->get_subgraph_size(); ++i) { + auto subgraph_decoder = context->get_decoder()->get_subgraph_decoder(i); + auto inputs = subgraph_decoder->inputs(); + input_idxs.insert(inputs.begin(), inputs.end()); + auto body = context->convert_subgraph(i); + fw_node->set_function(i, body); + for (const auto& param : body->get_parameters()) { + auto name = param->get_output_tensor(0).get_any_name(); + size_t input_idx = 
(size_t)std::stoll(name); + inputs_map[input_idx].push_back(param); + } + auto body_outputs = subgraph_decoder->outputs(); + if (i == 0) { + num_body_outs = body_outputs.size(); + } else { + FRONT_END_OP_CONVERSION_CHECK( + num_body_outs == body_outputs.size(), + "Number of outputs of this body is different from number of outputs of first body"); + } + // Some bodies may have mutated inputs which we need to propagate to external context + auto body_results = body->get_results(); + for (int i = num_body_outs; i < body_results.size(); i++) { + auto name = body_results[i]->input(0).get_tensor().get_any_name(); + size_t out_idx = (size_t)std::stoll(name); + FRONT_END_OP_CONVERSION_CHECK(extra_outputs_map.count(out_idx) == 0, + "More then one body output with same tensor name."); + extra_outputs_map[out_idx].push_back(body_results[i]); + } + } + // Connect inputs with external context + for (const auto& input : inputs_map) { + if (!input_idxs.count(input.first)) { + auto external_output = context->get_tensor_from_model_or_create_input(input.first); + fw_node->set_invariant_inputs(external_output, input.second); + } else { + auto external_output = context->get_tensor_from_model(input.first); + if (external_output.get_node()) { + fw_node->set_invariant_inputs(external_output, input.second); + } + } + } + // Number of body outputs can be higher then number of pt node outputs, e.g. in case of loop first body output is + // condition, we have to skip such outputs + int num_skip_body_outputs = + num_body_outs > context->num_of_outputs() ? 
num_body_outs - context->num_of_outputs() : 0; + // We need to reduce number of outputs, because some outputs are outputs from body + fw_node->set_output_size(context->num_of_outputs() - num_body_outs + num_skip_body_outputs); + OutputVector res(context->mark_node(fw_node)->outputs()); + if (fw_node->get_internal_subgraphs_size() > 0) { + auto first_body_results = fw_node->get_function(0)->get_results(); + std::vector outputs; + for (int i = num_skip_body_outputs; i < num_body_outs; i++) { + outputs.push_back({first_body_results[i]}); + } + for (int i = 1; i < fw_node->get_internal_subgraphs_size(); i++) { + auto current_body_results = fw_node->get_function(i)->get_results(); + for (int i = num_skip_body_outputs; i < num_body_outs; i++) { + outputs[i].push_back(current_body_results[i]); + } + } + for (const auto& res_vec : outputs) { + res.push_back(fw_node->set_body_outputs(res_vec)); + } + } + // Propagate extra outputs to external context + for (const auto& output : extra_outputs_map) { + context->add_tensor_to_context(output.first, fw_node->set_body_outputs(output.second)); + } + return res; +} + +OutputVector convert_node(NodeContext* context) { + try { + auto CONVERTERS_MAP = get_supported_ops(); + auto it = CONVERTERS_MAP.find(context->get_op_type()); + if (it != CONVERTERS_MAP.end()) { + return it->second(*context); + } + + } catch (std::runtime_error& e) { + OPENVINO_DEBUG << "Exception happened during conversion of op: " << context->get_op_type() + << " with schema: " << context->get_schema() << ": " << e.what() << '\n'; + } catch (...) 
{ + OPENVINO_DEBUG << "Some exception happened during conversion of node of type: " << context->get_op_type() + << '\n'; + } + // Create PtFrameworkNode for everything that wasn't able to be converted normally + return make_framework_node(context); +} + +/// \brief Completely convert pytorch_model, creates PtFrameworkNode if not possible to convert node +/// \param pytorch_model Input model +/// \param external_tensor_map Is used for recursive calls of convert_pytorch_model and represent the external context +/// which is visible from nested model. Empty external_tensor_map is used as an indication that this is a main body +/// conversion. +/// \return fully converted OV Model +std::shared_ptr convert_pytorch_model(std::shared_ptr pytorch_model, + const TensorMap& external_tensor_map) { + std::shared_ptr resulting_model; // define here to make a conversion in a nested scope + { + ParameterVector parameters; + TensorMap tensor_map; // tensor map of the current context + std::set mutated_tensors; + + // Go over all pytorch_model inputs and register them in the tensor map: + auto inputs = pytorch_model->inputs(); + for (int i = 0; i < inputs.size(); ++i) { + PartialShape ps = pytorch_model->get_input_shape(i); + auto type = simplified_type_interpret(pytorch_model->get_input_type(i)); + // TODO: Use special API to set custom type detalization + auto parameter = std::make_shared(ov::element::dynamic, ps); + parameter->get_output_tensor(0).add_names({std::to_string(pytorch_model->input(i))}); + parameters.push_back(parameter); + auto order = pytorch_model->get_input_transpose_order(i); + if (order.size() > 0 && !std::is_sorted(order.begin(), order.end())) { + FRONT_END_GENERAL_CHECK(ps.is_static(), "Shape must be static."); // TODO: make dynamic + auto sh = ps.get_shape(); + Shape new_shape(sh.size()); + for (int i = 0; i < sh.size(); i++) { + new_shape[order[i]] = sh[i]; + } + auto shape_const = opset10::Constant::create(element::i64, {new_shape.size()}, new_shape); + 
auto reshape = std::make_shared(parameter, shape_const, false); + auto order_const = opset10::Constant::create(element::i32, {order.size()}, order); + auto transpose = std::make_shared(reshape, order_const); + tensor_map[pytorch_model->input(i)] = transpose; + } else { + tensor_map[pytorch_model->input(i)] = parameter; + } + } + + auto node_visitor = [&](std::shared_ptr node) { + // Explore all inputs of node. Node may refer to input value that hasn't been created in the current scope. + // But this value can be found in the outer scope, for this purpose we create new input for the model to + // link with external scope on a higher level. + + auto raw_inputs = node->inputs(); + for (size_t i = 0; i < raw_inputs.size(); ++i) { + auto input = node->input(i); + if (tensor_map.find(input) == tensor_map.end()) { + // Input refers value in the outer scope, need to create a new Parameter in the current scope + // Linkage to external scope will be performed on the level of the parent operation (if or loop) + // TODO: Eliminate duplication with the main code for Parameters creation + PartialShape ps = node->get_input_shape(i); + auto type = simplified_type_interpret(node->get_input_type(i)); + // TODO: Use special API to set custom type detalization + auto parameter = std::make_shared(element::dynamic, ps); + // TODO: Missing get_input_transpose_order handling for not trivial layouts + tensor_map[input] = parameter; + // set name of parameter to the index of node in the model + parameter->get_output_tensor(0).add_names({std::to_string(input)}); + parameters.push_back(parameter); + } + } + auto context = NodeContext(node, &tensor_map, ¶meters, external_tensor_map); + auto converted_outputs = convert_node(&context); + + auto mutated_t = context.get_mutated_tensors(); + mutated_tensors.insert(mutated_t.begin(), mutated_t.end()); + + auto fw_outputs = node->outputs(); + // Ops with subgraphs or with mutated inputs may have more outputs after conversion compared to pytorch ones 
+ FRONT_END_OP_CONVERSION_CHECK(fw_outputs.size() <= converted_outputs.size(), + "Number of ", + node->get_op_type(), + " outputs greater then number of converted outputs."); + + // TODO: Make sure that mapping of fw_outputs to converted_outputs does always work + // FIXME: Now it is not true for at least prim::Constant + for (size_t i = 0; i < fw_outputs.size(); ++i) { + size_t fw_tensor_id = node->output(i); + if (tensor_map.find(fw_tensor_id) != tensor_map.end()) { + throw std::runtime_error("Duplicated producer for PT value with unique ID: " + + std::to_string(fw_tensor_id)); + } + + // Output shape of converted node should match the original output shape + // OV_FRONTEND_REQUIRE(get_ov_shape(fw_outputs[i]) == converted_outputs[i].get_partial_shape()); + + tensor_map[fw_tensor_id] = converted_outputs[i]; + converted_outputs[i].get_tensor().add_names({std::to_string(fw_tensor_id)}); + } + }; + + FRONT_END_GENERAL_CHECK(pytorch_model->get_subgraph_size() == 1, "Model should have exactly 1 subgraph."); + pytorch_model->visit_subgraph(node_visitor); + + ResultVector results; + for (size_t i = 0; i < pytorch_model->num_of_outputs(); ++i) { + size_t id = pytorch_model->output(i); + if (tensor_map.find(id) == tensor_map.end()) { + // Not found in this scope, adding Parameter to connect to external scope + auto parameter = std::make_shared(element::dynamic, PartialShape::dynamic()); + parameter->get_output_tensor(0).add_names({std::to_string(id)}); + parameters.push_back(parameter); + tensor_map[id] = parameter; + } + auto ov_output = tensor_map[id]; + auto order = pytorch_model->get_output_transpose_order(i); + FRONT_END_GENERAL_CHECK(order.size() == 0 || std::is_sorted(order.begin(), order.end()), + "Output strides have wrong order."); + FRONT_END_GENERAL_CHECK(ov_output.get_names().size() > 0, + "Tensor doesn't have name, while it should have name: ", + id); + auto result = std::make_shared(ov_output); + results.push_back(result); + } + + // Since parameters can be 
added we need to list all current parameters + std::set param_names; + for (const auto& param : parameters) { + auto name = param->get_output_tensor(0).get_any_name(); + size_t input_idx = (size_t)std::stoll(name); + param_names.insert(input_idx); + } + for (const auto& tensor_id : mutated_tensors) { + if (param_names.count(tensor_id)) { + FRONT_END_GENERAL_CHECK(tensor_map.count(tensor_id), + "Tensor with id: ", + tensor_id, + " doesn't exist in tensor map."); + // model input was mutated we need to make a result for it + auto mutated_tensor = tensor_map.at(tensor_id); + // empty external_tensor_map means this is main body of the model and we don't want to create + // additional outputs in that case. + if (mutated_tensor.get_target_inputs().empty() && !external_tensor_map.empty()) + results.push_back(std::make_shared(tensor_map.at(tensor_id))); + } + } + resulting_model = std::make_shared(results, parameters); + // Did a conversion in a nested scope to automatically remove any holders of nodes except those in the graph + } + + return resulting_model; +} + +std::shared_ptr cast_fw_node(std::shared_ptr node, const std::string& type) { + auto fw_node = std::dynamic_pointer_cast(node); + if (!fw_node) { + return nullptr; + } + const auto& attrs = fw_node->get_attrs(); + if (attrs.find("PtTypeName") == attrs.end() || attrs.at("PtTypeName") != type) { + return nullptr; + } + return fw_node; +} + +Any simplified_type_interpret(Any type) { + // Interpret Tensor[type] as just type + // After applying of this interpretation we cannot distinguish true scalars (not tensors) and tensors with elements + // of the same types + if (type.is()) { + auto tensor = type.as(); + if (tensor.element_type.is()) { + return tensor.element_type; + } + } + + return type; +} + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp new file mode 100644 index 00000000000..52fae82bcb0 --- /dev/null 
+++ b/src/frontends/pytorch/src/utils.hpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/opsets/opset10.hpp" + +namespace ov { + +namespace op { +namespace util { +class FrameworkNode; +} +} // namespace op + +namespace frontend { +namespace pytorch { + +Output make_optional_bias(const Output& base_op, + const NodeContext& context, + size_t bias_input_idx, + const std::vector& unsqueeze_dims = {}); + +Output reshape_conv_bias(NodeContext& context, Output bias, Output conv); + +std::shared_ptr get_rank_node(const Output& node); + +Output reshape_kernel_for_group(const NodeContext& context, + const Output& input, + const Output& kernel, + int64_t groups); + +std::shared_ptr get_axes_range(NodeContext& context, size_t input_id); + +std::shared_ptr numel(NodeContext& context, size_t input_id); + +element::Type convert_dtype(int64_t dtype_value); +ov::op::PadType convert_pad(const std::string& pt_pad); + +std::shared_ptr concat_list_construct(std::shared_ptr input); + +std::shared_ptr convert_pytorch_model(std::shared_ptr pytorch_model, + const TensorMap& external_tensor_map = {}); + +OutputVector convert_node(NodeContext* context); + +std::shared_ptr cast_fw_node(std::shared_ptr node, const std::string& type); + +// TODO: Elimitate the need of this function by implementing more accurate custom data type handling +Any simplified_type_interpret(Any type); + +namespace op { +template +OutputVector inplace_op(NodeContext& context) { + auto translation_res = T(context); + FRONT_END_OP_CONVERSION_CHECK(translation_res.size() == 1, + "inplace_op function must be used on single output translators"); + context.mutate_input(idx, translation_res[0]); + return translation_res; +} + +template +OutputVector translate_1to1_match_1_inputs(NodeContext& context) { + auto inputs = context.inputs(); + 
FRONT_END_OP_CONVERSION_CHECK(inputs.size() >= 1, "Operation has no inputs."); + for (int i = 1; i < inputs.size(); i++) { + FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(i), "Got more inputs than expected."); + } + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(0), "Input should not be None."); + return {context.mark_node(std::make_shared(inputs[0]))}; +} + +template +OutputVector translate_1to1_match_2_inputs(NodeContext& context) { + auto inputs = context.inputs(); + FRONT_END_OP_CONVERSION_CHECK(inputs.size() >= 2, "Operation has less then 2 inputs."); + for (int i = 2; i < inputs.size(); i++) { + FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(i), "Got more inputs than expected."); + } + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(0) && !context.input_is_none(1), "Inputs should not be None."); + return {context.mark_node(std::make_shared(inputs[0], inputs[1]))}; +} + +inline OutputVector return_false_scalar(NodeContext& context) { + return {context.mark_node(opset10::Constant::create(element::boolean, Shape{}, {false}))}; +} + +inline OutputVector skip_node(NodeContext& context) { + return {context.get_input(0).get_node_shared_ptr()}; +} +} // namespace op + +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt b/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt index 0f18cf851cf..a447da5b82d 100644 --- a/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt +++ b/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt @@ -30,6 +30,11 @@ if (ENABLE_OV_PADDLE_FRONTEND) list(APPEND DEFINITIONS ENABLE_OV_PADDLE_FRONTEND) endif() +if (ENABLE_OV_PYTORCH_FRONTEND) + list(APPEND DEPENDENCIES openvino::frontend::pytorch) + list(APPEND DEFINITIONS ENABLE_OV_PYTORCH_FRONTEND) +endif() + # Create library add_library(${TARGET_NAME} MODULE ${LIBRARY_SRC} ${LIBRARY_HEADERS}) diff --git 
a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt index 0baa9504a0a..5f7dffeb95c 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/CMakeLists.txt @@ -28,6 +28,11 @@ if(TARGET openvino_paddle_frontend) list(APPEND DEFINITIONS ENABLE_OV_PADDLE_FRONTEND) endif() +if(TARGET openvino_pytorch_frontend) + list(APPEND DEPENDENCIES openvino_pytorch_frontend) + list(APPEND DEFINITIONS ENABLE_OV_PYTORCH_FRONTEND) +endif() + if(TARGET openvino_tensorflow_frontend) list(APPEND DEPENDENCIES openvino_tensorflow_frontend) list(APPEND DEFINITIONS ENABLE_OV_TF_FRONTEND) diff --git a/tests/layer_tests/pytorch_tests/conftest.py b/tests/layer_tests/pytorch_tests/conftest.py new file mode 100644 index 00000000000..4303c12eef6 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/conftest.py @@ -0,0 +1,12 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import inspect + +from pytorch_layer_test_class import get_params + + +def pytest_generate_tests(metafunc): + test_gen_attrs_names = list(inspect.signature(get_params).parameters) + params = get_params() + metafunc.parametrize(test_gen_attrs_names, params, scope="function") diff --git a/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py b/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py new file mode 100644 index 00000000000..0b5835186e8 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py @@ -0,0 +1,157 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import itertools +import warnings + +import numpy as np +from common.constants import test_device, test_precision +from openvino.frontend.pytorch.decoder import TorchScriptPythonDecoder + +from openvino.frontend import FrontEndManager +from openvino.runtime import Core, 
Type, PartialShape + + +class PytorchLayerTest: + _type_map = { + "float64": Type.f64, + "float32": Type.f32, + "bool": Type.boolean, + "int32": Type.i32, + "int64": Type.i64, + "int16": Type.i16, + "int8": Type.i8, + "uint8": Type.u8 + } + + @staticmethod + def _check_kind_exist(graph, kind): + for n in graph.nodes(): + if n.kind() == kind: + return True + for b in n.blocks(): + if PytorchLayerTest._check_kind_exist(b, kind): + return True + return False + + def _test(self, model, ref_net, kind, ie_device, precision, ir_version, infer_timeout=60, dynamic_shapes=True, + **kwargs): + """ + :param enabled_transforms/disabled_transforms: string with idxs of transforms that should be enabled/disabled. + Example: "transform_1,transform_2" + """ + import torch + if 'kwargs_to_prepare_input' in kwargs and kwargs['kwargs_to_prepare_input']: + inputs = self._prepare_input(**kwargs['kwargs_to_prepare_input']) + else: + inputs = self._prepare_input() + with torch.no_grad(): + model.eval() + if not kwargs.get('trace_model', False): + model = torch.jit.script(model) + else: + torch_inputs = [torch.from_numpy(inp) for inp in inputs] + model = torch.jit.trace(model, torch_inputs) + model = torch.jit.freeze(model) + graph = model.inlined_graph + print(graph) + + assert kind is None or self._check_kind_exist( + graph, kind), "Operation type doesn't exist in provided graph" + + fe_manager = FrontEndManager() + fe = fe_manager.load_by_framework('pytorch') + + decoder = TorchScriptPythonDecoder(model) + + im = fe.load(decoder) + om = fe.convert(im) + + params = om.get_parameters() + # todo: support lists and dicts + for i in range(len(inputs)): + inp = inputs[i] + assert inp.dtype.name in self._type_map, f"Unknown type {inp.dtype}." 
+ params[i].set_element_type(self._type_map[inp.dtype.name]) + shape = [-1] * len(inp.shape) if dynamic_shapes else inp.shape + params[i].set_partial_shape(PartialShape(shape)) + om.validate_nodes_and_infer_types() + + # OV infer: + core = Core() + compiled = core.compile_model(om, ie_device) + infer_res = compiled(inputs) + + if hasattr(self, 'skip_framework') and self.skip_framework: + warnings.warn('Framework is skipped') + return + + # Framework infer: + torch_inps = [torch.from_numpy(inp) for inp in inputs] + fw_res = model(*torch_inps) + + if not isinstance(fw_res, (tuple)): + fw_res = (fw_res,) + + output_list = list(infer_res.values()) + assert len(fw_res) == len( + output_list), f'number of outputs not equal, {len(fw_res)} != {len(output_list)}' + # check if results dtypes match + for fw_tensor, ov_tensor in zip(fw_res, output_list): + if not isinstance(fw_tensor, torch.Tensor): + if np.isscalar(fw_tensor): + assert fw_tensor == np.array(ov_tensor).item() + else: + if isinstance(fw_tensor, list): + ov_tensor = ov_tensor.tolist() + assert ov_tensor == fw_tensor + assert type(fw_tensor) == type(ov_tensor) + continue + assert torch.tensor(np.array( + ov_tensor)).dtype == fw_tensor.dtype, f"dtype validation failed: {torch.tensor(np.array(ov_tensor)).dtype} != {fw_tensor.dtype}" + + if 'custom_eps' in kwargs and kwargs['custom_eps'] is not None: + custom_eps = kwargs['custom_eps'] + else: + custom_eps = 1e-4 + + # Compare Ie results with Framework results + fw_eps = custom_eps if precision == 'FP32' else 5e-2 + is_ok = True + for i in range(len(infer_res)): + cur_fw_res = fw_res[i].to(memory_format=torch.contiguous_format).numpy( + ) if isinstance(fw_res[i], torch.Tensor) else fw_res[i] + cur_ov_res = infer_res[compiled.output(i)] + print(f"fw_re: {cur_fw_res};\n ov_res: {cur_ov_res}") + if not np.allclose(cur_ov_res, cur_fw_res, + atol=fw_eps, + rtol=fw_eps, equal_nan=True): + is_ok = False + print("Max diff is {}".format( + np.array( + abs(cur_ov_res - 
cur_fw_res)).max())) + else: + print("Accuracy validation successful!\n") + print("absolute eps: {}, relative eps: {}".format(fw_eps, fw_eps)) + assert is_ok, "Accuracy validation failed" + + # Each model should specify inputs + def _prepare_input(self): + raise RuntimeError("Please provide inputs generation function") + + +def get_params(ie_device=None, precision=None): + """ + :param ie_device: list of devices + :param precision: list of precisions + """ + + ie_device_params = ie_device if ie_device else test_device + precision_params = precision if precision else test_precision + + test_args = [] + for element in itertools.product(ie_device_params, precision_params): + if element[0] == 'CPU' and element[1] == 'FP16': + continue + test_args.append(element) + return test_args diff --git a/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py b/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py new file mode 100644 index 00000000000..f1561c809d9 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_adaptive_avg_pool3d.py @@ -0,0 +1,37 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('input_tensor', (np.random.randn(1, 2, 8, 9, 10).astype(np.float32), + np.random.randn(2, 8, 9, 10).astype(np.float32))) +@pytest.mark.parametrize('output_size', ([5, 7, 9], 7)) +class TestAdaptiveAvgPool3D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size): + class aten_adaptive_avg_pool3d(torch.nn.Module): + + def __init__(self, output_size) -> None: + super().__init__() + self.output_size = output_size + + def forward(self, input_tensor): + return torch.nn.functional.adaptive_avg_pool3d(input_tensor, self.output_size) + + ref_net = None + + return aten_adaptive_avg_pool3d(output_size), ref_net, "aten::adaptive_avg_pool3d" + + 
@pytest.mark.nightly + @pytest.mark.precommit + def test_adaptive_avg_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size): + self.input_tensor = input_tensor + self._test(*self.create_model(output_size), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py b/tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py new file mode 100644 index 00000000000..3edfe261f6d --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_adaptive_max_pool_2d.py @@ -0,0 +1,51 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch +import torch.nn.functional as F + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestAdaptiveMaxPool2D(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_tensor,) + + def create_model(self, output_size=None, return_indices=False): + class aten_adaptive_max_pool2d(torch.nn.Module): + + def __init__(self, output_size=None, return_indices=False) -> None: + super().__init__() + self.output_size = output_size + self.return_indices = return_indices + + def forward(self, input_tensor): + if self.return_indices: + output, indices = F.adaptive_max_pool2d(input_tensor, self.output_size, True) + return output + return F.adaptive_max_pool2d(input_tensor, self.output_size, False) + + ref_net = None + + return aten_adaptive_max_pool2d(output_size, return_indices), ref_net, "aten::adaptive_max_pool2d" + + @pytest.mark.parametrize('input_tensor', ([ + np.random.randn(1, 1, 4, 4).astype(np.float32), + np.random.randn(1, 3, 32, 32).astype(np.float32) + ])) + @pytest.mark.parametrize('output_size', ([ + [2, 2], + [4, 4], + ])) + @pytest.mark.parametrize('return_indices', ([ + False, + True, + ])) + @pytest.mark.nightly + @pytest.mark.precommit + def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices): + self.input_tensor 
= input_tensor + self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_add.py b/tests/layer_tests/pytorch_tests/test_add.py new file mode 100644 index 00000000000..313a781a63a --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_add.py @@ -0,0 +1,38 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('alpha', (-0.5, 0, 0.5, 1, 2)) +@pytest.mark.parametrize('input_rhs', (np.random.randn(2, 5, 3, 4).astype(np.float32), + np.random.randn(1, 5, 3, 4).astype(np.float32), + np.random.randn(1).astype(np.float32))) +class TestAdd(PytorchLayerTest): + + def _prepare_input(self): + return (np.random.randn(2, 5, 3, 4).astype(np.float32), self.input_rhs) + + def create_model(self, alpha): + class aten_add(torch.nn.Module): + + def __init__(self, alpha) -> None: + super().__init__() + self.alpha = alpha + + def forward(self, lhs, rhs): + return torch.add(lhs, rhs, alpha=self.alpha) + + ref_net = None + + return aten_add(alpha), ref_net, "aten::add" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_add(self, ie_device, precision, ir_version, alpha, input_rhs): + self.input_rhs = input_rhs + self._test(*self.create_model(alpha), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_addcmul.py b/tests/layer_tests/pytorch_tests/test_addcmul.py new file mode 100644 index 00000000000..abe9e56e550 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_addcmul.py @@ -0,0 +1,51 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestAddCMul(PytorchLayerTest): + def _prepare_input(self): + return (np.random.uniform(0, 50, 
3).astype(self.input_type), + np.random.uniform(0, 50, 3).astype(self.input_type), + np.random.uniform(0, 50, 3).astype(self.input_type)) + + def create_model(self, value=None): + import torch + + class aten_addcmul(torch.nn.Module): + def __init__(self, value=None): + super(aten_addcmul, self).__init__() + self.value = value + + def forward(self, x, y, z): + if self.value is not None: + return torch.addcmul(x, y, z, value=self.value) + return torch.addcmul(x, y, z) + + ref_net = None + + return aten_addcmul(value), ref_net, "aten::addcmul" + + @pytest.mark.parametrize(("input_type", "value"), [ + [np.int32, None], + [np.float32, None], + [np.float64, None], + [np.int32, 1], + [np.int32, 2], + [np.int32, 10], + [np.int32, 110], + [np.float32, 2.0], + [np.float32, 3.1], + [np.float32, 4.5], + [np.float64, 41.5], + [np.float64, 24.5], + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_addcmul(self, input_type, value, ie_device, precision, ir_version): + self.input_type = input_type + self._test(*self.create_model(value), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_addmm.py b/tests/layer_tests/pytorch_tests/test_addmm.py new file mode 100644 index 00000000000..fe4d5450cd7 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_addmm.py @@ -0,0 +1,49 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestAddMM(PytorchLayerTest): + def _prepare_input(self, input_shape=(2, 2), matrix1_shape=(2, 2), matrix2_shape=(2, 2)): + import numpy as np + return ( + np.random.randn(*input_shape).astype(np.float32), + np.random.randn(*matrix1_shape).astype(np.float32), + np.random.randn(*matrix2_shape).astype(np.float32) + ) + + def create_model(self, alpha, beta): + import torch + + class aten_addmm(torch.nn.Module): + def __init__(self, alpha, beta): + super(aten_addmm, self).__init__() + self.alpha = alpha + 
self.beta = beta + + def forward(self, m0, m1, m2): + return torch.addmm(m0, m1, m2, alpha=self.alpha, beta=self.beta) + + ref_net = None + + return aten_addmm(alpha, beta), ref_net, 'aten::addmm' + + @pytest.mark.parametrize("kwargs_to_prepare_input", [ + {"input_shape": (3, 3), 'matrix1_shape': (3, 3), 'matrix2_shape': (3, 3)}, + {"input_shape": (2, 2), 'matrix1_shape': (2, 3), 'matrix2_shape': (3, 2)}, + {"input_shape": (10, 1), 'matrix1_shape': (10, 5), 'matrix2_shape': (5, 1)}, + {"input_shape": (1, 2), 'matrix1_shape': (1, 10), 'matrix2_shape': (10, 2)}, + {"input_shape": (1, 1), 'matrix1_shape': (1, 10), 'matrix2_shape': (10, 1)}, + + ]) + @pytest.mark.parametrize("alpha,beta", + [(1., 1.), (0., 1.), (1., 0.), (1., 2.), (2., 1.), (-5., -6.), (3., 4.), (0.5, 0.75), + (1, 1)]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_addmm(self, kwargs_to_prepare_input, alpha, beta, ie_device, precision, ir_version): + self._test(*self.create_model(alpha, beta), ie_device, precision, ir_version, + kwargs_to_prepare_input=kwargs_to_prepare_input) diff --git a/tests/layer_tests/pytorch_tests/test_arange.py b/tests/layer_tests/pytorch_tests/test_arange.py new file mode 100644 index 00000000000..b01222c78b5 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_arange.py @@ -0,0 +1,113 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestExp(PytorchLayerTest): + def _prepare_input(self, end, start=None, step=None, dtype="int64"): + import numpy as np + if start is None and step is None: + return (np.array(end).astype(dtype),) + if step is None: + return (np.array(start).astype(dtype), np.array(end).astype(dtype)) + return (np.array(start).astype(dtype), np.array(end).astype(dtype), np.array(step).astype(dtype)) + + def create_model(self, dtype, num_inputs, use_out=False): + import torch + + dtype_map = { + "float32": torch.float32, + 
"float64": torch.float64, + "int64": torch.int64, + "int32": torch.int32, + "uint8": torch.uint8, + "int8": torch.int8 + } + + class aten_arange_end_dtype(torch.nn.Module): + def __init__(self, dtype) -> None: + super(aten_arange_end_dtype, self).__init__() + self.dtype = dtype + + def forward(self, x: int): + return torch.arange(x, dtype=self.dtype) + + class aten_arange_start_end_dtype(torch.nn.Module): + def __init__(self, dtype) -> None: + super(aten_arange_start_end_dtype, self).__init__() + self.dtype = dtype + + def forward(self, x: float, y: float): + return torch.arange(start=x, end=y, dtype=self.dtype) + + class aten_arange_start_end_step_dtype(torch.nn.Module): + def __init__(self, dtype) -> None: + super(aten_arange_start_end_step_dtype, self).__init__() + self.dtype = dtype + + def forward(self, x: float, y: float, z: float): + return torch.arange(start=x, end=y, step=z, dtype=self.dtype) + + class aten_arange_end_out(torch.nn.Module): + def __init__(self, dtype) -> None: + super(aten_arange_end_out, self).__init__() + self.dtype = dtype + + def forward(self, x: int): + return torch.arange(x, out=torch.zeros(1, dtype=self.dtype)) + + class aten_arange_start_end_out(torch.nn.Module): + def __init__(self, out) -> None: + super(aten_arange_start_end_out, self).__init__() + self.out = out + + def forward(self, x: float, y: float): + return torch.arange(start=x, end=y, out=self.out) + + class aten_arange_start_end_step_out(torch.nn.Module): + def __init__(self, out) -> None: + super(aten_arange_start_end_step_out, self).__init__() + self.out = out + + def forward(self, x: float, y: float, z: float): + return torch.arange(start=x, end=y, step=z, out=self.out) + + model_classes = { + 1: (aten_arange_end_dtype, aten_arange_end_out), + 2: (aten_arange_start_end_dtype, aten_arange_start_end_out), + 3: (aten_arange_start_end_step_dtype, aten_arange_start_end_step_out) + } + dtype = dtype_map.get(dtype) + model_class = model_classes[num_inputs][0](dtype) if not 
use_out or dtype is None else \ + model_classes[num_inputs][1](dtype) + print(model_class) + + ref_net = None + + return model_class, ref_net, "aten::arange" + + @pytest.mark.nightly + @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8", "uin8"]) + @pytest.mark.parametrize("end", [1, 2, 3]) + @pytest.mark.parametrize("use_out", [True, False]) + def test_arange_end_only(self, dtype, end, use_out, ie_device, precision, ir_version): + self._test(*self.create_model(dtype, 1, use_out), ie_device, precision, ir_version, + kwargs_to_prepare_input={"end": end}) + + @pytest.mark.nightly + @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8"]) + @pytest.mark.parametrize("start,end", [(0, 1), (-1, 1), (1, 5), (0.5, 2.5)]) + def test_arange_start_end(self, dtype, end, start, ie_device, precision, ir_version): + self._test(*self.create_model(dtype, 2), ie_device, precision, ir_version, + kwargs_to_prepare_input={"end": end, "start": start, "dtype": "float32"}) + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("dtype", [None, "float32", "float64", "int32", "int64", "int8"]) + @pytest.mark.parametrize("start,end,step", [(0, 1, 1), (-2, 1, 1.25), (1, -5, -1), (1, 10, 2), (-1, -5, -2)]) + def test_arange_start_end_step(self, dtype, end, start, step, ie_device, precision, ir_version): + self._test(*self.create_model(dtype, 3), ie_device, precision, ir_version, + kwargs_to_prepare_input={"end": end, "start": start, "step": step, "dtype": "float32"}) diff --git a/tests/layer_tests/pytorch_tests/test_batch_norm.py b/tests/layer_tests/pytorch_tests/test_batch_norm.py new file mode 100644 index 00000000000..8f94fe16ae4 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_batch_norm.py @@ -0,0 +1,48 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from pytorch_layer_test_class import PytorchLayerTest + + +class 
TestBatchNorm(PytorchLayerTest): + def _prepare_input(self, ndim=4): + import numpy as np + shape5d = [20, 6, 10, 10, 10] + shape = shape5d[:ndim] + return (np.random.randn(*shape).astype(np.float32),) + + def create_model(self, weights, bias, eps): + + import torch + import torch.nn.functional as F + + class aten_batch_norm_inference(torch.nn.Module): + def __init__(self, weights=True, bias=True, eps=1e-05): + super(aten_batch_norm_inference, self).__init__() + self.weight = torch.randn(6) if weights else None + self.bias = torch.randn(6) if bias else None + self.running_mean = torch.randn(6) + self.running_var = torch.randn(6) + self.eps = eps + + def forward(self, x): + return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, eps=self.eps, training=False) + + ref_net = None + + return aten_batch_norm_inference(weights, bias, eps), ref_net, "aten::batch_norm" + + @pytest.mark.parametrize("weights", [True, False]) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.parametrize("eps", [1.0, 0.00005, 0.5, 0.042]) + @pytest.mark.parametrize("kwargs_to_prepare_input", [ + {"ndim": 3}, + {'ndim': 4}, + {"ndim": 5} + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_batch_norm(self, weights, bias, eps, ie_device, precision, ir_version, kwargs_to_prepare_input): + self._test(*self.create_model(weights, bias, eps), + ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input, dynamic_shapes=False) \ No newline at end of file diff --git a/tests/layer_tests/pytorch_tests/test_ceil.py b/tests/layer_tests/pytorch_tests/test_ceil.py new file mode 100644 index 00000000000..e91e2e5d18e --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_ceil.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestCeil(PytorchLayerTest): + def _prepare_input(self): + import numpy as 
np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, inplace): + import torch + + class aten_ceil(torch.nn.Module): + def __init__(self, inplace): + super(aten_ceil, self).__init__() + self.op = torch.ceil_ if inplace else torch.ceil + + def forward(self, x): + return x, self.op(x) + + ref_net = None + + return aten_ceil(inplace), ref_net, "aten::ceil" if not inplace else "aten::ceil_" + + @pytest.mark.parametrize("inplace", [False, True]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_ceil(self, inplace, ie_device, precision, ir_version): + self._test(*self.create_model(inplace), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_clamp.py b/tests/layer_tests/pytorch_tests/test_clamp.py new file mode 100644 index 00000000000..346b47c3d1f --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_clamp.py @@ -0,0 +1,98 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestClamp(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, minimum, maximum, as_tensors=False): + import torch + + class aten_clamp(torch.nn.Module): + def __init__(self, minimum, maximum, as_tensors): + super(aten_clamp, self).__init__() + if minimum is not None and as_tensors: + minimum = torch.tensor(minimum) + self.min = minimum + if maximum is not None and as_tensors: + maximum = torch.tensor(maximum) + self.max = maximum + + def forward(self, x): + return torch.clamp(x, self.min, self.max) + + ref_net = None + op_name = "aten::clamp" + return aten_clamp(minimum, maximum, as_tensors), ref_net, op_name + + @pytest.mark.parametrize("minimum,maximum", + [(0., 1.), (-0.5, 1.5), (None, 10.), (None, -10.), (10., None), (-10., None), (100, 200)]) + @pytest.mark.parametrize("as_tensors", [True, 
False]) + @pytest.mark.nightly + def test_clamp(self, minimum, maximum, as_tensors, ie_device, precision, ir_version): + self._test(*self.create_model(minimum, maximum, as_tensors), ie_device, precision, ir_version) + + @pytest.mark.xfail(reason='OpenVINO clamp does not support min > max') + def test_clamp_min_greater(self, ie_device, precision, ir_version): + self._test(*self.create_model(1.0, 0.0), ie_device, precision, ir_version) + + +class TestClampMin(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, minimum, as_tensor=False): + import torch + + class aten_clamp_min(torch.nn.Module): + def __init__(self, minimum, as_tensor): + super(aten_clamp_min, self).__init__() + self.min = torch.tensor(minimum) if as_tensor else minimum + + def forward(self, x): + return torch.clamp_min(x, self.min) + + ref_net = None + op_name = "aten::clamp_min" + return aten_clamp_min(minimum, as_tensor), ref_net, op_name + + @pytest.mark.parametrize("minimum", [0., 1., -1., 0.5]) + @pytest.mark.parametrize("as_tensor", [True, False]) + @pytest.mark.nightly + def test_clamp_min(self, minimum, as_tensor, ie_device, precision, ir_version): + self._test(*self.create_model(minimum, as_tensor), ie_device, precision, ir_version) + + +class TestClampMax(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, maximum, as_tensor=False): + import torch + + class aten_clamp_max(torch.nn.Module): + def __init__(self, maximum, as_tensor): + super(aten_clamp_max, self).__init__() + self.max = torch.tensor(maximum) if as_tensor else maximum + + def forward(self, x): + return torch.clamp_max(x, self.max) + + ref_net = None + op_name = "aten::clamp_max" + return aten_clamp_max(maximum, as_tensor), ref_net, op_name + + @pytest.mark.parametrize("maximum", [0., 1., -1., 0.5]) + 
@pytest.mark.parametrize("as_tensor", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_clamp(self, maximum, as_tensor, ie_device, precision, ir_version): + self._test(*self.create_model(maximum, as_tensor), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_clone.py b/tests/layer_tests/pytorch_tests/test_clone.py new file mode 100644 index 00000000000..f56aac44e9d --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_clone.py @@ -0,0 +1,29 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestClone(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self): + import torch + + class aten_clone(torch.nn.Module): + + def forward(self, x): + return torch.clone(x) + + ref_net = None + + return aten_clone(), ref_net, "aten::clone" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_clone(self, ie_device, precision, ir_version): + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_comparision.py b/tests/layer_tests/pytorch_tests/test_comparision.py new file mode 100644 index 00000000000..b667d6d2c95 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_comparision.py @@ -0,0 +1,59 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestComp(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 24, 24).astype(np.float32), np.random.randn(1, 3, 24, 24).astype(np.float32)) + + def create_model(self, op_type): + import torch + + class aten_eq(torch.nn.Module): + def forward(self, x, y): + return x == y + + class aten_ne(torch.nn.Module): + def forward(self, x, 
y): + return x != y + + class aten_lt(torch.nn.Module): + def forward(self, x, y): + return x < y + + class aten_gt(torch.nn.Module): + def forward(self, x, y): + return x > y + + class aten_le(torch.nn.Module): + def forward(self, x, y): + return x <= y + + class aten_ge(torch.nn.Module): + def forward(self, x, y): + return x >= y + + ops = { + "eq": aten_eq, + "ne": aten_ne, + "lt": aten_lt, + "gt": aten_gt, + "ge": aten_ge, + "le": aten_le + } + model_cls = ops[op_type] + + ref_net = None + + return model_cls(), ref_net, f"aten::{op_type}" + + @pytest.mark.parametrize("op", ["eq", "ne", "lt", "gt", "le", "ge"]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_comp(self, op, ie_device, precision, ir_version): + self._test(*self.create_model(op), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_convnd.py b/tests/layer_tests/pytorch_tests/test_convnd.py new file mode 100644 index 00000000000..d3ef7e42e05 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_convnd.py @@ -0,0 +1,164 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestConv2D(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(2, 3, 25, 25).astype(np.float32),) + + def create_model(self, weights_shape, strides, pads, dilations, groups, bias): + import torch + import torch.nn.functional as F + + class aten_conv2d(torch.nn.Module): + def __init__(self): + super(aten_conv2d, self).__init__() + self.weight = torch.randn(weights_shape) + self.bias = None + if bias: + self.bias = torch.randn(weights_shape[0]) + self.strides = strides + self.pads = pads + self.dilations = dilations + self.groups = groups + + def forward(self, x): + return F.conv2d(x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.groups) + + ref_net = None + + return aten_conv2d(), ref_net, "aten::conv2d" + + 
@pytest.mark.parametrize("params", + [{'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': 2, 'pads': 0, 'dilations': 1, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 1, 'dilations': 1, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 2, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': [0, 1], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': [1, 0], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 'same', 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': 1, 'pads': 'valid', 'dilations': 1, + 'groups': 1}, + # doesn't work because input shape is dynamic which makes kernel shape dynamic + # {'weights_shape': [2, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 2}, + ]) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_conv2d(self, params, bias, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias), + ie_device, precision, ir_version) + + +class TestConv1D(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(2, 3, 25).astype(np.float32),) + + def create_model(self, weights_shape, strides, pads, dilations, groups, bias): + import torch + import torch.nn.functional as F + + class aten_conv1d(torch.nn.Module): + def __init__(self): + super(aten_conv1d, self).__init__() + self.weight = torch.randn(weights_shape) + self.bias = None + if bias: + self.bias = torch.randn(weights_shape[0]) + self.strides = strides + self.pads = pads + self.dilations = dilations + self.groups = groups + + def forward(self, x): + return F.conv1d(x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.groups) + + ref_net = None + + return aten_conv1d(), ref_net, 
"aten::conv1d" + + @pytest.mark.parametrize("params", + [{'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 1}, + {'weights_shape': [3, 3, 3], 'strides': 2, 'pads': 0, 'dilations': 1, 'groups': 1}, + {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 1, 'dilations': 1, 'groups': 1}, + {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 2, 'groups': 1}, + {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 'same', 'dilations': 1, 'groups': 1}, + {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 'valid', 'dilations': 1, 'groups': 1}, + # doesn't work because input shape is dynamic which makes kernel shape dynamic + # {'weights_shape': [3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 2}, + ]) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_conv1d(self, params, bias, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias), + ie_device, precision, ir_version) + + +class TestConv3D(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(2, 3, 25, 25, 25).astype(np.float32),) + + def create_model(self, weights_shape, strides, pads, dilations, groups, bias): + import torch + import torch.nn.functional as F + + class aten_conv3d(torch.nn.Module): + def __init__(self): + super(aten_conv3d, self).__init__() + self.weight = torch.randn(weights_shape) + self.bias = None + if bias: + self.bias = torch.randn(weights_shape[0]) + self.strides = strides + self.pads = pads + self.dilations = dilations + self.groups = groups + + def forward(self, x): + return F.conv3d(x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.groups) + + ref_net = None + + return aten_conv3d(), ref_net, "aten::conv3d" + + @pytest.mark.parametrize("params", + [{'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 2, 
'pads': 0, 'dilations': 1, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 1, 'dilations': 1, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 2, 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [0, 1, 0], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [1, 0, 0], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [0, 0, 1], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [1, 1, 0], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [0, 1, 1], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': [1, 0, 1], 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 'same', 'dilations': 1, + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': 1, 'pads': 'valid', 'dilations': 1, + 'groups': 1}, + # doesn't work because input shape is dynamic which makes kernel shape dynamic + # {'weights_shape': [2, 3, 3, 3], 'strides': 1, 'pads': 0, 'dilations': 1, 'groups': 2}, + ]) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_conv3d(self, params, bias, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias), + ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_convolution.py b/tests/layer_tests/pytorch_tests/test_convolution.py new file mode 100644 index 00000000000..7261e2ea766 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_convolution.py @@ -0,0 +1,231 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + +d2_params = [{'weights_shape': [3, 3, 2, 2], 'strides': [1, 1], 'pads': [0, 0], 'dilations': [1, 1], 
'groups': 1, + 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 2, 2], 'strides': [1, 1], 'pads': [0, 0], 'dilations': [ + 1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': [0, 0], 'dilations': [ + 1, 1], 'groups': 3, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': [0, 0], 'dilations': [ + 1, 1], 'groups': 3, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'bias_shape': [1], 'pads': [ + 1, 1], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1], 'strides': [1, 1], 'pads': [ + 1, 1], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'bias_shape': [1], 'pads': [ + 3, 1], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1], 'strides': [1, 1], 'pads': [ + 3, 1], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'bias_shape': [1], 'pads': [ + 1, 0], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1], 'strides': [1, 1], 'pads': [ + 0, 1], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': [ + 1, 0], 'dilations': [1, 1], 'groups': 3, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': [ + 0, 1], 'dilations': [1, 1], 'groups': 3, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': [ + 1, 0], 'dilations': [2, 2], 'groups': 3, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1], 
'strides': [1, 1], 'pads': [ + 0, 0], 'dilations': [2, 2], 'groups': 3, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [2, 1], 'bias_shape': [1], 'pads': [ + 1, 0], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1], 'strides': [2, 1], 'pads': [ + 0, 0], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [2, 2], 'bias_shape': [1], 'pads': [ + 0, 0], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1], 'strides': [2, 2], 'pads': [ + 0, 0], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 3, 1, 1], 'strides': [2, 1], 'pads': [ + 0, 0], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1], 'strides': [2, 2], 'bias_shape': [1], 'pads': [ + 0, 0], 'dilations': [1, 1], 'groups': 1, 'output_padding': [0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1], 'strides': [2, 2], 'bias_shape': [1], 'pads': [ + 1, 1], 'dilations': [2, 2], 'groups': 1, 'output_padding': [1, 1], 'transposed': True}, + ] + +d1_params = [ + {'weights_shape': [3, 3, 2], 'strides': [1], 'pads': [0], 'dilations': [1], 'groups': 1, 'output_padding': [0], + 'transposed': True}, + {'weights_shape': [3, 3, 2], 'strides': [1], 'pads': [0], 'dilations': [ + 1], 'groups': 1, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': [0], 'dilations': [ + 1], 'groups': 3, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': [0], 'dilations': [ + 1], 'groups': 3, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'bias_shape': [1], 'pads': [ + 1], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': 
True}, + {'weights_shape': [3, 3, 1], 'strides': [1], 'pads': [ + 1], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'bias_shape': [1], 'pads': [ + 3], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 3, 1], 'strides': [1], 'pads': [ + 3], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'bias_shape': [1], 'pads': [ + 1], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 3, 1], 'strides': [1], 'pads': [ + 0], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': [ + 1], 'dilations': [1], 'groups': 3, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': [1], 'dilations': [ + 1], 'groups': 3, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': [ + 1], 'dilations': [2], 'groups': 3, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': [ + 0], 'dilations': [2], 'groups': 3, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [2], 'bias_shape': [1], 'pads': [ + 1], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 3, 1], 'strides': [2], 'pads': [ + 0], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [2], 'bias_shape': [1], 'pads': [ + 0], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 3, 1], 'strides': [2], 'pads': [ + 0], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': False}, + {'weights_shape': [3, 3, 1], 'strides': [1], 'pads': [0], 'dilations': [ + 1], 'groups': 1, 'output_padding': 
[0], 'transposed': False}, + {'weights_shape': [3, 1, 1], 'strides': [2], 'bias_shape': [1], 'pads': [ + 0], 'dilations': [1], 'groups': 1, 'output_padding': [0], 'transposed': True}, + {'weights_shape': [3, 1, 1], 'strides': [2], 'bias_shape': [1], 'pads': [ + 1], 'dilations': [2], 'groups': 1, 'output_padding': [1], 'transposed': True}, + ] + +d3_params = [ + {'weights_shape': [3, 3, 2, 2, 1], 'strides': [1, 1, 1], 'pads': [0, 0, 0], 'dilations': [1, 1, 1], 'groups': 1, + 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 2, 2, 1], 'strides': [1, 1, 1], 'pads': [0, 0, 0], 'dilations': [ + 1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [0, 0, 0], 'dilations': [ + 1, 1, 1], 'groups': 3, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [0, 0, 0], 'dilations': [ + 1, 1, 1], 'groups': 3, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'bias_shape': [1], 'pads': [ + 1, 1, 1], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [ + 1, 1, 1], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'bias_shape': [1], 'pads': [ + 3, 1, 3], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [ + 3, 1, 3], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'bias_shape': [1], 'pads': [ + 1, 0, 0], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1, 1], 'strides': [1, 1, 1], 
'pads': [ + 0, 1, 0], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [ + 1, 0, 0], 'dilations': [1, 1, 1], 'groups': 3, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [ + 0, 1, 1], 'dilations': [1, 1, 1], 'groups': 3, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [ + 1, 0, 0], 'dilations': [2, 2, 1], 'groups': 3, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': [ + 0, 0, 0], 'dilations': [2, 2, 2], 'groups': 3, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [2, 1, 1], 'bias_shape': [1], 'pads': [ + 1, 0, 0], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1, 1], 'strides': [2, 1, 1], 'pads': [ + 0, 0, 0], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [2, 2, 2], 'bias_shape': [1], 'pads': [ + 0, 0, 0], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 3, 1, 1, 1], 'strides': [2, 2, 2], 'pads': [ + 0, 0, 0], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 3, 1, 1, 1], 'strides': [2, 1, 1], 'pads': [ + 0, 0, 1], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': False}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [2, 2, 2], 'bias_shape': [1], 'pads': [ + 0, 0, 0], 'dilations': [1, 1, 1], 'groups': 1, 'output_padding': [0, 0, 0], 'transposed': True}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [2, 2, 2], 'bias_shape': [1], 'pads': [ + 1, 1, 1], 'dilations': [2, 2, 2], 'groups': 1, 
'output_padding': [1, 1, 1], 'transposed': True}, + ] + + +class TestConvolution(PytorchLayerTest): + def _prepare_input(self, ndim=4): + import numpy as np + shape = (1, 3, 10, 10, 10) + return (np.random.randn(*shape[:ndim]).astype(np.float32),) + + def create_model(self, weights_shape, strides, pads, dilations, groups, bias, transposed, output_padding=0, + bias_shape=None, underscore=True): + + import torch + + bias_dim = 0 + + class aten__convolution(torch.nn.Module): + def __init__(self): + super(aten__convolution, self).__init__() + self.weight = torch.randn(weights_shape) + self.bias_shape = bias_shape + if self.bias_shape is None: + self.bias_shape = weights_shape[bias_dim] + self.bias = torch.randn(self.bias_shape) if bias else None + self.strides = strides + self.pads = pads + self.dilations = dilations + self.groups = groups + self.transposed = transposed + self.output_padding = output_padding + self._op = torch._convolution + + def forward(self, x): + return self._op( + x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.transposed, + self.output_padding, self.groups, False, False, False, False + ) + + class aten_convolution(torch.nn.Module): + def __init__(self): + super(aten_convolution, self).__init__() + self.weight = torch.randn(weights_shape) + self.bias_shape = bias_shape + if self.bias_shape is None: + self.bias_shape = weights_shape[bias_dim] + self.bias = torch.randn(self.bias_shape) if bias else None + self.strides = strides + self.pads = pads + self.dilations = dilations + self.groups = groups + self.transposed = transposed + self.output_padding = output_padding + self._op = torch.convolution + + def forward(self, x): + return self._op( + x, self.weight, self.bias, self.strides, self.pads, self.dilations, self.transposed, + self.output_padding, self.groups + ) + + ref_net = None + if underscore: + return aten__convolution(), ref_net, "aten::_convolution" + return aten_convolution(), ref_net, "aten::convolution" + + 
@pytest.mark.parametrize("params", d1_params) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.parametrize("underscore", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_convolution1d(self, params, bias, underscore, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias, underscore=underscore), + ie_device, precision, ir_version, dynamic_shapes=params['groups'] == 1, + kwargs_to_prepare_input={'ndim': 3}) + + @pytest.mark.parametrize("params", d2_params) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.parametrize("underscore", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_convolution2d(self, params, bias, underscore, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias, underscore=underscore), + ie_device, precision, ir_version, dynamic_shapes=params['groups'] == 1) + + @pytest.mark.parametrize("params", d3_params) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.parametrize("underscore", [True, False]) + @pytest.mark.nightly + def test_convolution3d(self, params, bias, underscore, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias, underscore=underscore), + ie_device, precision, ir_version, dynamic_shapes=params['groups'] == 1, + kwargs_to_prepare_input={'ndim': 5}) diff --git a/tests/layer_tests/pytorch_tests/test_convolution_mode.py b/tests/layer_tests/pytorch_tests/test_convolution_mode.py new file mode 100644 index 00000000000..d51086dbccf --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_convolution_mode.py @@ -0,0 +1,138 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestConv2D(PytorchLayerTest): + def _prepare_input(self, ndim=4): + import numpy as np + input_shape = (1, 3, 10, 10, 10) + return 
(np.random.randn(*input_shape[:ndim]).astype(np.float32),) + + def create_model(self, weights_shape, strides, pads, dilations, groups, bias): + import torch + + class aten_convolution_mode(torch.nn.Module): + def __init__(self): + super(aten_convolution_mode, self).__init__() + self.weight = torch.randn(weights_shape) + self.bias = None + if bias: + self.bias = torch.randn(weights_shape[0]) + self.strides = strides + self.pads = pads + self.dilations = dilations + self.groups = groups + + def forward(self, x): + return torch._convolution_mode(x, self.weight, self.bias, self.strides, self.pads, self.dilations, + self.groups) + + ref_net = None + + return aten_convolution_mode(), ref_net, "aten::_convolution_mode" + + @pytest.mark.parametrize("params", + [ + {'weights_shape': [1, 3, 3], 'strides': [1], 'pads': 'same', 'dilations': [1], + 'groups': 1}, + {'weights_shape': [1, 3, 3], 'strides': [1], 'pads': 'valid', 'dilations': [1], + 'groups': 1}, + {'weights_shape': [1, 3, 3], 'strides': [1], 'pads': 'same', 'dilations': [2], + 'groups': 1}, + {'weights_shape': [1, 3, 3], 'strides': [1], 'pads': 'valid', 'dilations': [2], + 'groups': 1}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': 'same', 'dilations': [1], + 'groups': 3}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': 'valid', 'dilations': [1], + 'groups': 3}, + {'weights_shape': [1, 3, 3], 'strides': [2], 'pads': 'valid', 'dilations': [1], + 'groups': 1}, + {'weights_shape': [1, 3, 3], 'strides': [2], 'pads': 'valid', 'dilations': [2], + 'groups': 1}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': 'same', 'dilations': [2], + 'groups': 3}, + {'weights_shape': [3, 1, 1], 'strides': [1], 'pads': 'valid', 'dilations': [2], + 'groups': 3}, + ]) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_convolution_mode_1d(self, params, bias, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias), + ie_device, 
precision, ir_version, dynamic_shapes=params['groups'] == 1, + kwargs_to_prepare_input={'ndim': 3}) + + @pytest.mark.parametrize("params", + [ + {'weights_shape': [1, 3, 3, 3], 'strides': [1, 1], 'pads': 'same', 'dilations': [1, 1], + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': [1, 1], 'pads': 'valid', + 'dilations': [1, 1], 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': [1, 1], 'pads': 'same', 'dilations': [2, 2], + 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': [1, 1], 'pads': 'valid', + 'dilations': [2, 2], 'groups': 1}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': 'same', 'dilations': [1, 1], + 'groups': 3}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': 'valid', + 'dilations': [1, 1], 'groups': 3}, + {'weights_shape': [1, 3, 3, 3], 'strides': [2, 2], 'pads': 'valid', + 'dilations': [1, 1], 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': [2, 2], 'pads': 'valid', + 'dilations': [2, 2], 'groups': 1}, + {'weights_shape': [1, 3, 3, 3], 'strides': [2, 1], 'pads': 'valid', + 'dilations': [1, 1], 'groups': 1}, + {'weights_shape': [3, 1, 1, 1], 'strides': [2, 2], 'pads': 'valid', + 'dilations': [1, 1], 'groups': 3}, + {'weights_shape': [3, 1, 1, 1], 'strides': [2, 2], 'pads': 'valid', + 'dilations': [2, 2], 'groups': 3}, + {'weights_shape': [3, 1, 1, 1], 'strides': [2, 1], 'pads': 'valid', + 'dilations': [1, 1], 'groups': 3}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': 'same', 'dilations': [2, 1], + 'groups': 3}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': 'valid', + 'dilations': [2, 1], 'groups': 3}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': 'same', 'dilations': [2, 2], + 'groups': 3}, + {'weights_shape': [3, 1, 1, 1], 'strides': [1, 1], 'pads': 'valid', + 'dilations': [2, 2], 'groups': 3}, + ]) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_convolution_mode_2d(self, params, 
bias, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias), + ie_device, precision, ir_version, dynamic_shapes=params['groups'] == 1) + + @pytest.mark.parametrize("params", + [ + {'weights_shape': [1, 3, 3, 3, 3], 'strides': [1, 1, 1], 'pads': 'same', + 'dilations': [1, 1, 1], 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': [1, 1, 1], 'pads': 'valid', + 'dilations': [1, 1, 1], 'groups': 1}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': 'same', + 'dilations': [1, 1, 1], 'groups': 3}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': 'valid', + 'dilations': [1, 1, 1], 'groups': 3}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': [2, 2, 1], 'pads': 'valid', + 'dilations': [1, 1, 1], 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': [2, 2, 2], 'pads': 'valid', + 'dilations': [1, 1, 1], 'groups': 1}, + {'weights_shape': [1, 3, 3, 3, 3], 'strides': [2, 2, 2], 'pads': 'valid', + 'dilations': [2, 2, 2], 'groups': 1}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': 'same', + 'dilations': [2, 1, 2], 'groups': 3}, + {'weights_shape': [3, 1, 1, 1, 1], 'strides': [1, 1, 1], 'pads': 'valid', + 'dilations': [2, 1, 2], 'groups': 3}, + ]) + @pytest.mark.parametrize("bias", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_convolution_mode_3d(self, params, bias, ie_device, precision, ir_version): + self._test(*self.create_model(**params, bias=bias), + ie_device, precision, ir_version, dynamic_shapes=params['groups'] == 1, + kwargs_to_prepare_input={'ndim': 5}) diff --git a/tests/layer_tests/pytorch_tests/test_cumsum.py b/tests/layer_tests/pytorch_tests/test_cumsum.py new file mode 100644 index 00000000000..9ce39fc2b7c --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_cumsum.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import 
# NOTE(review): this region of the file is a whitespace-mangled git patch; the
# hunk structure is unrecoverable, so the Python test files it adds are
# reconstructed below with the original file boundaries kept as comment
# markers.  Concrete fixes applied: a duplicated parametrize case and a
# copy-pasted method name in TestExpandAs, a duplicated commented-out line in
# TestFloorDivide, and `size=(1)` (an int, not a tuple) made explicit.

import numpy as np
import pytest

from pytorch_layer_test_class import PytorchLayerTest


# === tests/layer_tests/pytorch_tests/test_cumsum.py ===
class TestCumSum(PytorchLayerTest):
    """Layer test for aten::cumsum over every positive/negative axis of a 4D input."""

    def _prepare_input(self):
        import numpy as np
        return (np.random.randn(1, 3, 224, 224).astype(np.float32),)

    def create_model(self, axis):
        import torch

        class aten_cumsum(torch.nn.Module):
            def __init__(self, axis):
                super(aten_cumsum, self).__init__()
                self.axis = axis

            def forward(self, x):
                return torch.cumsum(x, self.axis)

        ref_net = None

        return aten_cumsum(axis), ref_net, "aten::cumsum"

    @pytest.mark.parametrize("axis", [0, 1, 2, 3, -1, -2, -3, -4])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_cumsum(self, axis, ie_device, precision, ir_version):
        self._test(*self.create_model(axis), ie_device, precision, ir_version)


# === tests/layer_tests/pytorch_tests/test_div.py ===
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
class TestDiv(PytorchLayerTest):
    """Layer test for aten::div with every rounding_mode; integer inputs xfail."""

    def _prepare_input(self):
        return (self.input_array.astype(self.input_type),
                self.other_array.astype(self.other_type))

    def create_model(self, rounding_mode):
        import torch

        class aten_div(torch.nn.Module):
            def __init__(self, rounding_mode):
                super(aten_div, self).__init__()
                self.rounding_mode = rounding_mode

            def forward(self, input_tensor, other_tensor):
                return torch.div(input_tensor, other_tensor,
                                 rounding_mode=self.rounding_mode)

        ref_net = None

        return aten_div(rounding_mode), ref_net, "aten::div"

    # `size=(1)` in the original is just the int 1; numpy treats it the same
    # as (1,), made explicit here with no behavior change.
    @pytest.mark.parametrize(("input_array", "other_array"), [
        [10 * np.random.rand(5, 5), np.random.uniform(low=1, high=5, size=(1,))],
        [10 * np.random.rand(5, 5, 1), np.random.uniform(low=1, high=5, size=(1,))],
        [10 * np.random.rand(1, 1, 5, 5), np.random.uniform(low=1, high=5, size=(1,))],
        [10 * np.random.rand(5, 5, 1), np.random.uniform(low=1, high=5, size=(5, 1))],
    ])
    @pytest.mark.parametrize("types", [
        (np.float32, np.float32),
        pytest.param((np.int32, np.float32), marks=pytest.mark.xfail),
        pytest.param((np.float32, np.int32), marks=pytest.mark.xfail),
        pytest.param((np.int32, np.int32), marks=pytest.mark.xfail),
    ])
    @pytest.mark.parametrize("rounding_mode", [None, "floor", "trunc"])
    @pytest.mark.nightly
    def test_div(self, input_array, other_array, types, rounding_mode,
                 ie_device, precision, ir_version):
        self.input_array = input_array
        self.input_type = types[0]
        self.other_array = other_array
        self.other_type = types[1]
        self._test(*self.create_model(rounding_mode),
                   ie_device, precision, ir_version)

    # Values follow the example in the torch.div documentation.
    @pytest.mark.parametrize(("input_array", "other_array"), [
        [np.array([0.7620, 2.5548, -0.5944, -0.7438, 0.9274]), np.array(0.5)],
        [np.array([[-0.3711, -1.9353, -0.4605, -0.2917],
                   [0.1815, -1.0111, 0.9805, -1.5923],
                   [0.1062, 1.4581, 0.7759, -1.2344],
                   [-0.1830, -0.0313, 1.1908, -1.4757]]),
         np.array([0.8032, 0.2930, -0.8113, -0.2308])]
    ])
    @pytest.mark.parametrize("rounding_mode", [None, "floor", "trunc"])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_div_pt_spec(self, input_array, other_array, rounding_mode,
                         ie_device, precision, ir_version):
        self.input_array = input_array
        self.input_type = np.float32
        self.other_array = other_array
        self.other_type = np.float32
        self._test(*self.create_model(rounding_mode),
                   ie_device, precision, ir_version)


# === tests/layer_tests/pytorch_tests/test_exp.py ===
class TestExp(PytorchLayerTest):
    """Layer test for aten::exp."""

    def _prepare_input(self):
        import numpy as np
        return (np.random.randn(1, 3, 224, 224).astype(np.float32),)

    def create_model(self):
        import torch

        class aten_exp(torch.nn.Module):
            def forward(self, x):
                return torch.exp(x)

        ref_net = None

        return aten_exp(), ref_net, "aten::exp"

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_exp(self, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version)


# === tests/layer_tests/pytorch_tests/test_expand.py ===
class TestExpand(PytorchLayerTest):
    """Layer test for aten::expand with static target dims (incl. -1 passthrough)."""

    def _prepare_input(self):
        import numpy as np
        return (np.random.randn(1, 3).astype(np.float32),)

    def create_model(self, dim):
        import torch

        class aten_expand(torch.nn.Module):
            def __init__(self, dims):
                super(aten_expand, self).__init__()
                self.dims = dims

            def forward(self, x):
                return x.expand(self.dims)

        ref_net = None

        return aten_expand(dim), ref_net, "aten::expand"

    @pytest.mark.parametrize("dims", [(4, 3), (-1, -1), (1, 2, 3), (1, 2, 2, 3)])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_expand(self, dims, ie_device, precision, ir_version):
        self._test(*self.create_model(dims), ie_device, precision, ir_version)


class TestExpandAs(PytorchLayerTest):
    """Layer test for aten::expand_as against a second input tensor."""

    def _prepare_input(self, input_shape, broadcast_shape):
        import numpy as np
        return (np.random.randn(*input_shape).astype(np.float32),
                np.random.randn(*broadcast_shape).astype(np.float32),)

    def create_model(self):
        import torch

        class aten_expand_as(torch.nn.Module):
            def forward(self, x, y):
                return x.expand_as(y)

        ref_net = None

        return aten_expand_as(), ref_net, "aten::expand_as"

    # The original list contained {'input_shape': [1, 2],
    # "broadcast_shape": [1, 4, 2]} twice; the duplicate case is removed.
    @pytest.mark.parametrize("kwargs_to_prepare_input", [
        {'input_shape': [1, 2], "broadcast_shape": [1, 2]},
        {'input_shape': [1, 2], "broadcast_shape": [1, 4, 2]},
        {'input_shape': [1, 2], "broadcast_shape": [2, 2]},
        {'input_shape': [1, 2], "broadcast_shape": [2, 2, 2]},
        {'input_shape': [1, 2, 3], "broadcast_shape": [1, 2, 3]},
        {'input_shape': [1, 2, 3], "broadcast_shape": [1, 4, 2, 3]},
        {'input_shape': [1, 2, 3, 4], "broadcast_shape": [1, 2, 3, 4]},
        {'input_shape': [1, 2, 3, 4], "broadcast_shape": [1, 4, 2, 3, 4]},
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_expand_as(self, ie_device, precision, ir_version, kwargs_to_prepare_input):
        # Renamed from the copy-pasted "test_expand" for unambiguous reporting.
        self._test(*self.create_model(), ie_device, precision,
                   ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input)


# === tests/layer_tests/pytorch_tests/test_floor.py ===
class TestFloor(PytorchLayerTest):
    """Layer test for aten::floor and the in-place aten::floor_."""

    def _prepare_input(self):
        import numpy as np
        return (np.random.randn(1, 3, 224, 224).astype(np.float32),)

    def create_model(self, inplace):
        import torch

        class aten_floor(torch.nn.Module):
            def __init__(self, inplace):
                super(aten_floor, self).__init__()
                self.op = torch.floor_ if inplace else torch.floor

            def forward(self, x):
                # Return the input too so the in-place mutation is observable.
                return x, self.op(x)

        ref_net = None

        return aten_floor(inplace), ref_net, "aten::floor" if not inplace else "aten::floor_"

    @pytest.mark.parametrize("inplace", [False, True])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_floor(self, inplace, ie_device, precision, ir_version):
        self._test(*self.create_model(inplace), ie_device, precision, ir_version)


# === tests/layer_tests/pytorch_tests/test_floor_divide.py ===
class TestFloorDivide(PytorchLayerTest):
    """Layer test for aten::floor_divide with broadcasting divisors."""

    def _prepare_input(self):
        return (self.input_tensor, self.other_tensor)

    def create_model(self):
        import torch

        class aten_floor_divide(torch.nn.Module):
            def forward(self, input_tensor, other_tensor):
                return torch.floor_divide(input_tensor, other_tensor)

        ref_net = None

        # A duplicated commented-out copy of this return line was removed.
        return aten_floor_divide(), ref_net, "aten::floor_divide"

    @pytest.mark.parametrize('input_tensor', [
        np.random.randn(5).astype(np.float32),
        np.random.randn(5, 5, 1).astype(np.float32),
        np.random.randn(1, 1, 5, 5).astype(np.float32),
    ])
    @pytest.mark.parametrize('other_tensor', [
        np.array([[0.5]]).astype(np.float32),
        np.random.randn(5).astype(np.float32),
        np.random.randn(5, 1).astype(np.float32),
        np.random.randn(1, 5).astype(np.float32),
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_floor_divide(self, input_tensor, other_tensor, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
        self.other_tensor = other_tensor
        self._test(*self.create_model(), ie_device, precision, ir_version)
# NOTE(review): reconstructed from the whitespace-mangled patch; file
# boundaries kept as comment markers.  Concrete fixes applied: TestIm2Col's
# test method was copy-pasted as "test_exp" and is renamed "test_im2col";
# copy-pasted inner module class names (aten_full in the new_full/new_zeros/
# new_ones tests) are renamed to match the op under test; "model_cls" in
# TestZerosAndOnes held instances, not classes, and is renamed "model".

import numpy as np
import pytest
import torch

from pytorch_layer_test_class import PytorchLayerTest


# === tests/layer_tests/pytorch_tests/test_full.py ===
class TestFull(PytorchLayerTest):
    """Layer test for aten::full, plain and with dtype=/out=/names= arguments."""

    def _prepare_input(self, value):
        return (np.array(value, dtype=np.float32),)

    def create_model(self, shape, dtype=None, use_dtype=False, use_out=False, with_names=False):
        import torch
        dtype_map = {
            "float32": torch.float32,
            "float64": torch.float64,
            "int64": torch.int64,
            "int32": torch.int32,
            "uint8": torch.uint8,
            "int8": torch.int8,
            "bool": torch.bool
        }

        class aten_full(torch.nn.Module):
            def __init__(self, shape):
                super(aten_full, self).__init__()
                self.shape = shape

            def forward(self, x: float):
                return torch.full(self.shape, x)

        class aten_full_dtype(torch.nn.Module):
            def __init__(self, shape, dtype):
                super(aten_full_dtype, self).__init__()
                self.shape = shape
                self.dtype = dtype

            def forward(self, x: float):
                return torch.full(self.shape, x, dtype=self.dtype)

        class aten_full_dtype_with_names(torch.nn.Module):
            def __init__(self, shape, dtype):
                super(aten_full_dtype_with_names, self).__init__()
                self.shape = shape
                self.dtype = dtype

            def forward(self, x: float):
                return torch.full(self.shape, x, dtype=self.dtype, names=None)

        class aten_full_out(torch.nn.Module):
            def __init__(self, shape, dtype):
                super(aten_full_out, self).__init__()
                self.shape = shape
                self.dtype = dtype

            def forward(self, x: float):
                # out= tensor has the wrong shape on purpose; torch resizes it.
                return torch.full(self.shape, x, out=torch.tensor(1, dtype=self.dtype))

        class aten_full_out_with_names(torch.nn.Module):
            def __init__(self, shape, dtype):
                super(aten_full_out_with_names, self).__init__()
                self.shape = shape
                self.dtype = dtype

            def forward(self, x: float):
                return torch.full(self.shape, x, out=torch.tensor(1, dtype=self.dtype), names=None)

        ref_net = None
        model = aten_full(shape)
        if use_dtype or use_out:
            dtype = dtype_map.get(dtype, dtype)
            if not use_out:
                model = aten_full_dtype(shape, dtype) if not with_names else aten_full_dtype_with_names(shape, dtype)
            else:
                model = aten_full_out(shape, dtype) if not with_names else aten_full_out_with_names(shape, dtype)

        return model, ref_net, "aten::full"

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value", [0, 1, -1, 0.5])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_full(self, shape, value, ie_device, precision, ir_version):
        self._test(*self.create_model(shape), ie_device, precision,
                   ir_version, kwargs_to_prepare_input={'value': value})

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value", [0, 1, -1, 0.5])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.parametrize("with_names", [True, False])
    @pytest.mark.nightly
    def test_full_dtype(self, shape, value, dtype, with_names, ie_device, precision, ir_version):
        self._test(*self.create_model(shape, dtype=dtype, use_dtype=True, with_names=with_names),
                   ie_device, precision, ir_version, kwargs_to_prepare_input={'value': value})

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value", [0, 1, -1, 0.5])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.parametrize("with_names", [True, False])
    @pytest.mark.nightly
    def test_full_out(self, shape, value, dtype, with_names, ie_device, precision, ir_version):
        self._test(*self.create_model(shape, dtype=dtype, use_out=True, with_names=with_names),
                   ie_device, precision, ir_version, kwargs_to_prepare_input={'value': value})


class TestFullLike(PytorchLayerTest):
    """Layer test for aten::full_like (plain, dtype= and out= variants)."""

    def _prepare_input(self, value, shape):
        return (np.random.randn(*shape).astype(np.float32),
                np.array(value, dtype=np.float32),)

    def create_model(self, dtype=None, use_dtype=False, use_out=False):
        import torch
        dtype_map = {
            "float32": torch.float32,
            "float64": torch.float64,
            "int64": torch.int64,
            "int32": torch.int32,
            "uint8": torch.uint8,
            "int8": torch.int8,
            "bool": torch.bool
        }

        class aten_full_like(torch.nn.Module):
            def forward(self, input_t: torch.Tensor, x: float):
                return torch.full_like(input_t, x)

        class aten_full_like_dtype(torch.nn.Module):
            def __init__(self, dtype):
                super(aten_full_like_dtype, self).__init__()
                self.dtype = dtype

            def forward(self, input_t: torch.Tensor, x: float):
                return torch.full_like(input_t, x, dtype=self.dtype)

        class aten_full_like_out(torch.nn.Module):
            def __init__(self, dtype):
                super(aten_full_like_out, self).__init__()
                self.dtype = dtype

            def forward(self, input_t: torch.Tensor, x: float):
                return torch.full_like(input_t, x, out=torch.tensor(1, dtype=self.dtype))

        ref_net = None

        model = aten_full_like()
        if use_dtype or use_out:
            dtype = dtype_map.get(dtype, dtype)
            if not use_out:
                model = aten_full_like_dtype(dtype)
            else:
                model = aten_full_like_out(dtype)

        return model, ref_net, "aten::full_like"

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value", [0, 1, -1, 0.5])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_full_like(self, shape, value, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'value': value, 'shape': shape})

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value", [0, 1, -1, 0.5])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.nightly
    def test_full_like_dtype(self, shape, value, dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(dtype, use_dtype=True), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'value': value, 'shape': shape})

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value", [0, 1, -1, 0.5])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.nightly
    def test_full_like_out(self, shape, value, dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(dtype, use_out=True), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'value': value, 'shape': shape})


class TestNewFull(PytorchLayerTest):
    """Layer test for Tensor.new_full over inputs of several source dtypes."""

    def _prepare_input(self, value, input_dtype=np.float32):
        return (np.random.randn(1, 3, 10, 10).astype(input_dtype),
                np.array(value, dtype=np.float32))

    def create_model(self, shape, dtype=None, used_dtype=False):
        import torch
        dtype_map = {
            "float32": torch.float32,
            "float64": torch.float64,
            "int64": torch.int64,
            "int32": torch.int32,
            "uint8": torch.uint8,
            "int8": torch.int8,
            "bool": torch.bool
        }

        # Renamed from the copy-pasted "aten_full" to match the op under test.
        class aten_new_full(torch.nn.Module):
            def __init__(self, shape):
                super(aten_new_full, self).__init__()
                self.shape = shape

            def forward(self, input_tensor: torch.Tensor, x: float):
                return input_tensor.new_full(self.shape, x)

        class aten_new_full_with_dtype(torch.nn.Module):
            def __init__(self, shape, dtype):
                super(aten_new_full_with_dtype, self).__init__()
                self.shape = shape
                self.dtype = dtype

            def forward(self, input_tensor: torch.Tensor, x: float):
                return input_tensor.new_full(size=self.shape, fill_value=x, dtype=self.dtype)

        ref_net = None
        model = aten_new_full(shape)

        if used_dtype:
            dtype = dtype_map[dtype]
            model = aten_new_full_with_dtype(shape, dtype)

        return model, ref_net, "aten::new_full"

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value,input_dtype",
                             [(0, np.uint8), (1, np.int32), (-1, np.float32), (0.5, np.float64)])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_new_full(self, shape, value, input_dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(shape), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'value': value, 'input_dtype': input_dtype})

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("value,input_dtype",
                             [(0, np.uint8), (1, np.int32), (-1, np.float32), (0.5, np.float64)])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.nightly
    def test_new_full_with_dtype(self, value, shape, dtype, input_dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(shape, dtype=dtype, used_dtype=True), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'value': value, 'input_dtype': input_dtype})


class TestZerosAndOnes(PytorchLayerTest):
    """Layer test for aten::zeros/ones and their *_like variants."""

    def _prepare_input(self, shape):
        return (np.random.randn(*shape).astype(np.float32),)

    def create_model(self, op_type, dtype=None, with_dtype=False, with_out=False, with_names=False):
        import torch
        ops = {
            "aten::zeros": torch.zeros,
            "aten::ones": torch.ones,
            "aten::zeros_like": torch.zeros_like,
            "aten::ones_like": torch.ones_like
        }
        dtype_map = {
            "float32": torch.float32,
            "float64": torch.float64,
            "int64": torch.int64,
            "int32": torch.int32,
            "uint8": torch.uint8,
            "int8": torch.int8,
            "bool": torch.bool
        }

        class aten_op(torch.nn.Module):
            def __init__(self, op):
                super(aten_op, self).__init__()
                self.op = op

            def forward(self, x):
                shape = x.shape
                return self.op(shape)

        class aten_op_like(torch.nn.Module):
            def __init__(self, op):
                super(aten_op_like, self).__init__()
                self.op = op

            def forward(self, x):
                return self.op(x)

        class aten_op_dtype(torch.nn.Module):
            def __init__(self, op, dtype):
                super(aten_op_dtype, self).__init__()
                self.op = op
                self.dtype = dtype

            def forward(self, x):
                shape = x.shape
                return self.op(shape, dtype=self.dtype)

        class aten_op_dtype_with_names(aten_op_dtype):
            def forward(self, x):
                shape = x.shape
                return self.op(shape, dtype=self.dtype, names=None)

        class aten_op_like_dtype(torch.nn.Module):
            def __init__(self, op, dtype):
                super(aten_op_like_dtype, self).__init__()
                self.op = op
                self.dtype = dtype

            def forward(self, x):
                return self.op(x, dtype=self.dtype)

        class aten_op_out(torch.nn.Module):
            def __init__(self, op, dtype):
                super(aten_op_out, self).__init__()
                self.op = op
                self.dtype = dtype

            def forward(self, x):
                shape = x.shape
                return self.op(shape, out=torch.tensor(0, dtype=self.dtype))

        class aten_op_out_with_names(torch.nn.Module):
            def __init__(self, op, dtype):
                super(aten_op_out_with_names, self).__init__()
                self.op = op
                self.dtype = dtype

            def forward(self, x):
                shape = x.shape
                return self.op(shape, out=torch.tensor(0, dtype=self.dtype), names=None)

        class aten_op_like_out(torch.nn.Module):
            def __init__(self, op, dtype):
                super(aten_op_like_out, self).__init__()
                self.op = op
                self.dtype = dtype

            def forward(self, x):
                return self.op(x, out=torch.tensor(0, dtype=self.dtype))

        like = op_type.endswith('_like')
        op = ops[op_type]
        # Renamed from "model_cls": these are instances, not classes.
        if not like:
            model = aten_op(op)
            if with_dtype or with_out:
                dtype = dtype_map[dtype]
                if with_dtype:
                    model = aten_op_dtype(op, dtype) if not with_names else aten_op_dtype_with_names(op, dtype)
                if with_out:
                    # Deliberately last: out= wins when both flags are set.
                    model = aten_op_out(op, dtype) if not with_names else aten_op_out_with_names(op, dtype)
        else:
            model = aten_op_like(op)
            if with_dtype or with_out:
                dtype = dtype_map[dtype]
                model = aten_op_like_dtype(op, dtype) if not with_out else aten_op_like_out(op, dtype)

        ref_net = None

        return model, ref_net, op_type

    @pytest.mark.parametrize("shape", [(1, 1), (1, 2), (1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5, 6)])
    @pytest.mark.parametrize("op_type", ["aten::zeros", "aten::ones", "aten::zeros_like", "aten::ones_like"])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_fill(self, op_type, shape, ie_device, precision, ir_version):
        self._test(*self.create_model(op_type), ie_device, precision,
                   ir_version, kwargs_to_prepare_input={'shape': shape})

    @pytest.mark.parametrize("shape", [(1, 1), (1, 2), (1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5, 6)])
    @pytest.mark.parametrize("op_type", ["aten::zeros", "aten::ones"])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.parametrize("with_names", [True, False])
    @pytest.mark.nightly
    def test_fill_with_dtype(self, op_type, shape, dtype, with_names, ie_device, precision, ir_version):
        self._test(*self.create_model(op_type, dtype=dtype, with_dtype=True, with_names=with_names),
                   ie_device, precision, ir_version, kwargs_to_prepare_input={'shape': shape})

    @pytest.mark.parametrize("shape", [(1, 1), (1, 2), (1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5, 6)])
    @pytest.mark.parametrize("op_type", ["aten::zeros", "aten::ones"])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.parametrize("with_names", [True, False])
    @pytest.mark.nightly
    def test_fill_with_out(self, op_type, shape, dtype, with_names, ie_device, precision, ir_version):
        self._test(*self.create_model(op_type, dtype=dtype, with_out=True, with_names=with_names),
                   ie_device, precision, ir_version, kwargs_to_prepare_input={'shape': shape})

    @pytest.mark.parametrize("shape", [(1, 1), (1, 2), (1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5, 6)])
    @pytest.mark.parametrize("op_type", ["aten::zeros_like", "aten::ones_like"])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.nightly
    def test_fill_like_with_dtype(self, op_type, shape, dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(op_type, dtype=dtype, with_dtype=True), ie_device, precision,
                   ir_version, kwargs_to_prepare_input={'shape': shape})

    @pytest.mark.parametrize("shape", [(1, 1), (1, 2), (1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5, 6)])
    @pytest.mark.parametrize("op_type", ["aten::zeros_like", "aten::ones_like"])
    @pytest.mark.parametrize("dtype", ["int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.nightly
    def test_fill_like_with_out(self, op_type, shape, dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(op_type, dtype=dtype, with_out=True), ie_device, precision,
                   ir_version, kwargs_to_prepare_input={'shape': shape})


class TestNewZeros(PytorchLayerTest):
    """Layer test for Tensor.new_zeros over inputs of several source dtypes."""

    def _prepare_input(self, input_dtype=np.float32):
        return (np.random.randn(1, 3, 10, 10).astype(input_dtype),)

    def create_model(self, shape, dtype=None, used_dtype=False):
        import torch
        dtype_map = {
            "float32": torch.float32,
            "float64": torch.float64,
            "int64": torch.int64,
            "int32": torch.int32,
            "uint8": torch.uint8,
            "int8": torch.int8,
            "bool": torch.bool
        }

        # Renamed from the copy-pasted "aten_full" to match the op under test.
        class aten_new_zeros(torch.nn.Module):
            def __init__(self, shape):
                super(aten_new_zeros, self).__init__()
                self.shape = shape

            def forward(self, input_tensor: torch.Tensor):
                return input_tensor.new_zeros(self.shape)

        class aten_new_zeros_with_dtype(torch.nn.Module):
            def __init__(self, shape, dtype):
                super(aten_new_zeros_with_dtype, self).__init__()
                self.shape = shape
                self.dtype = dtype

            def forward(self, input_tensor: torch.Tensor):
                return input_tensor.new_zeros(self.shape, dtype=self.dtype)

        ref_net = None
        model = aten_new_zeros(shape)

        if used_dtype:
            dtype = dtype_map[dtype]
            model = aten_new_zeros_with_dtype(shape, dtype)

        return model, ref_net, "aten::new_zeros"

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("input_dtype", [np.uint8, np.int8, np.int32, np.int64, np.float32, np.float64])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_new_zeros(self, shape, input_dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(shape), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'input_dtype': input_dtype})

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("input_dtype", [bool, np.uint8, np.int8, np.int32, np.int64, np.float32, np.float64])
    @pytest.mark.parametrize("dtype", ["bool", "uint8", "int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.nightly
    def test_new_zeros_with_dtype(self, shape, dtype, input_dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(shape, dtype=dtype, used_dtype=True), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'input_dtype': input_dtype})


class TestNewOnes(PytorchLayerTest):
    """Layer test for Tensor.new_ones over inputs of several source dtypes."""

    def _prepare_input(self, input_dtype=np.float32):
        return (np.random.randn(1, 3, 10, 10).astype(input_dtype),)

    def create_model(self, shape, dtype=None, used_dtype=False):
        import torch
        dtype_map = {
            "float32": torch.float32,
            "float64": torch.float64,
            "int64": torch.int64,
            "int32": torch.int32,
            "uint8": torch.uint8,
            "int8": torch.int8,
            "bool": torch.bool
        }

        # Renamed from the copy-pasted "aten_full" to match the op under test.
        class aten_new_ones(torch.nn.Module):
            def __init__(self, shape):
                super(aten_new_ones, self).__init__()
                self.shape = shape

            def forward(self, input_tensor: torch.Tensor):
                return input_tensor.new_ones(self.shape)

        class aten_new_ones_with_dtype(torch.nn.Module):
            def __init__(self, shape, dtype):
                super(aten_new_ones_with_dtype, self).__init__()
                self.shape = shape
                self.dtype = dtype

            def forward(self, input_tensor: torch.Tensor):
                return input_tensor.new_ones(self.shape, dtype=self.dtype)

        ref_net = None
        model = aten_new_ones(shape)

        if used_dtype:
            dtype = dtype_map[dtype]
            model = aten_new_ones_with_dtype(shape, dtype)

        return model, ref_net, "aten::new_ones"

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("input_dtype", [np.uint8, np.int8, np.int32, np.int64, np.float32, np.float64])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_new_ones(self, shape, input_dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(shape), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'input_dtype': input_dtype})

    @pytest.mark.parametrize("shape", [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5, 6]])
    @pytest.mark.parametrize("input_dtype", [bool, np.uint8, np.int8, np.int32, np.int64, np.float32, np.float64])
    @pytest.mark.parametrize("dtype", ["bool", "uint8", "int8", "int32", "int64", "float32", "float64"])
    @pytest.mark.nightly
    def test_new_ones_with_dtype(self, shape, dtype, input_dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(shape, dtype=dtype, used_dtype=True), ie_device, precision, ir_version,
                   kwargs_to_prepare_input={'input_dtype': input_dtype})


# === tests/layer_tests/pytorch_tests/test_group_norm.py ===
class TestGroupNorm(PytorchLayerTest):
    """Layer test for aten::group_norm over 3D/4D/5D inputs."""

    def _prepare_input(self, ndim=4):
        import numpy as np
        shape5d = [20, 6, 10, 10, 10]
        shape = shape5d[:ndim]
        return (np.random.randn(*shape).astype(np.float32),)

    def create_model(self, n_groups, weights_shape=None, bias=False, eps=1e-05):
        import torch
        import torch.nn.functional as F

        class aten_group_norm(torch.nn.Module):
            def __init__(self, n_groups, weights_shape=None, bias=True, eps=1e-05):
                super(aten_group_norm, self).__init__()
                self.weight = torch.randn(weights_shape) if weights_shape else None
                self.bias = None
                if bias:
                    self.bias = torch.randn(weights_shape)
                self.n_groups = n_groups
                self.eps = eps

            def forward(self, x):
                return F.group_norm(x, self.n_groups, self.weight, self.bias, self.eps)

        ref_net = None

        return aten_group_norm(n_groups, weights_shape, bias, eps), ref_net, "aten::group_norm"

    @pytest.mark.parametrize("params",
                             [
                                 {"n_groups": 3},
                                 {"n_groups": 1},
                                 {"n_groups": 3, 'eps': 1.0},
                                 {"n_groups": 3, 'weights_shape': (6,), 'eps': -0.05},
                                 {"n_groups": 3, 'weights_shape': (6,)},
                                 {"n_groups": 2, 'weights_shape': (6,), 'bias': True},
                                 {"n_groups": 2, 'weights_shape': (6,), 'bias': False},
                                 {"n_groups": 2, 'weights_shape': (6,), 'bias': True, 'eps': 0.0},
                                 {"n_groups": 2, 'weights_shape': (6,), 'bias': False, 'eps': 0.0001},
                             ])
    @pytest.mark.parametrize("kwargs_to_prepare_input", [
        {"ndim": 3},
        {'ndim': 4},
        {"ndim": 5}
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_group_norm(self, params, ie_device, precision, ir_version, kwargs_to_prepare_input):
        self._test(*self.create_model(**params),
                   ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input)


# === tests/layer_tests/pytorch_tests/test_im2col.py ===
class TestIm2Col(PytorchLayerTest):
    """Layer test for aten::im2col via torch.nn.functional.unfold."""

    def _prepare_input(self):
        import numpy as np
        return (np.random.randn(10, 3, 24, 24).astype(np.float32),)

    def create_model(self, kernel_size, dilation, padding, stride):
        import torch

        class aten_im2col(torch.nn.Module):
            def __init__(self, kernel_size, dilation, padding, stride):
                super(aten_im2col, self).__init__()
                self.kernel_size = kernel_size
                self.dilation = dilation
                self.padding = padding
                self.stride = stride

            def forward(self, x):
                return torch.nn.functional.unfold(
                    x,
                    kernel_size=self.kernel_size,
                    dilation=self.dilation,
                    padding=self.padding,
                    stride=self.stride
                )

        ref_net = None

        return aten_im2col(kernel_size, dilation, padding, stride), ref_net, "aten::im2col"

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("kernel_size", [[2, 3], [3, 2], [3, 3], [2, 2], [1, 1]])
    @pytest.mark.parametrize("dilation", [1, 2, 3, (1, 2)])
    @pytest.mark.parametrize("padding", [0, 5, 1, [2, 3]])
    @pytest.mark.parametrize("stride", [3, 1, [2, 1]])
    def test_im2col(self, kernel_size, dilation, padding, stride, ie_device, precision, ir_version):
        # Renamed from the copy-pasted "test_exp" for unambiguous reporting.
        self._test(*self.create_model(kernel_size, dilation, padding, stride),
                   ie_device, precision, ir_version)


# === tests/layer_tests/pytorch_tests/test_leaky_relu.py ===
class TestLeakyRelu(PytorchLayerTest):
    """Layer test for aten::leaky_relu / in-place aten::leaky_relu_."""

    def _prepare_input(self):
        import numpy as np
        return (np.random.randn(1, 3, 224, 224).astype(np.float32),)

    def create_model(self, alpha, inplace):
        import torch
        import torch.nn.functional as F

        class aten_leaky_relu(torch.nn.Module):
            def __init__(self, alpha, inplace):
                super(aten_leaky_relu, self).__init__()
                self.alpha = alpha
                self.inplace = inplace

            def forward(self, x):
                # Return the input too so the in-place mutation is observable.
                return x, F.leaky_relu(x, self.alpha, inplace=self.inplace)

        ref_net = None

        return aten_leaky_relu(alpha, inplace), ref_net, "aten::leaky_relu" if not inplace else "aten::leaky_relu_"

    @pytest.mark.parametrize("alpha,inplace",
                             [(0.01, True), (0.01, False), (1.01, True), (1.01, False),
                              (-0.01, True), (-0.01, False)])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_leaky_relu(self, alpha, inplace, ie_device, precision, ir_version):
        self._test(*self.create_model(alpha, inplace), ie_device, precision, ir_version)


# === tests/layer_tests/pytorch_tests/test_len.py ===
@pytest.mark.parametrize('input_tensor', (np.random.randn(2, 1, 3), np.random.randn(3, 7),
                                          np.random.randn(1, 1, 4, 4)))
class TestLen(PytorchLayerTest):
    """Layer test for aten::len on tensors and on int lists from Tensor.size()."""

    def _prepare_input(self):
        input_tensor = self.input_tensor * 10
        return (input_tensor.astype(np.int64),)

    def create_model(self):
        class aten_len(torch.nn.Module):
            def forward(self, input_tensor):
                return torch.as_tensor(len(input_tensor), dtype=torch.int)

        ref_net = None

        return aten_len(), ref_net, "aten::len"

    def create_model_int_list(self):
        class aten_len(torch.nn.Module):
            def forward(self, input_tensor):
                int_list = input_tensor.size()
                return torch.as_tensor(len(int_list), dtype=torch.int)

        ref_net = None

        return aten_len(), ref_net, "aten::len"

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_len(self, ie_device, precision, ir_version, input_tensor):
        self.input_tensor = input_tensor
        self._test(*self.create_model(), ie_device, precision, ir_version)

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_len_int_list(self, ie_device, precision, ir_version, input_tensor):
        self.input_tensor = input_tensor
        self._test(*self.create_model_int_list(),
                   ie_device, precision, ir_version)
ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_listunpack.py b/tests/layer_tests/pytorch_tests/test_listunpack.py new file mode 100644 index 00000000000..787ec4a3beb --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_listunpack.py @@ -0,0 +1,126 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from typing import List + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestListUnpack(PytorchLayerTest): + def _prepare_input(self): + return ( + np.random.randn(8, 3, 512, 512), + np.random.randn(1, 3, 224, 224), + np.random.randn(10, 1, 8, 8), + np.random.randn(1, 1, 1, 1), + ) + + def create_model_size_listunpack(self): + class prim_listunpack(torch.nn.Module): + def forward(self, in1, in2, in3, in4): + a, b, c, d = in1.size() + return a, b, c, d + + ref_net = None + + return ( + prim_listunpack(), + ref_net, + "prim::ListUnpack", + ) + + def create_model_size_slice_listunpack(self, slices): + class prim_listunpack(torch.nn.Module): + def __init__(self, slices): + self.start = slices[0] + self.stop = slices[1] + self.step = slices[2] + super(prim_listunpack, self).__init__() + + def forward(self, in1, in2, in3, in4): + a, b = in1.size()[self.start: self.stop: self.step] + return a, b + + ref_net = None + + return prim_listunpack(slices), ref_net, "prim::ListUnpack" + + def create_model_listconstruct_append_listunpack(self): + class prim_listunpack(torch.nn.Module): + def forward(self, in1, in2, in3, in4): + in_list = [in1, in2] + in_list.append(in3) + in_list.append(in4) + a, b, c, d = in_list + return a, b, c, d + + ref_net = None + + return prim_listunpack(), ref_net, "prim::ListUnpack" + + def create_model_listconstruct_getitem_listunpack(self, idx): + class prim_listunpack(torch.nn.Module): + def __init__(self, idx): + self.idx = idx + super(prim_listunpack, self).__init__() + + def forward(self, in1, in2, in3, in4): + items: 
List[List[int]] = [] + items.append(in1.size()) + items.append(in2.size()) + items.append(in3.size()) + items.append(in4.size()) + getitem_0 = items[self.idx] + a, b, c, d = getitem_0 + return a, b, c, d + + ref_net = None + + return prim_listunpack(idx), ref_net, "prim::ListUnpack" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_size_listunpack(self, ie_device, precision, ir_version): + self._test( + *self.create_model_size_listunpack(), ie_device, precision, ir_version + ) + + @pytest.mark.parametrize( + "slices", [(0, 2, 1), (0, 4, 2), (-1, -3, -1), (-3, -1, 1)] + ) + @pytest.mark.nightly + @pytest.mark.precommit + def test_size_slice_listunpack(self, slices, ie_device, precision, ir_version): + self._test( + *self.create_model_size_slice_listunpack(slices), + ie_device, + precision, + ir_version + ) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_listconstruct_append_listunpack(self, ie_device, precision, ir_version): + self._test( + *self.create_model_listconstruct_append_listunpack(), + ie_device, + precision, + ir_version + ) + + @pytest.mark.parametrize("idx", [-4, -3, -2, -1, 0, 1, 2, 3]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_listconstruct_getitem_listunpack( + self, idx, ie_device, precision, ir_version + ): + self._test( + *self.create_model_listconstruct_getitem_listunpack(idx), + ie_device, + precision, + ir_version + ) diff --git a/tests/layer_tests/pytorch_tests/test_masked_fill.py b/tests/layer_tests/pytorch_tests/test_masked_fill.py new file mode 100644 index 00000000000..b2a85a336be --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_masked_fill.py @@ -0,0 +1,57 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestMaskedFill(PytorchLayerTest): + def _prepare_input(self, mask_fill='ones', mask_dtype=bool): + input_shape = [1, 10] + mask = 
np.zeros(input_shape).astype(mask_dtype) + if mask_fill == 'ones': + mask = np.ones(input_shape).astype(mask_dtype) + if mask_fill == 'random': + idx = np.random.choice(10, 5) + mask[:, idx] = 1 + + return (np.random.randn(1, 10).astype(np.float32), mask) + + def create_model(self, value, inplace): + import torch + + class aten_masked_fill(torch.nn.Module): + def __init__(self, value): + super(aten_masked_fill, self).__init__() + self.value = value + + def forward(self, x, mask): + return x.masked_fill(mask, self.value) + + class aten_masked_fill_(torch.nn.Module): + def __init__(self, value): + super(aten_masked_fill_, self).__init__() + self.value = value + + def forward(self, x, mask): + return x.masked_fill_(mask, self.value) + + ref_net = None + + if not inplace: + return aten_masked_fill(value), ref_net, "aten::masked_fill" + return aten_masked_fill_(value), ref_net, "aten::masked_fill_" + + @pytest.mark.parametrize("value", [0.0, 1.0, -1.0]) + @pytest.mark.parametrize( + "mask_fill", ['zeros', 'ones', 'random']) + @pytest.mark.parametrize("mask_dtype", [np.uint8, bool]) # np.float32 incorrectly casted to bool + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_masked_fill(self, value, mask_fill, mask_dtype, inplace, ie_device, precision, ir_version): + self._test(*self.create_model(value, inplace), + ie_device, precision, ir_version, + kwargs_to_prepare_input={'mask_fill': mask_fill, 'mask_dtype': mask_dtype}) diff --git a/tests/layer_tests/pytorch_tests/test_min_max.py b/tests/layer_tests/pytorch_tests/test_min_max.py new file mode 100644 index 00000000000..186a26ad848 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_min_max.py @@ -0,0 +1,138 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestMinMax(PytorchLayerTest): + def _prepare_input(self, second_input=False): + 
import numpy as np + if not second_input: + return (np.random.randn(1, 3, 10, 10).astype(np.float32),) + return (np.random.randn(1, 3, 10, 10).astype(np.float32), np.random.randn(1, 3, 10, 10).astype(np.float32)) + + def create_model(self, op_type, axes, keep_dims, single_input=True): + import torch + op_types = { + 'max': torch.max, + 'min': torch.min + } + + op = op_types[op_type] + + class aten_min_max(torch.nn.Module): + def __init__(self, op): + super(aten_min_max, self).__init__() + self.op = op + + def forward(self, x): + return self.op(x) + + class aten_min_max_3args(torch.nn.Module): + def __init__(self, op, axes=None, keep_dims=None): + super(aten_min_max_3args, self).__init__() + self.op = op + self.axes = axes + self.keep_dims = keep_dims + + def forward(self, x): + return self.op(x, self.axes, self.keep_dims) + + class aten_min_max_2args(torch.nn.Module): + def __init__(self, op): + super(aten_min_max_2args, self).__init__() + self.op = op + + def forward(self, x, y): + return self.op(x, y) + + ref_net = None + if axes is None and keep_dims is None: + model_cls = aten_min_max( + op) if single_input else aten_min_max_2args(op) + else: + model_cls = aten_min_max_3args(op, axes, keep_dims) + + return model_cls, ref_net, f"aten::{op_type}" + + @pytest.mark.parametrize("axes,keep_dims", [(None, None), (1, False), (1, True), (-1, False), (-1, True)]) + @pytest.mark.parametrize("op_type", ['min', 'max']) + @pytest.mark.nightly + @pytest.mark.precommit + def test_reduce_min_max(self, axes, keep_dims, op_type, ie_device, precision, ir_version): + self._test(*self.create_model(op_type, axes, keep_dims, + single_input=True), ie_device, precision, ir_version) + + @pytest.mark.parametrize("op_type", ['min', 'max']) + @pytest.mark.nightly + @pytest.mark.precommit + def test_min_max(self, op_type, ie_device, precision, ir_version): + self._test(*self.create_model(op_type, None, None, single_input=False), + ie_device, precision, ir_version, 
kwargs_to_prepare_input={"second_input": True}) + + +class TestPrimMax(PytorchLayerTest): + def _prepare_input(self, first_input, second_input, dtype="float"): + import numpy as np + first_array = np.array(first_input).astype(dtype) + if not second_input: + return (first_array,) + second_array = np.array(second_input).astype(dtype) + return (first_array, second_array) + + def create_model(self, case): + import torch + + class prim_max_2_values(torch.nn.Module): + + def forward(self, x: float, y: float): + return max(x, y) + + class prim_max_2_list_values(torch.nn.Module): + def forward(self, x: float, y: float): + return max([x, x + y], [y, y - x]) + + class prim_max_1list_several_values(torch.nn.Module): + + def forward(self, x: float, y: float): + return max([x, y, x + y]) + + class prim_max_one_value(torch.nn.Module): + def forward(self, x: float, y: float): + return max(x) + + cases = { + "2_values": prim_max_2_values, + "2_list_values": prim_max_2_list_values, + "list_several_values": prim_max_1list_several_values, + "one_value": prim_max_one_value + } + model_cls = cases[case]() + + ref_net = None + + return model_cls, ref_net, f"prim::max" + + @pytest.mark.parametrize("case", ["2_values", "2_list_values", "list_several_values", "one_value"]) + @pytest.mark.parametrize("kwargs_to_prepare_input", [ + {"first_input": 0, "second_input": 1, "dtype": "float"}, + {"first_input": 1, "second_input": 1, "dtype": "float"}, + {"first_input": 2, "second_input": 1, "dtype": "float"}, + {"first_input": 0, "second_input": 1, "dtype": "int"}, + {"first_input": 1, "second_input": 1, "dtype": "int"}, + {"first_input": 2, "second_input": 1, "dtype": "int"}, + # is not supported by OV + pytest.param({"first_input": 0, "second_input": 1, + "dtype": "bool"}, marks=pytest.mark.xfail), + pytest.param({"first_input": 1, "second_input": 1, + "dtype": "bool"}, marks=pytest.mark.xfail), + pytest.param({"first_input": 2, "second_input": 1, + "dtype": "bool"}, marks=pytest.mark.xfail), + 
]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_min_max(self, case, kwargs_to_prepare_input, ie_device, precision, ir_version): + self._test(*self.create_model(case), + ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input) diff --git a/tests/layer_tests/pytorch_tests/test_mm.py b/tests/layer_tests/pytorch_tests/test_mm.py new file mode 100644 index 00000000000..b0f920e47f5 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_mm.py @@ -0,0 +1,86 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestMatMul(PytorchLayerTest): + def _prepare_input(self, matrix1_shape=(2, 2), matrix2_shape=(2, 2)): + import numpy as np + return (np.random.randn(*matrix1_shape).astype(np.float32), np.random.randn(*matrix2_shape).astype(np.float32)) + + def create_model(self, op_type="aten::mm"): + import torch + ops = { + "aten::mm": torch.mm, + "aten::bmm": torch.bmm, + "aten::matmul": torch.matmul + } + + class aten_mm(torch.nn.Module): + def __init__(self, op): + super(aten_mm, self).__init__() + self.op = op + + def forward(self, m1, m2): + return self.op(m1, m2) + + ref_net = None + + return aten_mm(ops[op_type]), ref_net, op_type + + @pytest.mark.parametrize("kwargs_to_prepare_input", [ + {'matrix1_shape': (3, 3), 'matrix2_shape': (3, 3)}, + {'matrix1_shape': (2, 3), 'matrix2_shape': (3, 2)}, + {'matrix1_shape': (10, 5), 'matrix2_shape': (5, 1)}, + {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 2)}, + {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 1)}, + + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_mm(self, kwargs_to_prepare_input, ie_device, precision, ir_version): + self._test(*self.create_model('aten::mm'), ie_device, precision, ir_version, + kwargs_to_prepare_input=kwargs_to_prepare_input) + + @pytest.mark.parametrize("kwargs_to_prepare_input", [ + {'matrix1_shape': (10, 3, 3), 
'matrix2_shape': (10, 3, 3)}, + {'matrix1_shape': (1, 2, 3), 'matrix2_shape': (1, 3, 2)}, + {'matrix1_shape': (2, 10, 5), 'matrix2_shape': (2, 5, 1)}, + {'matrix1_shape': (3, 1, 10), 'matrix2_shape': (3, 10, 2)}, + {'matrix1_shape': (4, 1, 10), 'matrix2_shape': (4, 10, 1)}, + + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_bmm(self, kwargs_to_prepare_input, ie_device, precision, ir_version): + self._test(*self.create_model('aten::bmm'), ie_device, precision, ir_version, + kwargs_to_prepare_input=kwargs_to_prepare_input) + + @pytest.mark.parametrize("kwargs_to_prepare_input", [ + {'matrix1_shape': (10, 3, 3), 'matrix2_shape': (10, 3, 3)}, + {'matrix1_shape': (1, 2, 3), 'matrix2_shape': (1, 3, 2)}, + {'matrix1_shape': (2, 10, 5), 'matrix2_shape': (2, 5, 1)}, + {'matrix1_shape': (3, 1, 10), 'matrix2_shape': (3, 10, 2)}, + {'matrix1_shape': (4, 1, 10), 'matrix2_shape': (4, 10, 1)}, + {'matrix1_shape': (3, 3), 'matrix2_shape': (3, 3)}, + {'matrix1_shape': (2, 3), 'matrix2_shape': (3, 2)}, + {'matrix1_shape': (10, 5), 'matrix2_shape': (5, 1)}, + {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 2)}, + {'matrix1_shape': (1, 10), 'matrix2_shape': (10, 1)}, + {'matrix1_shape': (10, 3, 3), 'matrix2_shape': (3, 3)}, + {'matrix1_shape': (2, 3), 'matrix2_shape': (10, 3, 2)}, + {'matrix1_shape': (1, 10, 5), 'matrix2_shape': (5, 1)}, + {'matrix1_shape': (5, 1, 10), 'matrix2_shape': (10, 2)}, + {'matrix1_shape': (1, 10), 'matrix2_shape': (4, 10, 2)}, + {'matrix1_shape': (2, 1, 10), 'matrix2_shape': (10, 1)}, + {'matrix1_shape': (1, 10), 'matrix2_shape': (2, 10, 1)}, + + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_matmul(self, kwargs_to_prepare_input, ie_device, precision, ir_version): + self._test(*self.create_model('aten::matmul'), ie_device, precision, ir_version, + kwargs_to_prepare_input=kwargs_to_prepare_input) diff --git a/tests/layer_tests/pytorch_tests/test_nms.py b/tests/layer_tests/pytorch_tests/test_nms.py new file mode 100644 index 
00000000000..dc4613215f0 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_nms.py @@ -0,0 +1,39 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from pytorch_layer_test_class import PytorchLayerTest +import numpy as np +import torch +import torchvision + + +@pytest.mark.parametrize('boxes_num', (1, 2, 3, 4, 5)) +class TestNms(PytorchLayerTest): + + def _prepare_input(self): + # PyTorch requires that boxes are in (x1, y1, x2, y2) format, where 0<=x1 None: + super().__init__() + self.iou_threshold = 0.5 + + def forward(self, boxes, scores): + return torchvision.ops.nms(boxes=boxes, scores=scores, iou_threshold=self.iou_threshold) + + ref_net = None + + return torchvision_nms(), ref_net, "torchvision::nms" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_nms(self, ie_device, precision, ir_version, boxes_num): + self.boxes_num = boxes_num + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_nonzero.py b/tests/layer_tests/pytorch_tests/test_nonzero.py new file mode 100644 index 00000000000..c23c152c1ab --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_nonzero.py @@ -0,0 +1,49 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestNonZero(PytorchLayerTest): + def _prepare_input(self, mask_fill='ones', mask_dtype=bool): + input_shape = [2, 10, 2] + mask = np.zeros(input_shape).astype(mask_dtype) + if mask_fill == 'ones': + mask = np.ones(input_shape).astype(mask_dtype) + if mask_fill == 'random': + idx = np.random.choice(10, 5) + mask[:, idx, 1] = 1 + return (mask,) + + def create_model(self, as_tuple): + import torch + + class aten_nonzero(torch.nn.Module): + + def forward(self, cond): + return torch.nonzero(cond) + + class aten_nonzero_numpy(torch.nn.Module): + + def forward(self, 
cond): + return torch.nonzero(cond, as_tuple=True) + + ref_net = None + + if not as_tuple: + return aten_nonzero(), ref_net, "aten::nonzero" + return aten_nonzero_numpy(), ref_net, "aten::nonzero_numpy" + + @pytest.mark.parametrize( + "mask_fill", ['zeros', 'ones', 'random']) # np.float32 incorrectly casted to bool + @pytest.mark.parametrize("mask_dtype", [np.uint8, bool]) + @pytest.mark.parametrize("as_tuple", [False, True]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_nonzero(self, mask_fill, mask_dtype, as_tuple, ie_device, precision, ir_version): + self._test(*self.create_model(as_tuple), + ie_device, precision, ir_version, + kwargs_to_prepare_input={'mask_fill': mask_fill, 'mask_dtype': mask_dtype}, trace_model=as_tuple) diff --git a/tests/layer_tests/pytorch_tests/test_norm.py b/tests/layer_tests/pytorch_tests/test_norm.py new file mode 100644 index 00000000000..7cea64f4825 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_norm.py @@ -0,0 +1,39 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('p', [-2, -1, 0, 1, 2, 2.5, float('inf'), float('-inf')]) +@pytest.mark.parametrize('dim', [[0], [0, 1], [0, 1, 2]]) +@pytest.mark.parametrize('keepdim', [True, False]) +class TestNorm(PytorchLayerTest): + + def _prepare_input(self): + return (np.random.randn(2, 3, 4, 5),) + + def create_model(self, p, dim, keepdim): + class aten_norm(torch.nn.Module): + + def __init__(self, p, dim, keepdim) -> None: + super().__init__() + self.p = p + self.dim = dim + self.keepdim = keepdim + + def forward(self, input_data): + return torch._VF.norm(input_data, self.p, self.dim, self.keepdim) + + ref_net = None + + return aten_norm(p, dim, keepdim), ref_net, "aten::norm" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_norm(self, ie_device, precision, ir_version, p, dim, keepdim): + 
self._test(*self.create_model(p, dim, keepdim), + ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_numel.py b/tests/layer_tests/pytorch_tests/test_numel.py new file mode 100644 index 00000000000..af111434acc --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_numel.py @@ -0,0 +1,37 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestNumel(PytorchLayerTest): + def _prepare_input(self, input_shape=(2)): + import numpy as np + return (np.random.randn(*input_shape).astype(np.float32),) + + def create_model(self): + import torch + class aten_numel(torch.nn.Module): + + def forward(self, x): + return torch.numel(x) + + ref_net = None + + return aten_numel(), ref_net, 'aten::numel' + + @pytest.mark.parametrize("kwargs_to_prepare_input", [ + {'input_shape': (1,)}, + {'input_shape': (2,)}, + {'input_shape': (2, 3)}, + {'input_shape': (3, 4, 5)}, + {'input_shape': (1, 2, 3, 4)}, + {'input_shape': (1, 2, 3, 4, 5)} + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_numel(self, kwargs_to_prepare_input, ie_device, precision, ir_version): + self._test(*self.create_model(), ie_device, precision, ir_version, + kwargs_to_prepare_input=kwargs_to_prepare_input) diff --git a/tests/layer_tests/pytorch_tests/test_pad.py b/tests/layer_tests/pytorch_tests/test_pad.py new file mode 100644 index 00000000000..66de7e59306 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_pad.py @@ -0,0 +1,110 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestPad(PytorchLayerTest): + def _prepare_input(self, ndim=4): + import numpy as np + input_5d_shape = [1, 3, 14, 14, 18] + return (np.random.randn(*input_5d_shape[:ndim]).astype(np.float32),) + + def create_model(self, pads, mode, value=None): + import 
torch + import torch.nn.functional as F + + class aten_pad(torch.nn.Module): + def __init__(self, pads, mode, value=None): + super().__init__() + self.pads = pads + self.mode = mode + self.value = value + + def forward(self, x): + return F.pad(x, self.pads, mode=self.mode, value=self.value) + + ref_net = None + + return aten_pad(pads, mode, value), ref_net, "aten::pad" + + @pytest.mark.parametrize("pads,mode,value", [ + ((1, 2, 3, 4), "reflect", None), + ((1, 0, 0, 0, 0, 1), "reflect", None), + ((0, 0, 0, 0, 0, 0), "reflect", None), + ((1, 2, 3, 4), "replicate", None), + ((1, 0, 0, 0, 0, 0), "replicate", None), + ((1, 0, 0, 0, 0, 1), "replicate", None), + ((0, 0, 0, 0, 0, 0), "replicate", None), + ((1, 2, 3, 4), "constant", None), + ((1, 2, 3, 4), "constant", 42.), + ((1, 2, 3, 4), "constant", -0.57), + ((1, 2), "constant", None), + ((1, 0, 0, 0, 0, 1), "constant", None), + ((0, 0, 0, 0, 0, 0), "constant", None), + ((1, 0, 0, 0, 0, 1, 1, 2), "constant", 0.), + ((1, 2, 0, 0), "circular", None), + ((1, 2, 3, 4), "circular", None), + ((0, 1, 0, 0), "circular", None), + ((0, 0, 0, 0), "circular", None), + ((0, 0, -1, -2), "circular", None), + ((-1, -2, -1, -2), "circular", None), + ((-5, -8, 0, 0), "circular", None), + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_pad4d(self, pads, mode, value, ie_device, precision, ir_version): + self._test(*self.create_model(pads, mode, value), ie_device, precision, ir_version, + kwargs_to_prepare_input={'ndim': 4}) + + @pytest.mark.parametrize("pads,mode,value", [ + ((1, 2, 3, 4, 5, 6), "reflect", None), + ((1, 0, 0, 0, 0, 1), "reflect", None), + ((1, 0, 0, 0, 0, 0), "reflect", None), + ((0, 0, 0, 0, 0, 0), "reflect", None), + ((1, 2, 3, 4, 5, 6), "replicate", None), + ((1, 0, 0, 0, 0, 0), "replicate", None), + ((1, 0, 0, 0, 0, 1), "replicate", None), + ((0, 0, 0, 0, 0, 0), "replicate", None), + ((1, 2, 3, 4), "constant", None), + ((1, 2, 3, 4), "constant", 42.), + ((1, 2, 3, 4), "constant", -0.57), + ((1, 2), 
"constant", None), + ((1, 0, 0, 0, 0, 1), "constant", None), + ((0, 0, 0, 0, 0, 0), "constant", None), + ((1, 0, 0, 0, 0, 1, 1, 2), "constant", 0.), + ((1, 0, 0, 0, 0, 1, 1, 2, 2, 3), "constant", 0.), + ((1, 2, 0, 0, 0, 0), "circular", None), + ((1, 2, 3, 4, 5, 6), "circular", None), + ((0, 1, 0, 0, 0, 0), "circular", None), + ((0, 0, 0, 0, 0, 0), "circular", None), + ((0, 0, -1, -2, 0, 0), "circular", None), + ((-1, -2, -1, -2, -1, -2), "circular", None), + ((-5, -8, 0, 0, 0, 0), "circular", None), + ((10, 10, 10, 10, 10, 10), "circular", None), + ]) + @pytest.mark.nightly + def test_pad5d(self, pads, mode, value, ie_device, precision, ir_version): + self._test(*self.create_model(pads, mode, value), ie_device, precision, ir_version, + kwargs_to_prepare_input={'ndim': 5}, trace_model=True) + + @pytest.mark.parametrize("pads,mode,value", [ + ((1, 2), "reflect", None), + ((1, 0), "reflect", None), + ((0, 0), "reflect", None), + ((1, 2), "replicate", None), + ((1, 0), "replicate", None), + ((0, 0), "replicate", None), + ((1, 0), "constant", None), + ((1, 0), "constant", 42.), + ((1, 0), "constant", -0.57), + ((1, 2, 3, 4), "constant", None), + ((1, 2, 3, 4), "constant", 42.), + ((1, 2, 3, 4), "constant", -0.57), + ]) + @pytest.mark.nightly + def test_pad2d(self, pads, mode, value, ie_device, precision, ir_version): + self._test(*self.create_model(pads, mode, value), ie_device, precision, ir_version, + kwargs_to_prepare_input={'ndim': 2}, trace_model=True) diff --git a/tests/layer_tests/pytorch_tests/test_permute.py b/tests/layer_tests/pytorch_tests/test_permute.py new file mode 100644 index 00000000000..9fa107c9ea1 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_permute.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestPermute(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return 
(np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, order): + import torch + + class aten_permute(torch.nn.Module): + def __init__(self, order): + super(aten_permute, self).__init__() + self.order = order + + def forward(self, x): + return torch.permute(x, self.order) + + ref_net = None + + return aten_permute(order), ref_net, "aten::permute" + + @pytest.mark.parametrize("order", [[0, 2, 3, 1], [0, 3, 1, 2]]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_relu(self, order, ie_device, precision, ir_version): + self._test(*self.create_model(order), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_pooling.py b/tests/layer_tests/pytorch_tests/test_pooling.py new file mode 100644 index 00000000000..bb8ea035aa3 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_pooling.py @@ -0,0 +1,158 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + +d2_avg_params = [{'kernel_size': [3, 3], 'stride': 1, 'padding': 0}, + {'kernel_size': [3, 3], 'stride': [1, 1], 'padding': 1}, + {'kernel_size': [3, 3], 'stride': [1, 1], 'padding': [0, 1]}, + {'kernel_size': [3, 3], 'stride': [1, 1], 'padding': [1, 0]}, + {'kernel_size': [3, 3], 'stride': [2, 1], 'padding': 0}, + {'kernel_size': [2, 1], 'stride': [2, 1], 'padding': 0}, + ] + +d1_avg_params = [{'kernel_size': 3, 'stride': 1, 'padding': 0}, + {'kernel_size': (4,), 'stride': 1, 'padding': 1}, + {'kernel_size': 4, 'stride': (5,), 'padding': 2}, + ] +d3_avg_params = [{'kernel_size': [3, 3, 3], 'stride': 1, 'padding': 0}, + {'kernel_size': [3, 3, 3], 'stride': [1, 1, 1], 'padding': 1}, + {'kernel_size': [3, 3, 3], 'stride': [3, 3, 3], 'padding': [0, 0, 0]}, + {'kernel_size': [3, 2, 1], 'stride': [3, 1, 1], 'padding': [0, 0, 0]}, + ] + + +class TestPooling(PytorchLayerTest): + def _prepare_input(self, ndim=4): + import numpy as np + shape = (1, 3, 15, 
15, 15) + return (np.random.randn(*shape[:ndim]).astype(np.float32),) + + def create_model(self, op_type, kernel_size, stride, padding, dilation=1, ceil_mode=True, count_include_pad=True): + import torch + + class aten_avg_pooling_base(torch.nn.Module): + def __init__(self): + super(aten_avg_pooling_base, self).__init__() + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.ceil_mode = ceil_mode + self.count_include_pad = count_include_pad + + def forward(self, x): + pass + + class aten_max_pooling_base(torch.nn.Module): + def __init__(self): + super(aten_max_pooling_base, self).__init__() + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.dilation = dilation + self.ceil_mode = ceil_mode + + def forward(self, x): + pass + + class aten_avg_pool2d(aten_avg_pooling_base): + def forward(self, x): + return torch.nn.functional.avg_pool2d(x, self.kernel_size, self.stride, self.padding, self.ceil_mode, + self.count_include_pad) + + class aten_avg_pool3d(aten_avg_pooling_base): + def forward(self, x): + return torch.nn.functional.avg_pool3d(x, self.kernel_size, self.stride, self.padding, self.ceil_mode, + self.count_include_pad) + + class aten_avg_pool1d(aten_avg_pooling_base): + def forward(self, x): + return torch.nn.functional.avg_pool1d(x, self.kernel_size, self.stride, self.padding, self.ceil_mode, + self.count_include_pad) + + class aten_max_pool2d(aten_max_pooling_base): + def forward(self, x): + return torch.nn.functional.max_pool2d(x, self.kernel_size, self.stride, self.padding, self.dilation, + self.ceil_mode) + + class aten_max_pool3d(aten_max_pooling_base): + def forward(self, x): + return torch.nn.functional.max_pool3d(x, self.kernel_size, self.stride, self.padding, self.dilation, + self.ceil_mode) + + class aten_max_pool1d(aten_max_pooling_base): + def forward(self, x): + return torch.nn.functional.max_pool1d(x, self.kernel_size, self.stride, self.padding, self.dilation, + self.ceil_mode) 
+ + ops = { + "max_pool1d": aten_max_pool1d, + "max_pool2d": aten_max_pool2d, + "max_pool3d": aten_max_pool3d, + "avg_pool1d": aten_avg_pool1d, + "avg_pool2d": aten_avg_pool2d, + "avg_pool3d": aten_avg_pool3d + } + + ref_net = None + aten_pooling = ops[op_type] + + return aten_pooling(), ref_net, f"aten::{op_type}" + + @pytest.mark.parametrize("params", d1_avg_params) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("count_include_pad", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_avg_pool1d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version): + self._test(*self.create_model("avg_pool1d", **params, ceil_mode=ceil_mode, count_include_pad=count_include_pad), + ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 3}, trace_model=True, + dynamic_shapes=False) + + @pytest.mark.parametrize("params", d2_avg_params) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("count_include_pad", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_avg_pool2d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version): + self._test(*self.create_model("avg_pool2d", **params, ceil_mode=ceil_mode, count_include_pad=count_include_pad), + ie_device, precision, ir_version, trace_model=True, dynamic_shapes=False) + + @pytest.mark.parametrize("params", d3_avg_params) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("count_include_pad", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_avg_pool3d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version): + self._test(*self.create_model("avg_pool3d", **params, ceil_mode=ceil_mode, count_include_pad=count_include_pad), + ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 5}, trace_model=True, + dynamic_shapes=False) + + @pytest.mark.parametrize("params", d1_avg_params) + 
@pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("dilation", [1, 2]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_max_pool1d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): + self._test(*self.create_model("max_pool1d", **params, ceil_mode=ceil_mode, dilation=dilation), + ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 3}, dynamic_shapes=False) + + @pytest.mark.parametrize("params", d2_avg_params) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("dilation", [1, 2]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_max_pool2d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): + self._test(*self.create_model("max_pool2d", **params, ceil_mode=ceil_mode, dilation=dilation), + ie_device, precision, ir_version, dynamic_shapes=False) + + @pytest.mark.parametrize("params", d3_avg_params) + @pytest.mark.parametrize("ceil_mode", [True, False]) + @pytest.mark.parametrize("dilation", [1, 2]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_max_pool3d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): + self._test(*self.create_model("max_pool3d", **params, ceil_mode=ceil_mode, dilation=dilation), + ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 5}, dynamic_shapes=False) diff --git a/tests/layer_tests/pytorch_tests/test_pow.py b/tests/layer_tests/pytorch_tests/test_pow.py new file mode 100644 index 00000000000..6266203b273 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_pow.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('test_input', [(np.array([[1, 2], [3, 4]], dtype=np.float32), + np.array([[1, 1], [2, 2]], dtype=np.float32),), + (np.array([[1, 2], [3, 4]], dtype=np.float32), + 
np.array([2, 3], dtype=np.float32),), + (np.array([[1, 2], [3, 4]], dtype=np.float32), + np.array([2], dtype=np.float32),), + (np.array([5, 6], dtype=np.float32), + np.array([[1, 2], [3, 4]], dtype=np.float32),), + (np.array([5], dtype=np.float32), + np.array([[1, 2], [3, 4]], dtype=np.float32),)]) +class TestPow(PytorchLayerTest): + """ + Input test data contains five test cases - elementwise power, broadcast exponent, one exponent, + broadcast base, one base. + """ + + def _prepare_input(self): + return self.test_input + + def create_model(self): + class aten_pow(torch.nn.Module): + + def forward(self, input_data, exponent): + return torch.pow(input_data, exponent) + + ref_net = None + + return aten_pow(), ref_net, "aten::pow" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_pow(self, ie_device, precision, ir_version, test_input): + self.test_input = test_input + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_relu.py b/tests/layer_tests/pytorch_tests/test_relu.py new file mode 100644 index 00000000000..a7d9ac3f182 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_relu.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestRelu(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, inplace=False): + import torch + import torch.nn.functional as F + + class aten_relu(torch.nn.Module): + def __init__(self, inplace): + super(aten_relu, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x, F.relu(x, inplace=self.inplace) + + ref_net = None + + return aten_relu(inplace), ref_net, "aten::relu" if not inplace else "aten::relu_" + + @pytest.mark.parametrize("inplace", [False, True]) + @pytest.mark.nightly + 
@pytest.mark.precommit + def test_relu(self, inplace, ie_device, precision, ir_version): + self._test(*self.create_model(inplace), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_repeat.py b/tests/layer_tests/pytorch_tests/test_repeat.py new file mode 100644 index 00000000000..c75831908b4 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_repeat.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestRelu(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(2, 10).astype(np.float32),) + + def create_model(self, repeats): + import torch + + class aten_repeat(torch.nn.Module): + def __init__(self, repeats): + super(aten_repeat, self).__init__() + self.repeats = repeats + + def forward(self, x): + return x.repeat(self.repeats) + + ref_net = None + + return aten_repeat(repeats), ref_net, "aten::repeat" + + @pytest.mark.parametrize("repeats", [(4, 3), (1, 1), (1, 2, 3), (1, 2, 2, 3)]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_relu(self, repeats, ie_device, precision, ir_version): + self._test(*self.create_model(repeats), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_reshape.py b/tests/layer_tests/pytorch_tests/test_reshape.py new file mode 100644 index 00000000000..5b89cab94d0 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_reshape.py @@ -0,0 +1,42 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestReshape(PytorchLayerTest): + def _prepare_input(self): + return (np.random.uniform(0, 50, (1, 12, 12, 24)).astype(np.float32),) + + def create_model(self, shape): + import torch + + class aten_reshape(torch.nn.Module): + def __init__(self, shape): + 
super(aten_reshape, self).__init__() + self.shape = shape + + def forward(self, x): + return torch.reshape(x, self.shape) + + ref_net = None + + return aten_reshape(shape), ref_net, "aten::reshape" + + @pytest.mark.parametrize(("shape"), [ + [-1, 6], + [12, 12, 24, 1], + [12, 12, 12, 2], + [12, -1, 12, 24], + [24, 12, 12, 1], + [24, 12, 12, -1], + [24, 1, -1, 12], + [24, 1, 1, -1, 12], + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_reshape(self, shape, ie_device, precision, ir_version): + self._test(*self.create_model(shape), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_reshape_as.py b/tests/layer_tests/pytorch_tests/test_reshape_as.py new file mode 100644 index 00000000000..6831279cce0 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_reshape_as.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('input_tesnors', ((np.ones((3, 6)), np.ones((2, 9))), + (np.ones((2, 2, 3)), np.ones((6, 2))), + (np.ones((6, 2)), np.ones((2, 2, 3))))) +class TestReshapeAs(PytorchLayerTest): + + def _prepare_input(self): + return self.input_tesnors + + def create_model(self): + class aten_reshape_as(torch.nn.Module): + + def forward(self, input_tensor, shape_tensor): + return input_tensor.reshape_as(shape_tensor) + + ref_net = None + + return aten_reshape_as(), ref_net, "aten::reshape_as" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_reshape_as(self, ie_device, precision, ir_version, input_tesnors): + self.input_tesnors = input_tesnors + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_roll.py b/tests/layer_tests/pytorch_tests/test_roll.py new file mode 100644 index 00000000000..a405fa839af --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_roll.py 
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import pytest

from pytorch_layer_test_class import PytorchLayerTest


class TestRoll(PytorchLayerTest):
    def _prepare_input(self):
        return (np.random.uniform(0, 50, (2, 3, 4)).astype(np.float32),)

    def create_model(self, shifts, dim):
        """Build a module calling torch.roll; dim=None exercises the flattened-roll overload."""
        import torch

        class aten_roll(torch.nn.Module):
            def __init__(self, shifts, dim=None):
                super(aten_roll, self).__init__()
                self.dim = dim
                # NOTE(review): fixed the "self.shits" typo -> "self.shifts"
                # (was used consistently, so behavior is unchanged).
                self.shifts = shifts

            def forward(self, x):
                if self.dim is not None:
                    return torch.roll(x, self.shifts, self.dim)
                return torch.roll(x, self.shifts)

        ref_net = None

        return aten_roll(shifts, dim), ref_net, "aten::roll"

    @pytest.mark.parametrize(("shifts", "dim"), [
        [(2, 1), (0, 1)],
        [1, 0],
        [-1, 0],
        [1, None],
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_roll(self, shifts, dim, ie_device, precision, ir_version):
        self._test(*self.create_model(shifts, dim), ie_device, precision, ir_version)


# --- new file: tests/layer_tests/pytorch_tests/test_rsqrt.py ---
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest

from pytorch_layer_test_class import PytorchLayerTest


class TestRSqrt(PytorchLayerTest):
    def _prepare_input(self):
        import numpy as np
        return (np.random.randn(1, 10).astype(np.float32),)

    def create_model(self):
        import torch

        class aten_rsqrt(torch.nn.Module):

            def forward(self, x):
                return torch.rsqrt(x)

        ref_net = None

        return aten_rsqrt(), ref_net, "aten::rsqrt"

    @pytest.mark.nightly
    @pytest.mark.precommit
    # NOTE(review): renamed from test_relu (copy-paste) to test_rsqrt.
    def test_rsqrt(self, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version)

# (diff header for the next file continues on the following line)
a/tests/layer_tests/pytorch_tests/test_select.py b/tests/layer_tests/pytorch_tests/test_select.py new file mode 100644 index 00000000000..fd1c325d4a6 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_select.py @@ -0,0 +1,37 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('input_dim', list(range(-3, 4))) +@pytest.mark.parametrize('input_index', list(range(-3, 4))) +class TestSelect(PytorchLayerTest): + + def _prepare_input(self): + return (np.random.randn(4, 4, 5, 5).astype(np.float32),) + + def create_model(self, input_dim, input_index): + class aten_select(torch.nn.Module): + + def __init__(self, input_dim, input_index) -> None: + super().__init__() + self.dim = input_dim + self.index = input_index + + def forward(self, input_tensor): + return torch.select(input_tensor, int(self.dim), int(self.index)) + + ref_net = None + + return aten_select(input_dim, input_index), ref_net, "aten::select" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_select(self, ie_device, precision, ir_version, input_dim, input_index): + self._test(*self.create_model(input_dim, input_index), + ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_selu.py b/tests/layer_tests/pytorch_tests/test_selu.py new file mode 100644 index 00000000000..4be0b7029e3 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_selu.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestSilu(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, inplace=False): + import torch + import torch.nn.functional as F + + class aten_selu(torch.nn.Module): + def 
__init__(self, inplace): + super(aten_selu, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x, F.selu(x, inplace=self.inplace) + + ref_net = None + + return aten_selu(inplace), ref_net, "aten::selu" if not inplace else "aten::selu_" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("inplace", [True, False]) + def test_silu(self, inplace, ie_device, precision, ir_version): + self._test(*self.create_model(inplace), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_silu.py b/tests/layer_tests/pytorch_tests/test_silu.py new file mode 100644 index 00000000000..4477184dba7 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_silu.py @@ -0,0 +1,32 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestSilu(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self): + import torch + import torch.nn.functional as F + + class aten_silu(torch.nn.Module): + def __init__(self): + super(aten_silu, self).__init__() + + def forward(self, x): + return F.silu(x) + + ref_net = None + + return aten_silu(), ref_net, "aten::silu" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_silu(self, ie_device, precision, ir_version): + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_softmax.py b/tests/layer_tests/pytorch_tests/test_softmax.py new file mode 100644 index 00000000000..eea6b23640d --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_softmax.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestSoftmax(PytorchLayerTest): + def _prepare_input(self): + import 
numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, dim): + import torch + import torch.nn.functional as F + + class aten_softmax(torch.nn.Module): + def __init__(self, dim): + super(aten_softmax, self).__init__() + self.dim = dim + + def forward(self, x): + return F.softmax(x, self.dim) + + ref_net = None + + return aten_softmax(dim), ref_net, "aten::softmax" + + @pytest.mark.parametrize("dim", [-1, 3]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_softmax(self, dim, ie_device, precision, ir_version): + self._test(*self.create_model(dim), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_split.py b/tests/layer_tests/pytorch_tests/test_split.py new file mode 100644 index 00000000000..3328557c2f1 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_split.py @@ -0,0 +1,76 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestSplit(PytorchLayerTest): + def _prepare_input(self): + return (np.random.randn(1, 10, 224, 224).astype(np.float32),) + + def create_model_split_getitem(self): + class aten_split(torch.nn.Module): + def __init__(self, split, axis, getitem): + self.split = split + self.axis = axis + self.getitem = getitem + super(aten_split, self).__init__() + + def forward(self, input): + return torch.split(input, self.split, self.axis)[self.getitem] + + ref_net = None + + return ( + aten_split(self.split_param, self.axis, self.getitem), + ref_net, + "aten::split", + ) + + def create_model_split_listunpack(self): + class aten_split(torch.nn.Module): + def __init__(self, split, axis): + self.split = split + self.axis = axis + super(aten_split, self).__init__() + + def forward(self, input): + # Hardcode to test with ListUnpack + a, b, c, d, e = torch.split(input, self.split, self.axis) + return b + + ref_net = 
None + + return aten_split(self.split_param, self.axis), ref_net, "aten::split" + + # Test case - (split_param, axis), always split into 5 due to hardcoded number of outputs in ListUnpack test. + test_cases = [ + (2, 1), + (45, 2), + (45, -1), + ([2, 2, 2, 2, 2], 1), + ([200, 20, 1, 1, 2], 2), + ([20, 200, 1, 1, 2], -1), + ] + + @pytest.mark.parametrize("params", test_cases) + @pytest.mark.parametrize("getitem", [-5, -2, -1, 0, 1, 4]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_split_getitem(self, params, getitem, ie_device, precision, ir_version): + (self.split_param, self.axis) = params + self.getitem = getitem + self._test(*self.create_model_split_getitem(), ie_device, precision, ir_version) + + @pytest.mark.parametrize("params", test_cases) + @pytest.mark.nightly + @pytest.mark.precommit + def test_split_listunpack(self, params, ie_device, precision, ir_version): + (self.split_param, self.axis) = params + self._test( + *self.create_model_split_listunpack(), ie_device, precision, ir_version + ) diff --git a/tests/layer_tests/pytorch_tests/test_sqrt.py b/tests/layer_tests/pytorch_tests/test_sqrt.py new file mode 100644 index 00000000000..f8071816bed --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_sqrt.py @@ -0,0 +1,29 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestSqrt(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 10).astype(np.float32),) + + def create_model(self): + import torch + + class aten_sqrt(torch.nn.Module): + + def forward(self, x): + return torch.sqrt(x) + + ref_net = None + + return aten_sqrt(), ref_net, "aten::sqrt" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_sqrt(self, ie_device, precision, ir_version): + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_squeeze.py 
b/tests/layer_tests/pytorch_tests/test_squeeze.py new file mode 100644 index 00000000000..b240f2da9c1 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_squeeze.py @@ -0,0 +1,35 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestSqueeze(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 10).astype(np.float32),) + + def create_model(self, dim): + import torch + + class aten_squeeze(torch.nn.Module): + def __init__(self, dim): + super(aten_squeeze, self).__init__() + self.dim = dim + + def forward(self, x): + if self.dim is not None: + return torch.squeeze(x, self.dim) + return torch.squeeze(x) + + ref_net = None + + return aten_squeeze(dim), ref_net, "aten::squeeze" + + @pytest.mark.parametrize("dim,dynamic_shapes", [(-2, True), (0, True), (None, False)]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_squeeze(self, dim, ie_device, precision, ir_version, dynamic_shapes): + self._test(*self.create_model(dim), ie_device, precision, ir_version, dynamic_shapes=dynamic_shapes) diff --git a/tests/layer_tests/pytorch_tests/test_strided_const.py b/tests/layer_tests/pytorch_tests/test_strided_const.py new file mode 100644 index 00000000000..438edbc88e2 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_strided_const.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestStrides(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + x = np.random.randint(0, 10, [1, 3, 2, 2]).astype(np.float32) + return (x,) + + def create_model(self): + import torch + + class strided_const(torch.nn.Module): + def __init__(self): + super(strided_const, self).__init__() + self.const = torch.randint(0, 10, [1, 3, 2, 2], dtype=torch.float32) + self.const = 
self.const.to(memory_format=torch.channels_last) + + def forward(self, x): + return x + self.const + + ref_net = None + + return strided_const(), ref_net, None + + @pytest.mark.nightly + @pytest.mark.precommit + def test_strides(self, ie_device, precision, ir_version): + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_sum.py b/tests/layer_tests/pytorch_tests/test_sum.py new file mode 100644 index 00000000000..040bc813fb0 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_sum.py @@ -0,0 +1,40 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestSum(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, axes, keep_dims): + + import torch + + class aten_sum(torch.nn.Module): + def __init__(self, axes=None, keep_dims=None): + super(aten_sum, self).__init__() + self.axes = axes + self.keep_dims = keep_dims + + def forward(self, x): + if self.axes is None and self.keep_dims is None: + return torch.sum(x) + if self.axes is not None and self.keep_dims is None: + return torch.sum(x, self.axes) + return torch.sum(x, self.axes, self.keep_dims) + + ref_net = None + + return aten_sum(axes, keep_dims), ref_net, "aten::sum" + + @pytest.mark.parametrize("axes,keep_dim", + [(None, None), (None, False), (-1, None), (1, None), ((2, 3), False), ((3, 2), True)]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_sum(self, axes, keep_dim, ie_device, precision, ir_version): + self._test(*self.create_model(axes, keep_dim), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_to.py b/tests/layer_tests/pytorch_tests/test_to.py new file mode 100644 index 00000000000..313ba22d88d --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_to.py @@ -0,0 +1,93 @@ 
+# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from openvino.pyopenvino import OpConversionFailure +from pytorch_layer_test_class import PytorchLayerTest + + +class TestAtenTo(PytorchLayerTest): + def _prepare_input(self): + return (np.random.uniform(low=0.0, high=50.0, size=(3,)),) + + def create_model(self, type, non_blocking=False, copy=False, memory_format=None): + import torch + + class aten_to(torch.nn.Module): + def __init__(self, type, non_blocking=False, copy=False, memory_format=None): + super(aten_to, self).__init__() + self.type = type + self.non_blocking = non_blocking + self.copy = copy + self.memory_format = memory_format + + def forward(self, x): + return x.to(self.type, self.non_blocking, self.copy, self.memory_format) + + ref_net = None + + return aten_to(type, non_blocking, copy, memory_format), ref_net, "aten::to" + + # Cartesian product of input/output types + @pytest.mark.parametrize("input_type", [np.int32, np.float32, np.float64]) + @pytest.mark.parametrize("output_type", + [torch.uint8, torch.int8, torch.int16, torch.int32, torch.float32, torch.int64]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_aten_to(self, input_type, output_type, ie_device, precision, ir_version): + self.input_type = input_type + self._test(*self.create_model(output_type), ie_device, precision, ir_version) + + # Cartesian product of input/output types + @pytest.mark.parametrize("input_type", [np.int32, np.float32, np.float64]) + @pytest.mark.parametrize(("output_type", "non_blocking"), [ + [torch.uint8, True], + [torch.int8, True], + [torch.int16, True], + [torch.int32, True], + [torch.int64, True], + [torch.float32, True], + [torch.float64, True], + [torch.bool, True] + ]) + @pytest.mark.nightly + def test_aten_to_non_blocking_arg(self, input_type, output_type, non_blocking, ie_device, precision, ir_version): + self.input_type = input_type + 
self._test(*self.create_model(output_type, non_blocking=non_blocking), ie_device, precision, ir_version) + + # Cartesian product of input/output types + @pytest.mark.parametrize("input_type", [np.int32, np.float32, np.float64]) + @pytest.mark.parametrize(("output_type", "copy"), [ + [torch.uint8, True], + [torch.int8, True], + [torch.int16, True], + [torch.int32, True], + [torch.int64, True], + [torch.float32, True], + [torch.float64, True], + ]) + @pytest.mark.nightly + def test_aten_to_copy_arg(self, input_type, output_type, copy, ie_device, precision, ir_version): + self.input_type = input_type + self._test(*self.create_model(output_type, copy=copy), ie_device, precision, ir_version) + + # Cartesian product of input/output types + @pytest.mark.parametrize("input_type", [np.int32, np.float32, np.float64]) + @pytest.mark.parametrize(("output_type", "memory_format"), [ + [torch.uint8, 1], + [torch.int8, 1], + [torch.int16, 2], + [torch.int32, 2], + [torch.int64, 3], + [torch.float32, 3], + [torch.float64, 4], + ]) + @pytest.mark.nightly + def test_aten_to_raise_memory_format_arg(self, input_type, output_type, memory_format, ie_device, precision, + ir_version): + self.input_type = input_type + with pytest.raises(OpConversionFailure) as e: + self._test(*self.create_model(output_type, memory_format=memory_format), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_trigonometry.py b/tests/layer_tests/pytorch_tests/test_trigonometry.py new file mode 100644 index 00000000000..9da6c442c99 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_trigonometry.py @@ -0,0 +1,65 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestTrigonom(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 2, 3, 4).astype(np.float32),) + + def create_model(self, op_type): + import torch + ops = 
{ + "cos": torch.cos, + "cos_": torch.cos_, + "sin": torch.sin, + "sin_": torch.sin_, + "tan": torch.tan, + "tan_": torch.tan_, + "cosh": torch.cosh, + "cosh_": torch.cosh_, + "sinh": torch.sinh, + "sinh_": torch.sinh_, + "tanh": torch.tanh, + "tanh_": torch.tanh_, + "acos": torch.acos, + "acos_": torch.acos_, + "asin": torch.asin, + "asin_": torch.asin_, + "atan": torch.atan, + "atan_": torch.atan_, + "acosh": torch.acosh, + "acosh_": torch.acosh_, + "asinh": torch.asinh, + "asinh_": torch.asinh_, + "atanh": torch.atanh, + "atanh_": torch.atanh_, + } + + class aten_op(torch.nn.Module): + def __init__(self, op): + super(aten_op, self).__init__() + self.op = op + + def forward(self, x): + return self.op(x) + + ref_net = None + + return aten_op(ops[op_type]), ref_net, f'aten::{op_type}' + + @pytest.mark.parametrize("op", [ + "acos", "acos_", "acosh", "acosh_", + "asin", "asin_", "asinh", "asinh_", + "atan", "atan_", "atanh", "atanh_", + "cos", "cos_", "cosh", "cosh_", + "sin", "sin_", "sinh", "sinh_", + "tan", "tan_", "tanh", "tanh_"]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_mm(self, op, ie_device, precision, ir_version): + self._test(*self.create_model(op), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_type_as.py b/tests/layer_tests/pytorch_tests/test_type_as.py new file mode 100644 index 00000000000..35fe0732602 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_type_as.py @@ -0,0 +1,33 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestTypeAs(PytorchLayerTest): + def _prepare_input(self, input_dtype=np.float32, cast_dtype=np.float32): + input_data = np.random.randint(127, size=(1, 3, 224, 224)) + return (input_data.astype(input_dtype), input_data.astype(cast_dtype)) + + def create_model(self): + import torch + + class aten_type_as(torch.nn.Module): + + def 
forward(self, x, y): + return x.type_as(y) + + ref_net = None + + return aten_type_as(), ref_net, "aten::type_as" + + @pytest.mark.parametrize("input_dtype", [np.float64, np.float32, np.int64, np.int32, np.int16, np.int8, np.uint8]) + @pytest.mark.parametrize("cast_dtype", [np.float64, np.float32, np.int64, np.int32, np.int16, np.int8, np.uint8]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_type_as(self, input_dtype, cast_dtype, ie_device, precision, ir_version): + self._test(*self.create_model(), ie_device, precision, ir_version, + kwargs_to_prepare_input={"input_dtype": input_dtype, "cast_dtype": cast_dtype}) diff --git a/tests/layer_tests/pytorch_tests/test_unbind.py b/tests/layer_tests/pytorch_tests/test_unbind.py new file mode 100644 index 00000000000..9ef1877b418 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_unbind.py @@ -0,0 +1,35 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestUnbind(PytorchLayerTest): + def _prepare_input(self): + return (np.random.uniform(0, 50, (3, 3, 3, 3)).astype(np.float32),) + + def create_model(self, shape): + import torch + + class aten_unbind(torch.nn.Module): + def __init__(self, dim): + super(aten_unbind, self).__init__() + self.dim = dim + + def forward(self, x): + # Create aten::unbind -> ListUnpack + a, b, c = torch.unbind(x, self.dim) + return b + + ref_net = None + + return aten_unbind(shape), ref_net, "aten::unbind" + + @pytest.mark.parametrize(("dim"), [0, 1, 2, 3]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_unbind(self, dim, ie_device, precision, ir_version): + self._test(*self.create_model(dim), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_unsqueeze.py b/tests/layer_tests/pytorch_tests/test_unsqueeze.py new file mode 100644 index 00000000000..7c40bb3837e --- /dev/null +++ 
b/tests/layer_tests/pytorch_tests/test_unsqueeze.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestUnsqueeze(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(5, 10).astype(np.float32),) + + def create_model(self, inplace=False, dim=0): + import torch + + class aten_unsqueeze(torch.nn.Module): + def __init__(self, dim): + super(aten_unsqueeze, self).__init__() + self.op = torch.unsqueeze + self.dim = dim + + def forward(self, x): + return x, self.op(x, self.dim) + + class aten_unsqueeze_(torch.nn.Module): + def __init__(self, dim): + super(aten_unsqueeze_, self).__init__() + self.dim = dim + + def forward(self, x): + return x, x.unsqueeze_(self.dim) + + ref_net = None + model_class, op = (aten_unsqueeze, "aten::unsqueeze") if not inplace else (aten_unsqueeze_, "aten::unsqueeze_") + + return model_class(dim), ref_net, op + + @pytest.mark.parametrize("inplace", [False, True]) + @pytest.mark.parametrize("dim", [0, 1, -1]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_relu(self, inplace, dim, ie_device, precision, ir_version): + self._test(*self.create_model(inplace, dim), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_upsample.py b/tests/layer_tests/pytorch_tests/test_upsample.py new file mode 100644 index 00000000000..a5ea7df4157 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_upsample.py @@ -0,0 +1,55 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestUpsample2D(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.zeros((1, 3, 224, 224)).astype(np.float32),) + + def create_model(self, size, scale, mode): + import torch + import torch.nn.functional as F + + class 
aten_upsample(torch.nn.Module): + def __init__(self, size, scale, mode): + super().__init__() + self.size = size + self.scale = scale + self.mode = mode + + def forward(self, x): + return F.interpolate(x, self.size, scale_factor=self.scale, mode=self.mode) + + ref_net = None + + return aten_upsample(size, scale, mode), ref_net, F"aten::upsample_{mode}2d" + + @pytest.mark.parametrize("mode,size,scale", [ + ('nearest', 300, None), + ('nearest', 200, None), + ('nearest', (128, 480), None), + ('nearest', None, 2.5,), + ('nearest', None, 0.75), + ('nearest', None, (1.2, 0.8)), + ('bilinear', 300, None), + ('bilinear', 200, None), + ('bilinear', (128, 480), None), + ('bilinear', None, 2.5,), + ('bilinear', None, 0.75), + ('bilinear', None, (1.2, 0.8)), + ('bicubic', 300, None), + ('bicubic', 200, None), + ('bicubic', (128, 480), None), + ('bicubic', None, 2.5,), + ('bicubic', None, 0.75), + ('bicubic', None, (1.2, 0.8))] + ) + @pytest.mark.nightly + @pytest.mark.precommit + def test_upsample(self, mode, size, scale, ie_device, precision, ir_version): + self._test(*self.create_model(size, scale, mode), ie_device, precision, ir_version, trace_model=True) diff --git a/tests/layer_tests/pytorch_tests/test_var.py b/tests/layer_tests/pytorch_tests/test_var.py new file mode 100644 index 00000000000..95025396836 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_var.py @@ -0,0 +1,52 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestVar(PytorchLayerTest): + def _prepare_input(self): + import numpy as np + return (np.random.randn(1, 3, 224, 224).astype(np.float32),) + + def create_model(self, unbiased, dim=None, keepdim=True, two_args_case=True): + import torch + + class aten_var(torch.nn.Module): + def __init__(self, dim, unbiased, keepdim): + super(aten_var, self).__init__() + self.unbiased = unbiased + self.dim = dim + self.keepdim = keepdim + + 
def forward(self, x): + return torch.var(x, self.dim, unbiased=self.unbiased, keepdim=self.keepdim) + + class aten_var2args(torch.nn.Module): + def __init__(self, unbiased): + super(aten_var2args, self).__init__() + self.unbiased = unbiased + + def forward(self, x): + return torch.var(x, self.unbiased) + + ref_net = None + if two_args_case: + return aten_var2args(unbiased), ref_net, "aten::var" + return aten_var(dim, unbiased, keepdim), ref_net, "aten::var" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("unbiased", [True, False]) + def test_var2args(self, unbiased, ie_device, precision, ir_version): + self._test(*self.create_model(unbiased), ie_device, precision, ir_version) + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("unbiased", [False, True]) + @pytest.mark.parametrize("dim", [None, 0, 1, 2, 3, -1, -2, (0, 1), (-1, -2), (0, 1, -1), (0, 1, 2, 3)]) + @pytest.mark.parametrize("keepdim", [True, False]) + def test_var(self, unbiased, dim, keepdim, ie_device, precision, ir_version): + self._test(*self.create_model(unbiased, dim, keepdim, two_args_case=False), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_view.py b/tests/layer_tests/pytorch_tests/test_view.py new file mode 100644 index 00000000000..492c0d1d25b --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_view.py @@ -0,0 +1,61 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +@pytest.mark.parametrize('input_data', [(np.random.randn(2, 3, 2), np.array(2), np.array(6)), + (np.random.randn(4), np.array(2), np.array(2))]) +class TestViewListConstruct(PytorchLayerTest): + + def _prepare_input(self): + return self.input_data + + def create_model(self): + class aten_view_list_construct(torch.nn.Module): + + def forward(self, input_tensor, dim1: int, dim2: int): + return 
input_tensor.view(dim1, dim2) + + ref_net = None + + return aten_view_list_construct(), ref_net, "aten::view" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_view_list_construct(self, ie_device, precision, ir_version, input_data): + self.input_data = input_data + self._test(*self.create_model(), ie_device, precision, ir_version) + + +@pytest.mark.parametrize('input_data', [(np.random.randn(2, 3, 2), 2, 6), + (np.random.randn(4), 2, 2)]) +class TestView(PytorchLayerTest): + + def _prepare_input(self): + return (self.input_data[0],) + + def create_model(self): + class aten_view(torch.nn.Module): + + def __init__(self, input_data) -> None: + super().__init__() + self.dim1 = input_data[1] + self.dim2 = input_data[2] + + def forward(self, input_tensor): + return input_tensor.view(self.dim1, self.dim2) + + ref_net = None + + return aten_view(self.input_data), ref_net, "aten::view" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_view(self, ie_device, precision, ir_version, input_data): + self.input_data = input_data + self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_where.py b/tests/layer_tests/pytorch_tests/test_where.py new file mode 100644 index 00000000000..20d9fa1d19b --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_where.py @@ -0,0 +1,59 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class Testwhere(PytorchLayerTest): + def _prepare_input(self, mask_fill='ones', mask_dtype=bool, return_x_y=False): + input_shape = [2, 10] + mask = np.zeros(input_shape).astype(mask_dtype) + if mask_fill == 'ones': + mask = np.ones(input_shape).astype(mask_dtype) + if mask_fill == 'random': + idx = np.random.choice(10, 5) + mask[:, idx] = 1 + x = np.random.randn(*input_shape) + y = np.random.randn(*input_shape) + return (mask,) if not return_x_y 
else (mask, x, y) + + def create_model(self, as_non_zero): + import torch + + class aten_where(torch.nn.Module): + def forward(self, cond, x, y): + return torch.where(cond, x, y) + + class aten_where_as_nonzero(torch.nn.Module): + def forward(self, cond): + return torch.where(cond) + + ref_net = None + + if as_non_zero: + return aten_where_as_nonzero(), ref_net, "aten::where" + return aten_where(), ref_net, "aten::where" + + @pytest.mark.parametrize( + "mask_fill", ['zeros', 'ones', 'random']) + @pytest.mark.parametrize("mask_dtype", [np.uint8, bool]) # np.float32 incorrectly casted to bool + @pytest.mark.nightly + @pytest.mark.precommit + def test_where(self, mask_fill, mask_dtype, ie_device, precision, ir_version): + self._test(*self.create_model(False), + ie_device, precision, ir_version, + kwargs_to_prepare_input={'mask_fill': mask_fill, 'mask_dtype': mask_dtype, 'return_x_y': True}) + + @pytest.mark.parametrize( + "mask_fill", ['zeros', 'ones', 'random']) + @pytest.mark.parametrize("mask_dtype", [np.uint8, bool]) # np.float32 incorrectly casted to bool + @pytest.mark.nightly + @pytest.mark.precommit + def test_where_as_nonzero(self, mask_fill, mask_dtype, ie_device, precision, ir_version): + self._test(*self.create_model(True), + ie_device, precision, ir_version, + kwargs_to_prepare_input={'mask_fill': mask_fill, 'mask_dtype': mask_dtype, 'return_x_y': False}, + trace_model=True) diff --git a/tests/layer_tests/requirements.txt b/tests/layer_tests/requirements.txt index 1e21b2f438a..6aa2b645d23 100644 --- a/tests/layer_tests/requirements.txt +++ b/tests/layer_tests/requirements.txt @@ -1,4 +1,5 @@ requests>=2.25.1 numpy>=1.19.2 torch +torchvision pytest