diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp index 06accebefd8..04ced45afb2 100644 --- a/src/frontends/pytorch/src/frontend.cpp +++ b/src/frontends/pytorch/src/frontend.cpp @@ -119,7 +119,9 @@ void FrontEnd::normalize(const std::shared_ptr& model) const { } void FrontEnd::add_extension(const std::shared_ptr& extension) { - FRONT_END_NOT_IMPLEMENTED(add_extension); + // Extension loading mechanism is not implemented, any extensions will be ignored + // see CVS-98766 for tracking progress + return; } bool FrontEnd::supported_impl(const std::vector& variants) const { diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py index 993a973c0b5..4472526d51f 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py @@ -8,7 +8,7 @@ import numpy as np import openvino.runtime as ov import pytest import torch -from openvino.runtime import PartialShape, Dimension, Model +from openvino.runtime import PartialShape, Dimension, Model, Type from common.mo_convert_test_class import CommonMOConvertTest @@ -84,7 +84,7 @@ def create_pytorch_nn_module_case1(tmp_dir): sample_input = sample_input1, sample_input2 return pt_model, ref_model, {'input_shape': [PartialShape([-1, 3, -1, -1]), PartialShape([-1, 3, -1, -1])], - 'example_input': sample_input} + 'example_input': sample_input, "use_legacy_frontend": True} def create_pytorch_nn_module_case2(tmp_dir): @@ -96,7 +96,7 @@ def create_pytorch_nn_module_case2(tmp_dir): sample_input = sample_input1, sample_input2 return pt_model, ref_model, {'input_shape': ["[?,3,?,?]", PartialShape([-1, 3, -1, -1])], - 'example_input': sample_input, 'onnx_opset_version': 11} + 'example_input': sample_input, 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_case3(tmp_dir): @@ -107,7 +107,7 @@ def create_pytorch_nn_module_case3(tmp_dir): sample_input2 = torch.zeros(1, 3, 10, 10) sample_input = tuple([sample_input1, sample_input2]) - return pt_model, ref_model, {'input_shape': "[?,3,?,?],[?,3,?,?]", 'example_input': sample_input} + return pt_model, ref_model, {'input_shape': "[?,3,?,?],[?,3,?,?]", 'example_input': sample_input, "use_legacy_frontend": True} def create_pytorch_nn_module_case4(tmp_dir): @@ -117,7 +117,7 @@ def create_pytorch_nn_module_case4(tmp_dir): ref_model = make_ref_pt_model_one_input([1, 3, 10, 10]) - return pt_model, ref_model, {'example_input': sample_input} + return pt_model, ref_model, {'example_input': sample_input, "use_legacy_frontend": True} def create_pytorch_nn_module_case5(tmp_dir): @@ -127,7 +127,7 @@ def create_pytorch_nn_module_case5(tmp_dir): sample_input = torch.zeros(3, 3, 10, 10) return pt_model, ref_model, {'example_input': sample_input, - 'input_shape': inp_shape} + 'input_shape': inp_shape, "use_legacy_frontend": True} def create_pytorch_nn_module_case6(tmp_dir): @@ -135,14 +135,14 @@ def create_pytorch_nn_module_case6(tmp_dir): shape = PartialShape([1, 3, Dimension(2, -1), Dimension(-1, 10)]) ref_model = make_ref_pt_model_one_input(shape) - return pt_model, ref_model, {'input_shape': shape} + return pt_model, ref_model, {'input_shape': shape, "use_legacy_frontend": True} def create_pytorch_nn_module_torch_size(tmp_dir): pt_model = make_pt_model_one_input() ref_model = make_ref_pt_model_one_input([1, 3, 2, 10]) - return pt_model, ref_model, {'input_shape': torch.Size([1, 3, 2, 10])} + 
return pt_model, ref_model, {'input_shape': torch.Size([1, 3, 2, 10]), "use_legacy_frontend": True} def create_pytorch_nn_module_sample_input_int32(tmp_dir): @@ -154,7 +154,7 @@ def create_pytorch_nn_module_sample_input_int32(tmp_dir): ref_model = make_ref_pt_model_one_input(shape, dtype=numpy.int32) return pt_model, ref_model, {'example_input': sample_input, - 'input_shape': shape} + 'input_shape': shape, "use_legacy_frontend": True} def create_pytorch_nn_module_sample_input_int32_two_inputs(tmp_dir): @@ -167,7 +167,7 @@ def create_pytorch_nn_module_sample_input_int32_two_inputs(tmp_dir): ref_model = make_ref_pt_model_two_inputs([PartialShape([-1, 3, -1, -1]), inp_shapes[1]], dtype=np.int32) return pt_model, ref_model, {'input_shape': inp_shapes, - 'example_input': sample_input, 'onnx_opset_version': 11} + 'example_input': sample_input, 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_compare_convert_paths_case1(tmp_dir): @@ -179,7 +179,7 @@ def create_pytorch_nn_module_compare_convert_paths_case1(tmp_dir): torch.onnx.export(pt_model, sample_input, onnx_model_path, opset_version=16) ref_model = convert_model(onnx_model_path) - return pt_model, ref_model, {'example_input': sample_input, 'onnx_opset_version': 16} + return pt_model, ref_model, {'example_input': sample_input, 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_compare_convert_paths_case2(tmp_dir): @@ -193,7 +193,9 @@ def create_pytorch_nn_module_compare_convert_paths_case2(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'example_input': sample_input, 'input_shape': [1, 3, 10, 10], - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, + "use_legacy_frontend": True + } def create_pytorch_nn_module_compare_convert_paths_case3(tmp_dir): @@ -206,7 +208,7 @@ def create_pytorch_nn_module_compare_convert_paths_case3(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'input_shape': [1, 3, 10, 10], - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_compare_convert_paths_case4(tmp_dir): @@ -222,7 +224,7 @@ def create_pytorch_nn_module_compare_convert_paths_case4(tmp_dir): ref_model = convert_model(onnx_model_path) - return pt_model, ref_model, {'example_input': sample_input, 'onnx_opset_version': 16} + return pt_model, ref_model, {'example_input': sample_input, 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_compare_convert_paths_case5(tmp_dir): @@ -240,7 +242,7 @@ def create_pytorch_nn_module_compare_convert_paths_case5(tmp_dir): return pt_model, ref_model, {'example_input': sample_input, 'input_shape': [torch.Size([1, 3, 10, 10]), PartialShape([1, 3, 10, 10])], - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_compare_convert_paths_case6(tmp_dir): @@ -257,7 +259,7 @@ def create_pytorch_nn_module_compare_convert_paths_case6(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'input_shape': [torch.Size([1, 3, 10, 10]), torch.Size([1, 3, 10, 10])], - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_jit_script_module(tmp_dir): @@ -267,7 +269,7 @@ def create_pytorch_jit_script_module(tmp_dir): scripted_model = torch.jit.script(net) model_ref = make_ref_pt_model_two_inputs([1, 3, 5, 5]) - return scripted_model, model_ref, {'input_shape': [PartialShape([1, 3, 5, 5]), 
PartialShape([1, 3, 5, 5])]} + return scripted_model, model_ref, {'input_shape': [PartialShape([1, 3, 5, 5]), PartialShape([1, 3, 5, 5])], "use_legacy_frontend": True} def create_pytorch_jit_script_function(tmp_dir): @@ -279,7 +281,7 @@ def create_pytorch_jit_script_function(tmp_dir): inp_shape = PartialShape([Dimension(1, -1), Dimension(-1, 5), 10]) ref_model = make_ref_pt_model_two_inputs(inp_shape) - return scripted_fn, ref_model, {'input_shape': [inp_shape, inp_shape]} + return scripted_fn, ref_model, {'input_shape': [inp_shape, inp_shape], "use_legacy_frontend": True} def create_pytorch_nn_module_sample_input_numpy(tmp_dir): @@ -293,7 +295,7 @@ def create_pytorch_nn_module_sample_input_numpy(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'example_input': example_inputs, 'input_shape': [1, 3, 10, 10], - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_sample_input_dict(tmp_dir): @@ -306,7 +308,7 @@ def create_pytorch_nn_module_sample_input_dict(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'example_input': example_inputs, - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_sample_input_dict_two_inputs(tmp_dir): @@ -321,7 +323,7 @@ def create_pytorch_nn_module_sample_input_dict_two_inputs(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'example_input': example_inputs, - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_sample_list_of_tensors(tmp_dir): @@ -335,7 +337,7 @@ def create_pytorch_nn_module_sample_list_of_tensors(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'example_input': example_inputs, - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_sample_input_ov_host_tensor(tmp_dir): @@ -350,7 +352,7 @@ def create_pytorch_nn_module_sample_input_ov_host_tensor(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'example_input': sample_input, 'input_shape': [1, 3, 10, 10], - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_sample_input_ov_host_tensor_two_inputs(tmp_dir): @@ -370,7 +372,7 @@ def create_pytorch_nn_module_sample_input_ov_host_tensor_two_inputs(tmp_dir): ref_model = convert_model(onnx_model_path) return pt_model, ref_model, {'example_input': sample_input, - 'onnx_opset_version': 16} + 'onnx_opset_version': 16, "use_legacy_frontend": True} def create_pytorch_nn_module_layout_list(tmp_dir): @@ -384,7 +386,7 @@ def create_pytorch_nn_module_layout_list(tmp_dir): ref_model.inputs[1].node.layout = Layout('nhwc') return pt_model, ref_model, {'input_shape': [shape, shape], 'layout': ['nchw', Layout('nhwc')], - 'onnx_opset_version': 11} + 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_layout_list_case2(tmp_dir): @@ -398,7 +400,7 @@ def create_pytorch_nn_module_layout_list_case2(tmp_dir): ref_model.inputs[1].node.layout = Layout('nhwc') return pt_model, ref_model, {'input_shape': [shape, shape], 'layout': ('nchw', Layout('nhwc')), - 'onnx_opset_version': 11} + 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_mean_list(tmp_dir): @@ -420,7 +422,7 @@ def create_pytorch_nn_module_mean_list(tmp_dir): ref_model = Model([sigm], parameter_list, 
"test") return pt_model, ref_model, {'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], - 'onnx_opset_version': 11} + 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_scale_list(tmp_dir): @@ -442,14 +444,14 @@ def create_pytorch_nn_module_scale_list(tmp_dir): ref_model = Model([sigm], parameter_list, "test") return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'onnx_opset_version': 11} + 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_shapes_list_static(tmp_dir): pt_model = make_pt_model_two_inputs() ref_model = make_ref_pt_model_two_inputs([1, 3, 20, 20]) - return pt_model, ref_model, {'input_shape': [[1, 3, 20, 20], [1, 3, 20, 20]], 'onnx_opset_version': 11} + return pt_model, ref_model, {'input_shape': [[1, 3, 20, 20], [1, 3, 20, 20]], 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_shapes_list_dynamic(tmp_dir): @@ -464,21 +466,118 @@ def create_pytorch_nn_module_shapes_list_dynamic(tmp_dir): parameter_list = [param1, param2] ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, {'input_shape': inp_shapes, 'onnx_opset_version': 11} + return pt_model, ref_model, {'input_shape': inp_shapes, 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_shapes_list_dynamic_single_input(tmp_dir): pt_model = make_pt_model_one_input() inp_shapes = [[Dimension(-1), 3, 20, Dimension(20, -1)]] ref_model = make_ref_pt_model_one_input(inp_shapes[0]) - return pt_model, ref_model, {'input_shape': inp_shapes, 'onnx_opset_version': 11} + return pt_model, ref_model, {'input_shape': inp_shapes, 'onnx_opset_version': 11, "use_legacy_frontend": True} def create_pytorch_nn_module_shapes_list_static_single_input(tmp_dir): pt_model = make_pt_model_one_input() inp_shapes = [[1, 3, 20, 20]] ref_model = make_ref_pt_model_one_input(inp_shapes[0]) - return pt_model, ref_model, {'input_shape': inp_shapes, 'onnx_opset_version': 11} + return pt_model, ref_model, {'input_shape': inp_shapes, 'onnx_opset_version': 11, "use_legacy_frontend": True} + + +def create_pytorch_nn_module_convert_pytorch_frontend1(tmp_dir): + pt_model = make_pt_model_one_input() + shape = [-1, -1, -1, -1] + shape = PartialShape(shape) + param = ov.opset10.parameter(shape) + relu = ov.opset10.relu(param) + sigm = ov.opset10.sigmoid(relu) + + parameter_list = [param] + ref_model = Model([sigm], parameter_list, "test") + return pt_model, ref_model, {"example_input": torch.zeros((1, 3, 10, 10))} + + +def create_pytorch_nn_module_convert_pytorch_frontend2(tmp_dir): + pt_model = make_pt_model_one_input() + shape = [-1, -1, -1, -1] + shape = PartialShape(shape) + param = ov.opset10.parameter(shape) + relu = ov.opset10.relu(param) + sigm = ov.opset10.sigmoid(relu) + + parameter_list = [param] + ref_model = Model([sigm], parameter_list, "test") + ref_model.input(0).get_node().set_element_type(Type.i32) + ref_model.validate_nodes_and_infer_types() + return pt_model, ref_model, {"example_input": torch.zeros((1, 3, 10, 10), dtype=torch.int32)} + + +def create_pytorch_nn_module_convert_pytorch_frontend3(tmp_dir): + pt_model = make_pt_model_two_inputs() + shape = [-1, -1, -1, -1] + shape = PartialShape(shape) + param1 = ov.opset10.parameter(shape) + param2 = ov.opset10.parameter(shape) + param2_convert = ov.opset10.convert_like(param2, param1) + add = ov.opset10.add(param1, param2_convert) + relu = ov.opset10.relu(add) + sigm = 
ov.opset10.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + return pt_model, ref_model, {"example_input": [torch.zeros((1, 3, 10, 10)), torch.ones((1, 3, 10, 10))]} + + +def create_pytorch_nn_module_convert_pytorch_frontend4(tmp_dir): + pt_model = make_pt_model_two_inputs() + shape = [-1, -1, -1, -1] + shape = PartialShape(shape) + param1 = ov.opset10.parameter(shape) + param2 = ov.opset10.parameter(shape) + param2_convert = ov.opset10.convert_like(param2, param1) + add = ov.opset10.add(param1, param2_convert) + relu = ov.opset10.relu(add) + sigm = ov.opset10.sigmoid(relu) + + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + return pt_model, ref_model, {"example_input": {"x": torch.zeros((1, 3, 10, 10)), "y": torch.ones((1, 3, 10, 10))}} + + +def create_pytorch_jit_script_module_convert_pytorch_frontend(tmp_dir): + import torch + + net = make_pt_model_two_inputs() + scripted_model = torch.jit.script(net) + shape = [-1, -1, -1, -1] + shape = PartialShape(shape) + param1 = ov.opset10.parameter(shape) + param2 = ov.opset10.parameter(shape) + param2_convert = ov.opset10.convert_like(param2, param1) + add = ov.opset10.add(param1, param2_convert) + relu = ov.opset10.relu(add) + sigm = ov.opset10.sigmoid(relu) + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + return scripted_model, ref_model, {"example_input": {"x": torch.zeros((1, 3, 10, 10)), "y": torch.ones((1, 3, 10, 10))}} + + +def create_pytorch_jit_trace_module_convert_pytorch_frontend(tmp_dir): + import torch + + net = make_pt_model_two_inputs() + example_input = [torch.zeros((1, 3, 10, 10)), torch.ones((1, 3, 10, 10))] + scripted_model = torch.jit.trace(net, example_input) + shape = [-1, -1, -1, -1] + shape = PartialShape(shape) + param1 = ov.opset10.parameter(shape) + param2 = ov.opset10.parameter(shape) + param2_convert = ov.opset10.convert_like(param2, param1) + add = ov.opset10.add(param1, param2_convert) + relu = ov.opset10.relu(add) + sigm = ov.opset10.sigmoid(relu) + parameter_list = [param1, param2] + ref_model = Model([sigm], parameter_list, "test") + return scripted_model, ref_model, {"example_input": example_input} class TestMoConvertPyTorch(CommonMOConvertTest): @@ -512,7 +611,13 @@ class TestMoConvertPyTorch(CommonMOConvertTest): create_pytorch_nn_module_shapes_list_static, create_pytorch_nn_module_shapes_list_dynamic, create_pytorch_nn_module_shapes_list_dynamic_single_input, - create_pytorch_nn_module_shapes_list_static_single_input + create_pytorch_nn_module_shapes_list_static_single_input, + create_pytorch_nn_module_convert_pytorch_frontend1, + create_pytorch_nn_module_convert_pytorch_frontend2, + create_pytorch_nn_module_convert_pytorch_frontend3, + create_pytorch_nn_module_convert_pytorch_frontend4, + create_pytorch_jit_script_module_convert_pytorch_frontend, + create_pytorch_jit_trace_module_convert_pytorch_frontend ] @pytest.mark.parametrize("create_model", test_data) diff --git a/tools/mo/automation/package_BOM.txt b/tools/mo/automation/package_BOM.txt index 4a0f65edfce..41efe5ff551 100644 --- a/tools/mo/automation/package_BOM.txt +++ b/tools/mo/automation/package_BOM.txt @@ -836,7 +836,9 @@ openvino/tools/mo/moc_frontend/analysis.py openvino/tools/mo/moc_frontend/check_config.py openvino/tools/mo/moc_frontend/extractor.py openvino/tools/mo/moc_frontend/pipeline.py +openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py openvino/tools/mo/moc_frontend/serialize.py 
+openvino/tools/mo/moc_frontend/shape_utils.py openvino/tools/mo/ops/__init__.py openvino/tools/mo/ops/activation.py openvino/tools/mo/ops/activation_ops.py diff --git a/tools/mo/openvino/tools/mo/convert_impl.py b/tools/mo/openvino/tools/mo/convert_impl.py index 36d29c80a7f..5572c551305 100644 --- a/tools/mo/openvino/tools/mo/convert_impl.py +++ b/tools/mo/openvino/tools/mo/convert_impl.py @@ -11,8 +11,6 @@ from collections import OrderedDict from copy import deepcopy from pathlib import Path -import numpy as np - try: import openvino_telemetry as tm except ImportError: @@ -32,8 +30,8 @@ from openvino.tools.mo.utils.cli_parser import check_available_transforms, \ get_advanced_cli_options, get_available_front_ends, get_caffe_cli_options, \ get_common_cli_options, get_freeze_placeholder_values, get_kaldi_cli_options, get_layout_values, \ get_mean_scale_dictionary, get_mxnet_cli_options, get_onnx_cli_options, \ - get_placeholder_shapes, get_tf_cli_options, get_tuple_values, parse_transform, parse_tuple_pairs, \ - get_all_cli_parser, mo_convert_params, get_model_name_from_args, split_shapes, depersonalize + get_placeholder_shapes, get_tf_cli_options, parse_transform, parse_tuple_pairs, \ + mo_convert_params, get_model_name_from_args, depersonalize from openvino.tools.mo.utils.error import Error from openvino.tools.mo.utils.find_ie_version import find_ie_version @@ -44,12 +42,12 @@ from openvino.tools.mo.utils.telemetry_utils import send_params_info, send_frame from openvino.tools.mo.utils.version import get_simplified_mo_version, get_simplified_ie_version, get_version from openvino.tools.mo.utils.versions_checker import check_requirements # pylint: disable=no-name-in-module from openvino.tools.mo.utils.telemetry_utils import get_tid -from openvino.tools.mo.front.common.partial_infer.utils import mo_array from openvino.tools.mo.moc_frontend.check_config import legacy_extensions_used +from openvino.tools.mo.moc_frontend.pytorch_frontend_utils import get_pytorch_decoder, convert_pytorch_via_onnx +from openvino.tools.mo.moc_frontend.shape_utils import parse_input_shapes, get_static_shape # pylint: disable=no-name-in-module,import-error from openvino.frontend import FrontEndManager, OpConversionFailure, ProgressReporterExtension, TelemetryExtension -from openvino.runtime import PartialShape, Dimension from openvino.runtime import get_version as get_rt_version @@ -537,64 +535,6 @@ def emit_ir(graph: Graph, argv: argparse.Namespace, non_default_params: dict): return func -def get_static_shape(shape: [PartialShape, list, tuple], dynamic_value=None): - # Current function returns list with static dimensions with following logic. - # For dynamic dimensions return lower boundaries if they are set, otherwise - # return upper boundaries if they are set. If dimension is fully dynamic then raise error. 
- shape_list = [] - for idx, dim in enumerate(shape): - if isinstance(dim, int): - if dim == -1: - shape_list.append(dynamic_value) - continue - shape_list.append(dim) - elif isinstance(dim, np.int64): - if dim == np.int64(-1): - shape_list.append(dynamic_value) - continue - shape_list.append(dim) - elif isinstance(dim, tuple): - # tuple where (min_length, max_length), the format which uses MO cli parser - assert len(dim) == 2, "Unknown dimension type {}".format(dim) - if dim[0] > 0: - shape_list.append(dim[0]) - elif dim[1] < np.iinfo(np.int64).max: - shape_list.append(dim[1]) - else: - shape_list.append(dynamic_value) - continue - elif isinstance(dim, Dimension): - if dim.is_static or dim.get_min_length() > 0: - shape_list.append(dim.get_min_length()) - elif dim.get_max_length() != -1: - shape_list.append(dim.get_max_length()) - else: - shape_list.append(dynamic_value) - continue - else: - raise Error("Unknown dimension type {}".format(dim)) - - return tuple(shape_list) - - -def get_dynamic_dims(shape: [PartialShape, list, tuple]): - dynamic_dims = [] - for idx, dim in enumerate(shape): - if isinstance(dim, int): - if dim == -1: - dynamic_dims.append(idx) - if isinstance(dim, np.int64): - if dim == np.int64(-1): - dynamic_dims.append(idx) - elif isinstance(dim, tuple): - dynamic_dims.append(idx) - elif isinstance(dim, Dimension): - if dim.get_min_length() == 0 and dim.get_max_length() == -1: - dynamic_dims.append(idx) - - return dynamic_dims - - def check_model_object(argv): model = argv['input_model'] if 'tensorflow' in sys.modules: @@ -648,123 +588,6 @@ def check_model_object(argv): raise Error('Unknown model type: {}'.format(type(model))) -def get_onnx_temp_filename(output_dir): - output_dir = output_dir if output_dir is not None else os.getcwd() - return os.path.normpath(os.path.join(output_dir, "model.onnx")) - - -def to_torch_tensor(tensor): - import torch - from openvino.runtime import Tensor - if isinstance(tensor, torch.Tensor): - return tensor - if isinstance(tensor, np.ndarray): - return torch.tensor(tensor) - if isinstance(tensor, np.ndarray): - return torch.tensor(tensor) - if isinstance(tensor, Tensor): - return torch.tensor(tensor.data) - else: - raise Error("Unexpected type of example_input. Supported types torch.Tensor, np.array or ov.Tensor. " - "Got {}".format(type(tensor))) - - -def convert_pytorch_to_onnx(model, input_shape, opset_version, example_inputs, output_dir): - import io - import torch - - input_names = None - if example_inputs is not None: - inputs = example_inputs - if isinstance(inputs, list): - inputs = [to_torch_tensor(x) for x in inputs] - if len(inputs) == 1: - inputs = torch.unsqueeze(inputs[0], 0) - else: - inputs = inputs - elif isinstance(inputs, tuple): - inputs = [to_torch_tensor(x) for x in inputs] - inputs = tuple(inputs) - elif isinstance(inputs, dict): - for name, tensor in inputs.items(): - assert isinstance(name, str), "Expected dictionary where keys are input names of string type and" \ - " values are tensors. 
Got key of type {}".format(type(name)) - inputs[name] = to_torch_tensor(tensor) - else: - inputs = to_torch_tensor(inputs) - elif input_shape is not None: - inputs = [] - for shape_idx, shape in enumerate(input_shape): - static_shape = get_static_shape(shape, dynamic_value=1) - inputs.append(torch.zeros(static_shape)) - inputs = tuple(inputs) - else: - raise Error("Please provide input_shape or example_input for converting PyTorch model.") - - dynamic_dims_dict = {} - if input_shape is not None and input_names is None: - input_names = ["input_{}".format(idx) for idx in range(len(input_shape))] - for shape_idx, shape in enumerate(input_shape): - dynamic_dims = get_dynamic_dims(shape) - if len(dynamic_dims) > 0: - dynamic_dims_dict[input_names[shape_idx]] = dynamic_dims - additional_params = {} - if len(dynamic_dims_dict) > 0: - additional_params.update({'dynamic_axes': dynamic_dims_dict}) - if input_names is not None and len(input_names) > 0: - additional_params.update({'input_names': input_names}) - - if os.environ.get('SAVE_TO_BYTES_IO_ONNX_MODEL'): - model_onnx = io.BytesIO() - else: - model_onnx = get_onnx_temp_filename(output_dir) - if opset_version is not None: - additional_params.update({'opset_version': opset_version}) - - torch.onnx.export(model, - inputs, - model_onnx, - **additional_params) - return model_onnx - - -def parse_input_shapes(argv): - input_shapes = None - if 'input_shape' in argv and argv['input_shape'] is not None: - shapes = argv['input_shape'] - if isinstance(shapes, str): - shapes = ["[{}]".format(x) for x in split_shapes(shapes)] - if isinstance(shapes, list) or isinstance(shapes, tuple): - input_shapes = [] - is_single_shape = False - for shape in shapes: - if isinstance(shape, str): - _, shape_tuple, _ = get_placeholder_shapes(argv_input=None, argv_input_shape=shape) - input_shapes.append(shape_tuple) - if is_single_shape: - raise Error("Incorrect format of shape.") - elif isinstance(shape, int) or isinstance(shape, np.int64) or isinstance(shape, Dimension): - is_single_shape = True - input_shapes.append(shape) - else: - input_shapes.append(shape) - if is_single_shape: - return [input_shapes] - else: - return input_shapes - elif isinstance(shapes, PartialShape): - return [shapes] - else: - try: - import torch - if isinstance(shapes, torch.Size): - return [shapes] - except ImportError: - raise Error("Unknown type of input shape {}.".format(type(shapes))) - - return input_shapes - - def driver(argv: argparse.Namespace, non_default_params: dict): init_logger(argv.log_level.upper(), argv.silent) @@ -910,21 +733,13 @@ def pack_params_to_args_namespace(args: dict, cli_parser: argparse.ArgumentParse # Non string params like input_model or extensions are ignored by parse_args() # so we need to set them in argv separately - if value is not None and getattr(argv, key) != value: + if value is not None and getattr(argv, key, None) != value: setattr(argv, key, value) else: argv = cli_parser.parse_args() return argv -def remove_tmp_onnx_model(out_dir): - if not os.environ.get('SAVE_TO_BYTES_IO_ONNX_MODEL'): - tmp_onnx_model = get_onnx_temp_filename(out_dir) - - if os.path.exists(tmp_onnx_model): - os.remove(tmp_onnx_model) - - def _convert(cli_parser: argparse.ArgumentParser, framework, args): if 'help' in args and args['help']: show_mo_convert_help() @@ -933,47 +748,29 @@ def _convert(cli_parser: argparse.ArgumentParser, framework, args): telemetry = tm.Telemetry(tid=get_tid(), app_name='Model Optimizer', app_version=get_simplified_mo_version()) 
telemetry.start_session('mo') telemetry.send_event('mo', 'version', get_simplified_mo_version()) + # Initialize logger with 'ERROR' as default level to be able to form nice messages + # before arg parser deliver log_level requested by user + init_logger('ERROR', False) try: model_framework = None inp_model_is_object = input_model_is_object(args) if inp_model_is_object: model_framework = check_model_object(args) - if model_framework == "pytorch" and not os.environ.get('USE_PYTORCH_FRONTEND'): - - opset_version = None - if 'onnx_opset_version' in args and args['onnx_opset_version'] is not None: - opset_version = args['onnx_opset_version'] - + if model_framework == "pytorch": example_inputs = None if 'example_input' in args and args['example_input'] is not None: example_inputs = args['example_input'] + + if 'use_legacy_frontend' in args and args['use_legacy_frontend']: + # TO DO: remove this path, when pytorch frontend productization is finished, CVS-103726 + # prevent invoking legacy mo python onnx frontend for models converted on the fly + args.pop("use_legacy_frontend") + return convert_pytorch_via_onnx(args, example_inputs, cli_parser, framework, _convert) - out_dir = args['output_dir'] if 'output_dir' in args else None - - model_onnx = convert_pytorch_to_onnx(args['input_model'], - parse_input_shapes(args), - opset_version, - example_inputs, - out_dir) - - args['input_model'] = model_onnx - if os.environ.get('SAVE_TO_BYTES_IO_ONNX_MODEL'): - args['use_legacy_frontend'] = True - args['example_input'] = None - args['onnx_opset_version'] = None - - try: - ov_model, argv = _convert(cli_parser, framework, args) - except Exception as e: - remove_tmp_onnx_model(out_dir) - raise e - - remove_tmp_onnx_model(out_dir) - return ov_model, argv - - # Initialize logger with 'ERROR' as default level to be able to form nice messages - # before arg parser deliver log_level requested by user - init_logger('ERROR', False) + decoder, input_signature = get_pytorch_decoder(args['input_model'], parse_input_shapes(args), example_inputs) + args['input_model'] = decoder + args["framework"] = "pytorch" + args["input_signature"] = input_signature argv = pack_params_to_args_namespace(args, cli_parser) @@ -995,9 +792,9 @@ def _convert(cli_parser: argparse.ArgumentParser, framework, args): if argv.framework != model_framework: raise Error("Provided model does not correspond to provided framework. 
The provided " "framework is {}, the model type is {} which is expected to be {} framework.".format( - argv.framework, - type(argv.input_model), - model_framework)) + argv.framework, + type(argv.input_model), + model_framework)) else: argv.framework = model_framework diff --git a/tools/mo/openvino/tools/mo/moc_frontend/pipeline.py b/tools/mo/openvino/tools/mo/moc_frontend/pipeline.py index cd183ecfae4..b9c49c23384 100644 --- a/tools/mo/openvino/tools/mo/moc_frontend/pipeline.py +++ b/tools/mo/openvino/tools/mo/moc_frontend/pipeline.py @@ -19,6 +19,7 @@ from openvino.tools.mo.moc_frontend.analysis import json_model_analysis_dump from openvino.tools.mo.moc_frontend.extractor import fe_user_data_repack from openvino.tools.mo.utils.class_registration import get_enabled_and_disabled_transforms from openvino.tools.mo.utils.error import Error +from openvino.tools.mo.moc_frontend.pytorch_frontend_utils import pytorch_process_after_convert def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd): @@ -213,4 +214,8 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd): input_model.set_partial_shape(place, new_partial_shape) ngraph_function = moc_front_end.convert(input_model) + + # TO DO: remove as part of PyTorch frontend productization CVS-103615 + if argv.framework == "pytorch": + pytorch_process_after_convert(argv, ngraph_function) return ngraph_function diff --git a/tools/mo/openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py b/tools/mo/openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py new file mode 100644 index 00000000000..a86409b9a48 --- /dev/null +++ b/tools/mo/openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py @@ -0,0 +1,214 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import logging as log +import numpy as np +from openvino.tools.mo.moc_frontend.shape_utils import get_static_shape, get_dynamic_dims, parse_input_shapes +from openvino.tools.mo.utils.error import Error +from openvino.runtime import PartialShape, Tensor + +def get_onnx_temp_filename(output_dir): + output_dir = output_dir if output_dir is not None else os.getcwd() + return os.path.normpath(os.path.join(output_dir, "model.onnx")) + + +def remove_tmp_onnx_model(out_dir): + if not os.environ.get('SAVE_TO_BYTES_IO_ONNX_MODEL'): + tmp_onnx_model = get_onnx_temp_filename(out_dir) + + if os.path.exists(tmp_onnx_model): + os.remove(tmp_onnx_model) + + +def get_pytorch_decoder(model, input_shape, example_inputs): + import torch + import inspect + try: + from openvino.frontend.pytorch.decoder import TorchScriptPythonDecoder + except Exception as e: + log.error("PyTorch frontend loading failed") + raise e + inputs = prepare_torch_inputs(example_inputs, input_shape, allow_none=True) + model.eval() + input_signature = None + if isinstance(model, torch.nn.Module) and not isinstance(model, torch.jit._trace.TopLevelTracedModule): + input_signature = list(inspect.signature(model.forward).parameters.keys()) + try: + scripted = torch.jit.script(model) + except Exception as scripting_err: + if example_inputs is not None: + try: + scripted = torch.jit.trace(model, inputs) + except Exception as tracing_e: + log.error('Both traicing and scripting failed') + raise tracing_e + else: + log.error("Model scripting failed") + raise scripting_err + else: + scripted = model + f_model = torch.jit.freeze(scripted) + decoder = TorchScriptPythonDecoder(f_model) + return decoder, input_signature + + +def to_torch_tensor(tensor): + import torch + if 
isinstance(tensor, torch.Tensor): + return tensor + if isinstance(tensor, np.ndarray): + return torch.tensor(tensor) + if isinstance(tensor, np.ndarray): + return torch.tensor(tensor) + if isinstance(tensor, Tensor): + return torch.tensor(tensor.data) + else: + raise Error("Unexpected type of example_input. Supported types torch.Tensor, np.array or ov.Tensor. " + "Got {}".format(type(tensor))) + + +def prepare_torch_inputs(example_inputs, input_shape, allow_none=False): + import torch + inputs = None + if example_inputs is not None: + inputs = example_inputs + if isinstance(inputs, list): + inputs = [to_torch_tensor(x) for x in inputs] + if len(inputs) == 1: + inputs = torch.unsqueeze(inputs[0], 0) + else: + inputs = inputs + elif isinstance(inputs, tuple): + inputs = [to_torch_tensor(x) for x in inputs] + inputs = tuple(inputs) + elif isinstance(inputs, dict): + for name, tensor in inputs.items(): + assert isinstance(name, str), "Expected dictionary where keys are input names of string type and" \ + " values are tensors. Got key of type {}".format(type(name)) + inputs[name] = to_torch_tensor(tensor) + else: + inputs = to_torch_tensor(inputs) + elif input_shape is not None: + inputs = [] + for shape in input_shape: + static_shape = get_static_shape(shape, dynamic_value=1) + inputs.append(torch.zeros(static_shape)) + inputs = tuple(inputs) + else: + if not allow_none: + raise Error("Please provide input_shape or example_input for converting PyTorch model.") + return inputs + + +def convert_pytorch_to_onnx(model, input_shape, opset_version, example_inputs, output_dir): + import io + import torch + + input_names = None + inputs = prepare_torch_inputs(example_inputs, input_shape) + + dynamic_dims_dict = {} + if input_shape is not None and input_names is None: + input_names = ["input_{}".format(idx) for idx in range(len(input_shape))] + for shape_idx, shape in enumerate(input_shape): + dynamic_dims = get_dynamic_dims(shape) + if len(dynamic_dims) > 0: + dynamic_dims_dict[input_names[shape_idx]] = dynamic_dims + additional_params = {} + if len(dynamic_dims_dict) > 0: + additional_params.update({'dynamic_axes': dynamic_dims_dict}) + if input_names is not None and len(input_names) > 0: + additional_params.update({'input_names': input_names}) + + if os.environ.get('SAVE_TO_BYTES_IO_ONNX_MODEL'): + model_onnx = io.BytesIO() + else: + model_onnx = get_onnx_temp_filename(output_dir) + if opset_version is not None: + additional_params.update({'opset_version': opset_version}) + + torch.onnx.export(model, + inputs, + model_onnx, + **additional_params) + return model_onnx + + +def convert_pytorch_via_onnx(args, example_inputs, cli_parser, framework, main_convert): + opset_version = None + if 'onnx_opset_version' in args and args['onnx_opset_version'] is not None: + opset_version = args['onnx_opset_version'] + out_dir = args['output_dir'] if 'output_dir' in args else None + if os.environ.get('SAVE_TO_BYTES_IO_ONNX_MODEL'): + args['use_legacy_frontend'] = True + # these parameters used only on PyTorch to ONNX conversion, + # remove them before passing model to next step + args['example_input'] = None + args['onnx_opset_version'] = None + try: + + model_onnx = convert_pytorch_to_onnx(args['input_model'], + parse_input_shapes(args), + opset_version, + example_inputs, + out_dir) + + args['input_model'] = model_onnx + + ov_model, argv = main_convert(cli_parser, framework, args) + except Exception as e: + raise e + finally: + remove_tmp_onnx_model(out_dir) + return ov_model, argv + + +def 
pytorch_process_after_convert(argv, ov_model): + import torch + from openvino.frontend.pytorch.decoder import pt_to_ov_type_map + + def add_tensor_name(input_desc, input_name): + tensor = input_desc.get_tensor() + input_names = tensor.names + input_names.update(input_name) + tensor.set_names(input_names) + + example_inputs = getattr(argv, "example_input", None) + input_signature = getattr(argv, "input_signature", None) + provide_shapes = argv.input_shape is not None + if example_inputs is not None: + inputs = [example_inputs] if isinstance(example_inputs, torch.Tensor) else example_inputs + if input_signature is not None and isinstance(inputs, dict): + ordered_inputs = [] + upd_sign = [] + for key in input_signature: + if key not in inputs: + continue + ordered_inputs.append(inputs[key]) + upd_sign.append(key) + inputs = ordered_inputs + input_signature = upd_sign + for idx, input_tensor in enumerate(ov_model.inputs): + if isinstance(inputs, (list, tuple)): + input_data = inputs[idx] + else: + input_data = list(inputs.values())[idx] + pt_dtype = input_data.dtype if isinstance(input_data, torch.Tensor) else type(input_data) + dtype = pt_to_ov_type_map.get(str(pt_dtype)) + if dtype is None: + raise Error("Unknown input dtype {}".format(pt_dtype)) + + input_tensor.get_node().set_element_type(dtype) + if input_signature is not None: + add_tensor_name(input_tensor, input_signature[idx]) + if not provide_shapes: + # prevent dynamic rank issue + shape = [-1] * len(input_data.shape) + input_tensor.get_node().set_partial_shape(PartialShape(shape)) + + ov_model.validate_nodes_and_infer_types() + elif input_signature is not None: + for idx, input_tensor in enumerate(ov_model.inputs): + add_tensor_name(input_tensor, input_signature[idx]) + return ov_model diff --git a/tools/mo/openvino/tools/mo/moc_frontend/shape_utils.py b/tools/mo/openvino/tools/mo/moc_frontend/shape_utils.py new file mode 100644 index 00000000000..2b2d336a1da --- /dev/null +++ b/tools/mo/openvino/tools/mo/moc_frontend/shape_utils.py @@ -0,0 +1,102 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +from openvino.runtime import PartialShape, Dimension +from openvino.tools.mo.utils.error import Error +from openvino.tools.mo.utils.cli_parser import get_placeholder_shapes, split_shapes + + +def get_static_shape(shape: [PartialShape, list, tuple], dynamic_value=None): + # Current function returns list with static dimensions with following logic. + # For dynamic dimensions return lower boundaries if they are set, otherwise + # return upper boundaries if they are set. If dimension is fully dynamic then dynamic_value is substituted. 
+ shape_list = [] + for idx, dim in enumerate(shape): + if isinstance(dim, int): + if dim == -1: + shape_list.append(dynamic_value) + continue + shape_list.append(dim) + elif isinstance(dim, np.int64): + if dim == np.int64(-1): + shape_list.append(dynamic_value) + continue + shape_list.append(dim) + elif isinstance(dim, tuple): + # tuple where (min_length, max_length), the format which uses MO cli parser + assert len(dim) == 2, "Unknown dimension type {}".format(dim) + if dim[0] > 0: + shape_list.append(dim[0]) + elif dim[1] < np.iinfo(np.int64).max: + shape_list.append(dim[1]) + else: + shape_list.append(dynamic_value) + continue + elif isinstance(dim, Dimension): + if dim.is_static or dim.get_min_length() > 0: + shape_list.append(dim.get_min_length()) + elif dim.get_max_length() != -1: + shape_list.append(dim.get_max_length()) + else: + shape_list.append(dynamic_value) + continue + else: + raise Error("Unknown dimension type {}".format(dim)) + + return tuple(shape_list) + + +def get_dynamic_dims(shape: [PartialShape, list, tuple]): + dynamic_dims = [] + for idx, dim in enumerate(shape): + if isinstance(dim, int): + if dim == -1: + dynamic_dims.append(idx) + if isinstance(dim, np.int64): + if dim == np.int64(-1): + dynamic_dims.append(idx) + elif isinstance(dim, tuple): + dynamic_dims.append(idx) + elif isinstance(dim, Dimension): + if dim.get_min_length() == 0 and dim.get_max_length() == -1: + dynamic_dims.append(idx) + + return dynamic_dims + + +def parse_input_shapes(argv): + input_shapes = None + if 'input_shape' in argv and argv['input_shape'] is not None: + shapes = argv['input_shape'] + if isinstance(shapes, str): + shapes = ["[{}]".format(x) for x in split_shapes(shapes)] + if isinstance(shapes, list) or isinstance(shapes, tuple): + input_shapes = [] + is_single_shape = False + for shape in shapes: + if isinstance(shape, str): + _, shape_tuple, _ = get_placeholder_shapes(argv_input=None, argv_input_shape=shape) + input_shapes.append(shape_tuple) + if is_single_shape: + raise Error("Incorrect format of shape.") + elif isinstance(shape, int) or isinstance(shape, np.int64) or isinstance(shape, Dimension): + is_single_shape = True + input_shapes.append(shape) + else: + input_shapes.append(shape) + if is_single_shape: + return [input_shapes] + else: + return input_shapes + elif isinstance(shapes, PartialShape): + return [shapes] + else: + try: + import torch + if isinstance(shapes, torch.Size): + return [shapes] + except ImportError: + raise Error("Unknown type of input shape {}.".format(type(shapes))) + + return input_shapes \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/utils/cli_parser.py b/tools/mo/openvino/tools/mo/utils/cli_parser.py index 13fec9fca13..a0bc48771de 100644 --- a/tools/mo/openvino/tools/mo/utils/cli_parser.py +++ b/tools/mo/openvino/tools/mo/utils/cli_parser.py @@ -704,7 +704,11 @@ mo_convert_params = { 'example_input': ParamDescription('Sample of model input in original framework. ' 'For PyTorch it can be torch.Tensor.', '', '', None), 'onnx_opset_version': ParamDescription('Version of ONNX opset that is used for converting from PyTorch to ONNX.', - '', '', None) + '', '', None), + 'input_signature': ParamDescription('PyTorch model forward method input signature, ' + 'will be detected automatically for torch.nn.Module based model instances, ' + 'for scripted models it may need to be set manually. Example of usage: for forward method defined as' + ' def forward(self, x, y), it will be ["x", "y"]', '', '', None) } }
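
Usage note (illustrative, not part of the patch): a minimal sketch of how the two PyTorch conversion paths touched by this change could be exercised through the MO Python API used in the tests above (openvino.tools.mo.convert_model). The small torch.nn.Sequential model below is a stand-in, not one of the test models.

import torch
from openvino.tools.mo import convert_model

model = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Sigmoid())

# Default path: the model is handled by the new PyTorch frontend via
# get_pytorch_decoder(); example_input drives tracing (when scripting fails)
# and input type/name deduction in pytorch_process_after_convert().
ov_model = convert_model(model, example_input=torch.zeros((1, 3, 10, 10)))

# Fallback kept behind use_legacy_frontend: the model is first exported to a
# temporary ONNX file by convert_pytorch_via_onnx() and then converted by the
# ONNX frontend; onnx_opset_version applies only to this path.
ov_model_onnx = convert_model(model,
                              example_input=torch.zeros((1, 3, 10, 10)),
                              use_legacy_frontend=True,
                              onnx_opset_version=16)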